mirror of https://github.com/Atmosphere-NX/Atmosphere.git
synced 2025-10-01 07:07:08 +02:00

commit bb1cdf2cdd
Merge branch 'master' of https://github.com/Atmosphere-NX/Atmosphere into logmanager
Makefile
@@ -79,7 +79,7 @@ dist-no-debug: all
	cp sept/sept-secondary/sept-secondary_01.enc atmosphere-$(AMSVER)/sept/sept-secondary_01.enc
	cp sept/sept-secondary/sept-secondary_dev_00.enc atmosphere-$(AMSVER)/sept/sept-secondary_dev_00.enc
	cp sept/sept-secondary/sept-secondary_dev_01.enc atmosphere-$(AMSVER)/sept/sept-secondary_dev_01.enc
-	cp config_templates/BCT.ini atmosphere-$(AMSVER)/atmosphere/config/BCT.ini
+	cp config_templates/BCT.ini atmosphere-$(AMSVER)/atmosphere/config_templates/BCT.ini
	cp config_templates/override_config.ini atmosphere-$(AMSVER)/atmosphere/config_templates/override_config.ini
	cp config_templates/system_settings.ini atmosphere-$(AMSVER)/atmosphere/config_templates/system_settings.ini
	cp config_templates/exosphere.ini atmosphere-$(AMSVER)/atmosphere/config_templates/exosphere.ini
@@ -35,6 +35,17 @@
# mmc space, encrypted to prevent detection. This backup can be used
# to prevent unrecoverable edits in emergencies.
+
+# Key: log_port, default: 0.
+# Desc: Controls what uart port exosphere will set up for logging.
+# NOTE: 0 = UART-A, 1 = UART-B, 2 = UART-C, 3 = UART-D
+
+# Key: log_baud_rate, default: 115200
+# Desc: Controls the baud rate exosphere will set up for logging.
+# NOTE: 0 is treated as equivalent to 115200.
+
+# Key: log_inverted, default: 0.
+# Desc: Controls whether the logging uart port is inverted.

[exosphere]
debugmode=1
debugmode_user=0
@@ -43,3 +54,6 @@ enable_user_pmu_access=0
blank_prodinfo_sysmmc=0
blank_prodinfo_emummc=0
allow_writing_to_cal_sysmmc=0
+log_port=0
+log_baud_rate=115200
+log_inverted=0
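For orientation, the sketch below shows roughly how the bootloader is expected to translate these three keys into the exosphere configuration, mirroring the defaulting and validation rules in the nxboot changes later in this commit. The standalone struct and function here are illustrative only, not part of the commit.

    #include <stdint.h>

    #define EXOSPHERE_LOG_FLAG_INVERTED (1 << 0u)

    typedef struct {
        uint8_t  log_port;       /* 0 = UART-A ... 3 = UART-D */
        uint8_t  log_flags;      /* bit 0: inverted */
        uint32_t log_baud_rate;  /* 0 is treated as 115200 */
    } exo_log_cfg_t;

    /* Illustrative only: apply parsed INI values with the same fallbacks nxboot uses. */
    static void apply_log_config(exo_log_cfg_t *cfg, int port, int baud, int inverted) {
        cfg->log_port      = (0 <= port && port < 4) ? (uint8_t)port : 0;
        cfg->log_baud_rate = (baud > 0) ? (uint32_t)baud : 115200;
        cfg->log_flags     = inverted ? EXOSPHERE_LOG_FLAG_INVERTED : 0;
    }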
@@ -38,6 +38,10 @@
; Please note this setting may be removed in a
; future release of Atmosphere.
; enable_deprecated_hid_mitm = u8!0x0
+; Controls whether am sees system settings "DebugModeFlag" as
+; enabled or disabled.
+; 0 = Disabled (not debug mode), 1 = Enabled (debug mode)
+; enable_am_debug_mode = u8!0x0
[hbloader]
; Controls the size of the homebrew heap when running as applet.
; If set to zero, all available applet memory is used as heap.
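The comment block above documents the new `atmosphere!enable_am_debug_mode` firmware-debug setting. As a rough illustration of how a stratosphere component could query such a key, here is a minimal sketch; the helper `ams::settings::fwdbg::GetSettingsItemValue` and its signature are an assumption on my part, not something taken from this commit.

    #include <stratosphere.hpp>

    /* Illustrative only: read atmosphere!enable_am_debug_mode as a u8 flag.        */
    /* Assumes a libstratosphere settings helper with roughly this shape exists.    */
    bool IsAmDebugModeEnabled() {
        uint8_t value = 0;
        ams::settings::fwdbg::GetSettingsItemValue(&value, sizeof(value), "atmosphere", "enable_am_debug_mode");
        return value != 0;
    }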
@@ -1,4 +1,29 @@
# Changelog
## 0.16.2
+ Atmosphère release zips no longer bundle BCT.ini, instead relying on defaults in code.
+ This means atmosphere updates should no longer overwrite any user configuration at all.
+ If you wish to modify BCT.ini config, copy the template from /config_templates/ as with other configuration.
+ `pgl` and `creport` were further updated to reflect differences in official behavior in 11.0.0.
+ An issue was fixed that caused creport to be launched multiple times on process crash.
+ This fixes the "duplicate reports" issue that sometimes plagued people.
+ A new system setting (`atmosphere!enable_am_debug_mode`) configuring am to use debug mode.
+ If you are not a developer or don't see a clear use for this, leave it configured to the default (off).
+ Reboot to payload NRO was updated to fix support with certain payloads.
+ Support was fixed for atmosphere's extension to support homebrew use of new (8.0.0+) kernel mappings.
+ In particular, when running tracing debug builds of `mesosphère`, hbloader now has access to the kernel trace buffer.
+ Several issues were fixed, and usability and stability were improved.
## 0.16.1
+ Support was added for 11.0.1.
+ `mesosphère` was updated to reflect the latest official kernel behavior.
+ A new svc::InfoType added in 11.0.0 was implemented (it wasn't discovered before 0.16.0 released).
+ The new Control Flow Integrity (CFI) logic added in 11.0.0 kernel was implemented.
+ `fs` logic was refactored and cleaned up to reflect some newer sysmodule behavioral and structural changes.
+ `exosphère` was updated to allow dynamic control of what uart port is used for logging.
+ This can be controlled by editing the `log_port`, `log_baud_rate`, and `log_inverted` fields in `exosphere.ini`.
+ `mesosphère` was updated to improve debugging capabilities.
+ This is still a work in progress, but developers may be interested.
+ A bug was fixed that caused `fatal` to fatal error if the fatal process was already being debugged.
+ Several issues were fixed, and usability and stability were improved.
## 0.16.0
+ Support was added for 11.0.0.
+ `exosphère` was updated to reflect the latest official secure monitor behavior.
@@ -6,7 +6,7 @@
[subrepo]
	remote = https://github.com/m4xw/emuMMC
	branch = develop
-	commit = 6fd752dad13c02d482a5d89c24f4e8ce8b9d8f56
-	parent = 4f1a4e74992aa84b8ab84bccacc720e2d5823791
+	commit = 5eed18eb527bbaa63aee5323c26de5b0cca6d28e
+	parent = 021b29d2dbc8ed0469bc822393e58c9f0d174d57
	method = rebase
	cmdver = 0.4.1
@@ -2,7 +2,7 @@
*A SDMMC driver replacement for Nintendo's Filesystem Services, by **m4xw***

### Supported Horizon Versions
-**1.0.0 - 10.0.0**
+**1.0.0 - 11.0.0**

## Features
* Arbitrary SDMMC backend selection
@@ -71,6 +71,7 @@ static const fs_offsets_t GET_OFFSET_STRUCT_NAME(vers) = { \
    .nand_mutex = FS_OFFSET##vers##_NAND_MUTEX, \
    .active_partition = FS_OFFSET##vers##_ACTIVE_PARTITION, \
    .sdmmc_das_handle = FS_OFFSET##vers##_SDMMC_DAS_HANDLE, \
+   .sdmmc_accessor_controller_open = FS_OFFSET##vers##_SDMMC_WRAPPER_CONTROLLER_OPEN, \
    .sdmmc_accessor_controller_close = FS_OFFSET##vers##_SDMMC_WRAPPER_CONTROLLER_CLOSE, \
    .sd_das_init = FS_OFFSET##vers##_SD_DAS_INIT, \
    .nintendo_paths = FS_OFFSET##vers##_NINTENDO_PATHS, \

@@ -41,6 +41,7 @@ typedef struct {
    // Misc funcs
    uintptr_t lock_mutex;
    uintptr_t unlock_mutex;
+   uintptr_t sdmmc_accessor_controller_open;
    uintptr_t sdmmc_accessor_controller_close;
    // Misc data
    uintptr_t sd_mutex;
@@ -35,7 +35,7 @@ typedef struct sdmmc_accessor_vt
    void *dtor;
    void *map_device_addr_space;
    void *unmap_device_addr_space;
-   void *controller_open;
+   uint64_t (*sdmmc_accessor_controller_open)(void *);
    uint64_t (*sdmmc_accessor_controller_close)(void *);
    uint64_t (*read_write)(void *, uint64_t, uint64_t, void *, uint64_t, uint64_t);
    // More not included because we don't use it.
@@ -34,6 +34,7 @@
#define FS_OFFSET_100_LOCK_MUTEX 0x2884
#define FS_OFFSET_100_UNLOCK_MUTEX 0x28F0

+#define FS_OFFSET_100_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_100_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x6A8AC

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_1000_LOCK_MUTEX 0x28910
#define FS_OFFSET_1000_UNLOCK_MUTEX 0x28960

+#define FS_OFFSET_1000_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_1000_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x1422E0

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_1000_EXFAT_LOCK_MUTEX 0x28910
#define FS_OFFSET_1000_EXFAT_UNLOCK_MUTEX 0x28960

+#define FS_OFFSET_1000_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_1000_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x1422E0

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_1020_LOCK_MUTEX 0x28910
#define FS_OFFSET_1020_UNLOCK_MUTEX 0x28960

+#define FS_OFFSET_1020_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_1020_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x142740

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_1020_EXFAT_LOCK_MUTEX 0x28910
#define FS_OFFSET_1020_EXFAT_UNLOCK_MUTEX 0x28960

+#define FS_OFFSET_1020_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_1020_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x142740

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_1100_LOCK_MUTEX 0x28FF0
#define FS_OFFSET_1100_UNLOCK_MUTEX 0x29040

+#define FS_OFFSET_1100_SDMMC_WRAPPER_CONTROLLER_OPEN 0x14B840
#define FS_OFFSET_1100_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x14B8F0

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_1100_EXFAT_LOCK_MUTEX 0x28FF0
#define FS_OFFSET_1100_EXFAT_UNLOCK_MUTEX 0x29040

+#define FS_OFFSET_1100_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x14B840
#define FS_OFFSET_1100_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x14B8F0

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_200_LOCK_MUTEX 0x3264
#define FS_OFFSET_200_UNLOCK_MUTEX 0x32D0

+#define FS_OFFSET_200_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_200_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x733F4

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_200_EXFAT_LOCK_MUTEX 0x3264
#define FS_OFFSET_200_EXFAT_UNLOCK_MUTEX 0x32D0

+#define FS_OFFSET_200_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_200_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x733F4

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_210_LOCK_MUTEX 0x3264
#define FS_OFFSET_210_UNLOCK_MUTEX 0x32D0

+#define FS_OFFSET_210_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_210_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x737D4

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_210_EXFAT_LOCK_MUTEX 0x3264
#define FS_OFFSET_210_EXFAT_UNLOCK_MUTEX 0x32D0

+#define FS_OFFSET_210_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_210_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x737D4

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_300_LOCK_MUTEX 0x35CC
#define FS_OFFSET_300_UNLOCK_MUTEX 0x3638

+#define FS_OFFSET_300_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_300_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x8A270

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_300_EXFAT_LOCK_MUTEX 0x35CC
#define FS_OFFSET_300_EXFAT_UNLOCK_MUTEX 0x3638

+#define FS_OFFSET_300_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_300_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x8A270

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_301_LOCK_MUTEX 0x3638
#define FS_OFFSET_301_UNLOCK_MUTEX 0x36A4

+#define FS_OFFSET_301_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_301_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x8A32C

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_301_EXFAT_LOCK_MUTEX 0x3638
#define FS_OFFSET_301_EXFAT_UNLOCK_MUTEX 0x36A4

+#define FS_OFFSET_301_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_301_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x8A32C

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_400_LOCK_MUTEX 0x39A0
#define FS_OFFSET_400_UNLOCK_MUTEX 0x3A0C

+#define FS_OFFSET_400_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_400_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x9DB48

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_400_EXFAT_LOCK_MUTEX 0x39A0
#define FS_OFFSET_400_EXFAT_UNLOCK_MUTEX 0x3A0C

+#define FS_OFFSET_400_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_400_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x9DB48

// Misc Data

@@ -34,7 +34,8 @@
#define FS_OFFSET_410_LOCK_MUTEX 0x39A0
#define FS_OFFSET_410_UNLOCK_MUTEX 0x3A0C

-#define FS_OFFSET_410_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x9DBAC
+#define FS_OFFSET_410_SDMMC_WRAPPER_CONTROLLER_OPEN 0
+#define FS_OFFSET_410_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x9DBAC

// Misc Data
#define FS_OFFSET_410_SD_MUTEX 0xE80268

@@ -34,6 +34,7 @@
#define FS_OFFSET_410_EXFAT_LOCK_MUTEX 0x39A0
#define FS_OFFSET_410_EXFAT_UNLOCK_MUTEX 0x3A0C

+#define FS_OFFSET_410_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_410_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x9DBAC

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_500_LOCK_MUTEX 0x4080
#define FS_OFFSET_500_UNLOCK_MUTEX 0x40D0

+#define FS_OFFSET_500_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_500_SDMMC_WRAPPER_CONTROLLER_CLOSE 0xC9380

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_500_EXFAT_LOCK_MUTEX 0x4080
#define FS_OFFSET_500_EXFAT_UNLOCK_MUTEX 0x40D0

+#define FS_OFFSET_500_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_500_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0xC9380

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_510_LOCK_MUTEX 0x4080
#define FS_OFFSET_510_UNLOCK_MUTEX 0x40D0

+#define FS_OFFSET_510_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_510_SDMMC_WRAPPER_CONTROLLER_CLOSE 0xC9750

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_510_EXFAT_LOCK_MUTEX 0x4080
#define FS_OFFSET_510_EXFAT_UNLOCK_MUTEX 0x40D0

+#define FS_OFFSET_510_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_510_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0xC9750

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_600_LOCK_MUTEX 0x1412C0
#define FS_OFFSET_600_UNLOCK_MUTEX 0x141310

+#define FS_OFFSET_600_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_600_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x148500

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_600_EXFAT_LOCK_MUTEX 0x14C9C0
#define FS_OFFSET_600_EXFAT_UNLOCK_MUTEX 0x14CA10

+#define FS_OFFSET_600_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_600_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x153C00

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_700_LOCK_MUTEX 0x148A90
#define FS_OFFSET_700_UNLOCK_MUTEX 0x148AE0

+#define FS_OFFSET_700_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_700_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x14FD50

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_700_EXFAT_LOCK_MUTEX 0x154040
#define FS_OFFSET_700_EXFAT_UNLOCK_MUTEX 0x154090

+#define FS_OFFSET_700_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_700_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x15B300

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_800_LOCK_MUTEX 0x14B6D0
#define FS_OFFSET_800_UNLOCK_MUTEX 0x14B720

+#define FS_OFFSET_800_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_800_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x1529E0

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_800_EXFAT_LOCK_MUTEX 0x156C80
#define FS_OFFSET_800_EXFAT_UNLOCK_MUTEX 0x156CD0

+#define FS_OFFSET_800_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_800_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x15DF90

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_810_LOCK_MUTEX 0x14B6D0
#define FS_OFFSET_810_UNLOCK_MUTEX 0x14B720

+#define FS_OFFSET_810_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_810_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x1529E0

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_810_EXFAT_LOCK_MUTEX 0x156C80
#define FS_OFFSET_810_EXFAT_UNLOCK_MUTEX 0x156CD0

+#define FS_OFFSET_810_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_810_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x15DF90

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_900_LOCK_MUTEX 0x25280
#define FS_OFFSET_900_UNLOCK_MUTEX 0x252D0

+#define FS_OFFSET_900_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_900_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x137740

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_900_EXFAT_LOCK_MUTEX 0x25280
#define FS_OFFSET_900_EXFAT_UNLOCK_MUTEX 0x252D0

+#define FS_OFFSET_900_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_900_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x137740

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_910_LOCK_MUTEX 0x25280
#define FS_OFFSET_910_UNLOCK_MUTEX 0x252D0

+#define FS_OFFSET_910_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_910_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x137750

// Misc Data

@@ -34,6 +34,7 @@
#define FS_OFFSET_910_EXFAT_LOCK_MUTEX 0x25280
#define FS_OFFSET_910_EXFAT_UNLOCK_MUTEX 0x252D0

+#define FS_OFFSET_910_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0
#define FS_OFFSET_910_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x137750

// Misc Data
@@ -271,7 +271,7 @@ int sdmmc_nand_get_active_partition_index()

static uint64_t emummc_read_write_inner(void *buf, unsigned int sector, unsigned int num_sectors, bool is_write)
{
-    if ((emuMMC_ctx.EMMC_Type == emuMMC_SD))
+    if ((emuMMC_ctx.EMMC_Type == emuMMC_SD_Raw))
    {
        // raw partition sector offset: emuMMC_ctx.EMMC_StoragePartitionOffset.
        sector += emuMMC_ctx.EMMC_StoragePartitionOffset;

@@ -318,6 +318,31 @@ static uint64_t emummc_read_write_inner(void *buf, unsigned int sector, unsigned
    return res;
}

+// Controller open wrapper
+uint64_t sdmmc_wrapper_controller_open(int mmc_id)
+{
+    uint64_t result;
+    sdmmc_accessor_t *_this;
+    _this = sdmmc_accessor_get(mmc_id);
+
+    if (_this != NULL)
+    {
+        // Lock eMMC xfer while SD card is being initialized by FS.
+        if (_this == sdmmc_accessor_get(FS_SDMMC_SD))
+            mutex_lock_handler(FS_SDMMC_EMMC); // Recursive Mutex, handler will lock SD as well if custom_driver
+
+        result = _this->vtab->sdmmc_accessor_controller_open(_this);
+
+        // Unlock eMMC.
+        if (_this == sdmmc_accessor_get(FS_SDMMC_SD))
+            mutex_unlock_handler(FS_SDMMC_EMMC);
+
+        return result;
+    }
+
+    fatal_abort(Fatal_OpenAccessor);
+}
+
// Controller close wrapper
uint64_t sdmmc_wrapper_controller_close(int mmc_id)
{

@@ -389,7 +414,7 @@ uint64_t sdmmc_wrapper_read(void *buf, uint64_t bufSize, int mmc_id, unsigned in
    if (first_sd_read)
    {
        first_sd_read = false;
-        if (emuMMC_ctx.EMMC_Type == emuMMC_SD)
+        if (emuMMC_ctx.EMMC_Type == emuMMC_SD_Raw)
        {
            // Because some SD cards have issues with emuMMC's driver
            // we currently swap to FS's driver after first SD read

@@ -400,7 +425,7 @@ uint64_t sdmmc_wrapper_read(void *buf, uint64_t bufSize, int mmc_id, unsigned in
        }
    }

-    // Call hekates driver.
+    // Call hekate's driver.
    if (sdmmc_storage_read(&sd_storage, sector, num_sectors, buf))
    {
        mutex_unlock_handler(mmc_id);
@@ -52,6 +52,7 @@ void mutex_lock_handler(int mmc_id);
void mutex_unlock_handler(int mmc_id);

// Hooks
+uint64_t sdmmc_wrapper_controller_open(int mmc_id);
uint64_t sdmmc_wrapper_controller_close(int mmc_id);
uint64_t sdmmc_wrapper_read(void *buf, uint64_t bufSize, int mmc_id, unsigned int sector, unsigned int num_sectors);
uint64_t sdmmc_wrapper_write(int mmc_id, unsigned int sector, unsigned int num_sectors, void *buf, uint64_t bufSize);

@@ -30,7 +30,7 @@ enum emuMMC_Type
    emuMMC_EMMC = 0,

    // SD Device raw
-    emuMMC_SD,
+    emuMMC_SD_Raw,
    // SD Device File
    emuMMC_SD_File,
@@ -92,7 +92,7 @@ volatile __attribute__((aligned(0x1000))) emuMMC_ctx_t emuMMC_ctx = {
    .fs_ver = FS_VER_MAX,

    // SD Default Metadata
-    .SD_Type = emuMMC_SD,
+    .SD_Type = emuMMC_SD_Raw,
    .SD_StoragePartitionOffset = 0,

    // EMMC Default Metadata

@@ -285,6 +285,9 @@ void setup_hooks(void)
    INJECT_HOOK(fs_offsets->sdmmc_wrapper_read, sdmmc_wrapper_read);
    // sdmmc_wrapper_write hook
    INJECT_HOOK(fs_offsets->sdmmc_wrapper_write, sdmmc_wrapper_write);
+    // sdmmc_wrapper_controller_open hook
+    if (fs_offsets->sdmmc_accessor_controller_open)
+        INJECT_HOOK(fs_offsets->sdmmc_accessor_controller_open, sdmmc_wrapper_controller_open);
    // sdmmc_wrapper_controller_close hook
    INJECT_HOOK(fs_offsets->sdmmc_accessor_controller_close, sdmmc_wrapper_controller_close);

@@ -346,7 +349,7 @@ static void load_emummc_ctx(void)
    emuMMC_ctx.id = config.base_cfg.id;
    emuMMC_ctx.EMMC_Type = (enum emuMMC_Type)config.base_cfg.type;
    emuMMC_ctx.fs_ver = (enum FS_VER)config.base_cfg.fs_version;
-    if (emuMMC_ctx.EMMC_Type == emuMMC_SD)
+    if (emuMMC_ctx.EMMC_Type == emuMMC_SD_Raw)
    {
        emuMMC_ctx.EMMC_StoragePartitionOffset = config.partition_cfg.start_sector;
    }
@@ -72,6 +72,9 @@ namespace ams::secmon::boot {
        /* care of it here. Perhaps we should read the number of anti-downgrade fuses burnt, and translate that */
        /* to the warmboot key? To be decided during the process of implementing ams-on-mariko support. */
        reg::Write(pmc + APBDEV_PMC_SECURE_SCRATCH32, 0x129);

+       /* TODO: Fix to ensure correct scratch contents on mariko, as otherwise wb is broken. */
+       AMS_ABORT_UNLESS(fuse::GetSocType() != fuse::SocType_Mariko);
    }

    /* This function derives the master kek and device keys using the tsec root key. */

@@ -960,7 +960,7 @@ namespace ams::secmon {
    }

    void SetupLogForBoot() {
-       log::Initialize();
+       log::Initialize(secmon::GetLogPort(), secmon::GetLogBaudRate(), secmon::GetLogFlags());
        log::SendText("OHAYO\n", 6);
        log::Flush();
    }
@@ -47,7 +47,7 @@ namespace ams::secmon::smc {
        [fuse::DramId_IcosaSamsung4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_IcosaHynix4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_IcosaMicron4GB] = pkg1::MemorySize_4GB,
-       [fuse::DramId_CopperSamsung4GB] = pkg1::MemorySize_4GB,
+       [fuse::DramId_FiveHynix1y4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_IcosaSamsung6GB] = pkg1::MemorySize_6GB,
        [fuse::DramId_CopperHynix4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_CopperMicron4GB] = pkg1::MemorySize_4GB,

@@ -66,9 +66,13 @@ namespace ams::secmon::smc {
        [fuse::DramId_HoagSamsung1y4GBX] = pkg1::MemorySize_4GB,
        [fuse::DramId_IowaSamsung1y4GBY] = pkg1::MemorySize_4GB,
        [fuse::DramId_IowaSamsung1y8GBY] = pkg1::MemorySize_8GB,
        [fuse::DramId_IowaSamsung1y4GBA] = pkg1::MemorySize_4GB,
        [fuse::DramId_FiveSamsung1y8GBX] = pkg1::MemorySize_8GB,
        [fuse::DramId_FiveSamsung1y4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_HoagSamsung1y8GBX] = pkg1::MemorySize_8GB,
        [fuse::DramId_FiveSamsung1y4GBX] = pkg1::MemorySize_4GB,
        [fuse::DramId_IowaMicron1y4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_HoagMicron1y4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_FiveMicron1y4GB] = pkg1::MemorySize_4GB,
        [fuse::DramId_FiveSamsung1y8GBX] = pkg1::MemorySize_8GB,
    };

    constexpr const pkg1::MemoryMode MemoryModes[] = {

@@ -278,6 +282,10 @@ namespace ams::secmon::smc {
                    return SmcResult::NotInitialized;
                }
                break;
+           case ConfigItem::ExosphereLogConfiguration:
+               /* Get the log configuration. */
+               args.r[1] = (static_cast<u64>(static_cast<u8>(secmon::GetLogPort())) << 32) | static_cast<u64>(secmon::GetLogBaudRate());
+               break;
            default:
                return SmcResult::InvalidArgument;
        }
@@ -40,15 +40,16 @@ namespace ams::secmon::smc {
        Package2Hash = 17,

        /* Extension config items for exosphere. */
-       ExosphereApiVersion = 65000,
-       ExosphereNeedsReboot = 65001,
-       ExosphereNeedsShutdown = 65002,
-       ExosphereGitCommitHash = 65003,
-       ExosphereHasRcmBugPatch = 65004,
-       ExosphereBlankProdInfo = 65005,
-       ExosphereAllowCalWrites = 65006,
-       ExosphereEmummcType = 65007,
-       ExospherePayloadAddress = 65008,
+       ExosphereApiVersion = 65000,
+       ExosphereNeedsReboot = 65001,
+       ExosphereNeedsShutdown = 65002,
+       ExosphereGitCommitHash = 65003,
+       ExosphereHasRcmBugPatch = 65004,
+       ExosphereBlankProdInfo = 65005,
+       ExosphereAllowCalWrites = 65006,
+       ExosphereEmummcType = 65007,
+       ExospherePayloadAddress = 65008,
+       ExosphereLogConfiguration = 65009,
    };

    SmcResult SmcGetConfigUser(SmcArguments &args);
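The new `ExosphereLogConfiguration` item packs the log port into the upper 32 bits of the returned value and the baud rate into the lower 32 bits (see the `args.r[1]` assignment above). As a rough illustration only, a userland client could query and unpack it along these lines; this assumes libnx's `splGetConfig` accepts the raw item number via a cast and that `splInitialize()` has already been called, neither of which is part of this commit.

    #include <switch.h>

    /* Illustrative sketch: query exosphere's log configuration via smcGetConfig.  */
    /* 65009 is ConfigItem::ExosphereLogConfiguration from the enum above.         */
    Result GetExosphereLogConfig(u32 *out_port, u32 *out_baud_rate) {
        u64 value = 0;
        Result rc = splGetConfig((SplConfigItem)65009, &value);
        if (R_SUCCEEDED(rc)) {
            *out_port      = (u32)(value >> 32);         /* 0 = UART-A ... 3 = UART-D */
            *out_baud_rate = (u32)(value & 0xFFFFFFFF);  /* e.g. 115200 */
        }
        return rc;
    }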
@@ -409,6 +409,7 @@ namespace ams::secmon::smc {
        /* NOTE: Nintendo only does this on dev, but we will always do it. */
        if (true /* !pkg1::IsProduction() */) {
            log::SendText("OYASUMI\n", 8);
            log::Flush();
        }

        /* If we're on erista, configure the bootrom to allow our custom warmboot firmware. */
@@ -37,24 +37,25 @@ static char g_bct0_buffer[BCTO_MAX_SIZE];

#define CONFIG_LOG_LEVEL_KEY "log_level"

-#define DEFAULT_BCT0 \
-"BCT0\n"\
-"[stage1]\n"\
-"stage2_path = atmosphere/fusee-secondary.bin\n"\
-"stage2_mtc_path = atmosphere/fusee-mtc.bin\n"\
-"stage2_addr = 0xF0000000\n"\
-"stage2_entrypoint = 0xF0000000\n"\
-"[exosphere]\n"\
-"debugmode = 1\n"\
-"debugmode_user = 0\n"\
-"disable_user_exception_handlers = 0\n"\
-"[stratosphere]\n"
+static const char *get_default_bct0(void) {
+    return "BCT0\n"
+           "[stage1]\n"
+           "stage2_path = atmosphere/fusee-secondary.bin\n"
+           "stage2_mtc_path = atmosphere/fusee-mtc.bin\n"
+           "stage2_addr = 0xF0000000\n"
+           "stage2_entrypoint = 0xF0000000\n"
+           "\n"
+           "[stratosphere]\n"
+           "\n";
+}

static const char *load_config(void) {
    if (!read_from_file(g_bct0_buffer, BCTO_MAX_SIZE, "atmosphere/config/BCT.ini")) {
        print(SCREEN_LOG_LEVEL_DEBUG, "Failed to read BCT0 from SD!\n");
        print(SCREEN_LOG_LEVEL_DEBUG, "Using default BCT0!\n");
-       memcpy(g_bct0_buffer, DEFAULT_BCT0, sizeof(DEFAULT_BCT0));
+
+       const char * const default_bct0 = get_default_bct0();
+       memcpy(g_bct0_buffer, default_bct0, strlen(default_bct0));
    }

    if (memcmp(g_bct0_buffer, "BCT0", 4) != 0) {
@@ -34,11 +34,17 @@
#define EXOSPHERE_FLAG_BLANK_PRODINFO (1 << 5u)
#define EXOSPHERE_FLAG_ALLOW_WRITING_TO_CAL_SYSMMC (1 << 6u)

+#define EXOSPHERE_LOG_FLAG_INVERTED (1 << 0u)
+
typedef struct {
    uint32_t magic;
    uint32_t target_firmware;
-    uint32_t flags;
-    uint32_t reserved[5];
+    uint32_t flags[2];
+    uint16_t lcd_vendor;
+    uint8_t log_port;
+    uint8_t log_flags;
+    uint32_t log_baud_rate;
+    uint32_t reserved1[2];
    exo_emummc_config_t emummc_cfg;
} exosphere_config_t;

@@ -54,6 +60,9 @@ _Static_assert(sizeof(exosphere_config_t) == 0x20 + sizeof(exo_emummc_config_t),
#define EXOSPHERE_BLANK_PRODINFO_SYSMMC_KEY "blank_prodinfo_sysmmc"
#define EXOSPHERE_BLANK_PRODINFO_EMUMMC_KEY "blank_prodinfo_emummc"
#define EXOSPHERE_ALLOW_WRITING_TO_CAL_SYSMMC_KEY "allow_writing_to_cal_sysmmc"
+#define EXOSPHERE_LOG_PORT_KEY "log_port"
+#define EXOSPHERE_LOG_BAUD_RATE_KEY "log_baud_rate"
+#define EXOSPHERE_LOG_INVERTED_KEY "log_inverted"

typedef struct {
    int debugmode;

@@ -63,6 +72,9 @@ typedef struct {
    int blank_prodinfo_sysmmc;
    int blank_prodinfo_emummc;
    int allow_writing_to_cal_sysmmc;
+    int log_port;
+    int log_baud_rate;
+    int log_inverted;
} exosphere_parse_cfg_t;

#endif
@@ -906,6 +906,35 @@ static const kernel_patch_t g_kernel_patches_1100[] = {
    }
};

+static const kernel_patch_t g_kernel_patches_1101[] = {
+    { /* Send Message Process ID Patch. */
+        .pattern_size = 0x1C,
+        .pattern = MAKE_KERNEL_PATTERN_NAME(1100, proc_id_send),
+        .pattern_hook_offset = 0x0,
+        .payload_num_instructions = sizeof(MAKE_KERNEL_PATCH_NAME(1100, proc_id_send))/sizeof(instruction_t),
+        .branch_back_offset = 0x10,
+        .payload = MAKE_KERNEL_PATCH_NAME(1100, proc_id_send)
+    },
+    { /* Receive Message Process ID Patch. */
+        .pattern_size = 0x1C,
+        .pattern = MAKE_KERNEL_PATTERN_NAME(1100, proc_id_recv),
+        .pattern_hook_offset = 0x0,
+        .payload_num_instructions = sizeof(MAKE_KERNEL_PATCH_NAME(1100, proc_id_recv))/sizeof(instruction_t),
+        .branch_back_offset = 0x10,
+        .payload = MAKE_KERNEL_PATCH_NAME(1100, proc_id_recv)
+    },
+    { /* svcControlCodeMemory Patch. */
+        .payload_num_instructions = sizeof(MAKE_KERNEL_PATCH_NAME(1100, svc_control_codememory))/sizeof(instruction_t),
+        .payload = MAKE_KERNEL_PATCH_NAME(1100, svc_control_codememory),
+        .patch_offset = 0x2FD04,
+    },
+    { /* System Memory Increase Patch. */
+        .payload_num_instructions = sizeof(MAKE_KERNEL_PATCH_NAME(1100, system_memory_increase))/sizeof(instruction_t),
+        .payload = MAKE_KERNEL_PATCH_NAME(1100, system_memory_increase),
+        .patch_offset = 0x490C4,
+    }
+};
+
#define KERNEL_PATCHES(vers) .num_patches = sizeof(g_kernel_patches_##vers)/sizeof(kernel_patch_t), .patches = g_kernel_patches_##vers,

/* Kernel Infos. */

@@ -1000,6 +1029,15 @@ static const kernel_info_t g_kernel_infos[] = {
        .embedded_ini_ptr = 0x180,
        .free_code_space_offset = 0x49EE8,
        KERNEL_PATCHES(1100)
    },
+    { /* 11.0.1. */
+        .hash = {0x68, 0xB9, 0x72, 0xB7, 0x97, 0x55, 0x87, 0x5E, 0x24, 0x95, 0x8D, 0x99, 0x0A, 0x77, 0xAB, 0xF1, 0xC5, 0xC1, 0x32, 0x80, 0x67, 0xF0, 0xA2, 0xEC, 0x9C, 0xEF, 0xC3, 0x22, 0xE3, 0x42, 0xC0, 0x4D, },
+        .hash_offset = 0x1C4,
+        .hash_size = 0x69000 - 0x1C4,
+        .embedded_ini_offset = 0x69000,
+        .embedded_ini_ptr = 0x180,
+        .free_code_space_offset = 0x49EE8,
+        KERNEL_PATCHES(1101)
+    }
};
@@ -70,6 +70,18 @@ static void exit_callback(int rc) {
    }
}

+static const char *get_default_bct0(void) {
+    return "BCT0\n"
+           "[stage1]\n"
+           "stage2_path = atmosphere/fusee-secondary.bin\n"
+           "stage2_mtc_path = atmosphere/fusee-mtc.bin\n"
+           "stage2_addr = 0xF0000000\n"
+           "stage2_entrypoint = 0xF0000000\n"
+           "\n"
+           "[stratosphere]\n"
+           "\n";
+}
+
/* Allow for main(int argc, void **argv) signature. */
#pragma GCC diagnostic ignored "-Wmain"

@@ -102,7 +114,8 @@ int main(int argc, void **argv) {
    if (strcmp(g_stage2_args->bct0, "") == 0) {
        uint32_t bct_tmp_buf[sizeof(g_stage2_args->bct0) / sizeof(uint32_t)] = {0};
        if (!read_from_file(bct_tmp_buf, sizeof(bct_tmp_buf) - 1, "atmosphere/config/BCT.ini")) {
-           fatal_error("Failed to read BCT0 from SD!\n");
+           const char * const default_bct0 = get_default_bct0();
+           memcpy(bct_tmp_buf, default_bct0, strlen(default_bct0));
        }
        memcpy(g_stage2_args->bct0, bct_tmp_buf, sizeof(bct_tmp_buf));
    }
@@ -196,6 +196,27 @@ static int exosphere_ini_handler(void *user, const char *section, const char *name, const char *value) {
        } else if (tmp == 0) {
            parse_cfg->allow_writing_to_cal_sysmmc = 0;
        }
+    } else if (strcmp(name, EXOSPHERE_LOG_PORT_KEY) == 0) {
+        sscanf(value, "%d", &tmp);
+        if (0 <= tmp && tmp < 4) {
+            parse_cfg->log_port = tmp;
+        } else {
+            parse_cfg->log_port = 0;
+        }
+    } else if (strcmp(name, EXOSPHERE_LOG_BAUD_RATE_KEY) == 0) {
+        sscanf(value, "%d", &tmp);
+        if (tmp > 0) {
+            parse_cfg->log_baud_rate = tmp;
+        } else {
+            parse_cfg->log_baud_rate = 115200;
+        }
+    } else if (strcmp(name, EXOSPHERE_LOG_INVERTED_KEY) == 0) {
+        sscanf(value, "%d", &tmp);
+        if (tmp == 1) {
+            parse_cfg->log_inverted = 1;
+        } else if (tmp == 0) {
+            parse_cfg->log_inverted = 0;
+        }
    } else {
        return 0;
    }

@@ -240,6 +261,7 @@ static uint32_t nxboot_get_specific_target_firmware(uint32_t target_firmware){
#define CHECK_NCA(NCA_ID, VERSION) do { if (is_nca_present(NCA_ID)) { return ATMOSPHERE_TARGET_FIRMWARE_##VERSION; } } while(0)

    if (target_firmware >= ATMOSPHERE_TARGET_FIRMWARE_11_0_0) {
+        CHECK_NCA("56211c7a5ed20a5332f5cdda67121e37", 11_0_1);
        CHECK_NCA("594c90bcdbcccad6b062eadba0cd0e7e", 11_0_0);
    } else if (target_firmware >= ATMOSPHERE_TARGET_FIRMWARE_10_0_0) {
        CHECK_NCA("26325de4db3909e0ef2379787c7e671d", 10_2_0);

@@ -464,9 +486,9 @@ static void nxboot_configure_exosphere(uint32_t target_firmware, unsigned int keygen_type, ...)
    const bool is_emummc = exo_emummc_cfg->base_cfg.magic == MAGIC_EMUMMC_CONFIG && exo_emummc_cfg->base_cfg.type != EMUMMC_TYPE_NONE;

    if (keygen_type) {
-        exo_cfg.flags = EXOSPHERE_FLAG_PERFORM_620_KEYGEN;
+        exo_cfg.flags[0] = EXOSPHERE_FLAG_PERFORM_620_KEYGEN;
    } else {
-        exo_cfg.flags = 0;
+        exo_cfg.flags[0] = 0;
    }

    /* Setup exosphere parse configuration with defaults. */

@@ -478,6 +500,9 @@ static void nxboot_configure_exosphere(uint32_t target_firmware, unsigned int keygen_type, ...)
        .blank_prodinfo_sysmmc = 0,
        .blank_prodinfo_emummc = 0,
        .allow_writing_to_cal_sysmmc = 0,
+        .log_port = 0,
+        .log_baud_rate = 115200,
+        .log_inverted = 0,
    };

    /* If we have an ini to read, parse it. */

@@ -490,13 +515,17 @@ static void nxboot_configure_exosphere(uint32_t target_firmware, unsigned int keygen_type, ...)
    free(exosphere_ini);

    /* Apply parse config. */
-    if (parse_cfg.debugmode) exo_cfg.flags |= EXOSPHERE_FLAG_IS_DEBUGMODE_PRIV;
-    if (parse_cfg.debugmode_user) exo_cfg.flags |= EXOSPHERE_FLAG_IS_DEBUGMODE_USER;
-    if (parse_cfg.disable_user_exception_handlers) exo_cfg.flags |= EXOSPHERE_FLAG_DISABLE_USERMODE_EXCEPTION_HANDLERS;
-    if (parse_cfg.enable_user_pmu_access) exo_cfg.flags |= EXOSPHERE_FLAG_ENABLE_USERMODE_PMU_ACCESS;
-    if (parse_cfg.blank_prodinfo_sysmmc && !is_emummc) exo_cfg.flags |= EXOSPHERE_FLAG_BLANK_PRODINFO;
-    if (parse_cfg.blank_prodinfo_emummc && is_emummc) exo_cfg.flags |= EXOSPHERE_FLAG_BLANK_PRODINFO;
-    if (parse_cfg.allow_writing_to_cal_sysmmc) exo_cfg.flags |= EXOSPHERE_FLAG_ALLOW_WRITING_TO_CAL_SYSMMC;
+    if (parse_cfg.debugmode) exo_cfg.flags[0] |= EXOSPHERE_FLAG_IS_DEBUGMODE_PRIV;
+    if (parse_cfg.debugmode_user) exo_cfg.flags[0] |= EXOSPHERE_FLAG_IS_DEBUGMODE_USER;
+    if (parse_cfg.disable_user_exception_handlers) exo_cfg.flags[0] |= EXOSPHERE_FLAG_DISABLE_USERMODE_EXCEPTION_HANDLERS;
+    if (parse_cfg.enable_user_pmu_access) exo_cfg.flags[0] |= EXOSPHERE_FLAG_ENABLE_USERMODE_PMU_ACCESS;
+    if (parse_cfg.blank_prodinfo_sysmmc && !is_emummc) exo_cfg.flags[0] |= EXOSPHERE_FLAG_BLANK_PRODINFO;
+    if (parse_cfg.blank_prodinfo_emummc && is_emummc) exo_cfg.flags[0] |= EXOSPHERE_FLAG_BLANK_PRODINFO;
+    if (parse_cfg.allow_writing_to_cal_sysmmc) exo_cfg.flags[0] |= EXOSPHERE_FLAG_ALLOW_WRITING_TO_CAL_SYSMMC;
+
+    exo_cfg.log_port = parse_cfg.log_port;
+    exo_cfg.log_baud_rate = parse_cfg.log_baud_rate;
+    if (parse_cfg.log_inverted) exo_cfg.log_flags |= EXOSPHERE_LOG_FLAG_INVERTED;

    if ((exo_cfg.target_firmware < ATMOSPHERE_TARGET_FIRMWARE_MIN) || (exo_cfg.target_firmware > ATMOSPHERE_TARGET_FIRMWARE_MAX)) {
        fatal_error("[NXBOOT] Invalid Exosphere target firmware!\n");
@@ -6,7 +6,7 @@
[subrepo]
	remote = https://github.com/Atmosphere-NX/Atmosphere-libs
	branch = master
-	commit = 8233fa00ac730c6e5c6f7866e1c7d26863bf28fc
-	parent = b8fbd0baff74efe50f0aadf9782c47ee33751131
+	commit = 886dfaf6d5cf47c06895173721c6c12dfaf0b476
+	parent = 8b61537aa347e0e6495b8c6d71eb62faab0c652a
	method = merge
-	cmdver = 0.4.1
+	cmdver = 0.4.0
@@ -51,7 +51,7 @@ namespace ams::fuse {
        DramId_IcosaSamsung4GB = 0,
        DramId_IcosaHynix4GB = 1,
        DramId_IcosaMicron4GB = 2,
-       DramId_CopperSamsung4GB = 3,
+       DramId_FiveHynix1y4GB = 3,
        DramId_IcosaSamsung6GB = 4,
        DramId_CopperHynix4GB = 5,
        DramId_CopperMicron4GB = 6,

@@ -70,9 +70,13 @@ namespace ams::fuse {
        DramId_HoagSamsung1y4GBX = 19,
        DramId_IowaSamsung1y4GBY = 20,
        DramId_IowaSamsung1y8GBY = 21,
        DramId_IowaSamsung1y4GBA = 22,
        DramId_FiveSamsung1y8GBX = 23,
        DramId_FiveSamsung1y4GB = 22,
        DramId_HoagSamsung1y8GBX = 23,
        DramId_FiveSamsung1y4GBX = 24,
        DramId_IowaMicron1y4GB = 25,
        DramId_HoagMicron1y4GB = 26,
        DramId_FiveMicron1y4GB = 27,
        DramId_FiveSamsung1y8GBX = 28,

        DramId_Count,
    };
@@ -35,6 +35,7 @@ namespace ams::log {
#endif

    void Initialize();
+   void Initialize(uart::Port port, u32 baud_rate, u32 flags);
    void Finalize();

    void Printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));

@@ -116,6 +116,18 @@ namespace ams::secmon {
        return GetSecmonConfiguration().GetLcdVendor();
    }

+   ALWAYS_INLINE uart::Port GetLogPort() {
+       return GetSecmonConfiguration().GetLogPort();
+   }
+
+   ALWAYS_INLINE u8 GetLogFlags() {
+       return GetSecmonConfiguration().GetLogFlags();
+   }
+
+   ALWAYS_INLINE u32 GetLogBaudRate() {
+       return GetSecmonConfiguration().GetLogBaudRate();
+   }
+
    ALWAYS_INLINE bool IsProduction() {
        return GetSecmonConfiguration().IsProduction();
    }
@@ -16,6 +16,7 @@
#pragma once
#include <vapours.hpp>
#include <exosphere/fuse.hpp>
+#include <exosphere/uart.hpp>
#include <exosphere/secmon/secmon_emummc_context.hpp>

namespace ams::secmon {

@@ -39,8 +40,10 @@ namespace ams::secmon {
        ams::TargetFirmware target_firmware;
        u32 flags[2];
        u16 lcd_vendor;
-       u16 reserved0;
-       u32 reserved1[3];
+       u8 log_port;
+       u8 log_flags;
+       u32 log_baud_rate;
+       u32 reserved1[2];
        EmummcConfiguration emummc_cfg;

        constexpr bool IsValid() const { return this->magic == Magic; }

@@ -54,17 +57,22 @@ namespace ams::secmon {
        u8 hardware_type;
        u8 soc_type;
        u8 hardware_state;
-       u8 pad_0B[1];
+       u8 log_port;
        u32 flags[2];
        u16 lcd_vendor;
-       u16 reserved0;
-       u32 reserved1[(0x80 - 0x18) / sizeof(u32)];
+       u8 log_flags;
+       u8 reserved0;
+       u32 log_baud_rate;
+       u32 reserved1[(0x80 - 0x1C) / sizeof(u32)];

        constexpr void CopyFrom(const SecureMonitorStorageConfiguration &storage) {
            this->target_firmware = storage.target_firmware;
            this->flags[0] = storage.flags[0];
            this->flags[1] = storage.flags[1];
            this->lcd_vendor = storage.lcd_vendor;
+           this->log_port = storage.log_port;
+           this->log_flags = storage.log_flags;
+           this->log_baud_rate = storage.log_baud_rate != 0 ? storage.log_baud_rate : 115200;
        }

        void SetFuseInfo() {

@@ -78,9 +86,13 @@ namespace ams::secmon {
        constexpr fuse::HardwareType GetHardwareType() const { return static_cast<fuse::HardwareType>(this->hardware_type); }
        constexpr fuse::SocType GetSocType() const { return static_cast<fuse::SocType>(this->soc_type); }
        constexpr fuse::HardwareState GetHardwareState() const { return static_cast<fuse::HardwareState>(this->hardware_state); }
+       constexpr uart::Port GetLogPort() const { return static_cast<uart::Port>(this->log_port); }
+       constexpr u8 GetLogFlags() const { return this->log_flags; }

        constexpr u16 GetLcdVendor() const { return this->lcd_vendor; }

+       constexpr u32 GetLogBaudRate() const { return this->log_baud_rate; }
+
        constexpr bool IsProduction() const { return this->GetHardwareState() != fuse::HardwareState_Development; }

        constexpr bool IsDevelopmentFunctionEnabledForKernel() const { return (this->flags[0] & SecureMonitorConfigurationFlag_IsDevelopmentFunctionEnabledForKernel) != 0; }

@@ -101,10 +113,12 @@ namespace ams::secmon {
        .hardware_type = {},
        .soc_type = {},
        .hardware_state = {},
-       .pad_0B = {},
+       .log_port = uart::Port_ReservedDebug,
        .flags = { SecureMonitorConfigurationFlag_Default, SecureMonitorConfigurationFlag_None },
        .lcd_vendor = {},
+       .log_flags = {},
        .reserved0 = {},
+       .log_baud_rate = 115200,
        .reserved1 = {},
    };
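The fusee-side exosphere_config_t (in the bootloader headers above) and this SecureMonitorStorageConfiguration must agree on where the new logging fields live. A compile-time check along the following lines makes that shared layout explicit; this is an illustrative sketch, not part of the commit, with the offsets derived from the struct members shown in this diff.

    #include <cstdint>
    #include <cstddef>

    /* Illustrative mirror of the shared header layout: magic, target_firmware,     */
    /* flags[2], lcd_vendor, log_port, log_flags, log_baud_rate, reserved1[2].       */
    struct ExoConfigHeader {
        uint32_t magic;
        uint32_t target_firmware;
        uint32_t flags[2];
        uint16_t lcd_vendor;
        uint8_t  log_port;
        uint8_t  log_flags;
        uint32_t log_baud_rate;
        uint32_t reserved1[2];
    };

    static_assert(offsetof(ExoConfigHeader, log_port)      == 0x12);
    static_assert(offsetof(ExoConfigHeader, log_flags)     == 0x13);
    static_assert(offsetof(ExoConfigHeader, log_baud_rate) == 0x14);
    static_assert(sizeof(ExoConfigHeader)                  == 0x20);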
@@ -19,58 +19,46 @@ namespace ams::log {

    namespace {

-       constexpr inline uart::Port UartLogPort = uart::Port_ReservedDebug;
-       constexpr inline int UartBaudRate = 115200;
+       constexpr inline uart::Port DefaultLogPort = uart::Port_ReservedDebug;
+       constexpr inline u32 DefaultLogFlags = static_cast<u32>(uart::Flag_None);
+       constexpr inline int DefaultBaudRate = 115200;
+       constinit uart::Port g_log_port = DefaultLogPort;
        constinit bool g_initialized_uart = false;

-       constexpr inline u32 UartPortFlags = [] {
-           if constexpr (UartLogPort == uart::Port_ReservedDebug) {
-               /* Logging to the debug port. */
-               /* Don't invert transactions. */
-               return uart::Flag_None;
-           } else if constexpr (UartLogPort == uart::Port_LeftJoyCon) {
-               /* Logging to left joy-con (e.g. with Joyless). */
-               /* Invert transactions. */
-               return uart::Flag_Inverted;
-           } else if constexpr (UartLogPort == uart::Port_RightJoyCon) {
-               /* Logging to right joy-con (e.g. with Joyless). */
-               /* Invert transactions. */
-               return uart::Flag_Inverted;
-           } else {
-               __builtin_unreachable();
-           }
-       }();
+       ALWAYS_INLINE void SetupUartClock(uart::Port port) {
+           /* The debug port must always be set up, for compatibility with official hos. */
+           pinmux::SetupUartA();
+           clkrst::EnableUartAClock();

-       ALWAYS_INLINE void SetupUart() {
-           if constexpr (UartLogPort == uart::Port_ReservedDebug) {
-               /* Logging to the debug port. */
-               pinmux::SetupUartA();
-               clkrst::EnableUartAClock();
-           } else if constexpr (UartLogPort == uart::Port_LeftJoyCon) {
+           /* If logging to a joy-con port, configure appropriately. */
+           if (port == uart::Port_LeftJoyCon) {
                /* Logging to left joy-con (e.g. with Joyless). */
                static_assert(uart::Port_LeftJoyCon == uart::Port_C);
                pinmux::SetupUartC();
                clkrst::EnableUartCClock();
-           } else if constexpr (UartLogPort == uart::Port_RightJoyCon) {
+           } else if (port == uart::Port_RightJoyCon) {
                /* Logging to right joy-con (e.g. with Joyless). */
                static_assert(uart::Port_RightJoyCon == uart::Port_B);
                pinmux::SetupUartB();
                clkrst::EnableUartBClock();
-           } else {
-               __builtin_unreachable();
            }
        }

    }

    void Initialize() {
+       return Initialize(DefaultLogPort, DefaultBaudRate, DefaultLogFlags);
    }

+   void Initialize(uart::Port port, u32 baud_rate, u32 flags) {
        /* Initialize pinmux and clock for the target uart port. */
-       SetupUart();
+       SetupUartClock(port);

        /* Initialize the target uart port. */
-       uart::Initialize(UartLogPort, UartBaudRate, UartPortFlags);
+       uart::Initialize(port, baud_rate, flags);

        /* Note that we've initialized. */
+       g_log_port = port;
        g_initialized_uart = true;
    }

@@ -84,7 +72,7 @@ namespace ams::log {
        const auto len = util::TVSNPrintf(log_buf, sizeof(log_buf), fmt, vl);

        if (g_initialized_uart) {
-           uart::SendText(UartLogPort, log_buf, len);
+           uart::SendText(g_log_port, log_buf, len);
        }
    }

@@ -115,13 +103,13 @@ namespace ams::log {

    void SendText(const void *text, size_t size) {
        if (g_initialized_uart) {
-           uart::SendText(UartLogPort, text, size);
+           uart::SendText(g_log_port, text, size);
        }
    }

    void Flush() {
        if (g_initialized_uart) {
-           uart::WaitFlush(UartLogPort);
+           uart::WaitFlush(g_log_port);
        }
    }
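As a usage illustration of the reworked API above: a secure monitor caller that wants Joy-Con logging (what exosphere selects when `log_port=2` and `log_inverted=1` are set in exosphere.ini) would initialize the logger roughly as follows. The snippet is a sketch built only from the declarations shown in this diff; the wrapper function name is hypothetical.

    #include <exosphere.hpp>

    namespace ams::secmon::boot {

        /* Sketch: bring up logging on the left Joy-Con rail instead of the debug port. */
        void SetupJoyConLogForBoot() {
            /* Port_LeftJoyCon aliases Port_C (see the static_assert above); Joy-Con   */
            /* transactions are inverted, so pass uart::Flag_Inverted.                 */
            log::Initialize(uart::Port_LeftJoyCon, 115200, static_cast<u32>(uart::Flag_Inverted));
            log::SendText("OHAYO\n", 6);
            log::Flush();
        }

    }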
@@ -54,6 +54,7 @@
#include <mesosphere/kern_kernel.hpp>
#include <mesosphere/kern_k_page_table_manager.hpp>
#include <mesosphere/kern_select_page_table.hpp>
+#include <mesosphere/kern_k_dump_object.hpp>

/* Miscellaneous objects. */
#include <mesosphere/kern_k_shared_memory_info.hpp>
@@ -135,10 +135,10 @@ namespace ams::kern::arch::arm {
        private:
            static inline u32 s_mask[cpu::NumCores];
        private:
-           volatile GicDistributor *gicd;
-           volatile GicCpuInterface *gicc;
+           volatile GicDistributor *m_gicd;
+           volatile GicCpuInterface *m_gicc;
        public:
-           constexpr KInterruptController() : gicd(nullptr), gicc(nullptr) { /* ... */ }
+           constexpr KInterruptController() : m_gicd(nullptr), m_gicc(nullptr) { /* ... */ }

            void Initialize(s32 core_id);
            void Finalize(s32 core_id);

@@ -149,7 +149,7 @@ namespace ams::kern::arch::arm {
            void RestoreGlobal(const GlobalState *state) const;
        public:
            u32 GetIrq() const {
-               return this->gicc->iar;
+               return m_gicc->iar;
            }

            static constexpr s32 ConvertRawIrq(u32 irq) {

@@ -157,69 +157,69 @@ namespace ams::kern::arch::arm {
            }

            void Enable(s32 irq) const {
-               this->gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+               m_gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
            }

            void Disable(s32 irq) const {
-               this->gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+               m_gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
            }

            void Clear(s32 irq) const {
-               this->gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
+               m_gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
            }

            void SetTarget(s32 irq, s32 core_id) const {
-               this->gicd->itargetsr.bytes[irq] = this->gicd->itargetsr.bytes[irq] | GetGicMask(core_id);
+               m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] | GetGicMask(core_id);
            }

            void ClearTarget(s32 irq, s32 core_id) const {
-               this->gicd->itargetsr.bytes[irq] = this->gicd->itargetsr.bytes[irq] & ~GetGicMask(core_id);
+               m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] & ~GetGicMask(core_id);
            }

            void SetPriorityLevel(s32 irq, s32 level) const {
                MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
-               this->gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
+               m_gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
            }

            s32 GetPriorityLevel(s32 irq) const {
-               return FromGicPriorityValue(this->gicd->ipriorityr.bytes[irq]);
+               return FromGicPriorityValue(m_gicd->ipriorityr.bytes[irq]);
            }

            void SetPriorityLevel(s32 level) const {
                MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
-               this->gicc->pmr = ToGicPriorityValue(level);
+               m_gicc->pmr = ToGicPriorityValue(level);
            }

            void SetEdge(s32 irq) const {
-               u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
+               u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
                cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
                cfg |= (0x2 << (2 * (irq % (BITSIZEOF(u32) / 2))));
-               this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
+               m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
            }

            void SetLevel(s32 irq) const {
-               u32 cfg = this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
+               u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
                cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
                cfg |= (0x0 << (2 * (irq % (BITSIZEOF(u32) / 2))));
-               this->gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
+               m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
            }

            void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
                MESOSPHERE_ASSERT(IsSoftware(irq));
-               this->gicd->sgir = GetCpuTargetListMask(irq, core_mask);
+               m_gicd->sgir = GetCpuTargetListMask(irq, core_mask);
            }

            void SendInterProcessorInterrupt(s32 irq) {
                MESOSPHERE_ASSERT(IsSoftware(irq));
-               this->gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
+               m_gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
            }

            void EndOfInterrupt(u32 irq) const {
-               this->gicc->eoir = irq;
+               m_gicc->eoir = irq;
            }

            bool IsInterruptDefined(s32 irq) const {
-               const s32 num_interrupts = std::min(32 + 32 * (this->gicd->typer & 0x1F), static_cast<u32>(NumInterrupts));
+               const s32 num_interrupts = std::min(32 + 32 * (m_gicd->typer & 0x1F), static_cast<u32>(NumInterrupts));
                return (0 <= irq && irq < num_interrupts);
            }
        public:

@@ -270,7 +270,7 @@ namespace ams::kern::arch::arm {
            }

            ALWAYS_INLINE void SetGicMask(s32 core_id) const {
-               s_mask[core_id] = this->gicd->itargetsr.bytes[0];
+               s_mask[core_id] = m_gicd->itargetsr.bytes[0];
            }

            NOINLINE void SetupInterruptLines(s32 core_id) const;
@ -44,16 +44,16 @@ namespace ams::kern::arch::arm64::init {
|
||||
|
||||
struct NoClear{};
|
||||
private:
|
||||
KPhysicalAddress l1_table;
|
||||
KPhysicalAddress m_l1_table;
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ }
|
||||
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : m_l1_table(l1) { /* ... */ }
|
||||
|
||||
constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) {
|
||||
ClearNewPageTable(this->l1_table);
|
||||
ClearNewPageTable(m_l1_table);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE uintptr_t GetL1TableAddress() const {
|
||||
return GetInteger(this->l1_table);
|
||||
return GetInteger(m_l1_table);
|
||||
}
|
||||
private:
|
||||
static constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KPhysicalAddress _l1_table, KVirtualAddress address) {
|
||||
@ -83,7 +83,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
const KVirtualAddress end_virt_addr = virt_addr + size;
|
||||
size_t count = 0;
|
||||
while (virt_addr < end_virt_addr) {
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
|
||||
if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
|
||||
@ -137,7 +137,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
const KVirtualAddress end_virt_addr = virt_addr + size;
|
||||
size_t count = 0;
|
||||
while (virt_addr < end_virt_addr) {
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
|
||||
if (l1_entry->IsBlock() || l1_entry->IsEmpty()) {
|
||||
@ -194,7 +194,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
}
|
||||
|
||||
PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
if (l1_entry->IsBlock()) {
|
||||
MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
|
||||
@ -301,7 +301,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
|
||||
/* Iteratively map pages until the requested region is mapped. */
|
||||
while (size > 0) {
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
/* Can we make an L1 block? */
|
||||
if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
|
||||
@ -382,7 +382,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
|
||||
KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
|
||||
/* Get the L1 entry. */
|
||||
const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
const L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
if (l1_entry->IsBlock()) {
|
||||
return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
|
||||
@ -444,7 +444,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
};
|
||||
|
||||
while (virt_addr < end_virt_addr) {
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
/* If an L1 block is mapped, update. */
|
||||
if (l1_entry->IsBlock()) {
|
||||
@ -485,7 +485,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
|
||||
const KVirtualAddress end_virt_addr = virt_addr + size;
|
||||
while (virt_addr < end_virt_addr) {
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
/* If an L1 block is mapped, the address isn't free. */
|
||||
if (l1_entry->IsBlock()) {
|
||||
@ -534,7 +534,7 @@ namespace ams::kern::arch::arm64::init {
|
||||
|
||||
/* Iteratively reprotect pages until the requested region is reprotected. */
|
||||
while (size > 0) {
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr);
|
||||
L1PageTableEntry *l1_entry = GetL1Entry(m_l1_table, virt_addr);
|
||||
|
||||
/* Check if an L1 block is present. */
|
||||
if (l1_entry->IsBlock()) {
|
||||
@ -680,43 +680,43 @@ namespace ams::kern::arch::arm64::init {
uintptr_t free_bitmap;
};
private:
State state;
State m_state;
public:
constexpr ALWAYS_INLINE KInitialPageAllocator() : state{} { /* ... */ }
constexpr ALWAYS_INLINE KInitialPageAllocator() : m_state{} { /* ... */ }

ALWAYS_INLINE void Initialize(uintptr_t address) {
this->state.next_address = address + BITSIZEOF(this->state.free_bitmap) * PageSize;
this->state.free_bitmap = ~uintptr_t();
m_state.next_address = address + BITSIZEOF(m_state.free_bitmap) * PageSize;
m_state.free_bitmap = ~uintptr_t();
}

ALWAYS_INLINE void InitializeFromState(uintptr_t state_val) {
if (kern::GetTargetFirmware() >= ams::TargetFirmware_10_0_0) {
this->state = *reinterpret_cast<State *>(state_val);
m_state = *reinterpret_cast<State *>(state_val);
} else {
this->state.next_address = state_val;
this->state.free_bitmap = 0;
m_state.next_address = state_val;
m_state.free_bitmap = 0;
}
}

ALWAYS_INLINE void GetFinalState(State *out) {
*out = this->state;
this->state = {};
*out = m_state;
m_state = {};
}
public:
virtual KPhysicalAddress Allocate() override {
MESOSPHERE_INIT_ABORT_UNLESS(this->state.next_address != Null<uintptr_t>);
uintptr_t allocated = this->state.next_address;
if (this->state.free_bitmap != 0) {
MESOSPHERE_INIT_ABORT_UNLESS(m_state.next_address != Null<uintptr_t>);
uintptr_t allocated = m_state.next_address;
if (m_state.free_bitmap != 0) {
u64 index;
uintptr_t mask;
do {
index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(this->state.free_bitmap) - 1);
index = KSystemControl::Init::GenerateRandomRange(0, BITSIZEOF(m_state.free_bitmap) - 1);
mask = (static_cast<uintptr_t>(1) << index);
} while ((this->state.free_bitmap & mask) == 0);
this->state.free_bitmap &= ~mask;
allocated = this->state.next_address - ((BITSIZEOF(this->state.free_bitmap) - index) * PageSize);
} while ((m_state.free_bitmap & mask) == 0);
m_state.free_bitmap &= ~mask;
allocated = m_state.next_address - ((BITSIZEOF(m_state.free_bitmap) - index) * PageSize);
} else {
this->state.next_address += PageSize;
m_state.next_address += PageSize;
}

ClearPhysicalMemory(allocated, PageSize);
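The hunk above only renames state to m_state, but the logic it touches is worth spelling out: pages come out of a 64-entry free bitmap at randomized positions, and allocation falls back to plain bump allocation once the bitmap is exhausted. A minimal standalone sketch of that strategy, with illustrative names and std::mt19937_64 standing in for KSystemControl::Init::GenerateRandomRange:

#include <cstdint>
#include <random>

class BitmapPageAllocator {
    public:
        static constexpr uintptr_t PageSize = 0x1000;
    private:
        uintptr_t m_next_address = 0;  /* one page past the 64-page window */
        uint64_t m_free_bitmap = 0;    /* bit i set => page i of the window is still free */
        std::mt19937_64 m_rng{0};      /* stand-in for the kernel's random source */
    public:
        void Initialize(uintptr_t window_start) {
            m_next_address = window_start + 64 * PageSize;
            m_free_bitmap = ~UINT64_C(0);
        }

        uintptr_t Allocate() {
            if (m_free_bitmap != 0) {
                uint64_t index, mask;
                do {
                    index = m_rng() % 64;               /* random candidate bit */
                    mask = UINT64_C(1) << index;
                } while ((m_free_bitmap & mask) == 0);  /* retry until we hit a free page */
                m_free_bitmap &= ~mask;
                /* index 0 is the lowest page of the window, index 63 the page just below m_next_address */
                return m_next_address - ((64 - index) * PageSize);
            }

            /* Window exhausted: fall back to a plain bump allocator. */
            const uintptr_t allocated = m_next_address;
            m_next_address += PageSize;
            return allocated;
        }
};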
@ -135,36 +135,36 @@ namespace ams::kern::arch::arm64::cpu {
NON_COPYABLE(GenericRegisterAccessorBase);
NON_MOVEABLE(GenericRegisterAccessorBase);
private:
u64 value;
u64 m_value;
public:
constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ }
constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : m_value(v) { /* ... */ }
protected:
constexpr ALWAYS_INLINE u64 GetValue() const {
return this->value;
return m_value;
}

constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
return (this->value >> offset) & ((1ul << count) - 1);
return (m_value >> offset) & ((1ul << count) - 1);
}

constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
const u64 mask = ((1ul << count) - 1) << offset;
this->value &= ~mask;
this->value |= (value & (mask >> offset)) << offset;
m_value &= ~mask;
m_value |= (value & (mask >> offset)) << offset;
}

constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
const u64 mask = ((1ul << count) - 1) << offset;
this->value &= ~mask;
this->value |= (value & mask);
m_value &= ~mask;
m_value |= (value & mask);
}

constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
const u64 mask = 1ul << offset;
if (enabled) {
this->value |= mask;
m_value |= mask;
} else {
this->value &= ~mask;
m_value &= ~mask;
}
}
};
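The GetBits/SetBits helpers above are ordinary mask-and-shift field accessors over the raw 64-bit register value; only the backing member changes in this diff. A small self-contained illustration of the same arithmetic, using hypothetical field values rather than a real system register:

#include <cstddef>
#include <cstdint>

constexpr uint64_t GetBits(uint64_t value, std::size_t offset, std::size_t count) {
    return (value >> offset) & ((UINT64_C(1) << count) - 1);
}

constexpr uint64_t SetBits(uint64_t value, std::size_t offset, std::size_t count, uint64_t field) {
    const uint64_t mask = ((UINT64_C(1) << count) - 1) << offset;
    return (value & ~mask) | ((field & (mask >> offset)) << offset);
}

static_assert(GetBits(0xAB00, 8, 8) == 0xAB, "extract an 8-bit field at offset 8");
static_assert(SetBits(0, 8, 8, 0xAB) == 0xAB00, "insert an 8-bit field at offset 8");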
@ -44,6 +44,9 @@ namespace ams::kern::arch::arm64 {
static uintptr_t GetProgramCounter(const KThread &thread);
static void SetPreviousProgramCounter();

static void PrintRegister(KThread *thread = nullptr);
static void PrintBacktrace(KThread *thread = nullptr);

static Result BreakIfAttached(ams::svc::BreakReason break_reason, uintptr_t address, size_t size);
static Result SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, u64 flags, u64 value);

@ -61,8 +64,6 @@ namespace ams::kern::arch::arm64 {
}
}
}

/* TODO: This is a placeholder definition. */
};

}
@ -21,9 +21,9 @@ namespace ams::kern::arch::arm64 {

class KHardwareTimer : public KInterruptTask, public KHardwareTimerBase {
private:
s64 maximum_time;
s64 m_maximum_time;
public:
constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), m_maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
public:
/* Public API. */
NOINLINE void Initialize();
@ -38,7 +38,7 @@ namespace ams::kern::arch::arm64 {
KScopedSpinLock lk(this->GetLock());

if (this->RegisterAbsoluteTaskImpl(task, task_time)) {
if (task_time <= this->maximum_time) {
if (task_time <= m_maximum_time) {
SetCompareValue(task_time);
EnableInterrupt();
}
@ -47,18 +47,18 @@ namespace ams::kern::arch::arm64 {
|
||||
constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
|
||||
};
|
||||
private:
|
||||
KCoreLocalInterruptEntry core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
|
||||
KInterruptController interrupt_controller{};
|
||||
KInterruptController::LocalState local_states[cpu::NumCores]{};
|
||||
bool local_state_saved[cpu::NumCores]{};
|
||||
mutable KSpinLock global_interrupt_lock{};
|
||||
KGlobalInterruptEntry global_interrupts[KInterruptController::NumGlobalInterrupts]{};
|
||||
KInterruptController::GlobalState global_state{};
|
||||
bool global_state_saved{};
|
||||
KCoreLocalInterruptEntry m_core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
|
||||
KInterruptController m_interrupt_controller{};
|
||||
KInterruptController::LocalState m_local_states[cpu::NumCores]{};
|
||||
bool m_local_state_saved[cpu::NumCores]{};
|
||||
mutable KSpinLock m_global_interrupt_lock{};
|
||||
KGlobalInterruptEntry m_global_interrupts[KInterruptController::NumGlobalInterrupts]{};
|
||||
KInterruptController::GlobalState m_global_state{};
|
||||
bool m_global_state_saved{};
|
||||
private:
|
||||
ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return this->global_interrupt_lock; }
|
||||
ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return this->global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
|
||||
ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return this->core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }
|
||||
ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return m_global_interrupt_lock; }
|
||||
ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return m_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
|
||||
ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return m_core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }
|
||||
|
||||
bool OnHandleInterrupt();
|
||||
public:
|
||||
@ -71,15 +71,15 @@ namespace ams::kern::arch::arm64 {
|
||||
NOINLINE void Restore(s32 core_id);
|
||||
|
||||
bool IsInterruptDefined(s32 irq) const {
|
||||
return this->interrupt_controller.IsInterruptDefined(irq);
|
||||
return m_interrupt_controller.IsInterruptDefined(irq);
|
||||
}
|
||||
|
||||
bool IsGlobal(s32 irq) const {
|
||||
return this->interrupt_controller.IsGlobal(irq);
|
||||
return m_interrupt_controller.IsGlobal(irq);
|
||||
}
|
||||
|
||||
bool IsLocal(s32 irq) const {
|
||||
return this->interrupt_controller.IsLocal(irq);
|
||||
return m_interrupt_controller.IsLocal(irq);
|
||||
}
|
||||
|
||||
NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
|
||||
@ -89,11 +89,11 @@ namespace ams::kern::arch::arm64 {
|
||||
NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);
|
||||
|
||||
ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
|
||||
this->interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
|
||||
m_interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
|
||||
this->interrupt_controller.SendInterProcessorInterrupt(irq);
|
||||
m_interrupt_controller.SendInterProcessorInterrupt(irq);
|
||||
}
|
||||
|
||||
static void HandleInterrupt(bool user_mode);
|
||||
|
@ -32,7 +32,7 @@ namespace ams::kern::arch::arm64 {
KInterruptName_VirtualMaintenance = 25,
KInterruptName_HypervisorTimer = 26,
KInterruptName_VirtualTimer = 27,
KInterruptName_LegacyNFiq = 38,
KInterruptName_LegacyNFiq = 28,
KInterruptName_SecurePhysicalTimer = 29,
KInterruptName_NonSecurePhysicalTimer = 30,
KInterruptName_LegacyNIrq = 31,
@ -92,15 +92,15 @@ namespace ams::kern::arch::arm64 {
|
||||
return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
|
||||
}
|
||||
private:
|
||||
KPageTableManager *manager;
|
||||
u64 ttbr;
|
||||
u8 asid;
|
||||
KPageTableManager *m_manager;
|
||||
u64 m_ttbr;
|
||||
u8 m_asid;
|
||||
protected:
|
||||
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
|
||||
virtual Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) override;
|
||||
virtual void FinalizeUpdate(PageLinkedList *page_list) override;
|
||||
|
||||
KPageTableManager &GetPageTableManager() const { return *this->manager; }
|
||||
KPageTableManager &GetPageTableManager() const { return *m_manager; }
|
||||
private:
|
||||
constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
|
||||
/* Set basic attributes. */
|
||||
@ -166,13 +166,13 @@ namespace ams::kern::arch::arm64 {
|
||||
return entry;
|
||||
}
|
||||
public:
|
||||
constexpr KPageTable() : KPageTableBase(), manager(), ttbr(), asid() { /* ... */ }
|
||||
constexpr KPageTable() : KPageTableBase(), m_manager(), m_ttbr(), m_asid() { /* ... */ }
|
||||
|
||||
static NOINLINE void Initialize(s32 core_id);
|
||||
|
||||
ALWAYS_INLINE void Activate(u32 proc_id) {
|
||||
cpu::DataSynchronizationBarrier();
|
||||
cpu::SwitchProcess(this->ttbr, proc_id);
|
||||
cpu::SwitchProcess(m_ttbr, proc_id);
|
||||
}
|
||||
|
||||
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
|
||||
@ -225,7 +225,7 @@ namespace ams::kern::arch::arm64 {
|
||||
}
|
||||
|
||||
void OnTableUpdated() const {
|
||||
cpu::InvalidateTlbByAsid(this->asid);
|
||||
cpu::InvalidateTlbByAsid(m_asid);
|
||||
}
|
||||
|
||||
void OnKernelTableUpdated() const {
|
||||
|
@ -105,50 +105,50 @@ namespace ams::kern::arch::arm64 {
|
||||
ContigType_Contiguous = (0x1ul << 52),
|
||||
};
|
||||
protected:
|
||||
u64 attributes;
|
||||
u64 m_attributes;
|
||||
public:
|
||||
/* Take in a raw attribute. */
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry() : attributes() { /* ... */ }
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : attributes(attr) { /* ... */ }
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry() : m_attributes() { /* ... */ }
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : m_attributes(attr) { /* ... */ }
|
||||
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : attributes(0) { /* ... */ }
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : m_attributes(0) { /* ... */ }
|
||||
|
||||
/* Extend a previous attribute. */
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : attributes(rhs.attributes | new_attr) { /* ... */ }
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : m_attributes(rhs.m_attributes | new_attr) { /* ... */ }
|
||||
|
||||
/* Construct a new attribute. */
|
||||
constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
|
||||
: attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
|
||||
: m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
|
||||
{
|
||||
/* ... */
|
||||
}
|
||||
protected:
|
||||
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
|
||||
return (this->attributes >> offset) & ((1ul << count) - 1);
|
||||
return (m_attributes >> offset) & ((1ul << count) - 1);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
|
||||
return this->attributes & (((1ul << count) - 1) << offset);
|
||||
return m_attributes & (((1ul << count) - 1) << offset);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
|
||||
const u64 mask = ((1ul << count) - 1) << offset;
|
||||
this->attributes &= ~mask;
|
||||
this->attributes |= (value & (mask >> offset)) << offset;
|
||||
m_attributes &= ~mask;
|
||||
m_attributes |= (value & (mask >> offset)) << offset;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
|
||||
const u64 mask = ((1ul << count) - 1) << offset;
|
||||
this->attributes &= ~mask;
|
||||
this->attributes |= (value & mask);
|
||||
m_attributes &= ~mask;
|
||||
m_attributes |= (value & mask);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
|
||||
const u64 mask = 1ul << offset;
|
||||
if (enabled) {
|
||||
this->attributes |= mask;
|
||||
m_attributes |= mask;
|
||||
} else {
|
||||
this->attributes &= ~mask;
|
||||
m_attributes &= ~mask;
|
||||
}
|
||||
}
|
||||
public:
|
||||
@ -167,9 +167,9 @@ namespace ams::kern::arch::arm64 {
|
||||
constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
|
||||
constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
|
||||
constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
|
||||
constexpr ALWAYS_INLINE bool IsBlock() const { return (this->attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
|
||||
constexpr ALWAYS_INLINE bool IsTable() const { return (this->attributes & ExtensionFlag_TestTableMask) == 2; }
|
||||
constexpr ALWAYS_INLINE bool IsEmpty() const { return (this->attributes & ExtensionFlag_TestTableMask) == 0; }
|
||||
constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
|
||||
constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; }
|
||||
constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; }
|
||||
constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
|
||||
|
||||
constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
|
||||
@ -185,21 +185,21 @@ namespace ams::kern::arch::arm64 {
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
|
||||
constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
|
||||
return this->attributes & BaseMask;
|
||||
return m_attributes & BaseMask;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsForMerge(u64 attr) const {
|
||||
constexpr u64 BaseMaskForMerge = ~static_cast<u64>(ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail);
|
||||
return (this->attributes & BaseMaskForMerge) == attr;
|
||||
return (m_attributes & BaseMaskForMerge) == attr;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
|
||||
return this->attributes;
|
||||
return m_attributes;
|
||||
}
|
||||
|
||||
protected:
|
||||
constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
|
||||
return this->attributes;
|
||||
return m_attributes;
|
||||
}
|
||||
};
|
||||
|
||||
@ -262,7 +262,7 @@ namespace ams::kern::arch::arm64 {
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
|
||||
return this->attributes & GetEntryTemplateForL2BlockMask(idx);
|
||||
return m_attributes & GetEntryTemplateForL2BlockMask(idx);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
|
||||
@ -322,7 +322,7 @@ namespace ams::kern::arch::arm64 {
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
|
||||
return this->attributes & GetEntryTemplateForL2BlockMask(idx);
|
||||
return m_attributes & GetEntryTemplateForL2BlockMask(idx);
|
||||
}
|
||||
|
||||
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
|
||||
@ -339,7 +339,7 @@ namespace ams::kern::arch::arm64 {
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
|
||||
return this->attributes & GetEntryTemplateForL3BlockMask(idx);
|
||||
return m_attributes & GetEntryTemplateForL3BlockMask(idx);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
|
||||
@ -376,7 +376,7 @@ namespace ams::kern::arch::arm64 {
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
|
||||
return this->attributes & GetEntryTemplateForL3BlockMask(idx);
|
||||
return m_attributes & GetEntryTemplateForL3BlockMask(idx);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
|
||||
|
@ -77,16 +77,16 @@ namespace ams::kern::arch::arm64 {
|
||||
ALWAYS_INLINE bool ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const;
|
||||
ALWAYS_INLINE bool ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const;
|
||||
private:
|
||||
L1PageTableEntry *table;
|
||||
bool is_kernel;
|
||||
u32 num_entries;
|
||||
L1PageTableEntry *m_table;
|
||||
bool m_is_kernel;
|
||||
u32 m_num_entries;
|
||||
public:
|
||||
ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) const {
|
||||
return table + index * sizeof(PageTableEntry);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) const {
|
||||
return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(this->table), GetL1Index(address) & (this->num_entries - 1)));
|
||||
return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(m_table), GetL1Index(address) & (m_num_entries - 1)));
|
||||
}
|
||||
|
||||
ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
|
||||
@ -105,13 +105,14 @@ namespace ams::kern::arch::arm64 {
|
||||
return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
|
||||
}
|
||||
public:
|
||||
constexpr KPageTableImpl() : table(), is_kernel(), num_entries() { /* ... */ }
|
||||
constexpr KPageTableImpl() : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }
|
||||
|
||||
NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
|
||||
NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
|
||||
L1PageTableEntry *Finalize();
|
||||
|
||||
void Dump(uintptr_t start, size_t size) const;
|
||||
size_t CountPageTables() const;
|
||||
|
||||
bool BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const;
|
||||
bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
|
||||
|
@ -21,260 +21,274 @@ namespace ams::kern::arch::arm64 {
|
||||
|
||||
class KProcessPageTable {
|
||||
private:
|
||||
KPageTable page_table;
|
||||
KPageTable m_page_table;
|
||||
public:
|
||||
constexpr KProcessPageTable() : page_table() { /* ... */ }
|
||||
constexpr KProcessPageTable() : m_page_table() { /* ... */ }
|
||||
|
||||
void Activate(u64 id) {
|
||||
/* Activate the page table with the specified contextidr. */
|
||||
this->page_table.Activate(id);
|
||||
m_page_table.Activate(id);
|
||||
}
|
||||
|
||||
Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager) {
|
||||
return this->page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
|
||||
return m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, mem_block_slab_manager, block_info_manager, pt_manager);
|
||||
}
|
||||
|
||||
void Finalize() { this->page_table.Finalize(); }
|
||||
void Finalize() { m_page_table.Finalize(); }
|
||||
|
||||
Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
|
||||
return this->page_table.SetMemoryPermission(addr, size, perm);
|
||||
return m_page_table.SetMemoryPermission(addr, size, perm);
|
||||
}
|
||||
|
||||
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
|
||||
return this->page_table.SetProcessMemoryPermission(addr, size, perm);
|
||||
return m_page_table.SetProcessMemoryPermission(addr, size, perm);
|
||||
}
|
||||
|
||||
Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
|
||||
return this->page_table.SetMemoryAttribute(addr, size, mask, attr);
|
||||
return m_page_table.SetMemoryAttribute(addr, size, mask, attr);
|
||||
}
|
||||
|
||||
Result SetHeapSize(KProcessAddress *out, size_t size) {
|
||||
return this->page_table.SetHeapSize(out, size);
|
||||
return m_page_table.SetHeapSize(out, size);
|
||||
}
|
||||
|
||||
Result SetMaxHeapSize(size_t size) {
|
||||
return this->page_table.SetMaxHeapSize(size);
|
||||
return m_page_table.SetMaxHeapSize(size);
|
||||
}
|
||||
|
||||
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
|
||||
return this->page_table.QueryInfo(out_info, out_page_info, addr);
|
||||
return m_page_table.QueryInfo(out_info, out_page_info, addr);
|
||||
}
|
||||
|
||||
Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
|
||||
return this->page_table.QueryPhysicalAddress(out, address);
|
||||
return m_page_table.QueryPhysicalAddress(out, address);
|
||||
}
|
||||
|
||||
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
|
||||
return this->page_table.QueryStaticMapping(out, address, size);
|
||||
return m_page_table.QueryStaticMapping(out, address, size);
|
||||
}
|
||||
|
||||
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
|
||||
return this->page_table.QueryIoMapping(out, address, size);
|
||||
return m_page_table.QueryIoMapping(out, address, size);
|
||||
}
|
||||
|
||||
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return this->page_table.MapMemory(dst_address, src_address, size);
|
||||
return m_page_table.MapMemory(dst_address, src_address, size);
|
||||
}
|
||||
|
||||
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return this->page_table.UnmapMemory(dst_address, src_address, size);
|
||||
return m_page_table.UnmapMemory(dst_address, src_address, size);
|
||||
}
|
||||
|
||||
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return this->page_table.MapCodeMemory(dst_address, src_address, size);
|
||||
return m_page_table.MapCodeMemory(dst_address, src_address, size);
|
||||
}
|
||||
|
||||
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
|
||||
return this->page_table.UnmapCodeMemory(dst_address, src_address, size);
|
||||
return m_page_table.UnmapCodeMemory(dst_address, src_address, size);
|
||||
}
|
||||
|
||||
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
|
||||
return this->page_table.MapIo(phys_addr, size, perm);
|
||||
return m_page_table.MapIo(phys_addr, size, perm);
|
||||
}
|
||||
|
||||
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
|
||||
return this->page_table.MapStatic(phys_addr, size, perm);
|
||||
return m_page_table.MapStatic(phys_addr, size, perm);
|
||||
}
|
||||
|
||||
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
|
||||
return this->page_table.MapRegion(region_type, perm);
|
||||
return m_page_table.MapRegion(region_type, perm);
|
||||
}
|
||||
|
||||
Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
|
||||
return this->page_table.MapPageGroup(addr, pg, state, perm);
|
||||
return m_page_table.MapPageGroup(addr, pg, state, perm);
|
||||
}
|
||||
|
||||
Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
|
||||
return this->page_table.UnmapPageGroup(address, pg, state);
|
||||
return m_page_table.UnmapPageGroup(address, pg, state);
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
|
||||
return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
|
||||
return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm);
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return this->page_table.MapPages(out_addr, num_pages, state, perm);
|
||||
return m_page_table.MapPages(out_addr, num_pages, state, perm);
|
||||
}
|
||||
|
||||
Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
|
||||
return this->page_table.MapPages(address, num_pages, state, perm);
|
||||
return m_page_table.MapPages(address, num_pages, state, perm);
|
||||
}
|
||||
|
||||
Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
|
||||
return this->page_table.UnmapPages(addr, num_pages, state);
|
||||
return m_page_table.UnmapPages(addr, num_pages, state);
|
||||
}
|
||||
|
||||
Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
|
||||
return this->page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
|
||||
return m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr);
|
||||
}
|
||||
|
||||
Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
|
||||
return this->page_table.InvalidateProcessDataCache(address, size);
|
||||
return m_page_table.InvalidateProcessDataCache(address, size);
|
||||
}
|
||||
|
||||
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
|
||||
return this->page_table.ReadDebugMemory(buffer, address, size);
|
||||
return m_page_table.ReadDebugMemory(buffer, address, size);
|
||||
}
|
||||
|
||||
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
|
||||
return this->page_table.WriteDebugMemory(address, buffer, size);
|
||||
return m_page_table.WriteDebugMemory(address, buffer, size);
|
||||
}
|
||||
|
||||
Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
|
||||
return this->page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
|
||||
return m_page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned);
|
||||
}
|
||||
|
||||
Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
|
||||
return this->page_table.UnlockForDeviceAddressSpace(address, size);
|
||||
return m_page_table.UnlockForDeviceAddressSpace(address, size);
|
||||
}
|
||||
|
||||
Result MakePageGroupForUnmapDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size) {
|
||||
return this->page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
|
||||
return m_page_table.MakePageGroupForUnmapDeviceAddressSpace(out, address, size);
|
||||
}
|
||||
|
||||
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size, size_t mapped_size) {
|
||||
return this->page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
|
||||
return m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size, mapped_size);
|
||||
}
|
||||
|
||||
Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
|
||||
return this->page_table.LockForIpcUserBuffer(out, address, size);
|
||||
return m_page_table.LockForIpcUserBuffer(out, address, size);
|
||||
}
|
||||
|
||||
Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
|
||||
return this->page_table.UnlockForIpcUserBuffer(address, size);
|
||||
return m_page_table.UnlockForIpcUserBuffer(address, size);
|
||||
}
|
||||
|
||||
Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
|
||||
return this->page_table.LockForTransferMemory(out, address, size, perm);
|
||||
return m_page_table.LockForTransferMemory(out, address, size, perm);
|
||||
}
|
||||
|
||||
Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
|
||||
return this->page_table.UnlockForTransferMemory(address, size, pg);
|
||||
return m_page_table.UnlockForTransferMemory(address, size, pg);
|
||||
}
|
||||
|
||||
Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
|
||||
return this->page_table.LockForCodeMemory(out, address, size);
|
||||
return m_page_table.LockForCodeMemory(out, address, size);
|
||||
}
|
||||
|
||||
Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
|
||||
return this->page_table.UnlockForCodeMemory(address, size, pg);
|
||||
return m_page_table.UnlockForCodeMemory(address, size, pg);
|
||||
}
|
||||
|
||||
Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return this->page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
return m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
}
|
||||
|
||||
Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return this->page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
return m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
}
|
||||
|
||||
Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
||||
return this->page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
|
||||
return m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
|
||||
}
|
||||
|
||||
Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
|
||||
return this->page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
|
||||
return m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr);
|
||||
}
|
||||
|
||||
Result CopyMemoryFromHeapToHeap(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return this->page_table.CopyMemoryFromHeapToHeap(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
return m_page_table.CopyMemoryFromHeapToHeap(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
}
|
||||
|
||||
Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
|
||||
return this->page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
return m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr);
|
||||
}
|
||||
|
||||
Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
|
||||
return this->page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.page_table, test_perm, dst_state, send);
|
||||
return m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, test_perm, dst_state, send);
|
||||
}
|
||||
|
||||
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process) {
|
||||
return this->page_table.CleanupForIpcServer(address, size, dst_state, server_process);
|
||||
return m_page_table.CleanupForIpcServer(address, size, dst_state, server_process);
|
||||
}
|
||||
|
||||
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
|
||||
return this->page_table.CleanupForIpcClient(address, size, dst_state);
|
||||
return m_page_table.CleanupForIpcClient(address, size, dst_state);
|
||||
}
|
||||
|
||||
Result MapPhysicalMemory(KProcessAddress address, size_t size) {
|
||||
return this->page_table.MapPhysicalMemory(address, size);
|
||||
return m_page_table.MapPhysicalMemory(address, size);
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
|
||||
return this->page_table.UnmapPhysicalMemory(address, size);
|
||||
return m_page_table.UnmapPhysicalMemory(address, size);
|
||||
}
|
||||
|
||||
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
|
||||
return this->page_table.MapPhysicalMemoryUnsafe(address, size);
|
||||
return m_page_table.MapPhysicalMemoryUnsafe(address, size);
|
||||
}
|
||||
|
||||
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
return this->page_table.UnmapPhysicalMemoryUnsafe(address, size);
return m_page_table.UnmapPhysicalMemoryUnsafe(address, size);
}

void DumpTable() const {
return this->page_table.DumpTable();
}
|
||||
|
||||
void DumpMemoryBlocks() const {
|
||||
return this->page_table.DumpMemoryBlocks();
|
||||
return m_page_table.DumpMemoryBlocks();
|
||||
}
|
||||
|
||||
void DumpPageTable() const {
|
||||
return m_page_table.DumpPageTable();
|
||||
}
|
||||
|
||||
size_t CountPageTables() const {
|
||||
return m_page_table.CountPageTables();
|
||||
}
|
||||
|
||||
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
|
||||
return this->page_table.GetPhysicalAddress(out, address);
|
||||
return m_page_table.GetPhysicalAddress(out, address);
|
||||
}
|
||||
|
||||
bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); }
|
||||
bool Contains(KProcessAddress addr, size_t size) const { return m_page_table.Contains(addr, size); }
|
||||
|
||||
bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInAliasRegion(addr, size); }
|
||||
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInUnsafeAliasRegion(addr, size); }
|
||||
bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInAliasRegion(addr, size); }
|
||||
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInUnsafeAliasRegion(addr, size); }
|
||||
|
||||
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); }
|
||||
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return m_page_table.CanContain(addr, size, state); }
|
||||
|
||||
KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); }
|
||||
KProcessAddress GetHeapRegionStart() const { return this->page_table.GetHeapRegionStart(); }
|
||||
KProcessAddress GetAliasRegionStart() const { return this->page_table.GetAliasRegionStart(); }
|
||||
KProcessAddress GetStackRegionStart() const { return this->page_table.GetStackRegionStart(); }
|
||||
KProcessAddress GetKernelMapRegionStart() const { return this->page_table.GetKernelMapRegionStart(); }
|
||||
KProcessAddress GetAliasCodeRegionStart() const { return this->page_table.GetAliasCodeRegionStart(); }
|
||||
KProcessAddress GetAddressSpaceStart() const { return m_page_table.GetAddressSpaceStart(); }
|
||||
KProcessAddress GetHeapRegionStart() const { return m_page_table.GetHeapRegionStart(); }
|
||||
KProcessAddress GetAliasRegionStart() const { return m_page_table.GetAliasRegionStart(); }
|
||||
KProcessAddress GetStackRegionStart() const { return m_page_table.GetStackRegionStart(); }
|
||||
KProcessAddress GetKernelMapRegionStart() const { return m_page_table.GetKernelMapRegionStart(); }
|
||||
KProcessAddress GetAliasCodeRegionStart() const { return m_page_table.GetAliasCodeRegionStart(); }
|
||||
|
||||
size_t GetAddressSpaceSize() const { return this->page_table.GetAddressSpaceSize(); }
|
||||
size_t GetHeapRegionSize() const { return this->page_table.GetHeapRegionSize(); }
|
||||
size_t GetAliasRegionSize() const { return this->page_table.GetAliasRegionSize(); }
|
||||
size_t GetStackRegionSize() const { return this->page_table.GetStackRegionSize(); }
|
||||
size_t GetKernelMapRegionSize() const { return this->page_table.GetKernelMapRegionSize(); }
|
||||
size_t GetAliasCodeRegionSize() const { return this->page_table.GetAliasCodeRegionSize(); }
|
||||
size_t GetAddressSpaceSize() const { return m_page_table.GetAddressSpaceSize(); }
|
||||
size_t GetHeapRegionSize() const { return m_page_table.GetHeapRegionSize(); }
|
||||
size_t GetAliasRegionSize() const { return m_page_table.GetAliasRegionSize(); }
|
||||
size_t GetStackRegionSize() const { return m_page_table.GetStackRegionSize(); }
|
||||
size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
|
||||
size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }
|
||||
|
||||
size_t GetNormalMemorySize() const { return this->page_table.GetNormalMemorySize(); }
|
||||
size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }
|
||||
|
||||
u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); }
|
||||
size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }
|
||||
size_t GetCodeDataSize() const { return m_page_table.GetCodeDataSize(); }
|
||||
|
||||
size_t GetAliasCodeSize() const { return m_page_table.GetAliasCodeSize(); }
|
||||
size_t GetAliasCodeDataSize() const { return m_page_table.GetAliasCodeDataSize(); }
|
||||
|
||||
u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }
|
||||
|
||||
KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) const {
|
||||
return this->page_table.GetHeapPhysicalAddress(address);
|
||||
return m_page_table.GetHeapPhysicalAddress(address);
|
||||
}
|
||||
|
||||
KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) const {
|
||||
return m_page_table.GetHeapVirtualAddress(address);
|
||||
}
|
||||
|
||||
KBlockInfoManager *GetBlockInfoManager() {
|
||||
return this->page_table.GetBlockInfoManager();
|
||||
return m_page_table.GetBlockInfoManager();
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -21,19 +21,19 @@ namespace ams::kern::arch::arm64 {

class KNotAlignedSpinLock {
private:
u32 packed_tickets;
u32 m_packed_tickets;
public:
constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ }
constexpr KNotAlignedSpinLock() : m_packed_tickets(0) { /* ... */ }

ALWAYS_INLINE void Lock() {
u32 tmp0, tmp1, tmp2;

__asm__ __volatile__(
" prfm pstl1keep, %[packed_tickets]\n"
" prfm pstl1keep, %[m_packed_tickets]\n"
"1:\n"
" ldaxr %w[tmp0], %[packed_tickets]\n"
" ldaxr %w[tmp0], %[m_packed_tickets]\n"
" add %w[tmp2], %w[tmp0], #0x10000\n"
" stxr %w[tmp1], %w[tmp2], %[packed_tickets]\n"
" stxr %w[tmp1], %w[tmp2], %[m_packed_tickets]\n"
" cbnz %w[tmp1], 1b\n"
" \n"
" and %w[tmp1], %w[tmp0], #0xFFFF\n"
@ -42,21 +42,21 @@ namespace ams::kern::arch::arm64 {
" sevl\n"
"2:\n"
" wfe\n"
" ldaxrh %w[tmp1], %[packed_tickets]\n"
" ldaxrh %w[tmp1], %[m_packed_tickets]\n"
" cmp %w[tmp1], %w[tmp0], lsr #16\n"
" b.ne 2b\n"
"3:\n"
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [packed_tickets]"+Q"(this->packed_tickets)
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [m_packed_tickets]"+Q"(m_packed_tickets)
:
: "cc", "memory"
);
}

ALWAYS_INLINE void Unlock() {
const u32 value = this->packed_tickets + 1;
const u32 value = m_packed_tickets + 1;
__asm__ __volatile__(
" stlrh %w[value], %[packed_tickets]\n"
: [packed_tickets]"+Q"(this->packed_tickets)
" stlrh %w[value], %[m_packed_tickets]\n"
: [m_packed_tickets]"+Q"(m_packed_tickets)
: [value]"r"(value)
: "memory"
);
@ -66,39 +66,39 @@ namespace ams::kern::arch::arm64 {

class KAlignedSpinLock {
private:
alignas(cpu::DataCacheLineSize) u16 current_ticket;
alignas(cpu::DataCacheLineSize) u16 next_ticket;
alignas(cpu::DataCacheLineSize) u16 m_current_ticket;
alignas(cpu::DataCacheLineSize) u16 m_next_ticket;
public:
constexpr KAlignedSpinLock() : current_ticket(0), next_ticket(0) { /* ... */ }
constexpr KAlignedSpinLock() : m_current_ticket(0), m_next_ticket(0) { /* ... */ }

ALWAYS_INLINE void Lock() {
u32 tmp0, tmp1, got_lock;

__asm__ __volatile__(
" prfm pstl1keep, %[next_ticket]\n"
" prfm pstl1keep, %[m_next_ticket]\n"
"1:\n"
" ldaxrh %w[tmp0], %[next_ticket]\n"
" ldaxrh %w[tmp0], %[m_next_ticket]\n"
" add %w[tmp1], %w[tmp0], #0x1\n"
" stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n"
" stxrh %w[got_lock], %w[tmp1], %[m_next_ticket]\n"
" cbnz %w[got_lock], 1b\n"
" \n"
" sevl\n"
"2:\n"
" wfe\n"
" ldaxrh %w[tmp1], %[current_ticket]\n"
" ldaxrh %w[tmp1], %[m_current_ticket]\n"
" cmp %w[tmp1], %w[tmp0]\n"
" b.ne 2b\n"
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket)
: [current_ticket]"Q"(this->current_ticket)
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [m_next_ticket]"+Q"(m_next_ticket)
: [m_current_ticket]"Q"(m_current_ticket)
: "cc", "memory"
);
}

ALWAYS_INLINE void Unlock() {
const u32 value = this->current_ticket + 1;
const u32 value = m_current_ticket + 1;
__asm__ __volatile__(
" stlrh %w[value], %[current_ticket]\n"
: [current_ticket]"+Q"(this->current_ticket)
" stlrh %w[value], %[m_current_ticket]\n"
: [m_current_ticket]"+Q"(m_current_ticket)
: [value]"r"(value)
: "memory"
);
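Both classes above are ticket locks: Lock() atomically takes the next ticket and spins (wfe) until the current ticket matches it, and Unlock() publishes current + 1 with a store-release (stlrh). KNotAlignedSpinLock packs both 16-bit tickets into one u32, while KAlignedSpinLock keeps them on separate cache lines. A simplified portable sketch of the same idea using std::atomic, illustrative only; it omits the wfe/sev waiting and the exclusive-monitor details of the assembly above:

#include <atomic>
#include <cstdint>

class TicketSpinLock {
    private:
        /* alignas(64) is a stand-in for cpu::DataCacheLineSize. */
        alignas(64) std::atomic<uint16_t> m_current_ticket{0};
        alignas(64) std::atomic<uint16_t> m_next_ticket{0};
    public:
        void Lock() {
            /* Take a ticket (the ldaxrh/add/stxrh loop in the assembly). */
            const uint16_t my_ticket = m_next_ticket.fetch_add(1, std::memory_order_relaxed);
            /* Wait until it is our turn (the wfe/ldaxrh/cmp loop). */
            while (m_current_ticket.load(std::memory_order_acquire) != my_ticket) {
                /* busy-wait; the kernel parks on wfe instead of spinning hot */
            }
        }

        void Unlock() {
            /* Hand the lock to the next waiter with release semantics (stlrh). */
            const uint16_t next = static_cast<uint16_t>(m_current_ticket.load(std::memory_order_relaxed) + 1);
            m_current_ticket.store(next, std::memory_order_release);
        }
};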
@ -22,16 +22,16 @@ namespace ams::kern::arch::arm64 {

class KSupervisorPageTable {
private:
KPageTable page_table;
u64 ttbr0_identity[cpu::NumCores];
KPageTable m_page_table;
u64 m_ttbr0_identity[cpu::NumCores];
public:
constexpr KSupervisorPageTable() : page_table(), ttbr0_identity() { /* ... */ }
constexpr KSupervisorPageTable() : m_page_table(), m_ttbr0_identity() { /* ... */ }

NOINLINE void Initialize(s32 core_id);

void Activate() {
/* Activate, using process id = 0xFFFFFFFF */
this->page_table.Activate(0xFFFFFFFF);
m_page_table.Activate(0xFFFFFFFF);
}

void ActivateForInit() {
@ -42,26 +42,38 @@ namespace ams::kern::arch::arm64 {
}

Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
return m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm);
}

Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
return this->page_table.UnmapPages(address, num_pages, state);
return m_page_table.UnmapPages(address, num_pages, state);
}

Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
return this->page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
return m_page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm);
}

Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
return this->page_table.UnmapPageGroup(address, pg, state);
return m_page_table.UnmapPageGroup(address, pg, state);
}

bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
return this->page_table.GetPhysicalAddress(out, address);
return m_page_table.GetPhysicalAddress(out, address);
}

constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return this->ttbr0_identity[core_id]; }
constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return m_ttbr0_identity[core_id]; }

void DumpMemoryBlocks() const {
return m_page_table.DumpMemoryBlocks();
}

void DumpPageTable() const {
return m_page_table.DumpPageTable();
}

size_t CountPageTables() const {
return m_page_table.CountPageTables();
}
};

}
@ -45,19 +45,19 @@ namespace ams::kern::arch::arm64 {
u64 x28;
u64 x29;
};
} callee_saved;
u64 lr;
u64 sp;
u64 cpacr;
u64 fpcr;
u64 fpsr;
alignas(0x10) u128 fpu_registers[NumFpuRegisters];
bool locked;
} m_callee_saved;
u64 m_lr;
u64 m_sp;
u64 m_cpacr;
u64 m_fpcr;
u64 m_fpsr;
alignas(0x10) u128 m_fpu_registers[NumFpuRegisters];
bool m_locked;
private:
static void RestoreFpuRegisters64(const KThreadContext &);
static void RestoreFpuRegisters32(const KThreadContext &);
public:
constexpr explicit KThreadContext() : callee_saved(), lr(), sp(), cpacr(), fpcr(), fpsr(), fpu_registers(), locked() { /* ... */ }
constexpr explicit KThreadContext() : m_callee_saved(), m_lr(), m_sp(), m_cpacr(), m_fpcr(), m_fpsr(), m_fpu_registers(), m_locked() { /* ... */ }

Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main);
Result Finalize();
@ -66,17 +66,17 @@ namespace ams::kern::arch::arm64 {

static void FpuContextSwitchHandler(KThread *thread);

u32 GetFpcr() const { return this->fpcr; }
u32 GetFpsr() const { return this->fpsr; }
u32 GetFpcr() const { return m_fpcr; }
u32 GetFpsr() const { return m_fpsr; }

void SetFpcr(u32 v) { this->fpcr = v; }
void SetFpsr(u32 v) { this->fpsr = v; }
void SetFpcr(u32 v) { m_fpcr = v; }
void SetFpsr(u32 v) { m_fpsr = v; }

void CloneFpuStatus();

void SetFpuRegisters(const u128 *v, bool is_64_bit);

const u128 *GetFpuRegisters() const { return this->fpu_registers; }
const u128 *GetFpuRegisters() const { return m_fpu_registers; }
public:
static void OnThreadTerminating(const KThread *thread);
};
@ -27,14 +27,36 @@ namespace ams::kern::board::generic {
public:
constexpr KDevicePageTable() { /* ... */ }

Result ALWAYS_INLINE Initialize(u64 space_address, u64 space_size) { return ams::kern::svc::ResultNotImplemented(); }
Result ALWAYS_INLINE Initialize(u64 space_address, u64 space_size) {
MESOSPHERE_UNUSED(space_address, space_size);
return ams::kern::svc::ResultNotImplemented();
}

void ALWAYS_INLINE Finalize() { /* ... */ }

Result ALWAYS_INLINE Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) { return ams::kern::svc::ResultNotImplemented(); }
Result ALWAYS_INLINE Detach(ams::svc::DeviceName device_name) { return ams::kern::svc::ResultNotImplemented(); }
Result ALWAYS_INLINE Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) {
MESOSPHERE_UNUSED(device_name, space_address, space_size);
return ams::kern::svc::ResultNotImplemented();
}

Result ALWAYS_INLINE Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) { return ams::kern::svc::ResultNotImplemented(); }
Result ALWAYS_INLINE Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address) { return ams::kern::svc::ResultNotImplemented(); }
Result ALWAYS_INLINE Detach(ams::svc::DeviceName device_name) {
MESOSPHERE_UNUSED(device_name);
return ams::kern::svc::ResultNotImplemented();
}

Result ALWAYS_INLINE Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) {
MESOSPHERE_UNUSED(out_mapped_size, pg, device_address, device_perm, refresh_mappings);
return ams::kern::svc::ResultNotImplemented();
}

Result ALWAYS_INLINE Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address) {
MESOSPHERE_UNUSED(pg, device_address);
return ams::kern::svc::ResultNotImplemented();
}

void ALWAYS_INLINE Unmap(KDeviceVirtualAddress device_address, size_t size) {
MESOSPHERE_UNUSED(device_address, size);
}
public:
static ALWAYS_INLINE void Initialize() { /* ... */ }
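The change above routes each stub's parameters through MESOSPHERE_UNUSED before returning ResultNotImplemented, so the generic-board stubs build cleanly with unused-parameter warnings enabled. The macro's definition is not shown in this diff; a helper of this kind is typically a variadic no-op along these lines (hypothetical sketch, not Atmosphere's definition):

#include <cstddef>

namespace sketch {

    /* Hypothetical stand-in for an "unused arguments" helper: evaluates nothing and
       silences -Wunused-parameter in stubbed-out functions. */
    template<typename... Args>
    constexpr void Unused(Args &&... args) {
        (static_cast<void>(args), ...);
    }

    int StubbedAttach(int device_name, std::size_t space_address, std::size_t space_size) {
        Unused(device_name, space_address, space_size);  /* parameters intentionally unused in the stub */
        return -1;                                       /* "not implemented" */
    }

}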
@ -27,13 +27,13 @@ namespace ams::kern::board::nintendo::nx {
private:
static constexpr size_t TableCount = 4;
private:
KVirtualAddress tables[TableCount];
u8 table_asids[TableCount];
u64 attached_device;
u32 attached_value;
u32 detached_value;
u32 hs_attached_value;
u32 hs_detached_value;
KVirtualAddress m_tables[TableCount];
u8 m_table_asids[TableCount];
u64 m_attached_device;
u32 m_attached_value;
u32 m_detached_value;
u32 m_hs_attached_value;
u32 m_hs_detached_value;
private:
static ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress addr) {
const KMemoryRegion *hint = nullptr;
@ -61,7 +61,7 @@ namespace ams::kern::board::nintendo::nx {
return KPageTable::GetPageTablePhysicalAddress(addr);
}
public:
constexpr KDevicePageTable() : tables(), table_asids(), attached_device(), attached_value(), detached_value(), hs_attached_value(), hs_detached_value() { /* ... */ }
constexpr KDevicePageTable() : m_tables(), m_table_asids(), m_attached_device(), m_attached_value(), m_detached_value(), m_hs_attached_value(), m_hs_detached_value() { /* ... */ }

Result Initialize(u64 space_address, u64 space_size);
void Finalize();

@ -30,6 +30,7 @@ namespace ams::kern::board::nintendo::nx {
static size_t GetApplicationPoolSize();
static size_t GetAppletPoolSize();
static size_t GetMinimumNonSecureSystemPoolSize();
static u8 GetDebugLogUartPort();

/* Randomness. */
static void GenerateRandomBytes(void *dst, size_t size);
@ -26,6 +26,7 @@
#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
#define MESOSPHERE_ENABLE_ASSERTIONS
#define MESOSPHERE_ENABLE_DEBUG_PRINT
#define MESOSPHERE_ENABLE_KERNEL_STACK_USAGE
#endif

//#define MESOSPHERE_BUILD_FOR_TRACING

@ -40,7 +40,7 @@ namespace ams::kern {
#ifndef MESOSPHERE_DEBUG_LOG_SELECTED

#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
#define MESOSPHERE_DEBUG_LOG_USE_UART_A
#define MESOSPHERE_DEBUG_LOG_USE_UART
#else
#error "Unknown board for Default Debug Log Source"
#endif
@ -23,9 +23,9 @@ namespace ams::kern {
public:
using ThreadTree = KConditionVariable::ThreadTree;
private:
ThreadTree tree;
ThreadTree m_tree;
public:
constexpr KAddressArbiter() : tree() { /* ... */ }
constexpr KAddressArbiter() : m_tree() { /* ... */ }

Result SignalToAddress(uintptr_t addr, ams::svc::SignalType type, s32 value, s32 count) {
switch (type) {

@ -32,20 +32,20 @@ namespace ams::kern {
Type_Count,
};
private:
size_t bit_width;
size_t address;
size_t size;
Type type;
size_t m_bit_width;
size_t m_address;
size_t m_size;
Type m_type;
public:
static uintptr_t GetAddressSpaceStart(size_t width, Type type);
static size_t GetAddressSpaceSize(size_t width, Type type);

constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : bit_width(bw), address(a), size(s), type(t) { /* ... */ }
constexpr KAddressSpaceInfo(size_t bw, size_t a, size_t s, Type t) : m_bit_width(bw), m_address(a), m_size(s), m_type(t) { /* ... */ }

constexpr size_t GetWidth() const { return this->bit_width; }
constexpr size_t GetAddress() const { return this->address; }
constexpr size_t GetSize() const { return this->size; }
constexpr Type GetType() const { return this->type; }
constexpr size_t GetWidth() const { return m_bit_width; }
constexpr size_t GetAddress() const { return m_address; }
constexpr size_t GetSize() const { return m_size; }
constexpr Type GetType() const { return m_type; }
};

}
@ -23,38 +23,38 @@ namespace ams::kern {
|
||||
private:
|
||||
static constexpr u64 AllowedAffinityMask = (1ul << cpu::NumCores) - 1;
|
||||
private:
|
||||
u64 mask;
|
||||
u64 m_mask;
|
||||
private:
|
||||
static constexpr ALWAYS_INLINE u64 GetCoreBit(s32 core) {
|
||||
MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
|
||||
return (1ul << core);
|
||||
}
|
||||
public:
|
||||
constexpr ALWAYS_INLINE KAffinityMask() : mask(0) { MESOSPHERE_ASSERT_THIS(); }
|
||||
constexpr ALWAYS_INLINE KAffinityMask() : m_mask(0) { MESOSPHERE_ASSERT_THIS(); }
|
||||
|
||||
constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return this->mask; }
|
||||
constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return m_mask; }
|
||||
|
||||
constexpr ALWAYS_INLINE void SetAffinityMask(u64 new_mask) {
|
||||
MESOSPHERE_ASSERT((new_mask & ~AllowedAffinityMask) == 0);
|
||||
this->mask = new_mask;
|
||||
m_mask = new_mask;
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE bool GetAffinity(s32 core) const {
|
||||
return this->mask & GetCoreBit(core);
|
||||
return m_mask & GetCoreBit(core);
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetAffinity(s32 core, bool set) {
|
||||
MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
|
||||
|
||||
if (set) {
|
||||
this->mask |= GetCoreBit(core);
|
||||
m_mask |= GetCoreBit(core);
|
||||
} else {
|
||||
this->mask &= ~GetCoreBit(core);
|
||||
m_mask &= ~GetCoreBit(core);
|
||||
}
|
||||
}
|
||||
|
||||
constexpr ALWAYS_INLINE void SetAll() {
|
||||
this->mask = AllowedAffinityMask;
|
||||
m_mask = AllowedAffinityMask;
|
||||
}
|
||||
};
|
||||
|
||||
|
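The KAffinityMask hunk above is pure bit manipulation: one bit per CPU core, constrained by AllowedAffinityMask. A minimal standalone sketch of the same idea (hypothetical names, plain C++, not Atmosphère's actual types) is:

    #include <cassert>
    #include <cstdint>

    constexpr int NumCores = 4;                                      /* Assumed core count for the sketch. */
    constexpr uint64_t AllowedAffinityMask = (1ull << NumCores) - 1;

    struct AffinityMask {
        uint64_t mask = 0;

        void SetAffinity(int core, bool set) {
            assert(0 <= core && core < NumCores);
            const uint64_t bit = 1ull << core;
            if (set) { mask |= bit; } else { mask &= ~bit; }
        }

        bool GetAffinity(int core) const { return (mask >> core) & 1; }
        void SetAll() { mask = AllowedAffinityMask; }
    };

The kernel version layers ALWAYS_INLINE and MESOSPHERE_ASSERT wrappers on top of the same operations.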
@ -46,13 +46,13 @@ namespace ams::kern {
protected:
class TypeObj {
private:
const char *name;
ClassTokenType class_token;
const char *m_name;
ClassTokenType m_class_token;
public:
constexpr explicit TypeObj(const char *n, ClassTokenType tok) : name(n), class_token(tok) { /* ... */ }
constexpr explicit TypeObj(const char *n, ClassTokenType tok) : m_name(n), m_class_token(tok) { /* ... */ }

constexpr ALWAYS_INLINE const char *GetName() const { return this->name; }
constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return this->class_token; }
constexpr ALWAYS_INLINE const char *GetName() const { return m_name; }
constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return m_class_token; }

constexpr ALWAYS_INLINE bool operator==(const TypeObj &rhs) {
return this->GetClassToken() == rhs.GetClassToken();

@ -69,11 +69,11 @@ namespace ams::kern {
private:
MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
private:
std::atomic<u32> ref_count;
std::atomic<u32> m_ref_count;
public:
static KAutoObject *Create(KAutoObject *ptr);
public:
constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { MESOSPHERE_ASSERT_THIS(); }
constexpr ALWAYS_INLINE explicit KAutoObject() : m_ref_count(0) { MESOSPHERE_ASSERT_THIS(); }
virtual ~KAutoObject() { MESOSPHERE_ASSERT_THIS(); }

/* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. */

@ -85,7 +85,7 @@ namespace ams::kern {
virtual KProcess *GetOwner() const { return nullptr; }

u32 GetReferenceCount() const {
return this->ref_count;
return m_ref_count;
}

ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const {

@ -124,14 +124,14 @@ namespace ams::kern {
MESOSPHERE_ASSERT_THIS();

/* Atomically increment the reference count, only if it's positive. */
u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire);
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
if (AMS_UNLIKELY(cur_ref_count == 0)) {
MESOSPHERE_AUDIT(cur_ref_count != 0);
return false;
}
MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1);
} while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed));
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed));

return true;
}

@ -140,10 +140,10 @@ namespace ams::kern {
MESOSPHERE_ASSERT_THIS();

/* Atomically decrement the reference count, not allowing it to become negative. */
u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire);
u32 cur_ref_count = m_ref_count.load(std::memory_order_acquire);
do {
MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0);
} while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed));
} while (!m_ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed));

/* If ref count hits zero, destroy the object. */
if (cur_ref_count - 1 == 0) {
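The Open() and Close() hunks above implement the standard compare-and-swap retry loop for an intrusive reference count: Open() refuses to revive an object whose count has already reached zero, and Close() destroys the object when the count drops to zero. A simplified, self-contained sketch of that pattern (not the kernel's exact code) looks like:

    #include <atomic>
    #include <cstdint>

    class RefCounted {
        public:
            /* Returns false if the object is already dead (count == 0). */
            bool Open() {
                uint32_t cur = m_ref_count.load(std::memory_order_acquire);
                do {
                    if (cur == 0) {
                        return false;
                    }
                } while (!m_ref_count.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
                return true;
            }

            void Close() {
                uint32_t cur = m_ref_count.load(std::memory_order_acquire);
                do {
                    /* cur > 0 is expected here; the kernel aborts on underflow. */
                } while (!m_ref_count.compare_exchange_weak(cur, cur - 1, std::memory_order_relaxed));
                if (cur - 1 == 0) {
                    Destroy();
                }
            }

        private:
            void Destroy() { delete this; }
            std::atomic<uint32_t> m_ref_count{1};
    };

compare_exchange_weak reloads the current value into cur on failure, so each retry re-checks the zero and overflow conditions, just as the kernel code above does.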
@ -185,44 +185,44 @@ namespace ams::kern {
template<typename U>
friend class KScopedAutoObject;
private:
T *obj;
T *m_obj;
private:
constexpr ALWAYS_INLINE void Swap(KScopedAutoObject &rhs) {
std::swap(this->obj, rhs.obj);
std::swap(m_obj, rhs.m_obj);
}
public:
constexpr ALWAYS_INLINE KScopedAutoObject() : obj(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) {
if (this->obj != nullptr) {
this->obj->Open();
constexpr ALWAYS_INLINE KScopedAutoObject() : m_obj(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : m_obj(o) {
if (m_obj != nullptr) {
m_obj->Open();
}
}

~KScopedAutoObject() {
if (this->obj != nullptr) {
this->obj->Close();
if (m_obj != nullptr) {
m_obj->Close();
}
this->obj = nullptr;
m_obj = nullptr;
}

template<typename U> requires (std::derived_from<T, U> || std::derived_from<U, T>)
constexpr KScopedAutoObject(KScopedAutoObject<U> &&rhs) {
if constexpr (std::derived_from<U, T>) {
/* Upcast. */
this->obj = rhs.obj;
rhs.obj = nullptr;
m_obj = rhs.m_obj;
rhs.m_obj = nullptr;
} else {
/* Downcast. */
T *derived = nullptr;
if (rhs.obj != nullptr) {
derived = rhs.obj->template DynamicCast<T *>();
if (rhs.m_obj != nullptr) {
derived = rhs.m_obj->template DynamicCast<T *>();
if (derived == nullptr) {
rhs.obj->Close();
rhs.m_obj->Close();
}
}

this->obj = derived;
rhs.obj = nullptr;
m_obj = derived;
rhs.m_obj = nullptr;
}
}

@ -231,19 +231,19 @@ namespace ams::kern {
return *this;
}

constexpr ALWAYS_INLINE T *operator->() { return this->obj; }
constexpr ALWAYS_INLINE T &operator*() { return *this->obj; }
constexpr ALWAYS_INLINE T *operator->() { return m_obj; }
constexpr ALWAYS_INLINE T &operator*() { return *m_obj; }

constexpr ALWAYS_INLINE void Reset(T *o) {
KScopedAutoObject(o).Swap(*this);
}

constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return this->obj; }
constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return m_obj; }

constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = this->obj; this->obj = nullptr; return ret; }
constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = m_obj; m_obj = nullptr; return ret; }

constexpr ALWAYS_INLINE bool IsNull() const { return this->obj == nullptr; }
constexpr ALWAYS_INLINE bool IsNotNull() const { return this->obj != nullptr; }
constexpr ALWAYS_INLINE bool IsNull() const { return m_obj == nullptr; }
constexpr ALWAYS_INLINE bool IsNotNull() const { return m_obj != nullptr; }
};

@ -28,30 +28,30 @@ namespace ams::kern {
public:
class ListAccessor : public KScopedLightLock {
private:
ListType &list;
ListType &m_list;
public:
explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->lock), list(container->object_list) { /* ... */ }
explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.lock), list(container.object_list) { /* ... */ }
explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->m_lock), m_list(container->m_object_list) { /* ... */ }
explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.m_lock), m_list(container.m_object_list) { /* ... */ }

typename ListType::iterator begin() const {
return this->list.begin();
return m_list.begin();
}

typename ListType::iterator end() const {
return this->list.end();
return m_list.end();
}

typename ListType::iterator find(typename ListType::const_reference ref) const {
return this->list.find(ref);
return m_list.find(ref);
}
};

friend class ListAccessor;
private:
KLightLock lock;
ListType object_list;
KLightLock m_lock;
ListType m_object_list;
public:
constexpr KAutoObjectWithListContainer() : lock(), object_list() { MESOSPHERE_ASSERT_THIS(); }
constexpr KAutoObjectWithListContainer() : m_lock(), m_object_list() { MESOSPHERE_ASSERT_THIS(); }

void Initialize() { MESOSPHERE_ASSERT_THIS(); }
void Finalize() { MESOSPHERE_ASSERT_THIS(); }
@ -29,10 +29,10 @@ namespace ams::kern {
private:
/* NOTE: Official KBeta has size 0x88, corresponding to 0x58 bytes of fields. */
/* TODO: Add these fields, if KBeta is ever instantiable in the NX kernel. */
util::IntrusiveListNode process_list_node;
util::IntrusiveListNode m_process_list_node;
public:
explicit KBeta()
: process_list_node()
: m_process_list_node()
{
/* ... */
}

@ -200,14 +200,14 @@ namespace ams::kern {
CapabilityFlag<CapabilityType::HandleTable> |
CapabilityFlag<CapabilityType::DebugFlags>;
private:
u8 svc_access_flags[SvcFlagCount]{};
u8 irq_access_flags[IrqFlagCount]{};
u64 core_mask{};
u64 priority_mask{};
util::BitPack32 debug_capabilities{0};
s32 handle_table_size{};
util::BitPack32 intended_kernel_version{0};
u32 program_type{};
u8 m_svc_access_flags[SvcFlagCount]{};
u8 m_irq_access_flags[IrqFlagCount]{};
u64 m_core_mask{};
u64 m_priority_mask{};
util::BitPack32 m_debug_capabilities{0};
s32 m_handle_table_size{};
util::BitPack32 m_intended_kernel_version{0};
u32 m_program_type{};
private:
static constexpr ALWAYS_INLINE void SetSvcAllowedImpl(u8 *data, u32 id) {
constexpr size_t BitsPerWord = BITSIZEOF(*data);

@ -228,8 +228,8 @@ namespace ams::kern {
}

bool SetSvcAllowed(u32 id) {
if (id < BITSIZEOF(this->svc_access_flags)) {
SetSvcAllowedImpl(this->svc_access_flags, id);
if (id < BITSIZEOF(m_svc_access_flags)) {
SetSvcAllowedImpl(m_svc_access_flags, id);
return true;
} else {
return false;

@ -237,9 +237,9 @@ namespace ams::kern {
}

bool SetInterruptPermitted(u32 id) {
constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
if (id < BITSIZEOF(this->irq_access_flags)) {
this->irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
constexpr size_t BitsPerWord = BITSIZEOF(m_irq_access_flags[0]);
if (id < BITSIZEOF(m_irq_access_flags)) {
m_irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord));
return true;
} else {
return false;

@ -266,14 +266,14 @@ namespace ams::kern {
Result Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table);
Result Initialize(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table);

constexpr u64 GetCoreMask() const { return this->core_mask; }
constexpr u64 GetPriorityMask() const { return this->priority_mask; }
constexpr s32 GetHandleTableSize() const { return this->handle_table_size; }
constexpr u64 GetCoreMask() const { return m_core_mask; }
constexpr u64 GetPriorityMask() const { return m_priority_mask; }
constexpr s32 GetHandleTableSize() const { return m_handle_table_size; }

ALWAYS_INLINE void CopySvcPermissionsTo(KThread::StackParameters &sp) const {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));
/* Copy permissions. */
std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
std::memcpy(sp.svc_permission, m_svc_access_flags, sizeof(m_svc_access_flags));

/* Clear specific SVCs based on our state. */
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

@ -284,9 +284,9 @@ namespace ams::kern {
}

ALWAYS_INLINE void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));
/* Clear all permissions. */
std::memset(sp.svc_permission, 0, sizeof(this->svc_access_flags));
std::memset(sp.svc_permission, 0, sizeof(m_svc_access_flags));

/* Set specific SVCs based on our state. */
SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState);

@ -297,12 +297,12 @@ namespace ams::kern {
}

ALWAYS_INLINE void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) const {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));
/* Get whether we have access to return from exception. */
const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

/* Copy permissions. */
std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags));
std::memcpy(sp.svc_permission, m_svc_access_flags, sizeof(m_svc_access_flags));

/* Clear/Set specific SVCs based on our state. */
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

@ -313,21 +313,21 @@ namespace ams::kern {
}

ALWAYS_INLINE void CopyEnterExceptionSvcPermissionsTo(KThread::StackParameters &sp) {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));

/* Set ReturnFromException if allowed. */
if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_ReturnFromException)) {
if (GetSvcAllowedImpl(m_svc_access_flags, svc::SvcId_ReturnFromException)) {
SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);
}

/* Set GetInfo if allowed. */
if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_GetInfo)) {
if (GetSvcAllowedImpl(m_svc_access_flags, svc::SvcId_GetInfo)) {
SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo);
}
}

ALWAYS_INLINE void CopyLeaveExceptionSvcPermissionsTo(KThread::StackParameters &sp) {
static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission));
static_assert(sizeof(m_svc_access_flags) == sizeof(sp.svc_permission));

/* Clear ReturnFromException. */
ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException);

@ -339,24 +339,24 @@ namespace ams::kern {
}

constexpr bool IsPermittedInterrupt(u32 id) const {
constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]);
if (id < BITSIZEOF(this->irq_access_flags)) {
return (this->irq_access_flags[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0;
constexpr size_t BitsPerWord = BITSIZEOF(m_irq_access_flags[0]);
if (id < BITSIZEOF(m_irq_access_flags)) {
return (m_irq_access_flags[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0;
} else {
return false;
}
}

constexpr bool IsPermittedDebug() const {
return this->debug_capabilities.Get<DebugFlags::AllowDebug>();
return m_debug_capabilities.Get<DebugFlags::AllowDebug>();
}

constexpr bool CanForceDebug() const {
return this->debug_capabilities.Get<DebugFlags::ForceDebug>();
return m_debug_capabilities.Get<DebugFlags::ForceDebug>();
}

constexpr u32 GetIntendedKernelMajorVersion() const { return this->intended_kernel_version.Get<KernelVersion::MajorVersion>(); }
constexpr u32 GetIntendedKernelMinorVersion() const { return this->intended_kernel_version.Get<KernelVersion::MinorVersion>(); }
constexpr u32 GetIntendedKernelMajorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MajorVersion>(); }
constexpr u32 GetIntendedKernelMinorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MinorVersion>(); }
constexpr u32 GetIntendedKernelVersion() const { return ams::svc::EncodeKernelVersion(this->GetIntendedKernelMajorVersion(), this->GetIntendedKernelMinorVersion()); }
};
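The KCapabilities hunks above keep SVC and interrupt permissions in packed bit arrays and index them with id / BitsPerWord and id % BitsPerWord. A small illustrative sketch of that indexing (assumed sizes, not the kernel's real capability layout):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t SvcFlagCount = 0x80 / 8;    /* Assumed flag-array size for the sketch. */

    struct SvcPermissions {
        uint8_t flags[SvcFlagCount]{};

        static constexpr size_t BitsPerWord = 8; /* Bits in each uint8_t word. */

        bool Set(uint32_t id) {
            if (id < SvcFlagCount * BitsPerWord) {
                flags[id / BitsPerWord] |= static_cast<uint8_t>(1u << (id % BitsPerWord));
                return true;
            }
            return false;
        }

        bool Test(uint32_t id) const {
            if (id < SvcFlagCount * BitsPerWord) {
                return (flags[id / BitsPerWord] >> (id % BitsPerWord)) & 1;
            }
            return false;
        }
    };

Copying the whole array with memcpy and then clearing or setting individual SVC ids, as CopySvcPermissionsTo does above, works because each permission is exactly one bit in this packed layout.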
@ -28,19 +28,23 @@ namespace ams::kern {
class KClientPort final : public KSynchronizationObject {
MESOSPHERE_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
private:
std::atomic<s32> num_sessions;
std::atomic<s32> peak_sessions;
s32 max_sessions;
KPort *parent;
std::atomic<s32> m_num_sessions;
std::atomic<s32> m_peak_sessions;
s32 m_max_sessions;
KPort *m_parent;
public:
constexpr KClientPort() : num_sessions(), peak_sessions(), max_sessions(), parent() { /* ... */ }
constexpr KClientPort() : m_num_sessions(), m_peak_sessions(), m_max_sessions(), m_parent() { /* ... */ }
virtual ~KClientPort() { /* ... */ }

void Initialize(KPort *parent, s32 max_sessions);
void OnSessionFinalized();
void OnServerClosed();

constexpr const KPort *GetParent() const { return this->parent; }
constexpr const KPort *GetParent() const { return m_parent; }

ALWAYS_INLINE s32 GetNumSessions() const { return m_num_sessions; }
ALWAYS_INLINE s32 GetPeakSessions() const { return m_peak_sessions; }
ALWAYS_INLINE s32 GetMaxSessions() const { return m_max_sessions; }

bool IsLight() const;

@ -24,20 +24,20 @@ namespace ams::kern {
class KClientSession final : public KAutoObjectWithSlabHeapAndContainer<KClientSession, KAutoObjectWithList> {
MESOSPHERE_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);
private:
KSession *parent;
KSession *m_parent;
public:
constexpr KClientSession() : parent() { /* ... */ }
constexpr KClientSession() : m_parent() { /* ... */ }
virtual ~KClientSession() { /* ... */ }

void Initialize(KSession *parent) {
/* Set member variables. */
this->parent = parent;
m_parent = parent;
}

virtual void Destroy() override;
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

constexpr KSession *GetParent() const { return this->parent; }
constexpr KSession *GetParent() const { return m_parent; }

Result SendSyncRequest(uintptr_t address, size_t size);
Result SendAsyncRequest(KWritableEvent *event, uintptr_t address, size_t size);

@ -23,15 +23,15 @@ namespace ams::kern {
class KCodeMemory final : public KAutoObjectWithSlabHeapAndContainer<KCodeMemory, KAutoObjectWithList> {
MESOSPHERE_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);
private:
TYPED_STORAGE(KPageGroup) page_group;
KProcess *owner;
KProcessAddress address;
KLightLock lock;
bool is_initialized;
bool is_owner_mapped;
bool is_mapped;
TYPED_STORAGE(KPageGroup) m_page_group;
KProcess *m_owner;
KProcessAddress m_address;
KLightLock m_lock;
bool m_is_initialized;
bool m_is_owner_mapped;
bool m_is_mapped;
public:
explicit KCodeMemory() : owner(nullptr), address(Null<KProcessAddress>), is_initialized(false), is_owner_mapped(false), is_mapped(false) {
explicit KCodeMemory() : m_owner(nullptr), m_address(Null<KProcessAddress>), m_is_initialized(false), m_is_owner_mapped(false), m_is_mapped(false) {
/* ... */
}

@ -45,12 +45,12 @@ namespace ams::kern {
Result MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm);
Result UnmapFromOwner(KProcessAddress address, size_t size);

virtual bool IsInitialized() const override { return this->is_initialized; }
virtual bool IsInitialized() const override { return m_is_initialized; }
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

KProcess *GetOwner() const { return this->owner; }
KProcessAddress GetSourceAddress() { return this->address; }
size_t GetSize() const { return this->is_initialized ? GetReference(this->page_group).GetNumPages() * PageSize : 0; }
KProcess *GetOwner() const { return m_owner; }
KProcessAddress GetSourceAddress() { return m_address; }
size_t GetSize() const { return m_is_initialized ? GetReference(m_page_group).GetNumPages() * PageSize : 0; }
};

}

@ -24,9 +24,9 @@ namespace ams::kern {
public:
using ThreadTree = typename KThread::ConditionVariableThreadTreeType;
private:
ThreadTree tree;
ThreadTree m_tree;
public:
constexpr KConditionVariable() : tree() { /* ... */ }
constexpr KConditionVariable() : m_tree() { /* ... */ }

/* Arbitration. */
Result SignalToAddress(KProcessAddress addr);

@ -26,11 +26,11 @@ namespace ams::kern {
protected:
using DebugEventList = util::IntrusiveListBaseTraits<KEventInfo>::ListType;
private:
DebugEventList event_info_list;
u32 continue_flags;
KProcess *process;
KLightLock lock;
KProcess::State old_process_state;
DebugEventList m_event_info_list;
u32 m_continue_flags;
KProcess *m_process;
KLightLock m_lock;
KProcess::State m_old_process_state;
public:
explicit KDebugBase() { /* ... */ }
virtual ~KDebugBase() { /* ... */ }

@ -24,19 +24,19 @@ namespace ams::kern {
class KDeviceAddressSpace final : public KAutoObjectWithSlabHeapAndContainer<KDeviceAddressSpace, KAutoObjectWithList> {
MESOSPHERE_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject);
private:
KLightLock lock;
KDevicePageTable table;
u64 space_address;
u64 space_size;
bool is_initialized;
KLightLock m_lock;
KDevicePageTable m_table;
u64 m_space_address;
u64 m_space_size;
bool m_is_initialized;
public:
constexpr KDeviceAddressSpace() : lock(), table(), space_address(), space_size(), is_initialized() { /* ... */ }
constexpr KDeviceAddressSpace() : m_lock(), m_table(), m_space_address(), m_space_size(), m_is_initialized() { /* ... */ }
virtual ~KDeviceAddressSpace() { /* ... */ }

Result Initialize(u64 address, u64 size);
virtual void Finalize() override;

virtual bool IsInitialized() const override { return this->is_initialized; }
virtual bool IsInitialized() const override { return m_is_initialized; }
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

Result Attach(ams::svc::DeviceName device_name);

@ -39,6 +39,7 @@ namespace ams::kern {
}

static NOINLINE void HandleDpc();
static void Sync();
};

}
@ -0,0 +1,51 @@
/*
* Copyright (c) 2018-2020 Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>

namespace ams::kern::KDumpObject {

void DumpThread();
void DumpThread(u64 thread_id);

void DumpThreadCallStack();
void DumpThreadCallStack(u64 thread_id);

void DumpKernelObject();

void DumpHandle();
void DumpHandle(u64 process_id);

void DumpKernelMemory();
void DumpMemory();
void DumpMemory(u64 process_id);

void DumpKernelPageTable();
void DumpPageTable();
void DumpPageTable(u64 process_id);

void DumpKernelCpuUtilization();
void DumpCpuUtilization();
void DumpCpuUtilization(u64 process_id);

void DumpProcess();
void DumpProcess(u64 process_id);

void DumpPort();
void DumpPort(u64 process_id);

}
@ -28,19 +28,19 @@ namespace ams::kern {
public:
class PageBuffer {
private:
u8 buffer[PageSize];
u8 m_buffer[PageSize];
};
static_assert(sizeof(PageBuffer) == PageSize);
private:
KSpinLock lock;
KPageBitmap page_bitmap;
size_t used;
size_t peak;
size_t count;
KVirtualAddress address;
size_t size;
KSpinLock m_lock;
KPageBitmap m_page_bitmap;
size_t m_used;
size_t m_peak;
size_t m_count;
KVirtualAddress m_address;
size_t m_size;
public:
KDynamicPageManager() : lock(), page_bitmap(), used(), peak(), count(), address(), size() { /* ... */ }
KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(), m_size() { /* ... */ }

Result Initialize(KVirtualAddress memory, size_t sz) {
/* We need to have positive size. */

@ -51,40 +51,40 @@ namespace ams::kern {
const size_t allocatable_size = sz - management_size;

/* Set tracking fields. */
this->address = memory;
this->size = util::AlignDown(allocatable_size, sizeof(PageBuffer));
this->count = allocatable_size / sizeof(PageBuffer);
R_UNLESS(this->count > 0, svc::ResultOutOfMemory());
m_address = memory;
m_size = util::AlignDown(allocatable_size, sizeof(PageBuffer));
m_count = allocatable_size / sizeof(PageBuffer);
R_UNLESS(m_count > 0, svc::ResultOutOfMemory());

/* Clear the management region. */
u64 *management_ptr = GetPointer<u64>(this->address + allocatable_size);
u64 *management_ptr = GetPointer<u64>(m_address + allocatable_size);
std::memset(management_ptr, 0, management_size);

/* Initialize the bitmap. */
this->page_bitmap.Initialize(management_ptr, this->count);
m_page_bitmap.Initialize(management_ptr, m_count);

/* Free the pages to the bitmap. */
std::memset(GetPointer<PageBuffer>(this->address), 0, this->count * sizeof(PageBuffer));
for (size_t i = 0; i < this->count; i++) {
this->page_bitmap.SetBit(i);
std::memset(GetPointer<PageBuffer>(m_address), 0, m_count * sizeof(PageBuffer));
for (size_t i = 0; i < m_count; i++) {
m_page_bitmap.SetBit(i);
}

return ResultSuccess();
}

constexpr KVirtualAddress GetAddress() const { return this->address; }
constexpr size_t GetSize() const { return this->size; }
constexpr size_t GetUsed() const { return this->used; }
constexpr size_t GetPeak() const { return this->peak; }
constexpr size_t GetCount() const { return this->count; }
constexpr KVirtualAddress GetAddress() const { return m_address; }
constexpr size_t GetSize() const { return m_size; }
constexpr size_t GetUsed() const { return m_used; }
constexpr size_t GetPeak() const { return m_peak; }
constexpr size_t GetCount() const { return m_count; }

PageBuffer *Allocate() {
/* Take the lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(this->lock);
KScopedSpinLock lk(m_lock);

/* Find a random free block. */
ssize_t soffset = this->page_bitmap.FindFreeBlock(true);
ssize_t soffset = m_page_bitmap.FindFreeBlock(true);
if (AMS_UNLIKELY(soffset < 0)) {
return nullptr;
}

@ -92,23 +92,23 @@ namespace ams::kern {
const size_t offset = static_cast<size_t>(soffset);

/* Update our tracking. */
this->page_bitmap.ClearBit(offset);
this->peak = std::max(this->peak, (++this->used));
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));

return GetPointer<PageBuffer>(this->address) + offset;
return GetPointer<PageBuffer>(m_address) + offset;
}

void Free(PageBuffer *pb) {
/* Take the lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(this->lock);
KScopedSpinLock lk(m_lock);

/* Set the bit for the free page. */
size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(this->address)) / sizeof(PageBuffer);
this->page_bitmap.SetBit(offset);
size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_address)) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);

/* Decrement our used count. */
--this->used;
--m_used;
}
};
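To make the KDynamicPageManager hunks above easier to follow: the manager hands out page-sized buffers from a fixed region and tracks free pages in a bitmap, where a set bit means the page is free. A heavily simplified model of the same allocate/free bookkeeping (standard-library stand-ins for KPageBitmap and the interrupt-disabling spinlock, not the actual implementation):

    #include <algorithm>
    #include <bitset>
    #include <cstddef>
    #include <mutex>

    template <size_t Count, size_t PageSize = 0x1000>
    class SimplePagePool {
        public:
            void *Allocate() {
                std::lock_guard<std::mutex> lk(m_lock);
                for (size_t i = 0; i < Count; ++i) {
                    if (m_free.test(i)) {                           /* Set bit == page is free. */
                        m_free.reset(i);
                        m_peak = std::max(m_peak, ++m_used);
                        return m_storage + i * PageSize;
                    }
                }
                return nullptr;                                     /* Pool exhausted. */
            }

            void Free(void *p) {
                std::lock_guard<std::mutex> lk(m_lock);
                const size_t i = (static_cast<unsigned char *>(p) - m_storage) / PageSize;
                m_free.set(i);
                --m_used;
            }

        private:
            std::mutex m_lock;
            std::bitset<Count> m_free = std::bitset<Count>().set(); /* Every page starts free. */
            size_t m_used = 0, m_peak = 0;
            alignas(PageSize) unsigned char m_storage[Count * PageSize]{};
    };

The real allocator differs in that KPageBitmap's FindFreeBlock(true) picks a randomized free slot, and the management bitmap lives at the end of the managed region itself, as the Initialize hunk above shows.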