From 745fa84e5e6df03e27511ba7cd70234652d0d53b Mon Sep 17 00:00:00 2001 From: TuxSH Date: Wed, 31 Oct 2018 21:47:31 +0100 Subject: [PATCH] Add mesosphere (VERY VERY WIP) --- mesosphere/Makefile | 152 +++ mesosphere/README.md | 9 + mesosphere/config/arch/arm64/arch.mk | 25 + mesosphere/config/arch/arm64/linker.ld | 214 +++ mesosphere/config/arch/arm64/linker.specs | 4 + .../config/board/nintendo/switch/board.mk | 5 + mesosphere/config/common.mk | 9 + mesosphere/config/rules.mk | 37 + .../mesosphere/arch/KInterruptMaskGuard.hpp | 11 + .../include/mesosphere/arch/KSpinLock.hpp | 11 + .../arch/arm64/KInterruptMaskGuard.hpp | 40 + .../mesosphere/arch/arm64/KSpinLock.hpp | 103 ++ .../include/mesosphere/arch/arm64/arm64.hpp | 67 + .../include/mesosphere/board/KSystemClock.hpp | 11 + .../common/arm/arm64/timer/KSystemClock.hpp | 83 ++ .../include/mesosphere/core/KCoreContext.hpp | 36 + mesosphere/include/mesosphere/core/types.hpp | 62 + mesosphere/include/mesosphere/core/util.hpp | 109 ++ .../mesosphere/interfaces/IAlarmable.hpp | 61 + .../mesosphere/interfaces/IInterruptible.hpp | 16 + .../interfaces/IInterruptibleWork.hpp | 15 + .../interfaces/ILimitedResource.hpp | 47 + .../mesosphere/interfaces/ISetAllocated.hpp | 60 + .../mesosphere/interfaces/ISlabAllocated.hpp | 35 + .../include/mesosphere/interfaces/IWork.hpp | 29 + .../include/mesosphere/interrupts/KAlarm.hpp | 34 + .../mesosphere/interrupts/KWorkQueue.hpp | 27 + .../mesosphere/kresources/KAutoObject.hpp | 152 +++ .../kresources/KObjectAllocator.hpp | 70 + .../mesosphere/kresources/KResourceLimit.hpp | 87 ++ .../mesosphere/kresources/KSlabHeap.hpp | 55 + .../mesosphere/kresources/KSlabStack.hpp | 78 ++ .../mesosphere/processes/KHandleTable.hpp | 74 ++ .../include/mesosphere/processes/KProcess.hpp | 43 + .../threading/KConditionVariable.hpp | 78 ++ .../mesosphere/threading/KMultiLevelQueue.hpp | 340 +++++ .../include/mesosphere/threading/KMutex.hpp | 93 ++ .../mesosphere/threading/KScheduler.hpp | 130 ++ .../include/mesosphere/threading/KThread.hpp | 261 ++++ mesosphere/source/core/KCoreContext.cpp | 11 + mesosphere/source/interfaces/IAlarmable.cpp | 20 + .../source/interfaces/IInterruptibleWork.cpp | 12 + .../source/interfaces/ILimitedResource.cpp | 26 + mesosphere/source/interrupts/KAlarm.cpp | 58 + mesosphere/source/interrupts/KWorkQueue.cpp | 45 + mesosphere/source/kresources/KAutoObject.cpp | 10 + .../source/kresources/KResourceLimit.cpp | 83 ++ mesosphere/source/my_libc.c | 1150 +++++++++++++++++ mesosphere/source/my_libstdc++.cpp | 6 + mesosphere/source/processes/KHandleTable.cpp | 133 ++ mesosphere/source/processes/KProcess.cpp | 14 + mesosphere/source/test.cpp | 10 + .../source/threading/KConditionVariable.cpp | 39 + mesosphere/source/threading/KMutex.cpp | 62 + mesosphere/source/threading/KScheduler.cpp | 344 +++++ mesosphere/source/threading/KThread.cpp | 237 ++++ 56 files changed, 5033 insertions(+) create mode 100644 mesosphere/Makefile create mode 100644 mesosphere/README.md create mode 100644 mesosphere/config/arch/arm64/arch.mk create mode 100644 mesosphere/config/arch/arm64/linker.ld create mode 100644 mesosphere/config/arch/arm64/linker.specs create mode 100644 mesosphere/config/board/nintendo/switch/board.mk create mode 100644 mesosphere/config/common.mk create mode 100644 mesosphere/config/rules.mk create mode 100644 mesosphere/include/mesosphere/arch/KInterruptMaskGuard.hpp create mode 100644 mesosphere/include/mesosphere/arch/KSpinLock.hpp create mode 100644 
mesosphere/include/mesosphere/arch/arm64/KInterruptMaskGuard.hpp create mode 100644 mesosphere/include/mesosphere/arch/arm64/KSpinLock.hpp create mode 100644 mesosphere/include/mesosphere/arch/arm64/arm64.hpp create mode 100644 mesosphere/include/mesosphere/board/KSystemClock.hpp create mode 100644 mesosphere/include/mesosphere/board/common/arm/arm64/timer/KSystemClock.hpp create mode 100644 mesosphere/include/mesosphere/core/KCoreContext.hpp create mode 100644 mesosphere/include/mesosphere/core/types.hpp create mode 100644 mesosphere/include/mesosphere/core/util.hpp create mode 100644 mesosphere/include/mesosphere/interfaces/IAlarmable.hpp create mode 100644 mesosphere/include/mesosphere/interfaces/IInterruptible.hpp create mode 100644 mesosphere/include/mesosphere/interfaces/IInterruptibleWork.hpp create mode 100644 mesosphere/include/mesosphere/interfaces/ILimitedResource.hpp create mode 100644 mesosphere/include/mesosphere/interfaces/ISetAllocated.hpp create mode 100644 mesosphere/include/mesosphere/interfaces/ISlabAllocated.hpp create mode 100644 mesosphere/include/mesosphere/interfaces/IWork.hpp create mode 100644 mesosphere/include/mesosphere/interrupts/KAlarm.hpp create mode 100644 mesosphere/include/mesosphere/interrupts/KWorkQueue.hpp create mode 100644 mesosphere/include/mesosphere/kresources/KAutoObject.hpp create mode 100644 mesosphere/include/mesosphere/kresources/KObjectAllocator.hpp create mode 100644 mesosphere/include/mesosphere/kresources/KResourceLimit.hpp create mode 100644 mesosphere/include/mesosphere/kresources/KSlabHeap.hpp create mode 100644 mesosphere/include/mesosphere/kresources/KSlabStack.hpp create mode 100644 mesosphere/include/mesosphere/processes/KHandleTable.hpp create mode 100644 mesosphere/include/mesosphere/processes/KProcess.hpp create mode 100644 mesosphere/include/mesosphere/threading/KConditionVariable.hpp create mode 100644 mesosphere/include/mesosphere/threading/KMultiLevelQueue.hpp create mode 100644 mesosphere/include/mesosphere/threading/KMutex.hpp create mode 100644 mesosphere/include/mesosphere/threading/KScheduler.hpp create mode 100644 mesosphere/include/mesosphere/threading/KThread.hpp create mode 100644 mesosphere/source/core/KCoreContext.cpp create mode 100644 mesosphere/source/interfaces/IAlarmable.cpp create mode 100644 mesosphere/source/interfaces/IInterruptibleWork.cpp create mode 100644 mesosphere/source/interfaces/ILimitedResource.cpp create mode 100644 mesosphere/source/interrupts/KAlarm.cpp create mode 100644 mesosphere/source/interrupts/KWorkQueue.cpp create mode 100644 mesosphere/source/kresources/KAutoObject.cpp create mode 100644 mesosphere/source/kresources/KResourceLimit.cpp create mode 100644 mesosphere/source/my_libc.c create mode 100644 mesosphere/source/my_libstdc++.cpp create mode 100644 mesosphere/source/processes/KHandleTable.cpp create mode 100644 mesosphere/source/processes/KProcess.cpp create mode 100644 mesosphere/source/test.cpp create mode 100644 mesosphere/source/threading/KConditionVariable.cpp create mode 100644 mesosphere/source/threading/KMutex.cpp create mode 100644 mesosphere/source/threading/KScheduler.cpp create mode 100644 mesosphere/source/threading/KThread.cpp diff --git a/mesosphere/Makefile b/mesosphere/Makefile new file mode 100644 index 000000000..cc5ef78c5 --- /dev/null +++ b/mesosphere/Makefile @@ -0,0 +1,152 @@ +#--------------------------------------------------------------------------------- +.SUFFIXES: +#--------------------------------------------------------------------------------- + 
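+# MESOSPHERE_BOARD selects the board configuration under config/board/ and may be
+# set from the environment or the make command line, e.g.
+#     make MESOSPHERE_BOARD=nintendo-switch
+# (the only board provided by this patch, and therefore also the default below).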
+ifeq ($(MESOSPHERE_BOARD),) +export MESOSPHERE_BOARD := nintendo-switch +endif + +#--------------------------------------------------------------------------------- +# TARGET is the name of the output +# BUILD is the directory where object files & intermediate files will be placed +# SOURCES is a list of directories containing source code +# DATA is a list of directories containing data files +# INCLUDES is a list of directories containing header files +#--------------------------------------------------------------------------------- +TARGET := $(notdir $(CURDIR)) +BUILD := build + +ifneq ($(BUILD),$(notdir $(CURDIR))) +export CONFIG_DIR := $(CURDIR)/config +ifeq ($(MESOSPHERE_BOARD),nintendo-switch) +export BOARD_MAKE_DIR := $(CURDIR)/config/board/nintendo/switch +export ARCH_MAKE_DIR := $(CURDIR)/config/arch/arm64 +endif +endif + +include $(CONFIG_DIR)/rules.mk +include $(CONFIG_DIR)/common.mk +include $(ARCH_MAKE_DIR)/arch.mk +include $(BOARD_MAKE_DIR)/board.mk + +SOURCES := $(COMMON_SOURCES_DIRS) $(ARCH_SOURCE_DIRS) $(BOARD_SOURCE_DIRS) +DATA := data +INCLUDES := include ../common/include + +DEFINES := $(COMMON_DEFINES) $(ARCH_DEFINES) $(BOARD_DEFINES) + +#--------------------------------------------------------------------------------- +# options for code generation +#--------------------------------------------------------------------------------- +SETTING := $(COMMON_SETTING) $(ARCH_SETTING) $(BOARD_SETTING) + +CFLAGS := $(SETTING) $(DEFINES) $(COMMON_CFLAGS) $(ARCH_CFLAGS) $(BOARD_CFLAGS) +CFLAGS += $(INCLUDE) + +CXXFLAGS := $(CFLAGS) $(COMMON_CXXFLAGS) $(ARCH_CXXFLAGS) $(BOARD_CXXFLAGS) + +ASFLAGS := -g $(SETTING) +LDFLAGS = -specs=$(ARCH_MAKE_DIR)/linker.specs $(SETTING) $(COMMON_LDFLAGS) + +LIBS := + +#--------------------------------------------------------------------------------- +# list of directories containing libraries, this must be the top level containing +# include and lib +#--------------------------------------------------------------------------------- +LIBDIRS := + + +#--------------------------------------------------------------------------------- +# no real need to edit anything past this point unless you need to add additional +# rules for different file extensions +#--------------------------------------------------------------------------------- +ifneq ($(BUILD),$(notdir $(CURDIR))) +#--------------------------------------------------------------------------------- + +export OUTPUT := $(CURDIR)/$(TARGET) +export TOPDIR := $(CURDIR) + +export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \ + $(foreach dir,$(DATA),$(CURDIR)/$(dir)) + +export DEPSDIR := $(CURDIR)/$(BUILD) + +CFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.c))) +CPPFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.cpp))) +SFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.s))) +BINFILES := $(foreach dir,$(DATA),$(notdir $(wildcard $(dir)/*.*))) + +#--------------------------------------------------------------------------------- +# use CXX for linking C++ projects, CC for standard C +#--------------------------------------------------------------------------------- +ifeq ($(strip $(CPPFILES)),) +#--------------------------------------------------------------------------------- + export LD := $(CC) +#--------------------------------------------------------------------------------- +else +#--------------------------------------------------------------------------------- + export LD := $(CXX) 
+#--------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------- + +export OFILES := $(addsuffix .o,$(BINFILES)) \ + $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o) + +export INCLUDE := $(foreach dir,$(INCLUDES),-I$(CURDIR)/$(dir)) \ + $(foreach dir,$(LIBDIRS),-I$(dir)/include) \ + -I$(CURDIR)/$(BUILD) + +export LIBPATHS := $(foreach dir,$(LIBDIRS),-L$(dir)/lib) + +.PHONY: $(BUILD) clean all + +#--------------------------------------------------------------------------------- +all: $(BUILD) + +$(BUILD): + @[ -d $@ ] || mkdir -p $@ + @$(MAKE) --no-print-directory -C $(BUILD) -f $(CURDIR)/Makefile + +#--------------------------------------------------------------------------------- +clean: + @echo clean ... + @rm -fr $(BUILD) $(TARGET).bin $(TARGET).elf + + +#--------------------------------------------------------------------------------- +else +.PHONY: all + +DEPENDS := $(OFILES:.o=.d) + +#--------------------------------------------------------------------------------- +# main targets +#--------------------------------------------------------------------------------- +all : $(OUTPUT).bin + +$(OUTPUT).bin : $(OUTPUT).elf + $(OBJCOPY) -S -O binary $< $@ + @echo built ... $(notdir $@) + +$(OUTPUT).elf : $(OFILES) + +%.elf: + @echo linking $(notdir $@) + $(LD) $(LDFLAGS) $(OFILES) $(LIBPATHS) $(LIBS) -o $@ + @$(NM) -CSn $@ > $(notdir $*.lst) + +#--------------------------------------------------------------------------------- +# you need a rule like this for each extension you use as binary data +#--------------------------------------------------------------------------------- +%.bin.o : %.bin +#--------------------------------------------------------------------------------- + @echo $(notdir $<) + @$(bin2o) + +-include $(DEPENDS) + +#--------------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------------- diff --git a/mesosphere/README.md b/mesosphere/README.md new file mode 100644 index 000000000..4cc86430d --- /dev/null +++ b/mesosphere/README.md @@ -0,0 +1,9 @@ +# Mesosphère + +**WORK IN PROGRESS** + +Special thanks to: + +* @gdkchan ([Ryujinx](https://github.com/Ryujinx/Ryujinx)'s author), without whom I would have been unable to understand many complex mechanisms of the Horizon kernel, such as the scheduler, etc. Ryujinx's kernel HLE is pretty accurate, and of course part of this work has strong similarites to Ryujinx's kernel code. +* @fincs, who helped me in the kernel reverse-engineering process a lot as well, and with countless other things too. 
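+
+## Building
+
+A rough sketch of the expected build invocation (assuming devkitA64, or a
+bare-metal `aarch64-none-elf-` toolchain on the PATH, as selected by
+`config/arch/arm64/arch.mk`):
+
+```sh
+make MESOSPHERE_BOARD=nintendo-switch   # nintendo-switch is also the default board
+```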
+ diff --git a/mesosphere/config/arch/arm64/arch.mk b/mesosphere/config/arch/arm64/arch.mk new file mode 100644 index 000000000..2eb1a824b --- /dev/null +++ b/mesosphere/config/arch/arm64/arch.mk @@ -0,0 +1,25 @@ +ifeq ($(strip $(DEVKITPRO)),) + +PREFIX := aarch64-none-elf- + +export CC := $(PREFIX)gcc +export CXX := $(PREFIX)g++ +export AS := $(PREFIX)as +export AR := $(PREFIX)gcc-ar +export OBJCOPY := $(PREFIX)objcopy + +ISVC=$(or $(VCBUILDHELPER_COMMAND),$(MSBUILDEXTENSIONSPATH32),$(MSBUILDEXTENSIONSPATH)) + +ifneq (,$(ISVC)) + ERROR_FILTER := 2>&1 | sed -e 's/\(.[a-zA-Z]\+\):\([0-9]\+\):/\1(\2):/g' +endif + +else +include $(DEVKITPRO)/devkitA64/base_tools +endif + +ARCH_SETTING := -march=armv8-a -mgeneral-regs-only +ARCH_DEFINES := -DMESOSPHERE_ARCH_ARM64 +ARCH_CFLAGS := +ARCH_CXXFLAGS := +ARCH_SOURCE_DIRS := source/arch/arm64 diff --git a/mesosphere/config/arch/arm64/linker.ld b/mesosphere/config/arch/arm64/linker.ld new file mode 100644 index 000000000..402d3a13c --- /dev/null +++ b/mesosphere/config/arch/arm64/linker.ld @@ -0,0 +1,214 @@ +OUTPUT_ARCH(aarch64) +ENTRY(_start) + +/* TODO overhaul */ + +PHDRS +{ + code PT_LOAD FLAGS(5) /* Read | Execute */; + rodata PT_LOAD FLAGS(4) /* Read */; + data PT_LOAD FLAGS(6) /* Read | Write */; + dyn PT_DYNAMIC; +} + +SECTIONS +{ + /* =========== CODE section =========== */ + PROVIDE(__start__ = 0x0); + . = __start__; + + .crt0 : + { + KEEP (*(.crt0)) + . = ALIGN(8); + } :code + + .init : + { + KEEP( *(.init) ) + . = ALIGN(8); + } :code + + .plt : + { + *(.plt) + *(.iplt) + . = ALIGN(8); + } :code + + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + . = ALIGN(8); + } :code + + .fini : + { + KEEP( *(.fini) ) + . = ALIGN(8); + } :code + + /* =========== RODATA section =========== */ + . = ALIGN(0x1000); + + .rodata : + { + *(.rodata .rodata.* .gnu.linkonce.r.*) + . = ALIGN(8); + } :rodata + + .eh_frame_hdr : { __eh_frame_hdr_start = .; *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) __eh_frame_hdr_end = .; } :rodata + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) } :rodata + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } :rodata + .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) } : rodata + + .dynamic : { *(.dynamic) } :rodata :dyn + .dynsym : { *(.dynsym) } :rodata + .dynstr : { *(.dynstr) } :rodata + .rela.dyn : { *(.rela.*) } :rodata + .interp : { *(.interp) } :rodata + .hash : { *(.hash) } :rodata + .gnu.hash : { *(.gnu.hash) } :rodata + .gnu.version : { *(.gnu.version) } :rodata + .gnu.version_d : { *(.gnu.version_d) } :rodata + .gnu.version_r : { *(.gnu.version_r) } :rodata + .note.gnu.build-id : { *(.note.gnu.build-id) } :rodata + + /* =========== DATA section =========== */ + . = ALIGN(0x1000); + + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) } :data + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } :data + .gnu_extab : ONLY_IF_RW { *(.gnu_extab*) } : data + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } :data + + .tdata ALIGN(8) : + { + __tdata_lma = .; + *(.tdata .tdata.* .gnu.linkonce.td.*) + . = ALIGN(8); + __tdata_lma_end = .; + } :data + + .tbss ALIGN(8) : + { + *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) + . 
= ALIGN(8); + } :data + + .preinit_array ALIGN(8) : + { + PROVIDE (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE (__preinit_array_end = .); + } :data + + .init_array ALIGN(8) : + { + PROVIDE (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array)) + PROVIDE (__init_array_end = .); + } :data + + .fini_array ALIGN(8) : + { + PROVIDE (__fini_array_start = .); + KEEP (*(.fini_array)) + KEEP (*(SORT(.fini_array.*))) + PROVIDE (__fini_array_end = .); + } :data + + .ctors ALIGN(8) : + { + KEEP (*crtbegin.o(.ctors)) /* MUST be first -- GCC requires it */ + KEEP (*(EXCLUDE_FILE (*crtend.o) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } :data + + .dtors ALIGN(8) : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } :data + + __got_start__ = .; + + .got : { *(.got) *(.igot) } :data + .got.plt : { *(.got.plt) *(.igot.plt) } :data + + __got_end__ = .; + + .data ALIGN(8) : + { + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } :data + + __bss_start__ = .; + .bss ALIGN(8) : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + . = ALIGN(8); + + /* Reserve space for the TLS segment of the main thread */ + __tls_start = .; + . += + SIZEOF(.tdata) + SIZEOF(.tbss); + __tls_end = .; + } : data + __bss_end__ = .; + + __end__ = ABSOLUTE(.) ; + + . = ALIGN(0x1000); + __argdata__ = ABSOLUTE(.) ; + + /* ================== + ==== Metadata ==== + ================== */ + + /* Discard sections that difficult post-processing */ + /DISCARD/ : { *(.group .comment .note) } + + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. 
*/ + + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } +} diff --git a/mesosphere/config/arch/arm64/linker.specs b/mesosphere/config/arch/arm64/linker.specs new file mode 100644 index 000000000..eeff37965 --- /dev/null +++ b/mesosphere/config/arch/arm64/linker.specs @@ -0,0 +1,4 @@ +%rename link old_link + +*link: +%(old_link) -T %:getenv(ARCH_MAKE_DIR /linker.ld) -pie --gc-sections -z text -z nodynamic-undefined-weak --build-id=sha1 diff --git a/mesosphere/config/board/nintendo/switch/board.mk b/mesosphere/config/board/nintendo/switch/board.mk new file mode 100644 index 000000000..9f1f42c88 --- /dev/null +++ b/mesosphere/config/board/nintendo/switch/board.mk @@ -0,0 +1,5 @@ +BOARD_SETTING := -mtune=cortex-a57 +BOARD_DEFINES := -DMESOSPHERE_BOARD_NINTENDO_SWITCH -DMESOSPHERE_BOARD_COMMON_ARM_ARM64_CLOCK +BOARD_CFLAGS := +BOARD_CXXFLAGS := +BOARD_SOURCE_DIRS := diff --git a/mesosphere/config/common.mk b/mesosphere/config/common.mk new file mode 100644 index 000000000..00e9f8c3d --- /dev/null +++ b/mesosphere/config/common.mk @@ -0,0 +1,9 @@ +COMMON_DEFINES := -DBOOST_DISABLE_ASSERTS +COMMON_SOURCES_DIRS := source/core source/interfaces source/interrupts source/kresources\ + source/processes source/threading source +COMMON_SETTING := -fPIE -g -nostdlib +COMMON_CFLAGS := -Wall -Werror -O2 -ffunction-sections -fdata-sections -fno-strict-aliasing -fwrapv\ + -fno-asynchronous-unwind-tables -fno-unwind-tables -fno-stack-protector +COMMON_CXXFLAGS := -fno-rtti -fno-exceptions -std=gnu++17 +COMMON_ASFLAGS := +COMMON_LDFLAGS := -Wl,-Map,out.map diff --git a/mesosphere/config/rules.mk b/mesosphere/config/rules.mk new file mode 100644 index 000000000..f56058227 --- /dev/null +++ b/mesosphere/config/rules.mk @@ -0,0 +1,37 @@ +#--------------------------------------------------------------------------------- +%.a: +#--------------------------------------------------------------------------------- + @echo $(notdir $@) + @rm -f $@ + $(AR) -rc $@ $^ + + +#--------------------------------------------------------------------------------- +%.o: %.cpp + @echo $(notdir $<) + $(CXX) -MMD -MP -MF $(DEPSDIR)/$*.d $(CXXFLAGS) -c $< -o $@ $(ERROR_FILTER) + +#--------------------------------------------------------------------------------- +%.o: %.c + @echo $(notdir $<) + $(CC) -MMD -MP -MF $(DEPSDIR)/$*.d $(CFLAGS) -c $< -o $@ $(ERROR_FILTER) + +#--------------------------------------------------------------------------------- +%.o: %.s + @echo $(notdir $<) + $(CC) -MMD -MP -MF $(DEPSDIR)/$*.d -x assembler-with-cpp $(ASFLAGS) -c $< -o $@ $(ERROR_FILTER) + +#--------------------------------------------------------------------------------- +%.o: %.S + @echo $(notdir $<) + $(CC) -MMD -MP -MF $(DEPSDIR)/$*.d -x assembler-with-cpp $(ASFLAGS) -c $< -o $@ $(ERROR_FILTER) + +#--------------------------------------------------------------------------------- +# canned command sequence for binary data +#--------------------------------------------------------------------------------- +define 
bin2o + bin2s $< | $(AS) -o $(@) + echo "extern const u8" `(echo $( `(echo $(> `(echo $(> `(echo $( + +#else + +//#error "No arch defined" + +#endif \ No newline at end of file diff --git a/mesosphere/include/mesosphere/arch/KSpinLock.hpp b/mesosphere/include/mesosphere/arch/KSpinLock.hpp new file mode 100644 index 000000000..3b6b3bdaa --- /dev/null +++ b/mesosphere/include/mesosphere/arch/KSpinLock.hpp @@ -0,0 +1,11 @@ +#pragma once + +#if 1 //defined MESOSPHERE_ARCH_ARM64 + +#include + +#else + +//#error "No arch defined" + +#endif \ No newline at end of file diff --git a/mesosphere/include/mesosphere/arch/arm64/KInterruptMaskGuard.hpp b/mesosphere/include/mesosphere/arch/arm64/KInterruptMaskGuard.hpp new file mode 100644 index 000000000..5cdb74ed6 --- /dev/null +++ b/mesosphere/include/mesosphere/arch/arm64/KInterruptMaskGuard.hpp @@ -0,0 +1,40 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ +inline namespace arch +{ +inline namespace arm64 +{ + +// Dummy. Should be platform-independent: + +class KInterruptMaskGuard final { + public: + + KInterruptMaskGuard() + { + flags = MESOSPHERE_READ_SYSREG(daif); + MESOSPHERE_WRITE_SYSREG(flags | PSR_I_BIT, daif); + } + + ~KInterruptMaskGuard() + { + MESOSPHERE_WRITE_SYSREG(MESOSPHERE_READ_SYSREG(daif) | (flags & PSR_I_BIT), daif); + } + + KInterruptMaskGuard(const KInterruptMaskGuard &) = delete; + KInterruptMaskGuard(KInterruptMaskGuard &&) = delete; + KInterruptMaskGuard &operator=(const KInterruptMaskGuard &) = delete; + KInterruptMaskGuard &operator=(KInterruptMaskGuard &&) = delete; + + private: + u64 flags; +}; + +} +} +} diff --git a/mesosphere/include/mesosphere/arch/arm64/KSpinLock.hpp b/mesosphere/include/mesosphere/arch/arm64/KSpinLock.hpp new file mode 100644 index 000000000..04cf16c51 --- /dev/null +++ b/mesosphere/include/mesosphere/arch/arm64/KSpinLock.hpp @@ -0,0 +1,103 @@ +#pragma once + +#include + +namespace mesosphere +{ +inline namespace arch +{ +inline namespace arm64 +{ + +// This largely uses the Linux kernel spinlock code, which is more efficient than Nintendo's (serializing two u16s into an u32). +class KSpinLock final { + + private: + + struct alignas(4) Ticket { + u16 owner, next; + }; + + Ticket ticket; + + public: + + bool try_lock() + { + u32 tmp; + Ticket lockval; + + asm volatile( + " prfm pstl1strm, %2\n" + "1: ldaxr %w0, %2\n" + " eor %w1, %w0, %w0, ror #16\n" + " cbnz %w1, 2f\n" + " add %w0, %w0, %3\n" + " stxr %w1, %w0, %2\n" + " cbnz %w1, 1b\n" + "2:" + : "=&r" (lockval), "=&r" (tmp), "+Q" (ticket) + : "I" (1 << 16) + : "memory" + ); + + return !tmp; + } + + void lock() + { + u32 tmp; + Ticket lockval, newval; + + asm volatile( + // Atomically increment the next ticket. + " prfm pstl1strm, %3\n" + "1: ldaxr %w0, %3\n" + " add %w1, %w0, %w5\n" + " stxr %w2, %w1, %3\n" + " cbnz %w2, 1b\n" + + + // Did we get the lock? + " eor %w1, %w0, %w0, ror #16\n" + " cbz %w1, 3f\n" + /* + No: spin on the owner. Send a local event to avoid missing an + unlock before the exclusive load. + */ + " sevl\n" + "2: wfe\n" + " ldaxrh %w2, %4\n" + " eor %w1, %w2, %w0, lsr #16\n" + " cbnz %w1, 2b\n" + // We got the lock. Critical section starts here. 
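+                /*
+                   Illustrative summary of the sequence above, in pseudo-C++ (owner
+                   sits in the low halfword of the packed u32, next in the high one):
+
+                       Ticket old = atomic_fetch_add(&ticket, 1 << 16); // grab ticket number old.next
+                       while (load_acquire(&ticket.owner) != old.next)
+                           wfe();                                       // spin until it is our turn
+
+                   sevl/wfe merely throttle the polling; ldaxr/ldaxrh provide the
+                   acquire ordering for the critical section.
+                */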
+ "3:" + : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*&ticket) + : "Q" (ticket.owner), "I" (1 << 16) + : "memory" + ); + } + + void unlock() + { + u64 tmp; + asm volatile( + " ldrh %w1, %0\n" + " add %w1, %w1, #1\n" + " stlrh %w1, %0" + : "=Q" (ticket.owner), "=&r" (tmp) + : + : "memory" + ); + + } + KSpinLock() = default; + KSpinLock(const KSpinLock &) = delete; + KSpinLock(KSpinLock &&) = delete; + KSpinLock &operator=(const KSpinLock &) = delete; + KSpinLock &operator=(KSpinLock &&) = delete; +}; + +} +} +} diff --git a/mesosphere/include/mesosphere/arch/arm64/arm64.hpp b/mesosphere/include/mesosphere/arch/arm64/arm64.hpp new file mode 100644 index 000000000..1126ea4b4 --- /dev/null +++ b/mesosphere/include/mesosphere/arch/arm64/arm64.hpp @@ -0,0 +1,67 @@ +#pragma once + +#include +#include + +#define MESOSPHERE_READ_SYSREG(r) ({\ + u64 __val; \ + asm volatile("mrs %0, " BOOST_PP_STRINGIZE(r) : "=r" (__val)); \ + __val; \ +}) + +#define MESOSPHERE_WRITE_SYSREG(v, r) do { \ + u64 __val = (u64)v; \ + asm volatile("msr " BOOST_PP_STRINGIZE(r) ", %0" \ + :: "r" (__val)); \ +} while (false) + +#define MESOSPHERE_DAIF_BIT(v) (((u64)(v)) >> 6) + +namespace mesosphere +{ +inline namespace arch +{ +inline namespace arm64 +{ + +enum PsrMode { + PSR_MODE_EL0t = 0x0u, + PSR_MODE_EL1t = 0x4u, + PSR_MODE_EL1h = 0x5u, + PSR_MODE_EL2t = 0x8u, + PSR_MODE_EL2h = 0x9u, + PSR_MODE_EL3t = 0xCu, + PSR_MODE_EL3h = 0xDu, + PSR_MODE_MASK = 0xFu, + PSR_MODE32_BIT = 0x10u, +}; + +enum PsrInterruptBit { + PSR_F_BIT = 1u << 6, + PSR_I_BIT = 1u << 7, + PSR_A_BIT = 1u << 8, + PSR_D_BIT = 1u << 9, +}; + +enum PsrStatusBit { + PSR_PAN_BIT = 1u << 22, + PSR_UAO_BIT = 1u << 23, +}; + +enum PsrFlagBit { + PSR_V_BIT = 1u << 28, + PSR_C_BIT = 1u << 29, + PSR_Z_BIT = 1u << 30, + PSR_N_BIT = 1u << 31, +}; + +enum PsrBitGroup { + PSR_c = 0x000000FFu, + PSR_x = 0x0000FF00u, + PSR_s = 0x00FF0000u, + PSR_f = 0xFF000000u, +}; + +} +} +} diff --git a/mesosphere/include/mesosphere/board/KSystemClock.hpp b/mesosphere/include/mesosphere/board/KSystemClock.hpp new file mode 100644 index 000000000..7b70fe860 --- /dev/null +++ b/mesosphere/include/mesosphere/board/KSystemClock.hpp @@ -0,0 +1,11 @@ +#pragma once + +#if 1 //defined MESOSPHERE_ARCH_ARM64 + +#include + +#else + +//#error "No arch defined" + +#endif diff --git a/mesosphere/include/mesosphere/board/common/arm/arm64/timer/KSystemClock.hpp b/mesosphere/include/mesosphere/board/common/arm/arm64/timer/KSystemClock.hpp new file mode 100644 index 000000000..587651ed3 --- /dev/null +++ b/mesosphere/include/mesosphere/board/common/arm/arm64/timer/KSystemClock.hpp @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include + +#ifndef MESOSPHERE_SYSTEM_CLOCK_RATE // NEEDS to be defined; depends on cntfreq +#define MESOSPHERE_SYSTEM_CLOCK_RATE 192000000ull +#endif + +// Architectural aarch64 armv8 timer + +namespace mesosphere +{ +inline namespace board +{ +inline namespace common +{ +inline namespace arm +{ +inline namespace arm64 +{ + +// Dummy implementation +// Needs to be changed for platform stuff + +using namespace std::chrono_literals; + +/// Fulfills Clock named requirements +class KSystemClock { + public: + + using rep = s64; + using period = std::ratio<1, MESOSPHERE_SYSTEM_CLOCK_RATE>; + using duration = std::chrono::duration; + using time_point = std::chrono::time_point; + + static constexpr bool is_steady = true; + + static time_point now() + { + return time_point{duration::zero()}; + } + + static constexpr bool isCorePrivate = true; + static constexpr duration 
forever = duration{-1}; + static constexpr time_point never = time_point{forever}; + + static constexpr uint GetIrqId() { return 30; } + + static void Disable() + { + // Note: still continues counting. + MESOSPHERE_WRITE_SYSREG(0, cntp_ctl_el0); + } + + static void SetInterruptMasked(bool maskInterrupts) + { + u64 val = maskInterrupts ? 3 : 1; // Note: also enables the timer. + MESOSPHERE_WRITE_SYSREG(val, cntp_ctl_el0); + } + + static void SetAlarm(const time_point &when) + { + u64 val = (u64)when.time_since_epoch().count(); + MESOSPHERE_WRITE_SYSREG(val, cntp_cval_el0); + SetInterruptMasked(false); + } + + static void Initialize() + { + MESOSPHERE_WRITE_SYSREG(1, cntkctl_el1); // Trap register accesses from el0. + Disable(); + MESOSPHERE_WRITE_SYSREG(UINT64_MAX, cntp_cval_el0); + SetInterruptMasked(true); + } +}; + +} +} +} +} +} diff --git a/mesosphere/include/mesosphere/core/KCoreContext.hpp b/mesosphere/include/mesosphere/core/KCoreContext.hpp new file mode 100644 index 000000000..b68834d54 --- /dev/null +++ b/mesosphere/include/mesosphere/core/KCoreContext.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ + +class KProcess; +class KThread; +class KScheduler; +class KAlarm; + +class KCoreContext { + public: + static KCoreContext &GetInstance(uint coreId) { return instances[coreId]; }; + static KCoreContext &GetCurrentInstance() { return instances[0]; /* FIXME*/ }; + + KThread *GetCurrentThread() const { return currentThread; } + KProcess *GetCurrentProcess() const { return currentProcess; } + KScheduler *GetScheduler() const { return scheduler; } + KAlarm *GetAlarm() const { return alarm; } + + KCoreContext(KScheduler *scheduler) : scheduler(scheduler) {} + private: + KThread *volatile currentThread = nullptr; + KProcess *volatile currentProcess = nullptr; + KScheduler *volatile scheduler = nullptr; + KAlarm *volatile alarm = nullptr; + + // more stuff + + static std::array instances; +}; + +} diff --git a/mesosphere/include/mesosphere/core/types.hpp b/mesosphere/include/mesosphere/core/types.hpp new file mode 100644 index 000000000..16128855d --- /dev/null +++ b/mesosphere/include/mesosphere/core/types.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include +#include +#include +#include + +#define MAX_CORES 4 + +namespace mesosphere +{ + +using ushort = unsigned short; +using uint = unsigned int; +using ulong = unsigned long; + +using std::size_t; + +using uiptr = std::uintptr_t; +using iptr = std::intptr_t; + +using u8 = uint8_t; +using u16 = uint16_t; +using u32 = uint32_t; +using u64 = uint64_t; + +using s8 = int8_t; +using s16 = int16_t; +using s32 = int32_t; +using s64 = int64_t; + +using vu8 = volatile uint8_t; +using vu16 = volatile uint16_t; +using vu32 = volatile uint32_t; +using vu64 = volatile uint64_t; + +using vs8 = volatile int8_t; +using vs16 = volatile int16_t; +using vs32 = volatile int32_t; +using vs64 = volatile int64_t; + +using Result = uint; + +template +using SharedPtr = boost::intrusive_ptr; + +struct Handle { + u16 index : 15; + s16 id : 16; + bool isAlias : 1; + + constexpr bool IsAliasOrFree() const { return isAlias || id < 0; } + + constexpr bool operator==(const Handle &other) const + { + return index == other.index && id == other.id && isAlias == other.isAlias; + } + + constexpr bool operator!=(const Handle &other) const { return !(*this == other); } +}; + +} diff --git a/mesosphere/include/mesosphere/core/util.hpp b/mesosphere/include/mesosphere/core/util.hpp new file mode 100644 index 000000000..837716413 --- /dev/null +++ 
b/mesosphere/include/mesosphere/core/util.hpp @@ -0,0 +1,109 @@ +#pragma once + +#include +#include +#include + +#include + +/* + Boost doesn't provide get_parent_from members for arrays so we have to implement this manually + for arrays, for gcc at leadt. + + Thanks fincs. +*/ + +#define kassert(cond) ((void)(cond)) + +namespace mesosphere +{ + +namespace +{ +template +union __my_offsetof { + const MemberT ClassT::* ptr; + iptr offset; +}; + +// Thanks neobrain +template +static constexpr std::array MakeArrayOfHelper(Args&&... args, std::index_sequence) { + // There are two parameter pack expansions here: + // * The inner expansion is over "t" + // * The outer expansion is over "Indexes" + // + // This function will always be called with sizeof...(Indexes) == N, + // so the outer expansion generates exactly N copies of the constructor call + return std::array { ((void)Indexes, T { args... })... }; +} + +// Thanks neobrain +template +static constexpr std::array MakeArrayWithFactorySequenceOfHelper(Args&&... args, std::index_sequence) { + return std::array { T { F{}(std::integral_constant{}), args... }... }; +} +} + +namespace detail +{ + +template +constexpr ClassT* GetParentFromArrayMember(MemberT* member, size_t index, const MemberT (ClassT::* ptr)[N]) noexcept { + member -= index; + return (ClassT*)((iptr)member - __my_offsetof { ptr }.offset); + return nullptr; +} + +template +constexpr const ClassT* GetParentFromArrayMember(const MemberT* member, size_t index, const MemberT (ClassT::* ptr)[N]) noexcept { + member -= index; + return (const ClassT*)((iptr)member - __my_offsetof { ptr }.offset); + return nullptr; +} + +template +constexpr ClassT* GetParentFromMember(MemberT* member, const MemberT ClassT::* ptr) noexcept { + return (ClassT*)((iptr)member - __my_offsetof { ptr }.offset); + return nullptr; +} + +template +constexpr const ClassT* GetParentFromMember(const MemberT* member, const MemberT ClassT::* ptr) noexcept { + return (const ClassT*)((iptr)member - __my_offsetof { ptr }.offset); + return nullptr; +} + +template +constexpr std::array MakeArrayOf(Args&&... args) { + return MakeArrayOfHelper(std::forward(args)..., std::make_index_sequence{}); +} + +template +constexpr std::array MakeArrayWithFactorySequenceOf(Args&&... 
args) { + return MakeArrayWithFactorySequenceOfHelper(std::forward(args)..., std::make_index_sequence{}); +} + +/// Sequence of two distinc powers of 2 +constexpr ulong A038444(ulong n) +{ + if (n == 0) { + return 3; + } + + ulong v = A038444(n - 1); + ulong m1 = 1 << (63 - __builtin_clzl(v)); + ulong m2 = 1 << (63 - __builtin_clzl(v&~m1)); + + if (m2 << 1 == m1) { + m2 = 1; + m1 <<= 1; + } else { + m2 <<= 1; + } + + return m1 | m2; +} + +} +} diff --git a/mesosphere/include/mesosphere/interfaces/IAlarmable.hpp b/mesosphere/include/mesosphere/interfaces/IAlarmable.hpp new file mode 100644 index 000000000..dae8b181f --- /dev/null +++ b/mesosphere/include/mesosphere/interfaces/IAlarmable.hpp @@ -0,0 +1,61 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ + +struct KAlarm; + +struct AlarmableSetTag; + +using AlarmableSetBaseHook = boost::intrusive::set_base_hook< + boost::intrusive::tag, + boost::intrusive::link_mode +>; + +class IAlarmable : public AlarmableSetBaseHook { + public: + struct Comparator { + constexpr bool operator()(const IAlarmable &lhs, const IAlarmable &rhs) const { + return lhs.alarmTime < rhs.alarmTime; + } + }; + + virtual void OnAlarm() = 0; + + constexpr KSystemClock::time_point GetAlarmTime() const { return alarmTime; } + + /// Precondition: alarm has not been set + template + void SetAlarmTime(const std::chrono::time_point &alarmTime) + { + SetAlarmTime(alarmTime); + } + + template + void SetAlarmIn(const std::chrono::duration &alarmTimeOffset) + { + SetAlarmTime(KSystemClock::now() + alarmTimeOffset); + } + + void ClearAlarm(); + + private: + void SetAlarmTimeImpl(const KSystemClock::time_point &alarmTime); + + KSystemClock::time_point alarmTime = KSystemClock::time_point{}; + + friend class KAlarm; +}; + + +using AlarmableSetType = + boost::intrusive::make_set< + IAlarmable, + boost::intrusive::base_hook, + boost::intrusive::compare +>::type; + +} diff --git a/mesosphere/include/mesosphere/interfaces/IInterruptible.hpp b/mesosphere/include/mesosphere/interfaces/IInterruptible.hpp new file mode 100644 index 000000000..1b9d7e041 --- /dev/null +++ b/mesosphere/include/mesosphere/interfaces/IInterruptible.hpp @@ -0,0 +1,16 @@ +#pragma once + +#include + +namespace mesosphere +{ + +class IWork; + +class IInterruptible { + public: + + /// Top half in Linux jargon + virtual IWork *HandleInterrupt(uint interruptId) = 0; +}; +} diff --git a/mesosphere/include/mesosphere/interfaces/IInterruptibleWork.hpp b/mesosphere/include/mesosphere/interfaces/IInterruptibleWork.hpp new file mode 100644 index 000000000..e4500d299 --- /dev/null +++ b/mesosphere/include/mesosphere/interfaces/IInterruptibleWork.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ + +class IInterruptibleWork : public IInterruptible, public IWork { + public: + + virtual IWork *HandleInterrupt(uint interruptId) override; +}; + +} diff --git a/mesosphere/include/mesosphere/interfaces/ILimitedResource.hpp b/mesosphere/include/mesosphere/interfaces/ILimitedResource.hpp new file mode 100644 index 000000000..34c4ed65c --- /dev/null +++ b/mesosphere/include/mesosphere/interfaces/ILimitedResource.hpp @@ -0,0 +1,47 @@ +#pragma once +// circular dep: #include "resource_limit.h" + +#include +#include + +namespace mesosphere +{ + +namespace detail +{ + +void ReleaseResource(const SharedPtr &owner, KAutoObject::TypeId typeId, size_t count, size_t realCount); +void ReleaseResource(const SharedPtr &reslimit, KAutoObject::TypeId typeId, size_t count, size_t realCount); + +} + 
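+// Illustration of the intended CRTP use (assuming a hypothetical limited type
+// KFoo): `class KFoo : public KAutoObject, public ILimitedResource<KFoo> { ... };`
+// SetResourceOwner() records which process (or resource limit) the object was
+// charged against, and the destructor below releases that charge through
+// detail::ReleaseResource, using Derived::typeId and GetResourceCount().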
+template +class ILimitedResource { + public: + + const SharedPtr& GetResourceOwner() const { return resourceOwner; } + void SetResourceOwner(SharedPtr owner) + { + resourceOwner = std::move(owner); + isLimitedResourceActive = true; + } + + virtual std::tuple GetResourceCount() + { + return {1, 1}; // current, real + } + + ~ILimitedResource() + { + if (isLimitedResourceActive) { + auto [cur, real] = GetResourceCount(); + detail::ReleaseResource(resourceOwner, Derived::typeId, cur, real); + } + } + + private: + SharedPtr resourceOwner{}; + bool isLimitedResourceActive = false; +}; + +} diff --git a/mesosphere/include/mesosphere/interfaces/ISetAllocated.hpp b/mesosphere/include/mesosphere/interfaces/ISetAllocated.hpp new file mode 100644 index 000000000..c3fce4ba9 --- /dev/null +++ b/mesosphere/include/mesosphere/interfaces/ISetAllocated.hpp @@ -0,0 +1,60 @@ +#pragma once + +#include + +namespace mesosphere +{ + +template +class ISetAllocated : public KObjectAllocator::AllocatedSetHookType +{ + public: + static void InitializeAllocator(void *buffer, size_t capacity) noexcept + { + allocator.GetSlabHeap().initialize(buffer, capacity); + } + + void *operator new(size_t sz) noexcept + { + kassert(sz == sizeof(Derived)); + return allocator.GetSlabHeap().allocate(); + } + + void operator delete(void *ptr) noexcept + { + allocator.GetSlabHeap().deallocate((Derived *)ptr); + } + + protected: + void AddToAllocatedSet() noexcept + { + Derived *d = (Derived *)this; + allocator.RegisterObject(*d); + isRegisteredToAllocator = true; + } + + void RemoveFromAllocatedSet() noexcept + { + Derived *d = (Derived *)this; + allocator.UnregisterObject(*d); + } + + virtual ~ISetAllocated() + { + if (isRegisteredToAllocator) { + RemoveFromAllocatedSet(); + isRegisteredToAllocator = false; + } + } + + private: + bool isRegisteredToAllocator = false; + + protected: + static KObjectAllocator allocator; +}; + +template +KObjectAllocator ISetAllocated::allocator{}; + +} \ No newline at end of file diff --git a/mesosphere/include/mesosphere/interfaces/ISlabAllocated.hpp b/mesosphere/include/mesosphere/interfaces/ISlabAllocated.hpp new file mode 100644 index 000000000..83fa62b09 --- /dev/null +++ b/mesosphere/include/mesosphere/interfaces/ISlabAllocated.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include + +namespace mesosphere +{ + +template +class ISlabAllocated +{ + public: + static void InitializeSlabHeap(void *buffer, size_t capacity) noexcept + { + slabHeap.initialize(buffer, capacity); + } + + void *operator new(size_t sz) noexcept + { + kassert(sz == sizeof(Derived)); + return slabHeap.allocate(); + } + + void operator delete(void *ptr) noexcept + { + slabHeap.deallocate((Derived *)ptr); + } + + protected: + static KSlabHeap slabHeap; +}; + +template +KSlabHeap ISlabAllocated::slabHeap{}; + +} diff --git a/mesosphere/include/mesosphere/interfaces/IWork.hpp b/mesosphere/include/mesosphere/interfaces/IWork.hpp new file mode 100644 index 000000000..c29cdf70c --- /dev/null +++ b/mesosphere/include/mesosphere/interfaces/IWork.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ + +struct WorkSListTag; + +using WorkSListBaseHook = boost::intrusive::slist_base_hook< + boost::intrusive::tag, + boost::intrusive::link_mode +>; + +/// Bottom half in Linux jargon +class IWork : public WorkSListBaseHook { + public: + virtual void DoWork() = 0; +}; + +using WorkSList = boost::intrusive::make_slist< + IWork, + boost::intrusive::base_hook, + boost::intrusive::cache_last, + 
boost::intrusive::constant_time_size +>::type; + +} diff --git a/mesosphere/include/mesosphere/interrupts/KAlarm.hpp b/mesosphere/include/mesosphere/interrupts/KAlarm.hpp new file mode 100644 index 000000000..113716dbb --- /dev/null +++ b/mesosphere/include/mesosphere/interrupts/KAlarm.hpp @@ -0,0 +1,34 @@ +#pragma once + +#include +#include +#include +#include + +namespace mesosphere +{ + +class KAlarm final : public IInterruptibleWork { + public: + + //KAlarm() = default; + + /// Precondition: alarmable not already added + void AddAlarmable(IAlarmable &alarmable); + + /// Precondition: alarmable is present + void RemoveAlarmable(const IAlarmable &alarmable); + + void HandleAlarm(); + + KAlarm(const KAlarm &) = delete; + KAlarm(KAlarm &&) = delete; + KAlarm &operator=(const KAlarm &) = delete; + KAlarm &operator=(KAlarm &&) = delete; + + private: + KSpinLock spinlock{}; + AlarmableSetType alarmables{}; +}; + +} diff --git a/mesosphere/include/mesosphere/interrupts/KWorkQueue.hpp b/mesosphere/include/mesosphere/interrupts/KWorkQueue.hpp new file mode 100644 index 000000000..08b8848b0 --- /dev/null +++ b/mesosphere/include/mesosphere/interrupts/KWorkQueue.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ + +class KWorkQueue final { + public: + + void AddWork(IWork &work); + void Initialize(); + + void HandleWorkQueue(); + + KWorkQueue(const KWorkQueue &) = delete; + KWorkQueue(KWorkQueue &&) = delete; + KWorkQueue &operator=(const KWorkQueue &) = delete; + KWorkQueue &operator=(KWorkQueue &&) = delete; + + private: + WorkSList workQueue{}; + SharedPtr handlerThread{}; +}; + +} diff --git a/mesosphere/include/mesosphere/kresources/KAutoObject.hpp b/mesosphere/include/mesosphere/kresources/KAutoObject.hpp new file mode 100644 index 000000000..b12151781 --- /dev/null +++ b/mesosphere/include/mesosphere/kresources/KAutoObject.hpp @@ -0,0 +1,152 @@ +#pragma once + +#include +#include +#include + +#define MESOSPHERE_AUTO_OBJECT_TRAITS(BaseId, DerivedId)\ +using BaseClass = K##BaseId ;\ +static constexpr KAutoObject::TypeId typeId = KAutoObject::TypeId::DerivedId;\ +virtual ushort GetClassToken() const\ +{\ + return KAutoObject::GenerateClassToken();\ +}\ + +namespace mesosphere +{ + +// Foward declarations for intrusive_ptr +class KProcess; +class KResourceLimit; +class KThread; + +void intrusive_ptr_add_ref(KProcess *obj); +void intrusive_ptr_release(KProcess *obj); + +void intrusive_ptr_add_ref(KResourceLimit *obj); +void intrusive_ptr_release(KResourceLimit *obj); + +class KAutoObject { + public: + + /// Class token for polymorphic type checking + virtual ushort GetClassToken() const + { + return 0; + } + + /// Comparison key for KObjectAllocator + virtual u64 GetComparisonKey() const + { + return (u64)(uiptr)this; + } + + /// Is alive (checked for deletion) + virtual bool IsAlive() const = 0; + + /// Virtual destructor + virtual ~KAutoObject(); + + + /// Check if the offset is base class of T or T itself + template + bool IsInstanceOf() const + { + ushort btoken = GenerateClassToken(); + ushort dtoken = GetClassToken(); + + return (dtoken & btoken) == btoken; + } + + // Explicitely disable copy and move, and add default ctor + KAutoObject() = default; + KAutoObject(const KAutoObject &) = delete; + KAutoObject(KAutoObject &&) = delete; + KAutoObject &operator=(const KAutoObject &) = delete; + KAutoObject &operator=(KAutoObject &&) = delete; + + /// Type order as found in official kernel + enum class TypeId : ushort { + AutoObject = 0, + SynchronizationObject, 
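+        /*
+           Worked example of the token scheme implemented by GenerateClassToken()
+           below, assuming (for illustration only) that KThread is a final class with
+           typeId Thread (= 6) deriving from the non-final SynchronizationObject (= 1):
+
+               token(KSynchronizationObject) = 1 << 1                          = 0x0002
+               token(KThread) = ((ushort)A038444(6 - 3) << 9) | 0x100 | 0x0002 = 0x1302
+
+           IsInstanceOf<KSynchronizationObject>() on a KThread then checks
+           (0x1302 & 0x0002) == 0x0002, which holds.
+        */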
+ ReadableEvent, + + FinalClassesMin = 3, + + InterruptEvent = 3, + Debug, + ClientSession, + Thread, + Process, + Session, + ServerPort, + ResourceLimit, + SharedMemory, + LightClientSession, + ServerSession, + LightSession, + Event, + LightServerSession, + DeviceAddressSpace, + ClientPort, + Port, + WritableEvent, + TransferMemory, + SessionRequest, + CodeMemory, // JIT + + FinalClassesMax = CodeMemory, + }; + + private: + std::atomic referenceCount{0}; // official kernel has u32 for this + friend void intrusive_ptr_add_ref(KAutoObject *obj); + friend void intrusive_ptr_release(KAutoObject *obj); + + protected: + + template + static constexpr ushort GenerateClassToken() + { + /* The token follows these following properties: + * Multiple inheritance is not supported + * (BaseToken & DerivedToken) == BaseToken + * The token for KAutoObject is 0 + * Not-final classes have a token of (1 << typeid) + * Final derived classes have a unique token part of Seq[typeid - DerivedClassMin] | 0x100, + where Seq is (in base 2) 11, 101, 110, 1001, 1010, and so on... + */ + if constexpr (std::is_same_v) { + return 0; + } else if constexpr (!std::is_final_v) { + return (1 << (ushort)T::typeId) | GenerateClassToken(); + } else { + ushort off = (ushort)T::typeId - (ushort)TypeId::FinalClassesMin; + return ((ushort)detail::A038444(off) << 9) | 0x100u | GenerateClassToken(); + } + } +}; + +inline void intrusive_ptr_add_ref(KAutoObject *obj) +{ + ulong oldval = obj->referenceCount.fetch_add(1); + kassert(oldval + 1 != 0); +} + +inline void intrusive_ptr_release(KAutoObject *obj) +{ + ulong oldval = obj->referenceCount.fetch_sub(1); + if (oldval - 1 == 0) { + delete obj; + } +} + +template +inline SharedPtr DynamicObjectCast(SharedPtr object) { + if (object != nullptr && object->IsInstanceOf()) { + return boost::static_pointer_cast(object); + } + return nullptr; +} + +} diff --git a/mesosphere/include/mesosphere/kresources/KObjectAllocator.hpp b/mesosphere/include/mesosphere/kresources/KObjectAllocator.hpp new file mode 100644 index 000000000..c7b66629c --- /dev/null +++ b/mesosphere/include/mesosphere/kresources/KObjectAllocator.hpp @@ -0,0 +1,70 @@ +#pragma once +#include +#include +#include +#include + +namespace mesosphere +{ + +template +class KObjectAllocator { + private: + struct Comparator { + constexpr bool operator()(const T &lhs, const T &rhs) const + { + return lhs.GetComparisonKey() < rhs.GetComparisonKey(); + } + }; + + public: + struct HookTag; + + using AllocatedSetHookType = boost::intrusive::set_base_hook< + boost::intrusive::tag, + boost::intrusive::link_mode + >; + using AllocatedSetType = typename + boost::intrusive::make_set< + T, + boost::intrusive::base_hook, + boost::intrusive::compare + >::type; + + using pointer = T *; + using const_pointer = const T *; + using void_pointer = void *; + using const_void_ptr = const void *; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + AllocatedSetType &GetAllocatedSet() + { + return allocatedSet; + } + + KSlabHeap &GetSlabHeap() + { + return slabHeap; + } + + void RegisterObject(T &obj) noexcept + { + std::lock_guard guard{mutex}; + allocatedSet.insert(obj); + } + + void UnregisterObject(T &obj) noexcept + { + std::lock_guard guard{mutex}; + allocatedSet.erase(obj); + } + + private: + AllocatedSetType allocatedSet{}; + KSlabHeap slabHeap{}; + KMutex mutex{}; +}; + +} diff --git a/mesosphere/include/mesosphere/kresources/KResourceLimit.hpp b/mesosphere/include/mesosphere/kresources/KResourceLimit.hpp new 
file mode 100644
index 000000000..8229249f9
--- /dev/null
+++ b/mesosphere/include/mesosphere/kresources/KResourceLimit.hpp
@@ -0,0 +1,87 @@
+#pragma once
+
+#include 
+
+namespace mesosphere
+{
+
+class KThread;
+class KEvent;
+class KTransferMemory;
+class KSession;
+
+class KResourceLimit final :
+    public KAutoObject,
+    public ISetAllocated
+{
+    public:
+
+    MESOSPHERE_AUTO_OBJECT_TRAITS(AutoObject, ResourceLimit);
+    virtual bool IsAlive() const override { return true; }
+
+    enum class Category : uint {
+        Memory = 0,
+        Threads,
+        Events,
+        TransferMemories,
+        Sessions,
+
+        Max,
+    };
+
+    static constexpr Category GetCategory(KAutoObject::TypeId typeId) {
+        switch (typeId) {
+            case KAutoObject::TypeId::Thread: return Category::Threads;
+            case KAutoObject::TypeId::Event: return Category::Events;
+            case KAutoObject::TypeId::TransferMemory: return Category::TransferMemories;
+            case KAutoObject::TypeId::Session: return Category::Sessions;
+            default: return Category::Max;
+        }
+    }
+
+    template Category GetCategoryOf()
+    {
+        return GetCategory(T::typeId);
+    }
+
+    static KResourceLimit &GetDefaultInstance() { return defaultInstance; }
+
+    size_t GetCurrentValue(Category category) const;
+    size_t GetLimitValue(Category category) const;
+    size_t GetRemainingValue(Category category) const;
+
+    bool SetLimitValue(Category category, size_t value);
+
+    template
+    bool Reserve(Category category, size_t count, const std::chrono::duration& timeout)
+    {
+        return ReserveDetail(category, count, KSystemClock::now() + timeout);
+    }
+
+    void Release(Category category, size_t count, size_t realCount);
+
+    private:
+
+    static KResourceLimit defaultInstance;
+    bool ReserveDetail(Category category, size_t count, const KSystemClock::time_point &timeoutTime);
+
+    // Signed in official kernel
+    size_t limitValues[(size_t)Category::Max] = {};
+
+    // Current value: real value + dangling resources about to be released
+    size_t currentValues[(size_t)Category::Max] = {};
+    size_t realValues[(size_t)Category::Max] = {};
+
+    mutable KConditionVariable condvar{};
+};
+
+inline void intrusive_ptr_add_ref(KResourceLimit *obj)
+{
+    intrusive_ptr_add_ref((KAutoObject *)obj);
+}
+
+inline void intrusive_ptr_release(KResourceLimit *obj)
+{
+    intrusive_ptr_release((KAutoObject *)obj);
+}
+}
diff --git a/mesosphere/include/mesosphere/kresources/KSlabHeap.hpp b/mesosphere/include/mesosphere/kresources/KSlabHeap.hpp
new file mode 100644
index 000000000..7aebe7b31
--- /dev/null
+++ b/mesosphere/include/mesosphere/kresources/KSlabHeap.hpp
@@ -0,0 +1,55 @@
+#pragma once
+
+#include 
+
+namespace mesosphere
+{
+
+template
+class KSlabHeap {
+    public:
+    using pointer = T *;
+    using const_pointer = const T *;
+    using void_pointer = void *;
+    using const_void_ptr = const void *;
+    using value_type = T;
+    using size_type = size_t;
+    using difference_type = ptrdiff_t;
+
+    private:
+    KSlabStack stack{};
+    size_t capacity = 0;
+    T *bufferStart = nullptr;
+
+    public:
+    T *allocate() noexcept
+    {
+        return stack.pop();
+    }
+
+    void deallocate(T *elem) noexcept
+    {
+        kassert(elem >= bufferStart && elem < bufferStart + capacity);
+        stack.push(elem);
+    }
+
+    constexpr size_t size() const
+    {
+        return capacity;
+    }
+
+    KSlabHeap() noexcept = default;
+
+    void initialize(void *buffer, size_t capacity)
+    {
+        this->capacity = capacity;
+        this->bufferStart = (T *)buffer;
+        stack.initialize(buffer, capacity);
+    }
+
+    KSlabHeap(void *buffer, size_t capacity) noexcept : stack(buffer, capacity), capacity(capacity), bufferStart((T *)buffer)
+    {
+    }
+};
+
+}
diff
--git a/mesosphere/include/mesosphere/kresources/KSlabStack.hpp b/mesosphere/include/mesosphere/kresources/KSlabStack.hpp new file mode 100644 index 000000000..b8b1f2bbd --- /dev/null +++ b/mesosphere/include/mesosphere/kresources/KSlabStack.hpp @@ -0,0 +1,78 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ + +template +class KSlabStack { + public: + using pointer = T *; + using const_pointer = const T *; + using void_pointer = void *; + using const_void_ptr = const void *; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + private: + struct Node { + Node *next; + }; + + std::atomic head; + public: + + void push(T *data) noexcept + { + Node *newHead = (Node *)data; + Node *oldHead = head.load(); + do { + newHead->next = oldHead; + } while(!head.compare_exchange_weak(oldHead, newHead)); + } + + T *pop() noexcept + { + Node *newHead; + Node *oldHead = head.load(); + if (oldHead == nullptr) { + return nullptr; + } else { + do { + newHead = oldHead == nullptr ? oldHead : oldHead->next; + } while(!head.compare_exchange_weak(oldHead, newHead)); + + return (T *)oldHead; + } + } + + KSlabStack() noexcept = default; + + // Not reentrant (unlike NN's init function) + void initialize(void *buffer, size_t size) noexcept + { + T *ar = (T *)buffer; + if (size == 0) { + return; + } + + Node *ndlast = (Node *)&ar[size - 1]; + ndlast->next = nullptr; + + for (size_t i = 0; i < size - 1; i++) { + Node *nd = (Node *)&ar[i]; + Node *ndnext = (Node *)&ar[i + 1]; + nd->next = ndnext; + } + + Node *ndfirst = (Node *)&ar[0]; + head.store(ndfirst); + } + + KSlabStack(void *buffer, size_t size) { initialize(buffer, size); } +}; + +} diff --git a/mesosphere/include/mesosphere/processes/KHandleTable.hpp b/mesosphere/include/mesosphere/processes/KHandleTable.hpp new file mode 100644 index 000000000..52cbde617 --- /dev/null +++ b/mesosphere/include/mesosphere/processes/KHandleTable.hpp @@ -0,0 +1,74 @@ +#pragma once + +#include +#include +#include +#include + +namespace mesosphere +{ + +class KThread; +class KProcess; + +class KHandleTable final { + public: + + static constexpr size_t capacityLimit = 1024; + static constexpr Handle selfThreadAlias{0, -1, true}; + static constexpr Handle selfProcessAlias{1, -1, true}; + + template + SharedPtr Get(Handle handle, bool allowAlias = true) const + { + if constexpr (std::is_same_v) { + (void)allowAlias; + return GetAutoObject(handle); + } else if constexpr (std::is_same_v) { + return GetThread(handle, allowAlias); + } else if constexpr (std::is_same_v) { + return GetProcess(handle, allowAlias); + } else { + return DynamicObjectCast(GetAutoObject(handle)); + } + } + + bool Generate(Handle &out, SharedPtr obj); + + /// For deferred-init + bool Set(SharedPtr obj, Handle handle); + + bool Close(Handle handle); + void Destroy(); + + constexpr size_t GetNumActive() const { return numActive; } + constexpr size_t GetSize() const { return size; } + constexpr size_t GetCapacity() const { return capacity; } + + KHandleTable(size_t capacity); + ~KHandleTable(); + + private: + + bool IsValid(Handle handle) const; + SharedPtr GetAutoObject(Handle handle) const; + SharedPtr GetThread(Handle handle, bool allowAlias = true) const; + SharedPtr GetProcess(Handle handle, bool allowAlias = true) const; + + struct Entry { + SharedPtr object{}; + s16 id = 0; + }; + + std::array entries{}; + + // Here the official kernel uses pointer, Yuzu and ourselves are repurposing a field in Entry instead. 
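+    // (Illustration: per the Handle bitfield in core/types.hpp, a handle packs the
+    //  table slot in its 15-bit index field and the slot's generation id in its id
+    //  field, so IsValid() can presumably reject stale handles once a slot has been
+    //  reused under a new id; free slots are assumed to be chained through Entry::id
+    //  starting at firstFreeIndex below, rather than through a separate pointer.)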
+ s16 firstFreeIndex = 0; + s16 idCounter = 1; + + u16 numActive = 0, size = 0, capacity = 0; + + mutable KSpinLock spinlock; +}; + +} diff --git a/mesosphere/include/mesosphere/processes/KProcess.hpp b/mesosphere/include/mesosphere/processes/KProcess.hpp new file mode 100644 index 000000000..70949bb0e --- /dev/null +++ b/mesosphere/include/mesosphere/processes/KProcess.hpp @@ -0,0 +1,43 @@ +#pragma once + +class KThread; +class KResourceLimit; + +#include +#include +#include + +namespace mesosphere +{ + +class KProcess : public KAutoObject { + public: + MESOSPHERE_AUTO_OBJECT_TRAITS(AutoObject, Process); + + virtual bool IsAlive() const override { return true; } + constexpr long GetSchedulerOperationCount() const { return schedulerOperationCount; } + + void IncrementSchedulerOperationCount() { ++schedulerOperationCount; } + void SetLastThreadAndIdleSelectionCount(KThread *thread, ulong idleSelectionCount); + + const SharedPtr &GetResourceLimit() const { return reslimit; } + + private: + KThread *lastThreads[MAX_CORES]{nullptr}; + ulong lastIdleSelectionCount[MAX_CORES]{0}; + long schedulerOperationCount = -1; + + SharedPtr reslimit{}; +}; + +inline void intrusive_ptr_add_ref(KProcess *obj) +{ + intrusive_ptr_add_ref((KAutoObject *)obj); +} + +inline void intrusive_ptr_release(KProcess *obj) +{ + intrusive_ptr_release((KAutoObject *)obj); +} + +} diff --git a/mesosphere/include/mesosphere/threading/KConditionVariable.hpp b/mesosphere/include/mesosphere/threading/KConditionVariable.hpp new file mode 100644 index 000000000..f4bde743a --- /dev/null +++ b/mesosphere/include/mesosphere/threading/KConditionVariable.hpp @@ -0,0 +1,78 @@ +#pragma once + +#include +#include + +namespace mesosphere +{ + + +/// Provides an interface similar to std::condition_variable +class KConditionVariable final { + public: + + using native_handle_type = uiptr; + + KConditionVariable() = default; + KConditionVariable(const KConditionVariable &) = delete; + KConditionVariable(KConditionVariable &&) = delete; + KConditionVariable &operator=(const KConditionVariable &) = delete; + KConditionVariable &operator=(KConditionVariable &&) = delete; + + native_handle_type native_handle() { return mutex_.native_handle(); } + + KMutex &mutex() { return mutex_; } + + void wait() noexcept + { + wait_until_impl(KSystemClock::never); + } + template + void wait(Predicate pred) + { + while (!pred()) { + wait(); + } + } + + template + void wait_until(const std::chrono::time_point &timeoutPoint) noexcept + { + wait_until_impl(timeoutPoint); + } + template + bool wait_until(const std::chrono::time_point &timeoutPoint, Predicate pred) + { + while (!pred()) { + wait_until(timeoutPoint); + if (Clock::now() >= timeoutPoint) { + return pred(); + } + } + + return true; + } + + template + void wait_for(const std::chrono::duration& timeout) noexcept + { + wait_until(KSystemClock::now() + timeout); + } + + template + bool wait_for(const std::chrono::duration& timeout, Predicate pred) + { + return wait_until(KSystemClock::now() + timeout, std::move(pred)); + } + + void notify_one() noexcept; + void notify_all() noexcept; + + private: + void wait_until_impl(const KSystemClock::time_point &timeoutPoint) noexcept; + + KMutex mutex_{}; + KThread::WaitList waiterList{}; +}; + +} diff --git a/mesosphere/include/mesosphere/threading/KMultiLevelQueue.hpp b/mesosphere/include/mesosphere/threading/KMultiLevelQueue.hpp new file mode 100644 index 000000000..b724aa745 --- /dev/null +++ b/mesosphere/include/mesosphere/threading/KMultiLevelQueue.hpp @@ -0,0 
+1,340 @@ +#pragma once + +#include +#include +#include + +namespace mesosphere +{ + +template +class KMultiLevelQueue { + static_assert(depth_ <= 64, "Bitfield must be constrained in a u64"); +public: + static constexpr uint depth = depth_; + + using IntrusiveListType = IntrusiveListType_; + using PrioGetterType = PrioGetterType_; + + using value_traits = typename IntrusiveListType::value_traits; + + using pointer = typename IntrusiveListType::pointer; + using const_pointer = typename IntrusiveListType::const_pointer; + using value_type = typename IntrusiveListType::value_type; + using reference = typename IntrusiveListType::reference; + using const_reference = typename IntrusiveListType::const_reference; + using difference_type = typename IntrusiveListType::difference_type; + using size_type = typename IntrusiveListType::size_type; + + template + class iterator_impl { + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename KMultiLevelQueue::value_type; + using difference_type = typename KMultiLevelQueue::difference_type; + using pointer = typename std::conditional< + isConst, + typename KMultiLevelQueue::const_pointer, + typename KMultiLevelQueue::pointer>::type; + using reference = typename std::conditional< + isConst, + typename KMultiLevelQueue::const_reference, + typename KMultiLevelQueue::reference>::type; + + bool operator==(const iterator_impl &other) const { + return (isEnd() && other.isEnd()) || (it == other.it); + } + + bool operator!=(const iterator_impl &other) const { + return !(*this == other); + } + + reference operator*() { + return *it; + } + + pointer operator->() { + return it.operator->(); + } + + iterator_impl &operator++() { + if (isEnd()) { + return *this; + } else { + ++it; + } + if (it == getEndItForPrio()) { + u64 prios = mlq.usedPriorities; + prios &= ~((1ull << (currentPrio + 1)) - 1); + if (prios == 0) { + currentPrio = KMultiLevelQueue::depth; + } else { + currentPrio = __builtin_ffsll(prios) - 1; + it = getBeginItForPrio(); + } + } + return *this; + } + + iterator_impl &operator--() { + if (isEnd()) { + if (mlq.usedPriorities != 0) { + currentPrio = 63 - __builtin_clzll(mlq.usedPriorities); + it = getEndItForPrio(); + --it; + } + } else if (it == getBeginItForPrio()) { + u64 prios = mlq.usedPriorities; + prios &= (1ull << currentPrio) - 1; + if (prios != 0) { + currentPrio = __builtin_ffsll(prios) - 1; + it = getEndItForPrio(); + --it; + } + } else { + --it; + } + return *this; + } + + iterator_impl &operator++(int) { + const iterator_impl v{*this}; + ++(*this); + return v; + } + + iterator_impl &operator--(int) { + const iterator_impl v{*this}; + --(*this); + return v; + } + + // allow implicit const->non-const + iterator_impl(const iterator_impl &other) + : mlq(other.mlq), it(other.it), currentPrio(other.currentPrio) {} + + friend class iterator_impl; + iterator_impl() = default; + private: + friend class KMultiLevelQueue; + using container_ref = typename std::conditional< + isConst, + const KMultiLevelQueue &, + KMultiLevelQueue &>::type; + using list_iterator = typename std::conditional< + isConst, + typename IntrusiveListType::const_iterator, + typename IntrusiveListType::iterator>::type; + container_ref mlq; + list_iterator it; + uint currentPrio; + + explicit iterator_impl(container_ref mlq, list_iterator const &it, uint currentPrio) + : mlq(mlq), it(it), currentPrio(currentPrio) {} + explicit iterator_impl(container_ref mlq, uint currentPrio) + : mlq(mlq), it(), currentPrio(currentPrio) {} + constexpr 
bool isEnd() const { + return currentPrio == KMultiLevelQueue::depth; + } + + list_iterator getBeginItForPrio() const { + if constexpr (isConst) { + return mlq.levels[currentPrio].cbegin(); + } else { + return mlq.levels[currentPrio].begin(); + } + } + + list_iterator getEndItForPrio() const { + if constexpr (isConst) { + return mlq.levels[currentPrio].cend(); + } else { + return mlq.levels[currentPrio].end(); + } + } + }; + + using iterator = iterator_impl; + using const_iterator = iterator_impl; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + void add(reference r) { + uint prio = prioGetter(r); + levels[prio].push_back(r); + usedPriorities |= 1ul << prio; + } + + void remove(const_reference r) { + uint prio = prioGetter(r); + levels[prio].erase(levels[prio].iterator_to(r)); + if (levels[prio].empty()) { + usedPriorities &= ~(1ul << prio); + } + } + + void remove(const_iterator it) { + remove(*it); + } + void erase(const_iterator it) { + remove(it); + } + + void adjust(const_reference r, uint oldPrio, bool isCurrentThread = false) { + uint prio = prioGetter(r); + + // The thread is the current thread if and only if it is first on the running queue of highest priority, so it needs to be first on the dst queue as well. + auto newnext = isCurrentThread ? levels[prio].cbegin() : levels[prio].cend(); + levels[prio].splice(newnext, levels[oldPrio], levels[oldPrio].iterator_to(r)); + + usedPriorities |= 1ul << prio; + } + void adjust(const_iterator it, uint oldPrio, bool isCurrentThread = false) { + adjust(*it, oldPrio, isCurrentThread); + } + + void transferToFront(const_reference r, KMultiLevelQueue &other) { + uint prio = prioGetter(r); + other.levels[prio].splice(other.levels[prio].begin(), levels[prio], levels[prio].iterator_to(r)); + other.usedPriorities |= 1ul << prio; + if (levels[prio].empty()) { + usedPriorities &= ~(1ul << prio); + } + } + + void transferToFront(const_iterator it, KMultiLevelQueue &other) { + transferToFront(*it, other); + } + + void transferToBack(const_reference r, KMultiLevelQueue &other) { + uint prio = prioGetter(r); + other.levels[prio].splice(other.levels[prio].end(), levels[prio], levels[prio].iterator_to(r)); + other.usedPriorities |= 1ul << prio; + if (levels[prio].empty()) { + usedPriorities &= ~(1ul << prio); + } + } + + void transferToBack(const_iterator it, KMultiLevelQueue &other) { + transferToBack(*it, other); + } + + void yield(uint prio, size_type n = 1) { + levels[prio].shift_forward(n); + } + void yield(const_reference r) { + uint prio = prioGetter(r); + if (&r == &levels[prio].front()) { + yield(prio, 1); + } + } + + uint highestPrioritySet(uint maxPrio = 0) { + u64 priorities = maxPrio == 0 ? usedPriorities : (usedPriorities & ~((1 << maxPrio) - 1)); + return priorities == 0 ? depth : (uint)(__builtin_ffsll((long long)priorities) - 1); + } + + uint lowestPrioritySet(uint minPrio = depth - 1) { + u64 priorities = minPrio >= depth - 1 ? usedPriorities : (usedPriorities & ((1 << (minPrio + 1)) - 1)); + return priorities == 0 ? 
depth : 63 - __builtin_clzll(priorities); + } + + size_type size(uint prio) const { + return levels[prio].size(); + } + bool empty(uint prio) const { + return (usedPriorities & (1 << prio)) == 0; + } + + size_type size() const { + u64 prios = usedPriorities; + size_type sz = 0; + while (prios != 0) { + int ffs = __builtin_ffsll(prios); + sz += size((uint)ffs - 1); + prios &= ~(1ull << (ffs - 1)); + } + + return sz; + } + bool empty() const { + return usedPriorities == 0; + } + + reference front(uint maxPrio = 0) { + // Undefined behavior if empty + uint priority = highestPrioritySet(maxPrio); + return levels[priority == depth ? 0 : priority].front(); + } + const_reference front(uint maxPrio = 0) const { + // Undefined behavior if empty + uint priority = highestPrioritySet(maxPrio); + return levels[priority == depth ? 0 : priority].front(); + } + + reference back(uint minPrio = depth - 1) { + // Inclusive + // Undefined behavior if empty + uint priority = highestPrioritySet(minPrio); // intended + return levels[priority == depth ? 63 : priority].back(); + } + const_reference back(uint minPrio = KMultiLevelQueue::depth - 1) const { + // Inclusive + // Undefined behavior if empty + uint priority = highestPrioritySet(minPrio); // intended + return levels[priority == depth ? 63 : priority].back(); + } + + const_iterator cbegin(uint maxPrio = 0) const { + uint priority = highestPrioritySet(maxPrio); + return priority == depth ? cend() : const_iterator{*this, levels[priority].cbegin(), priority}; + } + iterator begin(uint maxPrio = 0) const { + return cbegin(maxPrio); + } + iterator begin(uint maxPrio = 0) { + uint priority = highestPrioritySet(maxPrio); + return priority == depth ? end() : iterator{*this, levels[priority].begin(), priority}; + } + + const_iterator cend(uint minPrio = depth - 1) const { + return minPrio == depth - 1 ? const_iterator{*this, depth} : cbegin(minPrio + 1); + } + const_iterator end(uint minPrio = depth - 1) const { + return cend(minPrio); + } + iterator end(uint minPrio = depth - 1) { + return minPrio == depth - 1 ? 
iterator{*this, depth} : begin(minPrio + 1); + } + + const_reverse_iterator crbegin(uint maxPrio = 0) const { + return const_reverse_iterator(cbegin(maxPrio)); + } + const_reverse_iterator rbegin(uint maxPrio = 0) const { + return crbegin(maxPrio); + } + reverse_iterator rbegin(uint maxPrio = 0) { + return reverse_iterator(begin(maxPrio)); + } + + const_reverse_iterator crend(uint minPrio = KMultiLevelQueue::depth - 1) const { + return const_reverse_iterator(cend(minPrio)); + } + const_reverse_iterator rend(uint minPrio = KMultiLevelQueue::depth - 1) const { + return crend(minPrio); + } + reverse_iterator rend(uint minPrio = KMultiLevelQueue::depth - 1) { + return reverse_iterator(end(minPrio)); + } + + KMultiLevelQueue(PrioGetterType prioGetter) : prioGetter(prioGetter), usedPriorities(0), levels() {}; + explicit KMultiLevelQueue(const value_traits &traits, PrioGetterType prioGetter = PrioGetterType{}) + : prioGetter(prioGetter), usedPriorities(0), levels(detail::MakeArrayOf(traits)) {} + +private: + PrioGetterType prioGetter; + u64 usedPriorities; + std::array levels; +}; + +} diff --git a/mesosphere/include/mesosphere/threading/KMutex.hpp b/mesosphere/include/mesosphere/threading/KMutex.hpp new file mode 100644 index 000000000..0c3903fc6 --- /dev/null +++ b/mesosphere/include/mesosphere/threading/KMutex.hpp @@ -0,0 +1,93 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace mesosphere +{ + +/// Fulfills Mutex requirements +class KMutex final { + public: + + using native_handle_type = uiptr; + + KMutex() = default; + KMutex(const KMutex &) = delete; + KMutex(KMutex &&) = delete; + KMutex &operator=(const KMutex &) = delete; + KMutex &operator=(KMutex &&) = delete; + + native_handle_type native_handle() { return tag; } + + bool try_lock() + { + KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread(); + return try_lock_impl_get_owner(currentThread) == nullptr; + } + + void lock() + { + KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread(); + KThread *owner; + + while ((owner = try_lock_impl_get_owner(currentThread)) != nullptr) { + // Our thread may be resumed even if we weren't given the mutex + lock_slow_path(*owner, *currentThread); + } + } + + void unlock() + { + // Ensure sequencial ordering, to happen-after mutex load + std::atomic_thread_fence(std::memory_order_seq_cst); + + KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread(); + native_handle_type thisThread = (native_handle_type)currentThread; + + /* + If we don't have any waiter, just store 0 (free the mutex). + Otherwise, or if a race condition happens and a new waiter appears, + take the slow path. + */ + if (tag.load() != thisThread || !tag.compare_exchange_strong(thisThread, 0)) { + unlock_slow_path(*currentThread); + } + } + + private: + + KThread *try_lock_impl_get_owner(KThread *currentThread) + { + native_handle_type oldTag, newTag; + native_handle_type thisThread = (native_handle_type)currentThread; + + oldTag = tag.load(); + do { + // Add "has listener" if the mutex was not free + newTag = oldTag == 0 ? thisThread : (oldTag | 1); + } while (!tag.compare_exchange_weak(oldTag, newTag, std::memory_order_seq_cst)); + + // The mutex was not free or was not ours => return false + if(oldTag != 0 && (oldTag & ~1) != thisThread) { + return (KThread *)(oldTag &~ 1); + } else { + /* + Ensure sequencial ordering if mutex was acquired + and mutex lock happens-before mutex unlock. 
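+            This presumably pairs with the seq_cst fence issued at the start of unlock(),
+            so that writes made by the previous owner before it released the tag are
+            visible to the thread that has just acquired it.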
+ */ + std::atomic_thread_fence(std::memory_order_seq_cst); + return nullptr; + } + } + + void lock_slow_path(KThread &owner, KThread &requester); + void unlock_slow_path(KThread &owner); + std::atomic tag{}; +}; + +} diff --git a/mesosphere/include/mesosphere/threading/KScheduler.hpp b/mesosphere/include/mesosphere/threading/KScheduler.hpp new file mode 100644 index 000000000..c6ad2fa14 --- /dev/null +++ b/mesosphere/include/mesosphere/threading/KScheduler.hpp @@ -0,0 +1,130 @@ +#pragma once + +#include +#include +#include +#include + +namespace mesosphere +{ + +//TODO +struct KCriticalSection { void lock() {} void unlock() {} bool try_lock() {return true;} }; + +class KScheduler { + public: + class Global { + public: + using MlqType = KMultiLevelQueue<64, KThread::SchedulerList, __decltype(&KThread::GetPriorityOf)>; + Global() = delete; + Global(const Global &) = delete; + Global(Global &&) = delete; + Global &operator=(const Global &) = delete; + Global &operator=(Global &&) = delete; + + static MlqType &GetScheduledMlq(uint coreId) { return scheduledMlqs[coreId]; } + static MlqType &GetSuggestedMlq(uint coreId) { return suggestedMlqs[coreId]; } + + static void SetThreadRunning(KThread &thread); + static void SetThreadPaused(KThread &thread); + static void AdjustThreadPriorityChanged(KThread &thread, uint oldPrio, bool isCurrentThread = false); + static void AdjustThreadAffinityChanged(KThread &thread, int oldCoreId, u64 oldAffinityMask); + static void YieldThread(KThread &thread); + static void YieldThreadAndBalanceLoad(KThread &thread); + static void YieldThreadAndWaitForLoadBalancing(KThread &thread); + + static void YieldPreemptThread(KThread ¤tKernelHandlerThread, uint coreId, uint maxPrio = 59); + + static void SelectThreads(); + + static constexpr uint minRegularPriority = 2; + private: + static void TransferThreadToCore(KThread &thread, int coreId); + static void AskForReselectionOrMarkRedundant(KThread *currentThread, KThread *winner); + + // allowSecondPass = true is only used in SelectThreads + static KThread *PickOneSuggestedThread(const std::array ¤tThreads, + uint coreId, bool compareTime = false, bool allowSecondPass = false, + uint maxPrio = 0, uint minPrio = MlqType::depth - 1); + + static bool reselectionRequired; + static std::array scheduledMlqs, suggestedMlqs; + + template + static void ApplyReschedulingOperationImpl(F f, KThread &thread, int coreId, u64 affMask, Args&& ...args) + { + if (coreId >= 0) { + f(scheduledMlqs[coreId], thread, std::forward(args)...); + affMask &= ~(1 << coreId); + } + + while (affMask != 0) { + coreId = __builtin_ffsll(affMask) - 1; + f(suggestedMlqs[coreId], thread, std::forward(args)...); + affMask &= ~(1 << coreId); + } + } + + template + static void ApplyReschedulingOperation(F f, KThread &thread, Args&& ...args) + { + u64 aff = thread.GetAffinityMask(); + int coreId = thread.GetCurrentCoreId(); + + ApplyReschedulingOperationImpl(f, thread, coreId, aff, std::forward(args)...); + + thread.IncrementSchedulerOperationCount(); + reselectionRequired = true; + } + }; + + KScheduler() = default; + KScheduler(const KScheduler &) = delete; + KScheduler(KScheduler &&) = delete; + KScheduler &operator=(const KScheduler &) = delete; + KScheduler &operator=(KScheduler &&) = delete; + + static KCriticalSection &GetCriticalSection() { return criticalSection; } + + static void YieldCurrentThread(); + static void YieldCurrentThreadAndBalanceLoad(); + static void YieldCurrentThreadAndWaitForLoadBalancing(); + + + void ForceContextSwitch() {} + void 
ForceContextSwitchAfterIrq() {} + + void SetContextSwitchNeededForWorkQueue() { isContextSwitchNeededForWorkQueue = true; } + + constexpr ulong GetIdleSelectionCount() const { return idleSelectionCount; } + constexpr bool IsActive() const { return /*isActive */ true; } // TODO + private: + bool hasContextSwitchStartedAfterIrq; + bool isActive; + bool isContextSwitchNeeded; + bool isContextSwitchNeededForWorkQueue; + uint coreId; + u64 lastContextSwitchTime; + KThread *selectedThread; + KThread *previousThread; + ulong idleSelectionCount; + + void *tmpStack; + KThread *idleThread; + + static KCriticalSection criticalSection; + + template + void DoYieldOperation(F f, KThread ¤tThread) + { + if (!currentThread.IsSchedulerOperationRedundant()) + { + criticalSection.lock(); + f(currentThread); + criticalSection.unlock(); + ForceContextSwitch(); + } + } +}; + +} diff --git a/mesosphere/include/mesosphere/threading/KThread.hpp b/mesosphere/include/mesosphere/threading/KThread.hpp new file mode 100644 index 000000000..8ecb78b26 --- /dev/null +++ b/mesosphere/include/mesosphere/threading/KThread.hpp @@ -0,0 +1,261 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace mesosphere +{ + +struct ThreadWaitListTag; +struct ThreadMutexWaitListTag; +using ThreadWaitListBaseHook = boost::intrusive::list_base_hook >; +using ThreadMutexWaitListBaseHook = boost::intrusive::list_base_hook >; + +class KThread final : + public KAutoObject, + public ILimitedResource, + public ISetAllocated, + public IAlarmable, + public ThreadWaitListBaseHook, + public ThreadMutexWaitListBaseHook +{ + public: + + MESOSPHERE_AUTO_OBJECT_TRAITS(AutoObject, Thread); + virtual bool IsAlive() const override; + + virtual void OnAlarm() override; + + struct SchedulerValueTraits { + using node_traits = boost::intrusive::list_node_traits; + using node = node_traits::node; + using node_ptr = node *; + using const_node_ptr = const node *; + using value_type = KThread; + using pointer = KThread *; + using const_pointer = const KThread *; + static constexpr boost::intrusive::link_mode_type link_mode = boost::intrusive::normal_link; + + constexpr SchedulerValueTraits(uint coreId) : coreId(coreId) {} + node_ptr to_node_ptr (value_type &value) const { + return &value.schedulerNodes[coreId]; + } + const_node_ptr to_node_ptr (const value_type &value) const { + return &value.schedulerNodes[coreId]; + } + pointer to_value_ptr(node_ptr n) const { + return detail::GetParentFromArrayMember(n, coreId, &KThread::schedulerNodes); + } + const_pointer to_value_ptr(const_node_ptr n) const { + return detail::GetParentFromArrayMember(n, coreId, &KThread::schedulerNodes); + } + + private: + uint coreId; + }; + + enum class SchedulingStatus : u16 { + Paused = 1, + Running = 2, + Exited = 3, + }; + + enum class ForcePauseReason : u16 { + ThreadActivity = 0, + ProcessActivity = 1, + Debug = 2, + Reserved = 3, + KernelLoading = 4, + }; + + using SchedulerList = typename boost::intrusive::make_list< + KThread, + boost::intrusive::value_traits + >::type; + + using WaitList = typename boost::intrusive::make_list< + KThread, + boost::intrusive::base_hook, + boost::intrusive::constant_time_size + >::type; + + private: + using MutexWaitList = typename boost::intrusive::make_list< + KThread, + boost::intrusive::base_hook + >::type; + + public: + + static constexpr uint GetPriorityOf(const KThread &thread) + { + return thread.priority; + } + + constexpr uint GetPriority() const { return priority; } + constexpr u64 GetId() const { return 
id; } + constexpr int GetCurrentCoreId() const { return currentCoreId; } + constexpr ulong GetAffinityMask() const { return affinityMask; } + constexpr long GetLastScheduledTime() const { return lastScheduledTime; } + + KProcess *GetOwner() const { return owner; } + bool IsSchedulerOperationRedundant() const { return owner != nullptr && owner->GetSchedulerOperationCount() == redundantSchedulerOperationCount; } + + void IncrementSchedulerOperationCount() { if (owner != nullptr) owner->IncrementSchedulerOperationCount(); } + void SetRedundantSchedulerOperation() { redundantSchedulerOperationCount = owner != nullptr ? owner->GetSchedulerOperationCount() : redundantSchedulerOperationCount; } + void SetCurrentCoreId(int coreId) { currentCoreId = coreId; } + + void SetProcessLastThreadAndIdleSelectionCount(ulong idleSelectionCount) + { + if (owner != nullptr) { + owner->SetLastThreadAndIdleSelectionCount(this, idleSelectionCount); + } + } + + void UpdateLastScheduledTime() { ++lastScheduledTime; /* FIXME */} + + constexpr SchedulingStatus GetSchedulingStatus() const + { + return (SchedulingStatus)(currentSchedMaskFull & 0xF); + } + constexpr bool IsForcePausedFor(ForcePauseReason reason) const + { + return (schedMaskForForcePauseFull & (1 << (4 + ((ushort)reason)))) != 0; + } + constexpr bool IsForcePaused() const + { + return (schedMaskForForcePauseFull & ~0xF) != 0; + } + static constexpr bool CompareSchedulingStatusFull(ushort fullMask, SchedulingStatus status) + { + return fullMask == (ushort)status; + } + constexpr bool CompareSchedulingStatusFull(SchedulingStatus status) const + { + return CompareSchedulingStatusFull(schedMaskForForcePauseFull, status); + } + + /// Returns old full mask + ushort SetSchedulingStatusField(SchedulingStatus status) + { + ushort oldMaskFull = currentSchedMaskFull; + currentSchedMaskFull = (currentSchedMaskFull & ~0xF) | ((ushort)status & 0xF); + return oldMaskFull; + } + void AddForcePauseReasonToField(ForcePauseReason reason) + { + schedMaskForForcePauseFull |= 1 << (4 + ((ushort)reason)); + } + void RemoveForcePauseReasonToField(ForcePauseReason reason) + { + schedMaskForForcePauseFull |= ~(1 << (4 + ((ushort)reason))); + } + + ushort CommitForcePauseToField() + { + + ushort oldMaskFull = currentSchedMaskFull; + currentSchedMaskFull = (schedMaskForForcePauseFull & ~0xF) | (currentSchedMaskFull & 0xF); + return oldMaskFull; + } + ushort RevertForcePauseToField() + { + ushort oldMaskFull = currentSchedMaskFull; + currentSchedMaskFull &= 0xF; + return oldMaskFull; + } + + void AdjustScheduling(ushort oldMaskFull); + void Reschedule(SchedulingStatus newStatus); + /// Sets status regardless of force-pausing. 
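+    /// (The accessors above suggest the layout: bits 0-3 of the mask hold the SchedulingStatus
+    /// and bits 4 and up hold one flag per ForcePauseReason; presumably only the status nibble
+    /// is compared here.)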
+ void RescheduleIfStatusEquals(SchedulingStatus expectedStatus, SchedulingStatus newStatus); + void AddForcePauseReason(ForcePauseReason reason); + void RemoveForcePauseReason(ForcePauseReason reason); + + bool IsDying() const + { + // Or already dead + /* + terminationWanted is only set on exit, under scheduler critical section, to true, + and the readers are either a thread under critical section (most common), or end-of-irq/svc/other exception, + therefore synchronization outside critsec can be implemented through fences, I think + */ + return CompareSchedulingStatusFull(SchedulingStatus::Exited) || terminationWanted; + } + + void SetTerminationWanted() + { + terminationWanted = true; + std::atomic_thread_fence(std::memory_order_seq_cst); + } + + /// Takes effect when critical section is left + bool WaitForKernelSync(WaitList &waitList); + /// Takes effect when critical section is left + void ResumeFromKernelSync(); + /// Takes effect when critical section is left -- all threads in waitlist + static void ResumeAllFromKernelSync(WaitList &waitList); + /// Takes effect immediately + void CancelKernelSync(); + /// Takes effect immediately + void CancelKernelSync(Result res); + + constexpr size_t GetNumberOfKMutexWaiters() const { return numKernelMutexWaiters; } + constexpr uiptr GetWantedMutex() const { return wantedMutex; } + void SetWantedMutex(uiptr mtx) { wantedMutex = mtx; } + + void AddMutexWaiter(KThread &waiter); + KThread *RelinquishMutex(size_t *count, uiptr mutexAddr); + void RemoveMutexWaiter(KThread &waiter); + void InheritDynamicPriority(); + + KThread() = default; + KThread(KProcess *owner, u64 id, uint priority) : KAutoObject(), owner(owner), schedulerNodes(), + id(id), basePriority(priority), priority(priority), + currentCoreId(0), affinityMask(15) {}; +private: + void AddToMutexWaitList(KThread &thread); + MutexWaitList::iterator RemoveFromMutexWaitList(MutexWaitList::const_iterator it); + void RemoveFromMutexWaitList(const KThread &t); + + KProcess *owner = nullptr; + + boost::intrusive::list_node_traits::node schedulerNodes[4]{}; + + WaitList *currentWaitList = nullptr; + + u64 id = 0; + long redundantSchedulerOperationCount = 0; + ushort currentSchedMaskFull = (ushort)SchedulingStatus::Paused; + ushort schedMaskForForcePauseFull = 0; + bool terminationWanted = false; + uint basePriority = 64, priority = 64; + int currentCoreId = -1; + ulong affinityMask = 0; + + uiptr wantedMutex = 0; + KThread *wantedMutexOwner = nullptr; + MutexWaitList mutexWaitList{}; + size_t numKernelMutexWaiters = 0; + + Handle syncResultHandle{}; + Result syncResult = 0; + + u64 lastScheduledTime = 0; +}; + +inline void intrusive_ptr_add_ref(KThread *obj) +{ + intrusive_ptr_add_ref((KAutoObject *)obj); +} + +inline void intrusive_ptr_release(KThread *obj) +{ + intrusive_ptr_release((KAutoObject *)obj); +} + +} diff --git a/mesosphere/source/core/KCoreContext.cpp b/mesosphere/source/core/KCoreContext.cpp new file mode 100644 index 000000000..38044e903 --- /dev/null +++ b/mesosphere/source/core/KCoreContext.cpp @@ -0,0 +1,11 @@ +#include +#include + +namespace mesosphere +{ + +static KScheduler scheds[4]; + +std::array KCoreContext::instances{ &scheds[0], &scheds[1], &scheds[2], &scheds[3] }; + +} diff --git a/mesosphere/source/interfaces/IAlarmable.cpp b/mesosphere/source/interfaces/IAlarmable.cpp new file mode 100644 index 000000000..cd3f5e0b2 --- /dev/null +++ b/mesosphere/source/interfaces/IAlarmable.cpp @@ -0,0 +1,20 @@ +#include +#include +#include + +namespace mesosphere +{ + +void 
IAlarmable::SetAlarmTimeImpl(const KSystemClock::time_point &alarmTime) +{ + this->alarmTime = alarmTime; + KCoreContext::GetCurrentInstance().GetAlarm()->AddAlarmable(*this); +} + +void IAlarmable::ClearAlarm() +{ + KCoreContext::GetCurrentInstance().GetAlarm()->RemoveAlarmable(*this); + alarmTime = KSystemClock::time_point{}; +} + +} diff --git a/mesosphere/source/interfaces/IInterruptibleWork.cpp b/mesosphere/source/interfaces/IInterruptibleWork.cpp new file mode 100644 index 000000000..3e0876295 --- /dev/null +++ b/mesosphere/source/interfaces/IInterruptibleWork.cpp @@ -0,0 +1,12 @@ +#include + +namespace mesosphere +{ + +IWork *IInterruptibleWork::HandleInterrupt(uint interruptId) +{ + (void)interruptId; + return (IWork *)this; +} + +} diff --git a/mesosphere/source/interfaces/ILimitedResource.cpp b/mesosphere/source/interfaces/ILimitedResource.cpp new file mode 100644 index 000000000..b7713e949 --- /dev/null +++ b/mesosphere/source/interfaces/ILimitedResource.cpp @@ -0,0 +1,26 @@ +#include +#include +#include + +namespace mesosphere::detail +{ + +void ReleaseResource(const SharedPtr &reslimit, KAutoObject::TypeId typeId, size_t count, size_t realCount) +{ + if (reslimit != nullptr) { + reslimit->Release(KResourceLimit::GetCategory(typeId), count, realCount); + } else { + KResourceLimit::GetDefaultInstance().Release(KResourceLimit::GetCategory(typeId), count, realCount); + } +} + +void ReleaseResource(const SharedPtr &owner, KAutoObject::TypeId typeId, size_t count, size_t realCount) +{ + if (owner != nullptr) { + return ReleaseResource(owner->GetResourceLimit(), typeId, count, realCount); + } else { + KResourceLimit::GetDefaultInstance().Release(KResourceLimit::GetCategory(typeId), count, realCount); + } +} + +} diff --git a/mesosphere/source/interrupts/KAlarm.cpp b/mesosphere/source/interrupts/KAlarm.cpp new file mode 100644 index 000000000..0ea169c57 --- /dev/null +++ b/mesosphere/source/interrupts/KAlarm.cpp @@ -0,0 +1,58 @@ +#include +#include +#include +#include + +namespace mesosphere +{ + +void KAlarm::AddAlarmable(IAlarmable &alarmable) +{ + std::lock_guard guard{spinlock}; + alarmables.insert(alarmable); + + KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime()); +} + +void KAlarm::RemoveAlarmable(const IAlarmable &alarmable) +{ + std::lock_guard guard{spinlock}; + alarmables.erase(alarmable); + + KSystemClock::SetAlarm(alarmables.cbegin()->GetAlarmTime()); +} + +void KAlarm::HandleAlarm() +{ + { + KCriticalSection &critsec = KScheduler::GetCriticalSection(); + std::lock_guard criticalSection{critsec}; + std::lock_guard guard{spinlock}; + + KSystemClock::SetInterruptMasked(true); // mask timer interrupt + KSystemClock::time_point currentTime = KSystemClock::now(), maxAlarmTime; + while (alarmables.begin() != alarmables.end()) { + IAlarmable &a = *alarmables.begin(); + maxAlarmTime = a.alarmTime; + if (maxAlarmTime > currentTime) { + break; + } + + alarmables.erase(a); + a.alarmTime = KSystemClock::time_point{}; + + a.OnAlarm(); + } + + if (maxAlarmTime > KSystemClock::time_point{}) { + KSystemClock::SetAlarm(maxAlarmTime); + } + } + + { + // TODO Reenable interrupt 30 + KInterruptMaskGuard guard{}; + } +} + +} diff --git a/mesosphere/source/interrupts/KWorkQueue.cpp b/mesosphere/source/interrupts/KWorkQueue.cpp new file mode 100644 index 000000000..783cb6ce4 --- /dev/null +++ b/mesosphere/source/interrupts/KWorkQueue.cpp @@ -0,0 +1,45 @@ +#include +#include +#include +#include + +namespace mesosphere +{ + +void KWorkQueue::AddWork(IWork &work) +{ + 
workQueue.push_back(work); + KCoreContext::GetCurrentInstance().GetScheduler()->SetContextSwitchNeededForWorkQueue(); +} + +void KWorkQueue::Initialize() +{ + //handlerThread.reset(new KThread); //TODO! + kassert(handlerThread == nullptr); +} + +void KWorkQueue::HandleWorkQueue() +{ + KCoreContext &cctx = KCoreContext::GetCurrentInstance(); + while (true) { + IWork *work = nullptr; + do { + KInterruptMaskGuard imguard{}; + auto it = workQueue.begin(); + if (it != workQueue.end()) { + work = &*it; + workQueue.erase(it); + } else { + { + //TODO: thread usercontext scheduler/bottom hard guard + cctx.GetCurrentThread()->Reschedule(KThread::SchedulingStatus::Paused); + } + cctx.GetScheduler()->ForceContextSwitch(); + } + } while (work == nullptr); + + work->DoWork(); + } +} + +} diff --git a/mesosphere/source/kresources/KAutoObject.cpp b/mesosphere/source/kresources/KAutoObject.cpp new file mode 100644 index 000000000..f31f0ea45 --- /dev/null +++ b/mesosphere/source/kresources/KAutoObject.cpp @@ -0,0 +1,10 @@ +#include + +namespace mesosphere +{ + +KAutoObject::~KAutoObject() +{ +} + +} diff --git a/mesosphere/source/kresources/KResourceLimit.cpp b/mesosphere/source/kresources/KResourceLimit.cpp new file mode 100644 index 000000000..dc113bcd6 --- /dev/null +++ b/mesosphere/source/kresources/KResourceLimit.cpp @@ -0,0 +1,83 @@ +#include + +namespace mesosphere +{ + +KResourceLimit KResourceLimit::defaultInstance{}; + +size_t KResourceLimit::GetCurrentValue(KResourceLimit::Category category) const +{ + // Caller should check category + std::lock_guard guard{condvar.mutex()}; + return currentValues[(uint)category]; +} + +size_t KResourceLimit::GetLimitValue(KResourceLimit::Category category) const +{ + // Caller should check category + std::lock_guard guard{condvar.mutex()}; + return limitValues[(uint)category]; +} + +size_t KResourceLimit::GetRemainingValue(KResourceLimit::Category category) const +{ + // Caller should check category + std::lock_guard guard{condvar.mutex()}; + return limitValues[(uint)category] - currentValues[(uint)category]; +} + +bool KResourceLimit::SetLimitValue(KResourceLimit::Category category, size_t value) +{ + std::lock_guard guard{condvar.mutex()}; + if ((long)value < 0 || currentValues[(uint)category] > value) { + return false; + } else { + limitValues[(uint)category] = value; + return true; + } +} + +void KResourceLimit::Release(KResourceLimit::Category category, size_t count, size_t realCount) +{ + // Caller should ensure parameters are correct + std::lock_guard guard{condvar.mutex()}; + currentValues[(uint)category] -= count; + realValues[(uint)category] -= realCount; + condvar.notify_all(); +} + +bool KResourceLimit::ReserveDetail(KResourceLimit::Category category, size_t count, const KSystemClock::time_point &timeoutTime) +{ + std::lock_guard guard{condvar.mutex()}; + if ((long)count <= 0 || realValues[(uint)category] >= limitValues[(uint)category]) { + return false; + } + + size_t newCur = currentValues[(uint)category] + count; + bool ok = false; + + auto condition = + [=, &newCur] { + newCur = this->currentValues[(uint)category] + count; + size_t lval = this->limitValues[(uint)category]; + return this->realValues[(uint)category] <= lval && newCur <= lval; // need to check here + }; + + if (timeoutTime <= KSystemClock::never) { + // TODO, check is actually < 0 + // TODO timeout + ok = true; + condvar.wait(condition); + } else { + ok = condvar.wait_until(timeoutTime, condition); + } + + if (ok) { + currentValues[(uint)category] += count; + 
realValues[(uint)category] += count; + } + + return ok; +} + +} diff --git a/mesosphere/source/my_libc.c b/mesosphere/source/my_libc.c new file mode 100644 index 000000000..38497c1ee --- /dev/null +++ b/mesosphere/source/my_libc.c @@ -0,0 +1,1150 @@ +/* Note: copied from newlib */ +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/* + * Copyright (C) 2004 CodeSourcery, LLC + * + * Permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies. + * + * This file is distributed WITHOUT ANY WARRANTY; without even the implied + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + */ + +/* Handle ELF .{pre_init,init,fini}_array sections. */ +#include + +#ifndef HAVE_INITFINI_ARRAY +#define HAVE_INITFINI_ARRAY +#endif + +#undef HAVE_INIT_FINI + +#ifdef HAVE_INITFINI_ARRAY + +/* These magic symbols are provided by the linker. */ +extern void (*__preinit_array_start []) (void) __attribute__((weak)); +extern void (*__preinit_array_end []) (void) __attribute__((weak)); +extern void (*__init_array_start []) (void) __attribute__((weak)); +extern void (*__init_array_end []) (void) __attribute__((weak)); + +#ifdef HAVE_INIT_FINI +extern void _init (void); +#endif + +/* Iterate over all the init routines. */ +void +__libc_init_array (void) +{ + size_t count; + size_t i; + + count = __preinit_array_end - __preinit_array_start; + for (i = 0; i < count; i++) + __preinit_array_start[i] (); + +#ifdef HAVE_INIT_FINI + _init (); +#endif + + count = __init_array_end - __init_array_start; + for (i = 0; i < count; i++) + __init_array_start[i] (); +} +#endif + +#ifdef HAVE_INITFINI_ARRAY +extern void (*__fini_array_start []) (void) __attribute__((weak)); +extern void (*__fini_array_end []) (void) __attribute__((weak)); + +#ifdef HAVE_INIT_FINI +extern void _fini (void); +#endif + +/* Run all the cleanup routines. */ +void +__libc_fini_array (void) +{ + size_t count; + size_t i; + + count = __fini_array_end - __fini_array_start; + for (i = count; i > 0; i--) + __fini_array_start[i-1] (); + +#ifdef HAVE_INIT_FINI + _fini (); +#endif +} +#endif + +int +__cxa_atexit (void (*fn) (void *), + void *arg, + void *d) +{ + return 0; +} +void *__dso_handle = 0; + +/* +FUNCTION + <>---move possibly overlapping memory +INDEX + memmove +SYNOPSIS + #include + void *memmove(void *<[dst]>, const void *<[src]>, size_t <[length]>); +DESCRIPTION + This function moves <[length]> characters from the block of + memory starting at <<*<[src]>>> to the memory starting at + <<*<[dst]>>>. <> reproduces the characters correctly + at <<*<[dst]>>> even if the two areas overlap. +RETURNS + The function returns <[dst]> as passed. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + memmove ansi pure +*/ + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +/* How many bytes are copied each iteration of the 4X unrolled loop. */ +#define BIGBLOCKSIZE (sizeof (long) << 2) + +/* How many bytes are copied each iteration of the word copy loop. */ +#define LITTLEBLOCKSIZE (sizeof (long)) + +/* Threshhold for punting to the byte copier. 
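+   (i.e. copies shorter than one unrolled block of four machine words skip the aligned
+   fast path and go straight to the byte loop)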
*/ +#undef TOO_SMALL +#define TOO_SMALL(LEN) ((LEN) < BIGBLOCKSIZE) + +/*SUPPRESS 20*/ +void * +//__inhibit_loop_to_libcall +memmove (void *dst_void, + const void *src_void, + size_t length) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + char *dst = dst_void; + const char *src = src_void; + + if (src < dst && dst < src + length) + { + /* Have to copy backwards */ + src += length; + dst += length; + while (length--) + { + *--dst = *--src; + } + } + else + { + while (length--) + { + *dst++ = *src++; + } + } + + return dst_void; +#else + char *dst = dst_void; + const char *src = src_void; + long *aligned_dst; + const long *aligned_src; + + if (src < dst && dst < src + length) + { + /* Destructive overlap...have to copy backwards */ + src += length; + dst += length; + while (length--) + { + *--dst = *--src; + } + } + else + { + /* Use optimizing algorithm for a non-destructive copy to closely + match memcpy. If the size is small or either SRC or DST is unaligned, + then punt into the byte copy loop. This should be rare. */ + if (!TOO_SMALL(length) && !UNALIGNED (src, dst)) + { + aligned_dst = (long*)dst; + aligned_src = (long*)src; + + /* Copy 4X long words at a time if possible. */ + while (length >= BIGBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + length -= BIGBLOCKSIZE; + } + + /* Copy one long word at a time if possible. */ + while (length >= LITTLEBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + length -= LITTLEBLOCKSIZE; + } + + /* Pick up any residual with a byte copier. */ + dst = (char*)aligned_dst; + src = (char*)aligned_src; + } + + while (length--) + { + *dst++ = *src++; + } + } + + return dst_void; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +/* +FUNCTION + <>---copy memory regions +SYNOPSIS + #include + void* memcpy(void *restrict <[out]>, const void *restrict <[in]>, + size_t <[n]>); +DESCRIPTION + This function copies <[n]> bytes from the memory region + pointed to by <[in]> to the memory region pointed to by + <[out]>. + If the regions overlap, the behavior is undefined. +RETURNS + <> returns a pointer to the first byte of the <[out]> + region. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + memcpy ansi pure + */ + +void * +memcpy (void * dst0, + const void * __restrict src0, + size_t len0) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + char *dst = (char *) dst0; + char *src = (char *) src0; + + void *save = dst0; + + while (len0--) + { + *dst++ = *src++; + } + + return save; +#else + char *dst = dst0; + const char *src = src0; + long *aligned_dst; + const long *aligned_src; + + /* If the size is small, or either SRC or DST is unaligned, + then punt into the byte copy loop. This should be rare. */ + if (!TOO_SMALL(len0) && !UNALIGNED (src, dst)) + { + aligned_dst = (long*)dst; + aligned_src = (long*)src; + + /* Copy 4X long words at a time if possible. */ + while (len0 >= BIGBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + len0 -= BIGBLOCKSIZE; + } + + /* Copy one long word at a time if possible. */ + while (len0 >= LITTLEBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + len0 -= LITTLEBLOCKSIZE; + } + + /* Pick up any residual with a byte copier. 
*/ + dst = (char*)aligned_dst; + src = (char*)aligned_src; + } + + while (len0--) + *dst++ = *src++; + + return dst0; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +/* +FUNCTION + <>---set an area of memory +INDEX + memset +SYNOPSIS + #include + void *memset(void *<[dst]>, int <[c]>, size_t <[length]>); +DESCRIPTION + This function converts the argument <[c]> into an unsigned + char and fills the first <[length]> characters of the array + pointed to by <[dst]> to the value. +RETURNS + <> returns the value of <[dst]>. +PORTABILITY +<> is ANSI C. + <> requires no supporting OS subroutines. +QUICKREF + memset ansi pure +*/ + +#include + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +#define LBLOCKSIZE (sizeof(long)) +#define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1)) +#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE) + +void * +memset (void *m, + int c, + size_t n) +{ + char *s = (char *) m; + +#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__) + unsigned int i; + unsigned long buffer; + unsigned long *aligned_addr; + unsigned int d = c & 0xff; /* To avoid sign extension, copy C to an + unsigned variable. */ + + while (UNALIGNED (s)) + { + if (n--) + *s++ = (char) c; + else + return m; + } + + if (!TOO_SMALL (n)) + { + /* If we get this far, we know that n is large and s is word-aligned. */ + aligned_addr = (unsigned long *) s; + + /* Store D into each char sized location in BUFFER so that + we can set large blocks quickly. */ + buffer = (d << 8) | d; + buffer |= (buffer << 16); + for (i = 32; i < LBLOCKSIZE * 8; i <<= 1) + buffer = (buffer << i) | buffer; + + /* Unroll the loop. */ + while (n >= LBLOCKSIZE*4) + { + *aligned_addr++ = buffer; + *aligned_addr++ = buffer; + *aligned_addr++ = buffer; + *aligned_addr++ = buffer; + n -= 4*LBLOCKSIZE; + } + + while (n >= LBLOCKSIZE) + { + *aligned_addr++ = buffer; + n -= LBLOCKSIZE; + } + /* Pick up the remainder with a bytewise loop. */ + s = (char*)aligned_addr; + } + +#endif /* not PREFER_SIZE_OVER_SPEED */ + + while (n--) + *s++ = (char) c; + + return m; +} + +/* +FUNCTION + <>---find character in memory +INDEX + memchr +SYNOPSIS + #include + void *memchr(const void *<[src]>, int <[c]>, size_t <[length]>); +DESCRIPTION + This function searches memory starting at <<*<[src]>>> for the + character <[c]>. The search only ends with the first + occurrence of <[c]>, or after <[length]> characters; in + particular, <> does not terminate the search. +RETURNS + If the character <[c]> is found within <[length]> characters + of <<*<[src]>>>, a pointer to the character is returned. If + <[c]> is not found, then <> is returned. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + memchr ansi pure +*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X) ((long)X & (sizeof (long) - 1)) + +/* How many bytes are loaded each iteration of the word copy loop. */ +#define LBLOCKSIZE (sizeof (long)) + +/* Threshhold for punting to the bytewise iterator. */ +#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE) + +#if LONG_MAX == 2147483647L +#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) +#else +#if LONG_MAX == 9223372036854775807L +/* Nonzero if X (a long int) contains a NULL byte. */ +#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080) +#else +#error long int is not a 32bit or 64bit type. 
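+/* Illustration of the zero-byte detector defined above: bit 7 of a byte survives the
+   "& ~X" mask only when it was produced by a borrow, and a borrow chain can only start
+   at a 0x00 byte, so the whole expression is nonzero exactly when X contains a NUL.
+   For 32-bit X = 0x12340078:
+       X - 0x01010101 = 0x1132FF77,  ~X = 0xEDCBFF87,
+       0x1132FF77 & 0xEDCBFF87 & 0x80808080 = 0x00008000  (the NUL byte is flagged). */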
+#endif +#endif + +#ifndef DETECTNULL +#error long int is not a 32bit or 64bit byte +#endif + +/* DETECTCHAR returns nonzero if (long)X contains the byte used + to fill (long)MASK. */ +#define DETECTCHAR(X,MASK) (DETECTNULL(X ^ MASK)) + +void * +memchr (const void *src_void, + int c, + size_t length) +{ + const unsigned char *src = (const unsigned char *) src_void; + unsigned char d = c; + +#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__) + unsigned long *asrc; + unsigned long mask; + unsigned int i; + + while (UNALIGNED (src)) + { + if (!length--) + return NULL; + if (*src == d) + return (void *) src; + src++; + } + + if (!TOO_SMALL (length)) + { + /* If we get this far, we know that length is large and src is + word-aligned. */ + /* The fast code reads the source one word at a time and only + performs the bytewise search on word-sized segments if they + contain the search character, which is detected by XORing + the word-sized segment with a word-sized block of the search + character and then detecting for the presence of NUL in the + result. */ + asrc = (unsigned long *) src; + mask = d << 8 | d; + mask = mask << 16 | mask; + for (i = 32; i < LBLOCKSIZE * 8; i <<= 1) + mask = (mask << i) | mask; + + while (length >= LBLOCKSIZE) + { + if (DETECTCHAR (*asrc, mask)) + break; + length -= LBLOCKSIZE; + asrc++; + } + + /* If there are fewer than LBLOCKSIZE characters left, + then we resort to the bytewise loop. */ + + src = (unsigned char *) asrc; + } + +#endif /* not PREFER_SIZE_OVER_SPEED */ + + while (length--) + { + if (*src == d) + return (void *) src; + src++; + } + + return NULL; +} + +/* +FUNCTION + <>---compare two memory areas +INDEX + memcmp +SYNOPSIS + #include + int memcmp(const void *<[s1]>, const void *<[s2]>, size_t <[n]>); +DESCRIPTION + This function compares not more than <[n]> characters of the + object pointed to by <[s1]> with the object pointed to by <[s2]>. +RETURNS + The function returns an integer greater than, equal to or + less than zero according to whether the object pointed to by + <[s1]> is greater than, equal to or less than the object + pointed to by <[s2]>. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + memcmp ansi pure +*/ + + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +/* How many bytes are copied each iteration of the word copy loop. */ +#define LBLOCKSIZE (sizeof (long)) + +/* Threshhold for punting to the byte copier. */ +#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE) + +int +memcmp (const void *m1, + const void *m2, + size_t n) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + unsigned char *s1 = (unsigned char *) m1; + unsigned char *s2 = (unsigned char *) m2; + + while (n--) + { + if (*s1 != *s2) + { + return *s1 - *s2; + } + s1++; + s2++; + } + return 0; +#else + unsigned char *s1 = (unsigned char *) m1; + unsigned char *s2 = (unsigned char *) m2; + unsigned long *a1; + unsigned long *a2; + + /* If the size is too small, or either pointer is unaligned, + then we punt to the byte compare loop. Hopefully this will + not turn up in inner loops. */ + if (!TOO_SMALL(n) && !UNALIGNED(s1,s2)) + { + /* Otherwise, load and compare the blocks of memory one + word at a time. 
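+	 A mismatching word simply breaks out of the loop; the byte loop below then
+	 locates the differing byte and produces the signed result.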
*/ + a1 = (unsigned long*) s1; + a2 = (unsigned long*) s2; + while (n >= LBLOCKSIZE) + { + if (*a1 != *a2) + break; + a1++; + a2++; + n -= LBLOCKSIZE; + } + + /* check m mod LBLOCKSIZE remaining characters */ + + s1 = (unsigned char*)a1; + s2 = (unsigned char*)a2; + } + + while (n--) + { + if (*s1 != *s2) + return *s1 - *s2; + s1++; + s2++; + } + + return 0; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +/* +FUNCTION + <>---search for character in string +INDEX + strchr +SYNOPSIS + #include + char * strchr(const char *<[string]>, int <[c]>); +DESCRIPTION + This function finds the first occurence of <[c]> (converted to + a char) in the string pointed to by <[string]> (including the + terminating null character). +RETURNS + Returns a pointer to the located character, or a null pointer + if <[c]> does not occur in <[string]>. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + strchr ansi pure +*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + + +/* Nonzero if X is not aligned on a "long" boundary. */ +#define UNALIGNED(X) ((long)X & (sizeof (long) - 1)) + +/* How many bytes are loaded each iteration of the word copy loop. */ +#define LBLOCKSIZE (sizeof (long)) + +char * +strchr (const char *s1, + int i) +{ + const unsigned char *s = (const unsigned char *)s1; + unsigned char c = i; + +#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__) + unsigned long mask,j; + unsigned long *aligned_addr; + + /* Special case for finding 0. */ + if (!c) + { + while (UNALIGNED (s)) + { + if (!*s) + return (char *) s; + s++; + } + /* Operate a word at a time. */ + aligned_addr = (unsigned long *) s; + while (!DETECTNULL (*aligned_addr)) + aligned_addr++; + /* Found the end of string. */ + s = (const unsigned char *) aligned_addr; + while (*s) + s++; + return (char *) s; + } + + /* All other bytes. Align the pointer, then search a long at a time. */ + while (UNALIGNED (s)) + { + if (!*s) + return NULL; + if (*s == c) + return (char *) s; + s++; + } + + mask = c; + for (j = 8; j < LBLOCKSIZE * 8; j <<= 1) + mask = (mask << j) | mask; + + aligned_addr = (unsigned long *) s; + while (!DETECTNULL (*aligned_addr) && !DETECTCHAR (*aligned_addr, mask)) + aligned_addr++; + + /* The block of bytes currently pointed to by aligned_addr + contains either a null or the target char, or both. We + catch it using the bytewise search. */ + + s = (unsigned char *) aligned_addr; + +#endif /* not PREFER_SIZE_OVER_SPEED */ + + while (*s && *s != c) + s++; + if (*s == c) + return (char *)s; + return NULL; +} + +/* +FUNCTION + <>---character string compare + +INDEX + strcmp +SYNOPSIS + #include + int strcmp(const char *<[a]>, const char *<[b]>); +DESCRIPTION + <> compares the string at <[a]> to + the string at <[b]>. +RETURNS + If <<*<[a]>>> sorts lexicographically after <<*<[b]>>>, + <> returns a number greater than zero. If the two + strings match, <> returns zero. If <<*<[a]>>> + sorts lexicographically before <<*<[b]>>>, <> returns a + number less than zero. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + strcmp ansi pure +*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +/* Nonzero if either X or Y is not aligned on a "long" boundary. 
*/ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +int +strcmp (const char *s1, + const char *s2) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + while (*s1 != '\0' && *s1 == *s2) + { + s1++; + s2++; + } + + return (*(unsigned char *) s1) - (*(unsigned char *) s2); +#else + unsigned long *a1; + unsigned long *a2; + + /* If s1 or s2 are unaligned, then compare bytes. */ + if (!UNALIGNED (s1, s2)) + { + /* If s1 and s2 are word-aligned, compare them a word at a time. */ + a1 = (unsigned long*)s1; + a2 = (unsigned long*)s2; + while (*a1 == *a2) + { + /* To get here, *a1 == *a2, thus if we find a null in *a1, + then the strings must be equal, so return zero. */ + if (DETECTNULL (*a1)) + return 0; + + a1++; + a2++; + } + + /* A difference was detected in last few bytes of s1, so search bytewise */ + s1 = (char*)a1; + s2 = (char*)a2; + } + + while (*s1 != '\0' && *s1 == *s2) + { + s1++; + s2++; + } + return (*(unsigned char *) s1) - (*(unsigned char *) s2); +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +/* +FUNCTION + <>---copy string +INDEX + strcpy +SYNOPSIS + #include + char *strcpy(char *<[dst]>, const char *<[src]>); +DESCRIPTION + <> copies the string pointed to by <[src]> + (including the terminating null character) to the array + pointed to by <[dst]>. +RETURNS + This function returns the initial value of <[dst]>. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + strcpy ansi pure +*/ + +/*SUPPRESS 560*/ +/*SUPPRESS 530*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +char* +strcpy (char *dst0, + const char *src0) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + char *s = dst0; + + while (*dst0++ = *src0++) + ; + + return s; +#else + char *dst = dst0; + const char *src = src0; + long *aligned_dst; + const long *aligned_src; + + /* If SRC or DEST is unaligned, then copy bytes. */ + if (!UNALIGNED (src, dst)) + { + aligned_dst = (long*)dst; + aligned_src = (long*)src; + + /* SRC and DEST are both "long int" aligned, try to do "long int" + sized copies. */ + while (!DETECTNULL(*aligned_src)) + { + *aligned_dst++ = *aligned_src++; + } + + dst = (char*)aligned_dst; + src = (char*)aligned_src; + } + + while ((*dst++ = *src++)) + ; + return dst0; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +/* +FUNCTION + <>---character string length +INDEX + strlen +SYNOPSIS + #include + size_t strlen(const char *<[str]>); +DESCRIPTION + The <> function works out the length of the string + starting at <<*<[str]>>> by counting chararacters until it + reaches a <> character. +RETURNS + <> returns the character count. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + strlen ansi pure +*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +#define LBLOCKSIZE (sizeof (long)) +#define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1)) +size_t +strlen (const char *str) +{ + const char *start = str; + +#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__) + unsigned long *aligned_addr; + + /* Align the pointer, so we can search a word at a time. */ + while (UNALIGNED (str)) + { + if (!*str) + return str - start; + str++; + } + + /* If the string is word-aligned, we can check for the presence of + a null in each word-sized block. 
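+     (This reuses the DETECTNULL borrow trick defined earlier for memchr.)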
*/ + aligned_addr = (unsigned long *)str; + while (!DETECTNULL (*aligned_addr)) + aligned_addr++; + + /* Once a null is detected, we check each byte in that block for a + precise position of the null. */ + str = (char *) aligned_addr; + +#endif /* not PREFER_SIZE_OVER_SPEED */ + + while (*str) + str++; + return str - start; +} + +/* +FUNCTION + <>---character string compare + +INDEX + strncmp +SYNOPSIS + #include + int strncmp(const char *<[a]>, const char * <[b]>, size_t <[length]>); +DESCRIPTION + <> compares up to <[length]> characters + from the string at <[a]> to the string at <[b]>. +RETURNS + If <<*<[a]>>> sorts lexicographically after <<*<[b]>>>, + <> returns a number greater than zero. If the two + strings are equivalent, <> returns zero. If <<*<[a]>>> + sorts lexicographically before <<*<[b]>>>, <> returns a + number less than zero. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + strncmp ansi pure +*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +int +strncmp (const char *s1, + const char *s2, + size_t n) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + if (n == 0) + return 0; + + while (n-- != 0 && *s1 == *s2) + { + if (n == 0 || *s1 == '\0') + break; + s1++; + s2++; + } + + return (*(unsigned char *) s1) - (*(unsigned char *) s2); +#else + unsigned long *a1; + unsigned long *a2; + + if (n == 0) + return 0; + + /* If s1 or s2 are unaligned, then compare bytes. */ + if (!UNALIGNED (s1, s2)) + { + /* If s1 and s2 are word-aligned, compare them a word at a time. */ + a1 = (unsigned long*)s1; + a2 = (unsigned long*)s2; + while (n >= sizeof (long) && *a1 == *a2) + { + n -= sizeof (long); + + /* If we've run out of bytes or hit a null, return zero + since we already know *a1 == *a2. */ + if (n == 0 || DETECTNULL (*a1)) + return 0; + + a1++; + a2++; + } + + /* A difference was detected in last few bytes of s1, so search bytewise */ + s1 = (char*)a1; + s2 = (char*)a2; + } + + while (n-- > 0 && *s1 == *s2) + { + /* If we've run out of bytes or hit a null, return zero + since we already know *s1 == *s2. */ + if (n == 0 || *s1 == '\0') + return 0; + s1++; + s2++; + } + return (*(unsigned char *) s1) - (*(unsigned char *) s2); +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +/* +FUNCTION + <>---counted copy string +INDEX + strncpy +SYNOPSIS + #include + char *strncpy(char *restrict <[dst]>, const char *restrict <[src]>, + size_t <[length]>); +DESCRIPTION + <> copies not more than <[length]> characters from the + the string pointed to by <[src]> (including the terminating + null character) to the array pointed to by <[dst]>. If the + string pointed to by <[src]> is shorter than <[length]> + characters, null characters are appended to the destination + array until a total of <[length]> characters have been + written. +RETURNS + This function returns the initial value of <[dst]>. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. 
+QUICKREF + strncpy ansi pure +*/ + +/*SUPPRESS 560*/ +/*SUPPRESS 530*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +#define TOO_SMALL(LEN) ((LEN) < sizeof (long)) + +char * +strncpy (char *__restrict dst0, + const char *__restrict src0, + size_t count) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + char *dscan; + const char *sscan; + + dscan = dst0; + sscan = src0; + while (count > 0) + { + --count; + if ((*dscan++ = *sscan++) == '\0') + break; + } + while (count-- > 0) + *dscan++ = '\0'; + + return dst0; +#else + char *dst = dst0; + const char *src = src0; + long *aligned_dst; + const long *aligned_src; + + /* If SRC and DEST is aligned and count large enough, then copy words. */ + if (!UNALIGNED (src, dst) && !TOO_SMALL (count)) + { + aligned_dst = (long*)dst; + aligned_src = (long*)src; + + /* SRC and DEST are both "long int" aligned, try to do "long int" + sized copies. */ + while (count >= sizeof (long int) && !DETECTNULL(*aligned_src)) + { + count -= sizeof (long int); + *aligned_dst++ = *aligned_src++; + } + + dst = (char*)aligned_dst; + src = (char*)aligned_src; + } + + while (count > 0) + { + --count; + if ((*dst++ = *src++) == '\0') + break; + } + + while (count-- > 0) + *dst++ = '\0'; + + return dst0; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +/* +FUNCTION + <>---character string length + +INDEX + strnlen +SYNOPSIS + #include + size_t strnlen(const char *<[str]>, size_t <[n]>); +DESCRIPTION + The <> function works out the length of the string + starting at <<*<[str]>>> by counting chararacters until it + reaches a NUL character or the maximum: <[n]> number of + characters have been inspected. +RETURNS + <> returns the character count or <[n]>. +PORTABILITY +<> is a GNU extension. +<> requires no supporting OS subroutines. +*/ + +size_t +strnlen (const char *str, + size_t n) +{ + const char *start = str; + + while (n-- > 0 && *str) + str++; + + return str - start; +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/mesosphere/source/my_libstdc++.cpp b/mesosphere/source/my_libstdc++.cpp new file mode 100644 index 000000000..f7c31cb94 --- /dev/null +++ b/mesosphere/source/my_libstdc++.cpp @@ -0,0 +1,6 @@ +#include + +void *operator new(std::size_t) { for(;;); } +void *operator new[](std::size_t) { for(;;); } +void operator delete(void *) { } +void operator delete[](void *) { } diff --git a/mesosphere/source/processes/KHandleTable.cpp b/mesosphere/source/processes/KHandleTable.cpp new file mode 100644 index 000000000..fb9831d75 --- /dev/null +++ b/mesosphere/source/processes/KHandleTable.cpp @@ -0,0 +1,133 @@ +#include +#include +#include +#include +#include + +namespace mesosphere +{ + +bool KHandleTable::IsValid(Handle handle) const +{ + // Official kernel checks for nullptr, however this makes the deferred-init logic more difficult. + // We use our own, more secure, logic instead, that is, free entries are <= 0. + return handle.index < capacity && handle.id > 0 && entries[handle.index].id == handle.id; +} + +SharedPtr KHandleTable::GetAutoObject(Handle handle) const +{ + if (!handle.IsAliasOrFree()) { + // Note: official kernel locks the spinlock here, but we don't need to. + return nullptr; + } else { + std::lock_guard guard{spinlock}; + return IsValid(handle) ? 
entries[handle.index].object : nullptr; + } +} + +SharedPtr KHandleTable::GetThread(Handle handle, bool allowAlias) const +{ + if (allowAlias && handle == selfThreadAlias) { + return KCoreContext::GetCurrentInstance().GetCurrentThread(); + } else { + return DynamicObjectCast(GetAutoObject(handle)); + } +} + +SharedPtr KHandleTable::GetProcess(Handle handle, bool allowAlias) const +{ + if (allowAlias && handle == selfProcessAlias) { + return KCoreContext::GetCurrentInstance().GetCurrentProcess(); + } else { + return DynamicObjectCast(GetAutoObject(handle)); + } +} + +bool KHandleTable::Close(Handle handle) +{ + SharedPtr tmp{nullptr}; // ensure any potential dtor is called w/o the spinlock being held + + if (handle.IsAliasOrFree()) { + return false; + } else { + std::lock_guard guard{spinlock}; + if (IsValid(handle)) { + entries[-firstFreeIndex].id = firstFreeIndex; + firstFreeIndex = -(s16)handle.index; + --numActive; + tmp = std::move(entries[handle.index].object); + return true; + } else { + return false; + } + } +} + +bool KHandleTable::Generate(Handle &out, SharedPtr obj) +{ + // Note: nullptr is accepted, for deferred-init. + + std::lock_guard guard{spinlock}; + if (numActive >= capacity) { + return false; // caller should return 0xD201 + } + + // Get/allocate the entry + u16 index = (u16)-firstFreeIndex; + Entry *e = &entries[-firstFreeIndex]; + firstFreeIndex = e->id; + + e->id = idCounter; + e->object = std::move(obj); + + out.index = index; + out.id = e->id; + out.isAlias = false; + + size = ++numActive > size ? numActive : size; + idCounter = idCounter == 0x7FFF ? 1 : idCounter + 1; + + return true; +} + +bool KHandleTable::Set(SharedPtr obj, Handle handle) +{ + if (!handle.IsAliasOrFree() && IsValid(handle)) { + std::lock_guard guard{spinlock}; + entries[handle.index].object = std::move(obj); + return true; + } else { + return false; + } +} + +void KHandleTable::Destroy() +{ + spinlock.lock(); + u16 capa = capacity; + capacity = 0; + firstFreeIndex = 0; + spinlock.unlock(); + + for (u16 i = 0; i < capa; i++) { + entries[i].object = nullptr; + entries[i].id = -(i + 1); + } +} + +KHandleTable::KHandleTable(size_t capacity_) : capacity((u16)capacity_) +{ + // Note: caller should check the > case, and return an error in that case! + capacity = capacity > capacityLimit || capacity == 0 ? 
(u16)capacityLimit : capacity; + + u16 capa = capacity; + Destroy(); + capacity = capa; +} + +KHandleTable::~KHandleTable() +{ + Destroy(); +} + +} diff --git a/mesosphere/source/processes/KProcess.cpp b/mesosphere/source/processes/KProcess.cpp new file mode 100644 index 000000000..75d7d0eef --- /dev/null +++ b/mesosphere/source/processes/KProcess.cpp @@ -0,0 +1,14 @@ +#include +#include +#include + +namespace mesosphere +{ + +void KProcess::SetLastThreadAndIdleSelectionCount(KThread *thread, ulong idleSelectionCount) +{ + lastThreads[thread->GetCurrentCoreId()] = thread; + lastIdleSelectionCount[thread->GetCurrentCoreId()] = idleSelectionCount; +} + +} diff --git a/mesosphere/source/test.cpp b/mesosphere/source/test.cpp new file mode 100644 index 000000000..1032bc844 --- /dev/null +++ b/mesosphere/source/test.cpp @@ -0,0 +1,10 @@ +int main(void) { + for(;;); + return 0; +} + +extern "C" { + void _start(void) { + main(); + } +} diff --git a/mesosphere/source/threading/KConditionVariable.cpp b/mesosphere/source/threading/KConditionVariable.cpp new file mode 100644 index 000000000..61c34dddb --- /dev/null +++ b/mesosphere/source/threading/KConditionVariable.cpp @@ -0,0 +1,39 @@ +#include +#include +#include + +namespace mesosphere +{ + +void KConditionVariable::wait_until_impl(const KSystemClock::time_point &timeoutPoint) noexcept +{ + // Official kernel counts number of waiters, but that isn't necessary + { + KThread *currentThread = KCoreContext::GetCurrentInstance().GetCurrentThread(); + std::lock_guard guard{KScheduler::GetCriticalSection()}; + mutex_.unlock(); + if (currentThread->WaitForKernelSync(waiterList)) { + (void)timeoutPoint; //TODO! + } else { + // Termination + } + } + mutex_.lock(); +} + +void KConditionVariable::notify_one() noexcept +{ + std::lock_guard guard{KScheduler::GetCriticalSection()}; + auto t = waiterList.begin(); + if (t != waiterList.end()) { + t->ResumeFromKernelSync(); + } +} + +void KConditionVariable::notify_all() noexcept +{ + std::lock_guard guard{KScheduler::GetCriticalSection()}; + KThread::ResumeAllFromKernelSync(waiterList); +} + +} diff --git a/mesosphere/source/threading/KMutex.cpp b/mesosphere/source/threading/KMutex.cpp new file mode 100644 index 000000000..caaceb316 --- /dev/null +++ b/mesosphere/source/threading/KMutex.cpp @@ -0,0 +1,62 @@ +#include +#include +#include + +namespace mesosphere +{ + +void KMutex::lock_slow_path(KThread &owner, KThread &requester) +{ + // Requester is currentThread most of (all ?) the time + KCriticalSection &critsec = KScheduler::GetCriticalSection(); + std::lock_guard criticalSection{critsec}; + if (KCoreContext::GetCurrentInstance().GetScheduler()->IsActive()) { + requester.SetWantedMutex((uiptr)this); + owner.AddMutexWaiter(requester); + + // If the requester is/was running, pause it (sets status even if force-paused). + requester.RescheduleIfStatusEquals(KThread::SchedulingStatus::Running, KThread::SchedulingStatus::Paused); + + // If the owner is force-paused, temporarily wake it. + if (owner.IsForcePaused()) { + owner.AdjustScheduling(owner.RevertForcePauseToField()); + } + + // Commit scheduler changes NOW. + critsec.unlock(); + critsec.lock(); + + /* + At this point, mutex ownership has been transferred to requester or another thread (false wake). + Make sure the requester, now resumed, isn't in any mutex wait list. 
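+
+    A rough sketch of the handoff, assuming the requester is the current
+    thread (the common case):
+      requester: lock_slow_path() -> owner.AddMutexWaiter(requester)
+                 (priority inheritance), pauses itself, then releases the
+                 critical section;
+      owner:     unlock_slow_path() -> RelinquishMutex() hands the mutex to
+                 the first (highest-priority) waiter of this mutex, encodes
+                 it in the tag, and resumes it;
+      requester: is removed from the owner's wait list below; on a false
+                 wake the caller presumably retries the acquisition.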
+ */ + owner.RemoveMutexWaiter(requester); + } +} + +void KMutex::unlock_slow_path(KThread &owner) +{ + std::lock_guard criticalSection{KScheduler::GetCriticalSection()}; + size_t count; + KThread *newOwner = owner.RelinquishMutex(&count, (uiptr)this); + native_handle_type newTag; + + if (newOwner != nullptr) { + // Wake up new owner + newTag = (native_handle_type)newOwner | (count > 1 ? 1 : 0); + // Sets status even if force-paused. + newOwner->RescheduleIfStatusEquals(KThread::SchedulingStatus::Paused, KThread::SchedulingStatus::Running); + } else { + // Free the mutex. + newTag = 0; + } + + // Allow previous owner to get back to forced-sleep, if no other thread wants the kmutexes it is holding. + if (!owner.IsDying() && owner.GetNumberOfKMutexWaiters() == 0) { + owner.AdjustScheduling(owner.CommitForcePauseToField()); + } + + tag = newTag; +} + +} diff --git a/mesosphere/source/threading/KScheduler.cpp b/mesosphere/source/threading/KScheduler.cpp new file mode 100644 index 000000000..7a91806c8 --- /dev/null +++ b/mesosphere/source/threading/KScheduler.cpp @@ -0,0 +1,344 @@ +#include +#include + +#include +#include + +namespace mesosphere +{ + +namespace { + struct MlqTraitsFactory { + constexpr KThread::SchedulerValueTraits operator()(size_t i) const + { + return KThread::SchedulerValueTraits{(uint)i}; + } + }; +} + +using MlqT = KScheduler::Global::MlqType; + +bool KScheduler::Global::reselectionRequired = false; + +std::array KScheduler::Global::scheduledMlqs = + detail::MakeArrayWithFactorySequenceOf( + &KThread::GetPriorityOf + ); + +std::array KScheduler::Global::suggestedMlqs = + detail::MakeArrayWithFactorySequenceOf( + &KThread::GetPriorityOf + ); + + +void KScheduler::Global::SetThreadRunning(KThread &thread) +{ + ApplyReschedulingOperation([](MlqT &mlq, KThread &t){ mlq.add(t); }, thread); +} + +void KScheduler::Global::SetThreadPaused(KThread &thread) +{ + ApplyReschedulingOperation([](MlqT &mlq, KThread &t){ mlq.remove(t); }, thread); +} + +void KScheduler::Global::AdjustThreadPriorityChanged(KThread &thread, uint oldPrio, bool isCurrentThread) +{ + ApplyReschedulingOperation( + [oldPrio, isCurrentThread](MlqT &mlq, KThread &t){ + mlq.adjust(t, oldPrio, isCurrentThread); + }, thread); +} + +void KScheduler::Global::AdjustThreadAffinityChanged(KThread &thread, int oldCoreId, u64 oldAffinityMask) +{ + int newCoreId = thread.GetCurrentCoreId(); + u64 newAffinityMask = thread.GetAffinityMask(); + + ApplyReschedulingOperationImpl([](MlqT &mlq, KThread &t){ mlq.remove(t); }, thread, oldCoreId, oldAffinityMask); + ApplyReschedulingOperationImpl([](MlqT &mlq, KThread &t){ mlq.add(t); }, thread, newCoreId, newAffinityMask); + + thread.IncrementSchedulerOperationCount(); + reselectionRequired = true; +} + +void KScheduler::Global::TransferThreadToCore(KThread &thread, int coreId) +{ + int currentCoreId = thread.GetCurrentCoreId(); + + if (currentCoreId != coreId) { + if (currentCoreId != -1) { + scheduledMlqs[currentCoreId].transferToBack(thread, suggestedMlqs[currentCoreId]); + } + + if (coreId != -1) { + suggestedMlqs[coreId].transferToFront(thread, scheduledMlqs[coreId]); + } + } + + thread.SetCurrentCoreId(coreId); +} + +void KScheduler::Global::AskForReselectionOrMarkRedundant(KThread *currentThread, KThread *winner) +{ + if (currentThread == winner) { + // Nintendo (not us) has a nullderef bug on currentThread->owner, but which is never triggered. 
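+        // The yield did not change which thread is at the front of the
+        // queue, so mark the operation as redundant: per the notes in the
+        // Yield* operations below, callers can then skip the next
+        // identical yield request.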
+ currentThread->SetRedundantSchedulerOperation(); + } else { + reselectionRequired = true; + } +} + +KThread *KScheduler::Global::PickOneSuggestedThread(const std::array &curThreads, +uint coreId, bool compareTime, bool allowSecondPass, uint maxPrio, uint minPrio) { + if (minPrio < maxPrio) { + return nullptr; + } + + auto hasWorseTime = [coreId, minPrio, compareTime](const KThread &t) { + if (!compareTime || scheduledMlqs[coreId].size(minPrio) <= 1 || t.GetPriority() < minPrio) { + return false; + } else { + // Condition means the thread *it would have been scheduled again after the thread + return t.GetLastScheduledTime() > scheduledMlqs[coreId].front(minPrio).GetLastScheduledTime(); + } + }; + + std::array secondPassCores; + size_t numSecondPassCores = 0; + + auto it = std::find_if( + suggestedMlqs[coreId].begin(maxPrio), + suggestedMlqs[coreId].end(minPrio), + [&hasWorseTime, &secondPassCores, &numSecondPassCores, &curThreads](const KThread &t) { + int srcCoreId = t.GetCurrentCoreId(); + //bool worseTime = compareTime && hasWorseTime(t); + // break if hasWorse time too + if (srcCoreId >= 0) { + bool srcHasEphemeralKernThread = scheduledMlqs[srcCoreId].highestPrioritySet() < minRegularPriority; + bool isSrcCurT = &t == curThreads[srcCoreId]; + if (isSrcCurT) { + secondPassCores[numSecondPassCores++] = (uint)srcCoreId; + } + + // Note, if checkTime official kernel breaks if srcHasEphemeralKernThread + // I believe this is a bug + if(srcHasEphemeralKernThread || isSrcCurT) { + return false; + } + } + + return true; + } + ); + + if (it != suggestedMlqs[coreId].end(minPrio) && (!compareTime || !hasWorseTime(*it))) { + return &*it; + } else if (allowSecondPass) { + // Allow to re-pick a selected thread about to be current, if it doesn't make the core idle + auto srcCoreIdPtr = std::find_if( + secondPassCores.cbegin(), + secondPassCores.cbegin() + numSecondPassCores, + [](uint id) { + return scheduledMlqs[id].highestPrioritySet() >= minRegularPriority && scheduledMlqs[id].size() > 1; + } + ); + + return srcCoreIdPtr == secondPassCores.cbegin() + numSecondPassCores ? nullptr : &scheduledMlqs[*srcCoreIdPtr].front(); + } else { + return nullptr; + } +} + +void KScheduler::Global::YieldThread(KThread ¤tThread) +{ + // Note: caller should use critical section, etc. + kassert(currentThread.GetCurrentCoreId() >= 0); + uint coreId = (uint)currentThread.GetCurrentCoreId(); + uint priority = currentThread.GetPriority(); + + // Yield the thread + scheduledMlqs[coreId].yield(currentThread); + currentThread.IncrementSchedulerOperationCount(); + + KThread *winner = &scheduledMlqs[coreId].front(priority); + AskForReselectionOrMarkRedundant(¤tThread, winner); +} + +void KScheduler::Global::YieldThreadAndBalanceLoad(KThread ¤tThread) +{ + // Note: caller should check if !currentThread.IsSchedulerOperationRedundant and use critical section, etc. + kassert(currentThread.GetCurrentCoreId() >= 0); + uint coreId = (uint)currentThread.GetCurrentCoreId(); + uint priority = currentThread.GetPriority(); + + std::array curThreads; + for (uint i = 0; i < MAX_CORES; i++) { + curThreads[i] = scheduledMlqs[i].empty() ? 
nullptr : &scheduledMlqs[i].front(); + } + + // Yield the thread + scheduledMlqs[coreId].yield(currentThread); + currentThread.IncrementSchedulerOperationCount(); + + KThread *winner = PickOneSuggestedThread(curThreads, coreId, true, false, 0, priority); + + if (winner != nullptr) { + TransferThreadToCore(*winner, coreId); + winner->IncrementSchedulerOperationCount(); + currentThread.SetRedundantSchedulerOperation(); + } else { + winner = &scheduledMlqs[coreId].front(priority); + } + + AskForReselectionOrMarkRedundant(¤tThread, winner); +} + +void KScheduler::Global::YieldThreadAndWaitForLoadBalancing(KThread ¤tThread) +{ + // Note: caller should check if !currentThread.IsSchedulerOperationRedundant and use critical section, etc. + KThread *winner = nullptr; + kassert(currentThread.GetCurrentCoreId() >= 0); + uint coreId = (uint)currentThread.GetCurrentCoreId(); + + // Remove the thread from its scheduled mlq, put it on the corresponding "suggested" one instead + TransferThreadToCore(currentThread, -1); + currentThread.IncrementSchedulerOperationCount(); + + // If the core is idle, perform load balancing, excluding the threads that have just used this function... + if (scheduledMlqs[coreId].empty()) { + // Here, "curThreads" is calculated after the ""yield"", unlike yield -1 + std::array curThreads; + for (uint i = 0; i < MAX_CORES; i++) { + curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front(); + } + + KThread *winner = PickOneSuggestedThread(curThreads, coreId, false); + + if (winner != nullptr) { + TransferThreadToCore(*winner, coreId); + winner->IncrementSchedulerOperationCount(); + } else { + winner = ¤tThread; + } + } + + AskForReselectionOrMarkRedundant(¤tThread, winner); +} + +void KScheduler::Global::YieldPreemptThread(KThread ¤tKernelHandlerThread, uint coreId, uint maxPrio) +{ + if (!scheduledMlqs[coreId].empty(maxPrio)) { + // Yield the first thread in the level queue + scheduledMlqs[coreId].front(maxPrio).IncrementSchedulerOperationCount(); + scheduledMlqs[coreId].yield(maxPrio); + if (scheduledMlqs[coreId].size() > 1) { + scheduledMlqs[coreId].front(maxPrio).IncrementSchedulerOperationCount(); + } + } + + // Here, "curThreads" is calculated after the forced yield, unlike yield -1 + std::array curThreads; + for (uint i = 0; i < MAX_CORES; i++) { + curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front(); + } + + KThread *winner = PickOneSuggestedThread(curThreads, coreId, true, false, maxPrio, maxPrio); + if (winner != nullptr) { + TransferThreadToCore(*winner, coreId); + winner->IncrementSchedulerOperationCount(); + } + + for (uint i = 0; i < MAX_CORES; i++) { + curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front(); + } + + // Find first thread which is not the kernel handler thread. 
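+    // Its priority then bounds the extra balancing pass below: only a
+    // suggested thread that would run ahead of it (numerically lower
+    // priority value) is worth migrating to this core.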
+ auto itFirst = std::find_if( + scheduledMlqs[coreId].begin(), + scheduledMlqs[coreId].end(), + [¤tKernelHandlerThread, coreId](const KThread &t) { + return &t != ¤tKernelHandlerThread; + } + ); + + if (itFirst != scheduledMlqs[coreId].end()) { + // If under the threshold, do load balancing again + winner = PickOneSuggestedThread(curThreads, coreId, true, false, maxPrio, itFirst->GetPriority() - 1); + if (winner != nullptr) { + TransferThreadToCore(*winner, coreId); + winner->IncrementSchedulerOperationCount(); + } + } + + reselectionRequired = true; +} + +void KScheduler::Global::SelectThreads() +{ + auto updateThread = [](KThread *thread, KScheduler &sched) { + if (thread != sched.selectedThread) { + if (thread != nullptr) { + thread->IncrementSchedulerOperationCount(); + thread->UpdateLastScheduledTime(); + thread->SetProcessLastThreadAndIdleSelectionCount(sched.idleSelectionCount); + } else { + ++sched.idleSelectionCount; + } + sched.selectedThread = thread; + sched.isContextSwitchNeeded = true; + } + std::atomic_thread_fence(std::memory_order_seq_cst); + }; + + // This maintain the "current thread is on front of queue" invariant + std::array curThreads; + for (uint i = 0; i < MAX_CORES; i++) { + KScheduler &sched = *KCoreContext::GetInstance(i).GetScheduler(); + curThreads[i] = scheduledMlqs[i].empty() ? nullptr : &scheduledMlqs[i].front(); + updateThread(curThreads[i], sched); + } + + // Do some load-balancing. Allow second pass. + std::array curThreads2 = curThreads; + for (uint i = 0; i < MAX_CORES; i++) { + if (scheduledMlqs[i].empty()) { + KThread *winner = PickOneSuggestedThread(curThreads2, i, false, true); + if (winner != nullptr) { + curThreads2[i] = winner; + TransferThreadToCore(*winner, i); + winner->IncrementSchedulerOperationCount(); + } + } + } + + // See which to-be-current threads have changed & update accordingly + for (uint i = 0; i < MAX_CORES; i++) { + KScheduler &sched = *KCoreContext::GetInstance(i).GetScheduler(); + if (curThreads2[i] != curThreads[i]) { + updateThread(curThreads2[i], sched); + } + } + reselectionRequired = false; +} + +KCriticalSection KScheduler::criticalSection{}; + +void KScheduler::YieldCurrentThread() +{ + KCoreContext &cctx = KCoreContext::GetCurrentInstance(); + cctx.GetScheduler()->DoYieldOperation(Global::YieldThread, *cctx.GetCurrentThread()); +} + +void KScheduler::YieldCurrentThreadAndBalanceLoad() +{ + KCoreContext &cctx = KCoreContext::GetCurrentInstance(); + cctx.GetScheduler()->DoYieldOperation(Global::YieldThreadAndBalanceLoad, *cctx.GetCurrentThread()); +} + +void KScheduler::YieldCurrentThreadAndWaitForLoadBalancing() +{ + KCoreContext &cctx = KCoreContext::GetCurrentInstance(); + cctx.GetScheduler()->DoYieldOperation(Global::YieldThreadAndWaitForLoadBalancing, *cctx.GetCurrentThread()); +} + +} diff --git a/mesosphere/source/threading/KThread.cpp b/mesosphere/source/threading/KThread.cpp new file mode 100644 index 000000000..7cf45f020 --- /dev/null +++ b/mesosphere/source/threading/KThread.cpp @@ -0,0 +1,237 @@ +#include +#include +#include + +#include +#include +#include + +namespace mesosphere +{ + +bool KThread::IsAlive() const +{ + return true; +} + +void KThread::OnAlarm() +{ + CancelKernelSync(); +} + +void KThread::AdjustScheduling(ushort oldMaskFull) +{ + if (currentSchedMaskFull == oldMaskFull) { + return; + } else if (CompareSchedulingStatusFull(oldMaskFull, SchedulingStatus::Running)) { + KScheduler::Global::SetThreadPaused(*this); + } else if (CompareSchedulingStatusFull(SchedulingStatus::Running)) { + 
KScheduler::Global::SetThreadRunning(*this); + } +} + +void KThread::Reschedule(KThread::SchedulingStatus newStatus) +{ + std::lock_guard criticalSection{KScheduler::GetCriticalSection()}; + AdjustScheduling(SetSchedulingStatusField(newStatus)); +} + +void KThread::RescheduleIfStatusEquals(SchedulingStatus expectedStatus, SchedulingStatus newStatus) +{ + if(GetSchedulingStatus() == expectedStatus) { + Reschedule(newStatus); + } +} + +void KThread::AddForcePauseReason(KThread::ForcePauseReason reason) +{ + std::lock_guard criticalSection{KScheduler::GetCriticalSection()}; + + if (!IsDying()) { + AddForcePauseReasonToField(reason); + if (numKernelMutexWaiters == 0) { + AdjustScheduling(CommitForcePauseToField()); + } + } +} + +void KThread::RemoveForcePauseReason(KThread::ForcePauseReason reason) +{ + std::lock_guard criticalSection{KScheduler::GetCriticalSection()}; + + if (!IsDying()) { + RemoveForcePauseReasonToField(reason); + if (!IsForcePaused() && numKernelMutexWaiters == 0) { + AdjustScheduling(CommitForcePauseToField()); + } + } +} + +bool KThread::WaitForKernelSync(KThread::WaitList &waitList) +{ + // Has to be called from critical section + currentWaitList = &waitList; + Reschedule(SchedulingStatus::Paused); + waitList.push_back(*this); + if (IsDying()) { + // Whoops + ResumeFromKernelSync(); + return false; + } + + return true; +} + +void KThread::ResumeFromKernelSync() +{ + // Has to be called from critical section + currentWaitList->erase(currentWaitList->iterator_to(*this)); + currentWaitList = nullptr; + Reschedule(SchedulingStatus::Running); +} + +void KThread::ResumeAllFromKernelSync(KThread::WaitList &waitList) +{ + // Has to be called from critical section + waitList.clear_and_dispose( + [](KThread *t) { + t->currentWaitList = nullptr; + t->Reschedule(SchedulingStatus::Running); + } + ); +} + +void KThread::CancelKernelSync() +{ + std::lock_guard criticalSection{KScheduler::GetCriticalSection()}; + if (GetSchedulingStatus() == SchedulingStatus::Paused) { + // Note: transparent to force-pause + if (currentWaitList != nullptr) { + ResumeFromKernelSync(); + } else { + Reschedule(SchedulingStatus::Running); + } + } +} + +void KThread::CancelKernelSync(Result res) +{ + syncResult = res; + CancelKernelSync(); +} + +void KThread::AddToMutexWaitList(KThread &thread) +{ + // TODO: check&increment numKernelMutexWaiters + // Ordered list insertion + auto it = std::find_if( + mutexWaitList.begin(), + mutexWaitList.end(), + [&thread](const KThread &t) { + return t.GetPriority() > thread.GetPriority(); + } + ); + + if (it != mutexWaitList.end()) { + mutexWaitList.insert(it, thread); + } else { + mutexWaitList.push_back(thread); + } +} + +KThread::MutexWaitList::iterator KThread::RemoveFromMutexWaitList(KThread::MutexWaitList::const_iterator it) +{ + // TODO: check&decrement numKernelMutexWaiters + return mutexWaitList.erase(it); +} + +void KThread::RemoveFromMutexWaitList(const KThread &t) +{ + RemoveFromMutexWaitList(mutexWaitList.iterator_to(t)); +} + +void KThread::InheritDynamicPriority() +{ + /* + Do priority inheritance + Since we're maybe changing the priority of the thread, + we must go through the entire mutex owner chain. + The invariant must be preserved: + A thread holding a mutex must have a higher-or-same priority than + all threads waiting for it to release the mutex. 
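+
+    A small illustration (lower value = higher priority): if thread C
+    (priority 10) waits on a mutex held by B (base priority 20), B is
+    boosted to 10; if B is itself blocked on a mutex held by A, the loop
+    follows B->wantedMutexOwner and boosts A the same way. The walk stops
+    as soon as a thread's recomputed priority does not change.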
+ */ + + for (KThread *t = this; t != nullptr; t = t->wantedMutexOwner) { + uint newPrio, oldPrio = priority; + if (!mutexWaitList.empty() && mutexWaitList.front().priority < basePriority) { + newPrio = mutexWaitList.front().priority; + } else { + newPrio = basePriority; + } + + if (newPrio == oldPrio) { + break; + } else { + // Update everything that depends on dynamic priority: + + // TODO update condvar + // TODO update ctr arbiter + priority = newPrio; + // TODO update condvar + // TODO update ctr arbiter + if (CompareSchedulingStatusFull(SchedulingStatus::Running)) { + KScheduler::Global::AdjustThreadPriorityChanged(*this, oldPrio, this == KCoreContext::GetCurrentInstance().GetCurrentThread()); + } + + if (wantedMutexOwner != nullptr) { + wantedMutexOwner->RemoveFromMutexWaitList(*this); + wantedMutexOwner->AddToMutexWaitList(*this); + } + } + } +} + +void KThread::AddMutexWaiter(KThread &waiter) +{ + AddToMutexWaitList(waiter); + InheritDynamicPriority(); +} + +void KThread::RemoveMutexWaiter(KThread &waiter) +{ + RemoveFromMutexWaitList(waiter); + InheritDynamicPriority(); +} + +KThread *KThread::RelinquishMutex(size_t *count, uiptr mutexAddr) +{ + KThread *newOwner = nullptr; + *count = 0; + + // First in list wanting mutexAddr becomes owner, the rest is transferred + for (auto it = mutexWaitList.begin(); it != mutexWaitList.end(); ) { + if (it->wantedMutex != mutexAddr) { + ++it; + continue; + } else { + KThread &t = *it; + ++(*count); + it = RemoveFromMutexWaitList(it); + if (newOwner == nullptr) { + newOwner = &t; + } else { + newOwner->AddToMutexWaitList(t); + } + } + } + + // Mutex waiters list have changed + InheritDynamicPriority(); + if (newOwner != nullptr) { + newOwner->InheritDynamicPriority(); + } + + return newOwner; +} + +}