libnx/nx/include/switch/kernel/virtmem.h
Commit a05a44fca8 by fincs, 2020-10-27 12:27:32 +01:00

virtmem: Major rewrite to support ASLR, see details:
- Added virtmemLock/Unlock, needed for atomic find-and-map operations
- Added virtmemFindAslr, which looks for a random free area in AslrRegion
  - virtmemReserve still exists for legacy callers who rely on sequential
    allocation to avoid race conditions under concurrent use
- Added virtmemFindStack, which searches within StackRegion instead
  - Removed virtmemReserveStack/FreeStack
- Changed shmem/thread/tmem/codememory-jit to use the new virtmem API
  - Legacy jit still uses virtmemReserve

/**
 * @file virtmem.h
 * @brief Virtual memory mapping utilities
 * @author plutoo
 * @copyright libnx Authors
 */
#pragma once
#include "../types.h"

/**
 * @brief Reserves a slice of general purpose address space sequentially.
 * @param size Desired size of the slice (rounded up to page alignment).
 * @return Pointer to the slice of address space.
 */
void* virtmemReserve(size_t size);

/**
 * @brief Relinquishes a slice of address space reserved with virtmemReserve (currently no-op).
 * @param addr Pointer to the slice.
 * @param size Size of the slice.
 */
void virtmemFree(void* addr, size_t size);
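
/*
 * Example (illustrative sketch, not part of the original header): the legacy
 * sequential path. virtmemReserve hands out addresses in order, so it is safe
 * to call from concurrent threads without extra locking; virtmemFree is
 * currently a no-op, but callers should still pair the two calls.
 *
 *     size_t size = 0x2000;              // two 4 KiB pages
 *     void* addr = virtmemReserve(size); // sequential allocation
 *     // ... map memory at addr via an svc call ...
 *     virtmemFree(addr, size);           // currently a no-op
 */
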
/// Locks the virtual memory manager mutex.
void virtmemLock(void);

/// Unlocks the virtual memory manager mutex.
void virtmemUnlock(void);
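
/*
 * The mutex is meant to be held across an entire find-and-map sequence, so
 * that no other thread can claim the chosen slice between the find and the
 * map call. Skeleton of the intended pattern (fuller sketches follow below):
 *
 *     virtmemLock();
 *     void* addr = virtmemFindAslr(size, 0);
 *     // ... issue the svcMap* call for addr while still holding the mutex ...
 *     virtmemUnlock();
 */
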
/**
 * @brief Finds a random slice of free general purpose address space.
 * @param size Desired size of the slice (rounded up to page alignment).
 * @param guard_size Desired size of the unmapped guard areas surrounding the slice (rounded up to page alignment).
 * @return Pointer to the slice of address space, or NULL on failure.
 * @note The virtual memory manager mutex must be held during the find-and-map process (see \ref virtmemLock and \ref virtmemUnlock).
 */
void* virtmemFindAslr(size_t size, size_t guard_size);
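
/*
 * Example (illustrative sketch, not part of the original header): atomically
 * find a random slice and map a shared memory object into it. Assumes a valid
 * shared memory handle and page-aligned size; svcMapSharedMemory, Perm_Rw and
 * R_FAILED come from the other libnx kernel headers.
 *
 *     virtmemLock();                         // hold the mutex across find+map
 *     void* addr = virtmemFindAslr(size, 0); // no guard pages requested
 *     if (addr && R_FAILED(svcMapSharedMemory(handle, addr, size, Perm_Rw)))
 *         addr = NULL;                       // the slice stays free until mapped
 *     virtmemUnlock();
 */
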
/**
 * @brief Finds a random slice of free stack address space.
 * @param size Desired size of the slice (rounded up to page alignment).
 * @param guard_size Desired size of the unmapped guard areas surrounding the slice (rounded up to page alignment).
 * @return Pointer to the slice of address space, or NULL on failure.
 * @note The virtual memory manager mutex must be held during the find-and-map process (see \ref virtmemLock and \ref virtmemUnlock).
 */
void* virtmemFindStack(size_t size, size_t guard_size);
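
/*
 * Example (illustrative sketch, not part of the original header): mirroring a
 * page-aligned stack buffer into StackRegion, similar to what the libnx thread
 * code does with this function. svcMapMemory comes from the kernel svc header;
 * stack_mem and stack_sz are assumed page-aligned.
 *
 *     virtmemLock();
 *     void* mirror = virtmemFindStack(stack_sz, 0);
 *     if (mirror && R_FAILED(svcMapMemory(mirror, stack_mem, stack_sz)))
 *         mirror = NULL;
 *     virtmemUnlock();
 */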