Moving ioctls to separate files

This commit is contained in:
plutoo 2017-11-26 08:16:48 +01:00 committed by yellows8
parent 99880a7b26
commit 2fd13b5556
11 changed files with 537 additions and 505 deletions

View File

@ -24,8 +24,7 @@ VERSION := $(LIBNX_MAJOR).$(LIBNX_MINOR).$(LIBNX_PATCH)
#--------------------------------------------------------------------------------- #---------------------------------------------------------------------------------
TARGET := nx TARGET := nx
#BUILD := build #BUILD := build
SOURCES := source/arm source/system source/kernel source/services source/gfx source/devices source/util/utf SOURCES := source/arm source/system source/kernel source/services source/gfx source/gfx/ioctl source/devices source/util/utf
DATA := data DATA := data
INCLUDES := include INCLUDES := include

View File

@ -40,3 +40,6 @@
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) #define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) #define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
#define __in
#define __out
#define __inout

View File

@ -1,40 +1,40 @@
typedef struct { typedef struct {
u32 arch; // 0x120 (NVGPU_GPU_ARCH_GM200) u32 arch; // 0x120 (NVGPU_GPU_ARCH_GM200)
u32 impl; // 0xB (NVGPU_GPU_IMPL_GM20B) u32 impl; // 0xB (NVGPU_GPU_IMPL_GM20B)
u32 rev; // 0xA1 (Revision A1) u32 rev; // 0xA1 (Revision A1)
u32 num_gpc; // 0x1 u32 num_gpc; // 0x1
u64 L2_cache_size; // 0x40000 u64 L2_cache_size; // 0x40000
u64 on_board_video_memory_size; // 0x0 (not used) u64 on_board_video_memory_size; // 0x0 (not used)
u32 num_tpc_per_gpc; // 0x2 u32 num_tpc_per_gpc; // 0x2
u32 bus_type; // 0x20 (NVGPU_GPU_BUS_TYPE_AXI) u32 bus_type; // 0x20 (NVGPU_GPU_BUS_TYPE_AXI)
u32 big_page_size; // 0x20000 u32 big_page_size; // 0x20000
u32 compression_page_size; // 0x20000 u32 compression_page_size; // 0x20000
u32 pde_coverage_bit_count; // 0x1B u32 pde_coverage_bit_count; // 0x1B
u32 available_big_page_sizes; // 0x30000 u32 available_big_page_sizes; // 0x30000
u32 gpc_mask; // 0x1 u32 gpc_mask; // 0x1
u32 sm_arch_sm_version; // 0x503 (Maxwell Generation 5.0.3?) u32 sm_arch_sm_version; // 0x503 (Maxwell Generation 5.0.3?)
u32 sm_arch_spa_version; // 0x503 (Maxwell Generation 5.0.3?) u32 sm_arch_spa_version; // 0x503 (Maxwell Generation 5.0.3?)
u32 sm_arch_warp_count; // 0x80 u32 sm_arch_warp_count; // 0x80
u32 gpu_va_bit_count; // 0x28 u32 gpu_va_bit_count; // 0x28
u32 reserved; // NULL u32 reserved; // NULL
u64 flags; // 0x55 u64 flags; // 0x55
u32 twod_class; // 0x902D (FERMI_TWOD_A) u32 twod_class; // 0x902D (FERMI_TWOD_A)
u32 threed_class; // 0xB197 (MAXWELL_B) u32 threed_class; // 0xB197 (MAXWELL_B)
u32 compute_class; // 0xB1C0 (MAXWELL_COMPUTE_B) u32 compute_class; // 0xB1C0 (MAXWELL_COMPUTE_B)
u32 gpfifo_class; // 0xB06F (MAXWELL_CHANNEL_GPFIFO_A) u32 gpfifo_class; // 0xB06F (MAXWELL_CHANNEL_GPFIFO_A)
u32 inline_to_memory_class; // 0xA140 (KEPLER_INLINE_TO_MEMORY_B) u32 inline_to_memory_class; // 0xA140 (KEPLER_INLINE_TO_MEMORY_B)
u32 dma_copy_class; // 0xB0B5 (MAXWELL_DMA_COPY_A) u32 dma_copy_class; // 0xB0B5 (MAXWELL_DMA_COPY_A)
u32 max_fbps_count; // 0x1 u32 max_fbps_count; // 0x1
u32 fbp_en_mask; // 0x0 (disabled) u32 fbp_en_mask; // 0x0 (disabled)
u32 max_ltc_per_fbp; // 0x2 u32 max_ltc_per_fbp; // 0x2
u32 max_lts_per_ltc; // 0x1 u32 max_lts_per_ltc; // 0x1
u32 max_tex_per_tpc; // 0x0 (not supported) u32 max_tex_per_tpc; // 0x0 (not supported)
u32 max_gpc_count; // 0x1 u32 max_gpc_count; // 0x1
u32 rop_l2_en_mask_0; // 0x21D70 (fuse_status_opt_rop_l2_fbp_r) u32 rop_l2_en_mask_0; // 0x21D70 (fuse_status_opt_rop_l2_fbp_r)
u32 rop_l2_en_mask_1; // 0x0 u32 rop_l2_en_mask_1; // 0x0
u64 chipname; // 0x6230326D67 ("gm20b") u64 chipname; // 0x6230326D67 ("gm20b")
u64 gr_compbit_store_base_hw; // 0x0 (not supported) u64 gr_compbit_store_base_hw; // 0x0 (not supported)
} gpu_characteristics; } gpu_characteristics;
typedef struct { typedef struct {
u64 offset; u64 offset;
@ -92,16 +92,16 @@ Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]);
Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size); Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size);
Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle); Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle);
Result nvioctlNvmap_FromID(u32 fd, u32 id, u32 *nvmap_handle); Result nvioctlNvmap_FromId(u32 fd, u32 id, u32 *nvmap_handle);
Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr); Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr);
Result nvioctlNvmap_GetID(u32 fd, u32 nvmap_handle, u32 *id); Result nvioctlNvmap_GetId(u32 fd, u32 nvmap_handle, u32 *id);
Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd); Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd);
Result nvioctlChannel_SubmitGPFIFO(u32 fd, nvioctl_gpfifo_entry *entries, u32 num_entries, u32 flags, nvioctl_fence *fence_out); Result nvioctlChannel_SubmitGpfifo(u32 fd, nvioctl_gpfifo_entry *entries, u32 num_entries, u32 flags, nvioctl_fence *fence_out);
Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags); Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags);
Result nvioctlChannel_ZCullBind(u32 fd, u32 in[4]); Result nvioctlChannel_ZCullBind(u32 fd, u32 in[4]);
Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_handle); Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_handle);
Result nvioctlChannel_SetPriority(u32 fd, u32 priority); Result nvioctlChannel_SetPriority(u32 fd, u32 priority);
Result nvioctlChannel_AllocGPFIFOEx2(u32 fd, u32 num_entries, u32 flags, u32 unk0, u32 unk1, u32 unk2, u32 unk3, nvioctl_fence *fence_out); Result nvioctlChannel_AllocGpfifoEx2(u32 fd, u32 num_entries, u32 flags, u32 unk0, u32 unk1, u32 unk2, u32 unk3, nvioctl_fence *fence_out);
Result nvioctlChannel_SetUserData(u32 fd, void* addr); Result nvioctlChannel_SetUserData(u32 fd, void* addr);

View File

@ -59,7 +59,7 @@ static Result _gfxDequeueBuffer() {
rc = gfxproducerDequeueBuffer(/*1*/0, 1280, 720, 0, 0x300, &g_gfxCurrentProducerBuffer); rc = gfxproducerDequeueBuffer(/*1*/0, 1280, 720, 0, 0x300, &g_gfxCurrentProducerBuffer);
if (R_SUCCEEDED(rc)) g_gfxCurrentBuffer = (g_gfxCurrentBuffer+1) & (g_nvgfx_totalframebufs-1); if (R_SUCCEEDED(rc)) g_gfxCurrentBuffer = (g_gfxCurrentBuffer + 1) & (g_nvgfx_totalframebufs-1);
return rc; return rc;
} }

View File

@ -0,0 +1,139 @@
#include <switch.h>
#include <string.h>
Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd) {
    // Tells the channel which nvmap fd to use (channel ioctl 0x48/0x01).
    struct {
        __in u32 fd;
    } args;

    memset(&args, 0, sizeof(args));
    args.fd = nvmap_fd;

    return nvIoctl(fd, _IOW(0x48, 0x01, args), &args);
}
Result nvioctlChannel_SubmitGpfifo(u32 fd, nvioctl_gpfifo_entry *entries, u32 num_entries, u32 flags, nvioctl_fence *fence_out) {
    // Submits num_entries gpfifo entries to the channel (channel ioctl 0x48/0x08).
    // On success, the returned fence is copied to *fence_out when fence_out is non-NULL.
    Result rc=0;

    // Make sure stack data doesn't get very large: the ioctl argument struct
    // below contains a VLA sized by num_entries and lives on the stack.
    if(num_entries > 0x200)
        return MAKERESULT(MODULE_LIBNX, LIBNX_OUTOFMEM);

    struct {
        __in u64 gpfifo;                                // (ignored) pointer to gpfifo entry structs
        __in u32 num_entries;                           // number of entries being submitted
        __in u32 flags;
        __out nvioctl_fence fence_out;                  // returned new fence object for others to wait on
        __in nvioctl_gpfifo_entry entries[num_entries]; // depends on num_entries
    } data;

    memset(&data, 0, sizeof(data));
    data.gpfifo = 1; // pointer field is ignored by the sysmodule; set to a dummy non-zero value -- TODO confirm non-zero is required
    data.num_entries = num_entries;
    data.flags = flags;
    memcpy(data.entries, entries, sizeof(data.entries));

    rc = nvIoctl(fd, _IOWR(0x48, 0x08, data), &data);

    if (R_SUCCEEDED(rc) && fence_out) {
        memcpy(fence_out, &data.fence_out, sizeof(data.fence_out));
    }

    return rc;
}
Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags) {
    // Binds a GPU engine object of the given class to the channel
    // (channel ioctl 0x48/0x09), e.g. NVIOCTL_CHANNEL_OBJ_CLASSNUM_3d.
    struct {
        __in u32 class_num;
        __in u32 flags;
        __in u64 obj_id; // (ignored) used for FREE_OBJ_CTX ioctl, which is not supported
    } data;

    memset(&data, 0, sizeof(data));
    data.class_num = class_num;
    data.flags = flags;
    data.obj_id = 0xDEADBEEF; // arbitrary marker value; field is ignored (see above)

    return nvIoctl(fd, _IOWR(0x48, 0x09, data), &data);
}
Result nvioctlChannel_ZCullBind(u32 fd, u32 in[4]) {
    // Fixme: Needs work
    // Passes the four raw input words straight through to channel ioctl 0x48/0x0B.
    struct {
        u32 in[4];
    } args;

    memset(&args, 0, sizeof(args));
    memcpy(args.in, in, sizeof(args.in));

    return nvIoctl(fd, _IOWR(0x48, 0x0B, args), &args);
}
Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_handle) {
    // Configures the channel's error-notifier buffer, located at offset/size
    // within the given nvmap object (channel ioctl 0x48/0x0C).
    struct {
        __in u64 offset;
        __in u64 size;
        __in u32 nvmap_handle; // nvmap object handle backing the notifier
        u32 padding;
    } data;

    memset(&data, 0, sizeof(data));
    data.offset = offset;
    data.size = size;
    data.nvmap_handle = nvmap_handle;

    return nvIoctl(fd, _IOWR(0x48, 0x0C, data), &data);
}
Result nvioctlChannel_SetPriority(u32 fd, u32 priority) {
    // Sets the channel's scheduling priority (channel ioctl 0x48/0x0D).
    struct {
        __in u32 priority; // 0x32 is low, 0x64 is medium and 0x96 is high
    } data;

    memset(&data, 0, sizeof(data));
    data.priority = priority;

    return nvIoctl(fd, _IOW(0x48, 0x0D, data), &data);
}
Result nvioctlChannel_AllocGpfifoEx2(u32 fd, u32 num_entries, u32 flags, u32 unk0, u32 unk1, u32 unk2, u32 unk3, nvioctl_fence *fence_out) {
    // Allocates the channel's gpfifo with num_entries entries (channel ioctl 0x48/0x1A).
    // On success, the initial fence is copied to *fence_out when fence_out is non-NULL.
    Result rc=0;

    struct {
        __in u32 num_entries;
        __in u32 flags;
        __in u32 unk0; // (1 works)
        __out nvioctl_fence fence_out;
        __in u32 unk1;
        __in u32 unk2;
        __in u32 unk3;
    } data;

    memset(&data, 0, sizeof(data));
    data.num_entries = num_entries;
    data.flags = flags;
    data.unk0 = unk0;
    data.unk1 = unk1;
    data.unk2 = unk2;
    data.unk3 = unk3;

    rc = nvIoctl(fd, _IOWR(0x48, 0x1A, data), &data);

    if (R_SUCCEEDED(rc) && fence_out) {
        memcpy(fence_out, &data.fence_out, sizeof(data.fence_out));
    }

    return rc;
}
Result nvioctlChannel_SetUserData(u32 fd, void* addr) {
    // Associates a user data pointer with the channel.
    // NOTE(review): uses ioctl magic 0x47 while the other channel ioctls in this
    // file use 0x48; this matches the pre-split implementation, but worth confirming.
    struct {
        __in u64 addr;
    } data;

    memset(&data, 0, sizeof(data));
    data.addr = (u64)addr;

    return nvIoctl(fd, _IOW(0x47, 0x14, data), &data);
}

View File

@ -0,0 +1,114 @@
#include <switch.h>
#include <string.h>
Result nvioctlNvhostAsGpu_BindChannel(u32 fd, u32 channel_fd) {
    // Binds the given channel fd to this GPU address space (ioctl 0x41/0x01).
    struct {
        __in u32 fd;
    } args;

    memset(&args, 0, sizeof(args));
    args.fd = channel_fd;

    return nvIoctl(fd, _IOW(0x41, 0x01, args), &args);
}
Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags, u64 align, u64 *offset) {
    // Reserves pages*page_size of GPU virtual address space (ioctl 0x41/0x02).
    // On success the allocated GPU VA is written to *offset (when offset is non-NULL).
    Result rc = 0;

    struct {
        __in u32 pages;
        __in u32 page_size;
        __in u32 flags;
        u32 pad;
        union {
            __out u64 offset; // output: allocated GPU VA
            __in u64 align;   // input: requested alignment (shares storage with offset)
        };
    } data;

    memset(&data, 0, sizeof(data));
    data.pages = pages;
    data.page_size = page_size;
    data.flags = flags;
    data.align = align;

    rc = nvIoctl(fd, _IOWR(0x41, 0x02, data), &data);

    // Guard the out-pointer against NULL, matching nvioctlNvhostAsGpu_MapBufferEx;
    // previously a NULL offset would have been dereferenced unconditionally.
    if (R_SUCCEEDED(rc) && offset)
        *offset = data.offset;

    return rc;
}
Result nvioctlNvhostAsGpu_MapBufferEx(
    u32 fd, u32 flags, u32 kind, u32 nvmap_handle, u32 page_size,
    u64 buffer_offset, u64 mapping_size, u64 input_offset, u64 *offset)
{
    // Maps (part of) an nvmap buffer into this GPU address space (ioctl 0x41/0x06).
    // On success the resulting GPU VA is written to *offset (when offset is non-NULL).
    Result rc = 0;

    struct {
        __in u32 flags;        // bit0: fixed_offset, bit2: cacheable
        __in u32 kind;         // -1 is default
        __in u32 nvmap_handle;
        __inout u32 page_size; // 0 means don't care
        __in u64 buffer_offset;
        __in u64 mapping_size;
        __inout u64 offset;    // in: requested VA (with fixed_offset); out: actual VA
    } data;

    memset(&data, 0, sizeof(data));
    data.flags = flags;
    data.kind = kind;
    data.nvmap_handle = nvmap_handle;
    data.page_size = page_size;
    data.buffer_offset = buffer_offset;
    data.mapping_size = mapping_size;
    data.offset = input_offset;

    rc = nvIoctl(fd, _IOWR(0x41, 0x06, data), &data);

    if (R_SUCCEEDED(rc) && offset) {
        *offset = data.offset;
    }

    return rc;
}
Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]) {
    // Queries the address space's two VA regions (ioctl 0x41/0x08) and copies
    // both into regions[] on success.
    Result rc=0;

    struct {
        u64 not_used;    // contained output user ptr on linux, ignored
        __inout u32 bufsize; // forced to 2*sizeof(struct va_region)
        u32 pad;
        __out nvioctl_va_region regions[2];
    } data;

    memset(&data, 0, sizeof(data));
    data.bufsize = sizeof(data.regions);

    rc = nvIoctl(fd, _IOWR(0x41, 0x08, data), &data);

    if (R_SUCCEEDED(rc)) {
        memcpy(regions, data.regions, sizeof(data.regions));
    }

    return rc;
}
Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size) {
    // Initializes the GPU address space (ioctl 0x41/0x09); only big_page_size
    // is set, every other field is sent as zero.
    struct {
        __in u32 big_page_size; // depends on GPU's available_big_page_sizes; 0=default
        __in s32 as_fd;         // ignored; passes 0
        __in u32 flags;         // ignored; passes 0
        __in u32 reserved;      // ignored; passes 0
        __in u64 unk0;
        __in u64 unk1;
        __in u64 unk2;
    } data;

    memset(&data, 0, sizeof(data));
    data.big_page_size = big_page_size;

    return nvIoctl(fd, _IOW(0x41, 0x09, data), &data);
}

View File

@ -0,0 +1,98 @@
#include <switch.h>
#include <string.h>
Result nvioctlNvhostCtrlGpu_ZCullGetCtxSize(u32 fd, u32 *out) {
    // Reads the ZCULL context size (ioctl 0x47/0x01) into *out.
    struct {
        __out u32 out;
    } args;

    memset(&args, 0, sizeof(args));

    Result rc = nvIoctl(fd, _IOR(0x47, 0x01, args), &args);
    if (R_SUCCEEDED(rc))
        *out = args.out;

    return rc;
}
Result nvioctlNvhostCtrlGpu_ZCullGetInfo(u32 fd, u32 out[40>>2]) {
    // Reads ZCULL info (ioctl 0x47/0x02) into the 40-byte out buffer.
    Result rc = 0;

    struct {
        __out u32 out[40>>2];
    } data;

    memset(&data, 0, sizeof(data));

    rc = nvIoctl(fd, _IOR(0x47, 0x02, data), &data);

    if (R_SUCCEEDED(rc)) {
        memcpy(out, data.out, sizeof(data.out));
    }

    return rc;
}
Result nvioctlNvhostCtrlGpu_GetCharacteristics(u32 fd, gpu_characteristics *out) {
    // Fetches the GPU characteristics struct (ioctl 0x47/0x05) into *out.
    Result rc = 0;

    struct {
        __in u64 gc_buf_size; // must not be NULL, but gets overwritten with 0xA0=max_size
        __in u64 gc_buf_addr; // ignored, but must not be NULL
        __out gpu_characteristics gc;
    } data;

    memset(&data, 0, sizeof(data));
    data.gc_buf_size = sizeof(gpu_characteristics);
    data.gc_buf_addr = 1; // dummy non-zero value (see field comment above)

    rc = nvIoctl(fd, _IOWR(0x47, 0x05, data), &data);

    if (R_SUCCEEDED(rc)) {
        memcpy(out, &data.gc, sizeof(gpu_characteristics));
    }

    return rc;
}
Result nvioctlNvhostCtrlGpu_GetTpcMasks(u32 fd, u32 inval, u32 out[24>>2]) {
    // Queries TPC masks (ioctl 0x47/0x06) into the 24-byte out buffer.
    Result rc = 0;

    // Fixme: This one is wrong.
    // NOTE(review): the argument layout is a guess (unk[0]=size?, unk[2]=addr?);
    // verify against the sysmodule before relying on the output.
    struct {
        __inout u32 unk[24>>2];
    } data;

    memset(&data, 0, sizeof(data));
    data.unk[0] = inval;
    data.unk[2] = 1; //addr?

    rc = nvIoctl(fd, _IOWR(0x47, 0x06, data), &data);
    if (R_FAILED(rc)) return rc;

    memcpy(out, &data.unk, sizeof(data.unk));

    return rc;
}
Result nvioctlNvhostCtrlGpu_GetL2State(u32 fd, nvioctl_l2_state *out) {
    // Reads the L2 cache state (ioctl 0x47/0x14) into *out.
    Result rc = 0;

    struct {
        __out nvioctl_l2_state out;
    } data;

    memset(&data, 0, sizeof(data));

    rc = nvIoctl(fd, _IOR(0x47, 0x14, data), &data);

    if (R_SUCCEEDED(rc)) {
        memcpy(out, &data.out, sizeof(data.out));
    }

    return rc;
}

View File

@ -0,0 +1,52 @@
#include <switch.h>
#include <string.h>
Result nvioctlNvhostCtrl_EventSignal(u32 fd, u32 event_id) {
    // Signals an nvhost event (ioctl 0x00/0x1C).
    struct {
        __in u32 event_id; // ranges from 0x01 to 0x3F
    } data;

    memset(&data, 0, sizeof(data));
    data.event_id = event_id;

    return nvIoctl(fd, _IOWR(0x00, 0x1C, data), &data);
}
Result nvioctlNvhostCtrl_EventWait(u32 fd, u32 unk0, u32 unk1, s32 timeout, u32 event_id, u32 *out)
{
    // Waits on an nvhost event (ioctl 0x00/0x1D). On success the result word
    // is written to *out (when out is non-NULL).
    Result rc = 0;

    struct {
        __in u32 unk0;
        __in u32 unk1;
        __in s32 timeout;
        union {
            __in u32 event;   // input: event id
            __out u32 result; // output: wait result (shares storage with event)
        };
    } data;

    memset(&data, 0, sizeof(data));
    data.unk0 = unk0;
    data.unk1 = unk1;
    data.timeout = timeout;
    data.event = event_id;

    rc = nvIoctl(fd, _IOWR(0x00, 0x1D, data), &data);

    // Guard against a NULL out-pointer; the pre-split implementation did this
    // (if(out) *out = ...), and dropping the check introduced a NULL deref.
    if (R_SUCCEEDED(rc) && out)
        *out = data.result;

    return rc;
}
Result nvioctlNvhostCtrl_EventRegister(u32 fd, u32 event_id) {
    // Registers an nvhost event (ioctl 0x40/0x1F).
    struct {
        __in u32 event_id; // ranges from 0x01 to 0x3F
    } data;

    memset(&data, 0, sizeof(data));
    data.event_id = event_id;

    return nvIoctl(fd, _IOWR(0x40, 0x1F, data), &data);
}

View File

@ -0,0 +1,84 @@
#include <switch.h>
#include <string.h>
Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle) {
    // Creates an nvmap object of the given size (ioctl 0x01/0x01); on success
    // the new handle is written to *nvmap_handle.
    Result rc=0;

    struct {
        __in u32 size;
        __out u32 handle;
    } data;

    memset(&data, 0, sizeof(data));
    data.size = size;

    rc = nvIoctl(fd, _IOWR(0x01, 0x01, data), &data);

    if (R_SUCCEEDED(rc)) {
        *nvmap_handle = data.handle;
    }

    return rc;
}
Result nvioctlNvmap_FromId(u32 fd, u32 id, u32 *nvmap_handle) {
    // Obtains a handle to an existing nvmap object by id (ioctl 0x01/0x03);
    // on success the handle is written to *nvmap_handle.
    Result rc=0;

    struct {
        __in u32 id;
        __out u32 handle;
    } data;

    memset(&data, 0, sizeof(data));
    data.id = id;

    rc = nvIoctl(fd, _IOWR(0x01, 0x03, data), &data);

    if (R_SUCCEEDED(rc)) {
        *nvmap_handle = data.handle;
    }

    return rc;
}
Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr) {
    // Allocates backing memory for an nvmap object (ioctl 0x01/0x04), using the
    // caller-provided buffer at addr.
    struct {
        __in u32 handle;
        __in u32 heapmask;
        __in u32 flags; // (0=read-only, 1=read-write)
        __in u32 align;
        __in u8 kind;
        u8 pad[7];
        __in u64 addr;
    } data;

    memset(&data, 0, sizeof(data));
    data.handle = nvmap_handle;
    data.heapmask = heapmask;
    data.flags = flags;
    data.align = align;
    data.kind = kind;
    data.addr = (u64)addr;

    return nvIoctl(fd, _IOWR(0x01, 0x04, data), &data);
}
Result nvioctlNvmap_GetId(u32 fd, u32 nvmap_handle, u32 *id) {
    // Looks up the global id of an nvmap object by handle (ioctl 0x01/0x0E);
    // on success the id is written to *id.
    Result rc=0;

    struct {
        __out u32 id;
        __in u32 handle;
    } data;

    memset(&data, 0, sizeof(data));
    data.handle = nvmap_handle;

    rc = nvIoctl(fd, _IOWR(0x01, 0x0E, data), &data);

    if (R_SUCCEEDED(rc)) {
        *id = data.id;
    }

    return rc;
}

View File

@ -187,6 +187,7 @@ Result nvgfxInitialize(void) {
//Officially NVHOST_IOCTL_CTRL_GET_CONFIG is used a lot (here and later), skip that. This is done with a /dev/nvhost-ctrl fd, seperate from the one used later. //Officially NVHOST_IOCTL_CTRL_GET_CONFIG is used a lot (here and later), skip that. This is done with a /dev/nvhost-ctrl fd, seperate from the one used later.
if (R_SUCCEEDED(rc)) rc = nvOpen(&g_nvgfx_fd_nvhostctrlgpu, "/dev/nvhost-ctrl-gpu"); if (R_SUCCEEDED(rc)) rc = nvOpen(&g_nvgfx_fd_nvhostctrlgpu, "/dev/nvhost-ctrl-gpu");
if (R_SUCCEEDED(rc)) rc = nvioctlNvhostCtrlGpu_GetCharacteristics(g_nvgfx_fd_nvhostctrlgpu, &g_nvgfx_gpu_characteristics); if (R_SUCCEEDED(rc)) rc = nvioctlNvhostCtrlGpu_GetCharacteristics(g_nvgfx_fd_nvhostctrlgpu, &g_nvgfx_gpu_characteristics);
if (R_SUCCEEDED(rc)) rc = nvioctlNvhostCtrlGpu_GetTpcMasks(g_nvgfx_fd_nvhostctrlgpu, 4, g_nvgfx_tpcmasks); if (R_SUCCEEDED(rc)) rc = nvioctlNvhostCtrlGpu_GetTpcMasks(g_nvgfx_fd_nvhostctrlgpu, 4, g_nvgfx_tpcmasks);
@ -226,7 +227,7 @@ Result nvgfxInitialize(void) {
if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetNvmapFd(g_nvgfx_fd_nvhostgpu, g_nvgfx_fd_nvmap); if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetNvmapFd(g_nvgfx_fd_nvhostgpu, g_nvgfx_fd_nvmap);
if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocGPFIFOEx2(g_nvgfx_fd_nvhostgpu, 0x800, 0x1, 0, 0, 0, 0, &g_nvgfx_nvhost_fence); if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocGpfifoEx2(g_nvgfx_fd_nvhostgpu, 0x800, 0x1, 0, 0, 0, 0, &g_nvgfx_nvhost_fence);
if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocObjCtx(g_nvgfx_fd_nvhostgpu, NVIOCTL_CHANNEL_OBJ_CLASSNUM_3d, 0); if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocObjCtx(g_nvgfx_fd_nvhostgpu, NVIOCTL_CHANNEL_OBJ_CLASSNUM_3d, 0);
@ -290,7 +291,7 @@ Result nvgfxInitialize(void) {
for(i=0; i<2; i++) { for(i=0; i<2; i++) {
tmpval = 0; tmpval = 0;
rc = nvioctlNvmap_GetID(g_nvgfx_fd_nvmap, nvmap_objs[6].handle, &tmpval); rc = nvioctlNvmap_GetId(g_nvgfx_fd_nvmap, nvmap_objs[6].handle, &tmpval);
if (R_FAILED(rc)) break; if (R_FAILED(rc)) break;
if(tmpval==~0) { if(tmpval==~0) {
@ -298,10 +299,10 @@ Result nvgfxInitialize(void) {
break; break;
} }
rc = nvioctlNvmap_FromID(g_nvgfx_fd_nvmap, tmpval, &tmpval); rc = nvioctlNvmap_FromId(g_nvgfx_fd_nvmap, tmpval, &tmpval);
if (R_FAILED(rc)) break; if (R_FAILED(rc)) break;
//The above gets a nvmap_handle, but normally it's the same value passed to nvioctlNvmap_GetID(). //The above gets a nvmap_handle, but normally it's the same value passed to nvioctlNvmap_GetId().
g_gfxprod_BufferInitData[0xa] = i; g_gfxprod_BufferInitData[0xa] = i;
g_gfxprod_BufferInitData[0xe] = tmpval; g_gfxprod_BufferInitData[0xe] = tmpval;

View File

@ -1,458 +0,0 @@
#include <string.h>
#include <switch.h>
Result nvioctlNvhostCtrl_EventSignal(u32 fd, u32 event_id) {
struct {
u32 event_id; //in ranges from 0x01 to 0x3F
} data;
memset(&data, 0, sizeof(data));
data.event_id = event_id;
return nvIoctl(fd, _IOWR(0x00, 0x1C, data), &data);
}
Result nvioctlNvhostCtrl_EventWait(u32 fd, u32 unk0, u32 unk1, s32 timeout, u32 event_id, u32 *out) {
Result rc = 0;
struct {
u32 unk0;//in
u32 unk1;//in
s32 timeout;//in
u32 event;// in=event_id; out=result
} data;
memset(&data, 0, sizeof(data));
data.unk0 = unk0;
data.unk1 = unk1;
data.timeout = timeout;
data.event = event_id;
rc = nvIoctl(fd, _IOWR(0x00, 0x1D, data), &data);
if (R_FAILED(rc)) return rc;
if(out) *out = data.event;
return rc;
}
Result nvioctlNvhostCtrl_EventRegister(u32 fd, u32 event_id) {
struct {
u32 event_id; //in ranges from 0x01 to 0x3F
} data;
memset(&data, 0, sizeof(data));
data.event_id = event_id;
return nvIoctl(fd, _IOWR(0x40, 0x1F, data), &data);
}
Result nvioctlNvhostCtrlGpu_ZCullGetCtxSize(u32 fd, u32 *out) {
Result rc = 0;
struct {
u32 out;
} data;
memset(&data, 0, sizeof(data));
rc = nvIoctl(fd, _IOR(0x47, 0x01, data), &data);
if (R_FAILED(rc)) return rc;
*out = data.out;
return rc;
}
Result nvioctlNvhostCtrlGpu_ZCullGetInfo(u32 fd, u32 out[40>>2]) {
Result rc = 0;
struct {
u32 out[40>>2];
} data;
memset(&data, 0, sizeof(data));
rc = nvIoctl(fd, _IOR(0x47, 0x02, data), &data);
if (R_FAILED(rc)) return rc;
memcpy(out, data.out, sizeof(data.out));
return rc;
}
Result nvioctlNvhostCtrlGpu_GetCharacteristics(u32 fd, gpu_characteristics *out) {
Result rc = 0;
struct {
u64 gpu_characteristics_buf_size; // in/out (must not be NULL, but gets overwritten with 0xA0=max_size)
u64 gpu_characteristics_buf_addr; // in (ignored, but must not be NULL)
gpu_characteristics gc; // out
} data;
memset(&data, 0, sizeof(data));
data.gpu_characteristics_buf_size = sizeof(gpu_characteristics);
data.gpu_characteristics_buf_addr = 1;
rc = nvIoctl(fd, _IOWR(0x47, 0x05, data), &data);
if (R_FAILED(rc)) return rc;
memcpy(out, &data.gc, sizeof(gpu_characteristics));
return rc;
}
Result nvioctlNvhostCtrlGpu_GetTpcMasks(u32 fd, u32 inval, u32 out[24>>2]) {
Result rc = 0;
struct {
u32 unk[24>>2];
} data;
memset(&data, 0, sizeof(data));
data.unk[0] = inval;
data.unk[2] = 1;//addr?
rc = nvIoctl(fd, _IOWR(0x47, 0x06, data), &data);
if (R_FAILED(rc)) return rc;
memcpy(out, &data.unk, sizeof(data.unk));
return rc;
}
Result nvioctlNvhostCtrlGpu_GetL2State(u32 fd, nvioctl_l2_state *out) {
Result rc = 0;
struct {
nvioctl_l2_state out;
} data;
memset(&data, 0, sizeof(data));
rc = nvIoctl(fd, _IOR(0x47, 0x14, data), &data);
if (R_FAILED(rc)) return rc;
memcpy(out, &data.out, sizeof(data.out));
return rc;
}
Result nvioctlNvhostAsGpu_BindChannel(u32 fd, u32 channel_fd) {
struct {
u32 fd;//in
} data;
memset(&data, 0, sizeof(data));
data.fd = channel_fd;
return nvIoctl(fd, _IOW(0x41, 0x01, data), &data);
}
Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags, u64 align, u64 *offset) {
Result rc=0;
struct {
u32 pages;//in
u32 page_size;//in
u32 flags;//in
u32 pad;
union {
u64 offset;//out
u64 align;//in
};
} data;
memset(&data, 0, sizeof(data));
data.pages = pages;
data.page_size = page_size;
data.flags = flags;
data.align = align;
rc = nvIoctl(fd, _IOWR(0x41, 0x02, data), &data);
if (R_FAILED(rc)) return rc;
*offset = data.offset;
return rc;
}
Result nvioctlNvhostAsGpu_MapBufferEx(u32 fd, u32 flags, u32 kind, u32 nvmap_handle, u32 page_size, u64 buffer_offset, u64 mapping_size, u64 input_offset, u64 *offset) {
Result rc=0;
struct {
u32 flags; // in bit0: fixed_offset, bit2: cacheable
u32 kind; // in -1 is default
u32 nvmap_handle; // in
u32 page_size; // inout 0 means don't care
u64 buffer_offset; // in
u64 mapping_size; // in
u64 offset; // inout
} data;
memset(&data, 0, sizeof(data));
data.flags = flags;
data.kind = kind;
data.nvmap_handle = nvmap_handle;
data.page_size = page_size;
data.buffer_offset = buffer_offset;
data.mapping_size = mapping_size;
data.offset = input_offset;
rc = nvIoctl(fd, _IOWR(0x41, 0x06, data), &data);
if (R_FAILED(rc)) return rc;
if (offset) *offset = data.offset;
return rc;
}
Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]) {
Result rc=0;
struct {
u64 not_used; // (contained output user ptr on linux, ignored)
u32 bufsize; //inout forced to 2*sizeof(struct va_region)
u32 pad;
nvioctl_va_region regions[2];//out
} data;
memset(&data, 0, sizeof(data));
data.bufsize = sizeof(data.regions);
rc = nvIoctl(fd, _IOWR(0x41, 0x08, data), &data);
if (R_FAILED(rc)) return rc;
memcpy(regions, data.regions, sizeof(data.regions));
return rc;
}
Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size) {
struct {
u32 big_page_size; // depends on GPU's available_big_page_sizes; 0=default
s32 as_fd; // ignored; passes 0
u32 flags; // ignored; passes 0
u32 reserved; // ignored; passes 0
u64 unk0;
u64 unk1;
u64 unk2;
} data;
memset(&data, 0, sizeof(data));
data.big_page_size = big_page_size;
return nvIoctl(fd, _IOW(0x41, 0x09, data), &data);
}
Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle) {
Result rc=0;
struct {
u32 size;//in
u32 handle;//out
} data;
memset(&data, 0, sizeof(data));
data.size = size;
rc = nvIoctl(fd, _IOWR(0x01, 0x01, data), &data);
if (R_FAILED(rc)) return rc;
*nvmap_handle = data.handle;
return rc;
}
Result nvioctlNvmap_FromID(u32 fd, u32 id, u32 *nvmap_handle) {
Result rc=0;
struct {
u32 id;//in
u32 handle;//out
} data;
memset(&data, 0, sizeof(data));
data.id = id;
rc = nvIoctl(fd, _IOWR(0x01, 0x03, data), &data);
if (R_FAILED(rc)) return rc;
*nvmap_handle = data.handle;
return rc;
}
Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr) {
struct {
u32 handle;//in
u32 heapmask;//in
u32 flags; //in (0=read-only, 1=read-write)
u32 align;//in
u8 kind;//in
u8 pad[7];
u64 addr;//in
} data;
memset(&data, 0, sizeof(data));
data.handle = nvmap_handle;
data.heapmask = heapmask;
data.flags = flags;
data.align = align;
data.kind = kind;
data.addr = (u64)addr;
return nvIoctl(fd, _IOWR(0x01, 0x04, data), &data);
}
Result nvioctlNvmap_GetID(u32 fd, u32 nvmap_handle, u32 *id) {
Result rc=0;
struct {
u32 id;//out
u32 handle;//in
} data;
memset(&data, 0, sizeof(data));
data.handle = nvmap_handle;
rc = nvIoctl(fd, _IOWR(0x01, 0x0E, data), &data);
if (R_FAILED(rc)) return rc;
*id = data.id;
return rc;
}
Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd) {
struct {
u32 fd;//in
} data;
memset(&data, 0, sizeof(data));
data.fd = nvmap_fd;
return nvIoctl(fd, _IOW(0x48, 0x01, data), &data);
}
Result nvioctlChannel_SubmitGPFIFO(u32 fd, nvioctl_gpfifo_entry *entries, u32 num_entries, u32 flags, nvioctl_fence *fence_out) {
Result rc=0;
if(num_entries > 0x200) return MAKERESULT(MODULE_LIBNX, LIBNX_OUTOFMEM);//Make sure stack data doesn't get very large.
struct {
u64 gpfifo; // in (ignored) pointer to gpfifo fence structs
u32 num_entries; // in number of fence objects being submitted
u32 flags; // in
nvioctl_fence fence_out; // out returned new fence object for others to wait on
nvioctl_gpfifo_entry entries[num_entries]; // in depends on num_entries
} data;
memset(&data, 0, sizeof(data));
data.gpfifo = 1;
data.num_entries = num_entries;
data.flags = flags;
memcpy(data.entries, entries, sizeof(data.entries));
rc = nvIoctl(fd, _IOWR(0x48, 0x08, data), &data);
if (R_FAILED(rc)) return rc;
if(fence_out) memcpy(fence_out, &data.fence_out, sizeof(data.fence_out));
return rc;
}
Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags) {
struct {
u32 class_num; // 0x902D=2d, 0xB197=3d, 0xB1C0=compute, 0xA140=kepler, 0xB0B5=DMA, 0xB06F=channel_gpfifo
u32 flags;
u64 obj_id; // (ignored) used for FREE_OBJ_CTX ioctl, which is not supported
} data;
memset(&data, 0, sizeof(data));
data.class_num = class_num;
data.flags = flags;
data.obj_id = 0xDEADBEEF;
return nvIoctl(fd, _IOWR(0x48, 0x09, data), &data);
}
Result nvioctlChannel_ZCullBind(u32 fd, u32 in[4]) {
struct {
u32 in[4];
} data;
memset(&data, 0, sizeof(data));
memcpy(data.in, in, sizeof(data.in));
return nvIoctl(fd, _IOWR(0x48, 0x0B, data), &data);
}
Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_handle) {
struct {
u64 offset;//in
u64 size;//in
u32 mem; //in nvmap object handle
u32 padding;//in
} data;
memset(&data, 0, sizeof(data));
data.offset = offset;
data.size = size;
data.mem = nvmap_handle;
return nvIoctl(fd, _IOWR(0x48, 0x0C, data), &data);
}
Result nvioctlChannel_SetPriority(u32 fd, u32 priority) {
struct {
u32 priority; //in 0x32 is low, 0x64 is medium and 0x96 is high
} data;
memset(&data, 0, sizeof(data));
data.priority = priority;
return nvIoctl(fd, _IOW(0x48, 0x0D, data), &data);
}
Result nvioctlChannel_AllocGPFIFOEx2(u32 fd, u32 num_entries, u32 flags, u32 unk0, u32 unk1, u32 unk2, u32 unk3, nvioctl_fence *fence_out) {
Result rc=0;
struct {
u32 num_entries; // in
u32 flags; // in
u32 unk0; // in (1 works)
nvioctl_fence fence_out; // out
u32 unk1; // in
u32 unk2; // in
u32 unk3; // in
} data;
memset(&data, 0, sizeof(data));
data.num_entries = num_entries;
data.flags = flags;
data.unk0 = unk0;
data.unk1 = unk1;
data.unk2 = unk2;
data.unk3 = unk3;
rc = nvIoctl(fd, _IOWR(0x48, 0x1A, data), &data);
if (R_FAILED(rc)) return rc;
if(fence_out) memcpy(fence_out, &data.fence_out, sizeof(data.fence_out));
return rc;
}
Result nvioctlChannel_SetUserData(u32 fd, void* addr) {
struct {
u64 addr;
} data;
memset(&data, 0, sizeof(data));
data.addr = (u64)addr;
return nvIoctl(fd, _IOW(0x47, 0x14, data), &data);
}