Mirror of https://github.com/switchbrew/libnx.git
More ioctls + nvgfx init. Added nvmapobjSetup().
parent 31b945dff9
commit 017770b509
@@ -48,12 +48,35 @@ typedef struct {
    u32 flush; // active flush bit field
} nvioctl_l2_state;

typedef struct {
    u32 id;
    u32 value;
} nvioctl_fence;

//Used with nvioctlChannel_AllocObjCtx().
enum nvioctl_channel_obj_classnum {
    NVIOCTL_CHANNEL_OBJ_CLASSNUM_2d = 0x902D,
    NVIOCTL_CHANNEL_OBJ_CLASSNUM_3d = 0xB197,
    NVIOCTL_CHANNEL_OBJ_CLASSNUM_compute = 0xB1C0,
    NVIOCTL_CHANNEL_OBJ_CLASSNUM_kepler = 0xA140,
    NVIOCTL_CHANNEL_OBJ_CLASSNUM_DMA = 0xB0B5,
    NVIOCTL_CHANNEL_OBJ_CLASSNUM_channel_gpfifo = 0xB06F
};

//Used with nvioctlChannel_SetPriority().
enum nvioctl_channel_priority {
    NVIOCTL_CHANNEL_PRIORITY_low = 0x32,
    NVIOCTL_CHANNEL_PRIORITY_medium = 0x64,
    NVIOCTL_CHANNEL_PRIORITY_high = 0x96
};

Result nvioctlNvhostCtrlGpu_ZCullGetCtxSize(u32 fd, u32 *out);
Result nvioctlNvhostCtrlGpu_ZCullGetInfo(u32 fd, u32 out[40>>2]);
Result nvioctlNvhostCtrlGpu_GetCharacteristics(u32 fd, gpu_characteristics *out);
Result nvioctlNvhostCtrlGpu_GetTpcMasks(u32 fd, u32 inval, u32 out[24>>2]);
Result nvioctlNvhostCtrlGpu_GetL2State(u32 fd, nvioctl_l2_state *out);

Result nvioctlNvhostAsGpu_BindChannel(u32 fd, u32 channel_fd);
Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags, u64 align, u64 *offset);
Result nvioctlNvhostAsGpu_MapBufferEx(u32 fd, u32 flags, u32 kind, u32 nvmap_handle, u32 page_size, u64 buffer_offset, u64 mapping_size, u64 input_offset, u64 *offset);
Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]);

@@ -62,3 +85,10 @@ Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size);
Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle);
Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr);

Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd);
Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags);
Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_handle);
Result nvioctlChannel_SetPriority(u32 fd, u32 priority);
Result nvioctlChannel_AllocGPFIFOEx2(u32 fd, u32 num_entries, u32 flags, u32 unk0, u32 unk1, u32 unk2, u32 unk3, nvioctl_fence *fence_out);
Result nvioctlChannel_SetUserData(u32 fd, void* addr);
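
For orientation, a minimal sketch (not part of this commit) of how the new channel wrappers and enums fit together, mirroring part of the call order used by nvgfxInitialize() further down; the fd parameters, the helper name and the error handling here are assumptions:

// Hypothetical helper: drive the new channel ioctls against an already-opened
// /dev/nvhost-gpu fd, roughly as nvgfxInitialize() does below.
static Result example_setup_channel(u32 gpu_fd, u32 nvmap_fd, u32 errnotifier_handle) {
    Result rc;
    nvioctl_fence fence;

    rc = nvioctlChannel_SetNvmapFd(gpu_fd, nvmap_fd);
    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocGPFIFOEx2(gpu_fd, 0x800, 0x1, 0, 0, 0, 0, &fence);
    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocObjCtx(gpu_fd, NVIOCTL_CHANNEL_OBJ_CLASSNUM_3d, 0);
    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetErrorNotifier(gpu_fd, 0, 0x1000, errnotifier_handle);
    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetPriority(gpu_fd, NVIOCTL_CHANNEL_PRIORITY_medium);

    return rc;
}
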
@@ -13,6 +13,7 @@ static bool g_nvgfxInitialized = 0;
static u32 g_nvgfx_fd_nvhostctrlgpu;
static u32 g_nvgfx_fd_nvhostasgpu;
static u32 g_nvgfx_fd_nvmap;
static u32 g_nvgfx_fd_nvhostgpu;

static gpu_characteristics g_nvgfx_gpu_characteristics;
static u64 g_nvgfx_nvhostasgpu_allocspace_offset;

@@ -21,8 +22,11 @@ static u32 g_nvgfx_zcullctxsize;
static u32 g_nvgfx_zcullinfo[40>>2];
static nvioctl_va_region g_nvgfx_nvhostasgpu_varegions[2];
static nvioctl_l2_state g_nvgfx_l2state;
static nvioctl_fence g_nvgfx_nvhost_fence;
static u8 *g_nvgfx_nvhost_userdata;
static size_t g_nvgfx_nvhost_userdata_size;

-nvmapobj nvmap_objs[2];
+static nvmapobj nvmap_objs[3];

Result nvmapobjInitialize(nvmapobj *obj, size_t size) {
    Result rc=0;

@@ -59,6 +63,15 @@ void nvmapobjCloseAll(void) {
    for(pos=0; pos<sizeof(nvmap_objs)/sizeof(nvmapobj); pos++) nvmapobjClose(&nvmap_objs[pos]);
}

Result nvmapobjSetup(nvmapobj *obj, u32 heapmask, u32 flags, u32 align, u8 kind) {
    Result rc=0;

    rc = nvioctlNvmap_Create(g_nvgfx_fd_nvmap, obj->mem_size, &obj->handle);
    if (R_SUCCEEDED(rc)) rc = nvioctlNvmap_Alloc(g_nvgfx_fd_nvmap, obj->handle, heapmask, flags, align, kind, obj->mem);

    return rc;
}
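
nvmapobjSetup() simply folds the nvmap Create+Alloc pair into one helper. A minimal usage sketch, assuming /dev/nvmap has already been opened by nvgfxInitialize() and the size/flags values just echo what nvgfxInitialize() passes for nvmap_objs[2]:

// Sketch only: size the object, then create and allocate its nvmap handle.
nvmapobj obj;
Result rc = nvmapobjInitialize(&obj, 0x1000);                      // 0x1000-byte backing buffer
if (R_SUCCEEDED(rc)) rc = nvmapobjSetup(&obj, 0, 0x1, 0x1000, 0);  // heapmask=0, flags=0x1, align=0x1000, kind=0
if (R_FAILED(rc)) nvmapobjClose(&obj);                             // nvmapobjClose() releases the object again
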
Result nvgfxInitialize(void) {
    Result rc=0;
    if(g_nvgfxInitialized)return 0;

@@ -66,6 +79,7 @@ Result nvgfxInitialize(void) {
    g_nvgfx_fd_nvhostctrlgpu = 0;
    g_nvgfx_fd_nvhostasgpu = 0;
    g_nvgfx_fd_nvmap = 0;
    g_nvgfx_fd_nvhostgpu = 0;

    memset(nvmap_objs, 0, sizeof(nvmap_objs));

@@ -74,11 +88,20 @@ Result nvgfxInitialize(void) {
    memset(g_nvgfx_zcullinfo, 0, sizeof(g_nvgfx_zcullinfo));
    memset(g_nvgfx_nvhostasgpu_varegions, 0, sizeof(g_nvgfx_nvhostasgpu_varegions));
    memset(&g_nvgfx_l2state, 0, sizeof(nvioctl_l2_state));
    memset(&g_nvgfx_nvhost_fence, 0, sizeof(g_nvgfx_nvhost_fence));
    g_nvgfx_nvhostasgpu_allocspace_offset = 0;
    g_nvgfx_zcullctxsize = 0;

    if (R_SUCCEEDED(rc)) rc = nvmapobjInitialize(&nvmap_objs[0], 0x1000);
    if (R_SUCCEEDED(rc)) rc = nvmapobjInitialize(&nvmap_objs[1], 0x10000);
    if (R_SUCCEEDED(rc)) rc = nvmapobjInitialize(&nvmap_objs[2], 0x1000);

    if (R_SUCCEEDED(rc)) { //Unknown what size/etc is used officially.
        g_nvgfx_nvhost_userdata_size = 0x1000;
        g_nvgfx_nvhost_userdata = memalign(0x1000, g_nvgfx_nvhost_userdata_size);
        if (g_nvgfx_nvhost_userdata==NULL) rc = MAKERESULT(MODULE_LIBNX, LIBNX_OUTOFMEM);
        if (R_SUCCEEDED(rc)) memset(g_nvgfx_nvhost_userdata, 0, g_nvgfx_nvhost_userdata_size);
    }

    //Officially NVHOST_IOCTL_CTRL_GET_CONFIG is used a lot (here and later), skip that.

@@ -102,27 +125,54 @@ Result nvgfxInitialize(void) {
    if (R_SUCCEEDED(rc)) rc = nvioctlNvhostAsGpu_AllocSpace(g_nvgfx_fd_nvhostasgpu, 0x10000, 0x20000, 0, 0x10000, &g_nvgfx_nvhostasgpu_allocspace_offset);
    if (R_SUCCEEDED(rc)) rc = nvOpen(&g_nvgfx_fd_nvmap, "/dev/nvmap");

-   if (R_SUCCEEDED(rc)) rc = nvioctlNvmap_Create(g_nvgfx_fd_nvmap, nvmap_objs[0].mem_size, &nvmap_objs[0].handle);
-   if (R_SUCCEEDED(rc)) rc = nvioctlNvmap_Alloc(g_nvgfx_fd_nvmap, nvmap_objs[0].handle, 0, 0, 0x20000, 0, nvmap_objs[0].mem);
+   if (R_SUCCEEDED(rc)) rc = nvmapobjSetup(&nvmap_objs[0], 0, 0, 0x20000, 0);

    if (R_SUCCEEDED(rc)) rc = nvioctlNvhostAsGpu_MapBufferEx(g_nvgfx_fd_nvhostasgpu, 0, 0, nvmap_objs[0].handle, 0x10000, 0, 0, 0, NULL);
    if (R_SUCCEEDED(rc)) rc = nvioctlNvhostAsGpu_MapBufferEx(g_nvgfx_fd_nvhostasgpu, 0, 0xfe, nvmap_objs[0].handle, 0x10000, 0, 0, 0, NULL);

-   if (R_SUCCEEDED(rc)) rc = nvioctlNvmap_Create(g_nvgfx_fd_nvmap, nvmap_objs[1].mem_size, &nvmap_objs[1].handle);
-   if (R_SUCCEEDED(rc)) rc = nvioctlNvmap_Alloc(g_nvgfx_fd_nvmap, nvmap_objs[1].handle, 0, 0, 0x20000, 0, nvmap_objs[1].mem);
+   if (R_SUCCEEDED(rc)) rc = nvmapobjSetup(&nvmap_objs[1], 0, 0, 0x20000, 0);

    if (R_SUCCEEDED(rc)) rc = nvioctlNvhostAsGpu_MapBufferEx(g_nvgfx_fd_nvhostasgpu, 5, 0, nvmap_objs[1].handle, 0x10000, 0, 0x10000, g_nvgfx_nvhostasgpu_allocspace_offset, NULL);
    if (R_SUCCEEDED(rc)) rc = nvioctlNvhostAsGpu_MapBufferEx(g_nvgfx_fd_nvhostasgpu, 4, 0xfe, nvmap_objs[1].handle, 0x10000, 0, 0, 0, NULL);

    if (R_SUCCEEDED(rc)) rc = nvioctlNvhostCtrlGpu_GetL2State(g_nvgfx_fd_nvhostctrlgpu, &g_nvgfx_l2state);

    if (R_SUCCEEDED(rc)) rc = nvmapobjSetup(&nvmap_objs[2], 0, 0x1, 0x1000, 0);

    if (R_SUCCEEDED(rc)) rc = nvOpen(&g_nvgfx_fd_nvhostgpu, "/dev/nvhost-gpu");

    if (R_SUCCEEDED(rc)) rc = nvioctlNvhostAsGpu_BindChannel(g_nvgfx_fd_nvhostasgpu, g_nvgfx_fd_nvhostgpu);

    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetNvmapFd(g_nvgfx_fd_nvhostgpu, g_nvgfx_fd_nvmap);

    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocGPFIFOEx2(g_nvgfx_fd_nvhostgpu, 0x800, 0x1, 0, 0, 0, 0, &g_nvgfx_nvhost_fence);

    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_AllocObjCtx(g_nvgfx_fd_nvhostgpu, NVIOCTL_CHANNEL_OBJ_CLASSNUM_3d, 0);

    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetErrorNotifier(g_nvgfx_fd_nvhostgpu, 0, 0x1000, nvmap_objs[2].handle);

    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetUserData(g_nvgfx_fd_nvhostgpu, g_nvgfx_nvhost_userdata);

    if (R_SUCCEEDED(rc)) rc = nvioctlChannel_SetPriority(g_nvgfx_fd_nvhostgpu, NVIOCTL_CHANNEL_PRIORITY_medium);

    //if (R_SUCCEEDED(rc)) rc = -1;

    if (R_FAILED(rc)) {
        nvClose(g_nvgfx_fd_nvhostgpu);
        nvClose(g_nvgfx_fd_nvmap);
        nvClose(g_nvgfx_fd_nvhostasgpu);
        nvClose(g_nvgfx_fd_nvhostctrlgpu);
        g_nvgfx_fd_nvhostgpu = 0;
        g_nvgfx_fd_nvmap = 0;
        g_nvgfx_fd_nvhostasgpu = 0;
        g_nvgfx_fd_nvhostctrlgpu = 0;

        nvmapobjCloseAll();

        if(g_nvgfx_nvhost_userdata) {
            free(g_nvgfx_nvhost_userdata);
            g_nvgfx_nvhost_userdata = NULL;
        }
    }

    if (R_SUCCEEDED(rc)) g_nvgfxInitialized = 1;
@@ -133,15 +183,22 @@ Result nvgfxInitialize(void) {
void nvgfxExit(void) {
    if(!g_nvgfxInitialized)return;

    nvClose(g_nvgfx_fd_nvhostgpu);
    nvClose(g_nvgfx_fd_nvmap);
    nvClose(g_nvgfx_fd_nvhostasgpu);
    nvClose(g_nvgfx_fd_nvhostctrlgpu);
    g_nvgfx_fd_nvhostgpu = 0;
    g_nvgfx_fd_nvmap = 0;
    g_nvgfx_fd_nvhostasgpu = 0;
    g_nvgfx_fd_nvhostctrlgpu = 0;

    nvmapobjCloseAll();

    if(g_nvgfx_nvhost_userdata) {
        free(g_nvgfx_nvhost_userdata);
        g_nvgfx_nvhost_userdata = NULL;
    }

    g_nvgfxInitialized = 0;
}
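
A minimal caller-side sketch for the init/exit pair above; the application code itself is an assumption, not something this commit adds:

// Hypothetical application usage; on failure nvgfxInitialize() already closes
// its fds and nvmap objects, so the caller needs no extra cleanup.
Result rc = nvgfxInitialize();
if (R_SUCCEEDED(rc)) {
    // ... drive the GPU channel ...
    nvgfxExit();
}
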
@@ -92,6 +92,17 @@ Result nvioctlNvhostCtrlGpu_GetL2State(u32 fd, nvioctl_l2_state *out) {
    return rc;
}

Result nvioctlNvhostAsGpu_BindChannel(u32 fd, u32 channel_fd) {
    struct {
        u32 fd;//in
    } data;

    memset(&data, 0, sizeof(data));
    data.fd = channel_fd;

    return nvIoctl(fd, _IOW(0x41, 0x01, data), &data);
}

Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags, u64 align, u64 *offset) {
    Result rc=0;

@@ -186,7 +197,6 @@ Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size) {
    data.big_page_size = big_page_size;

    return nvIoctl(fd, _IOW(0x41, 0x09, data), &data);

}

Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle) {

@@ -230,3 +240,96 @@ Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr) {
    return nvIoctl(fd, _IOWR(0x01, 0x04, data), &data);
}

Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd) {
    struct {
        u32 fd;//in
    } data;

    memset(&data, 0, sizeof(data));
    data.fd = nvmap_fd;

    return nvIoctl(fd, _IOW(0x48, 0x01, data), &data);
}

Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags) {
    struct {
        u32 class_num; // 0x902D=2d, 0xB197=3d, 0xB1C0=compute, 0xA140=kepler, 0xB0B5=DMA, 0xB06F=channel_gpfifo
        u32 flags;
        u64 obj_id;    // (ignored) used for FREE_OBJ_CTX ioctl, which is not supported
    } data;

    memset(&data, 0, sizeof(data));
    data.class_num = class_num;
    data.flags = flags;
    data.obj_id = 0xDEADBEEF;

    return nvIoctl(fd, _IOWR(0x48, 0x09, data), &data);
}

Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_handle) {
    struct {
        u64 offset; //in
        u64 size;   //in
        u32 mem;    //in nvmap object handle
        u32 padding;//in
    } data;

    memset(&data, 0, sizeof(data));
    data.offset = offset;
    data.size = size;
    data.mem = nvmap_handle;

    return nvIoctl(fd, _IOWR(0x48, 0x0C, data), &data);
}

Result nvioctlChannel_SetPriority(u32 fd, u32 priority) {
    struct {
        u32 priority; //in 0x32 is low, 0x64 is medium and 0x96 is high
    } data;

    memset(&data, 0, sizeof(data));
    data.priority = priority;

    return nvIoctl(fd, _IOW(0x48, 0x0D, data), &data);
}
Result nvioctlChannel_AllocGPFIFOEx2(u32 fd, u32 num_entries, u32 flags, u32 unk0, u32 unk1, u32 unk2, u32 unk3, nvioctl_fence *fence_out) {
    Result rc=0;

    struct {
        u32 num_entries;         // in
        u32 flags;               // in
        u32 unk0;                // in (1 works)
        nvioctl_fence fence_out; // out
        u32 unk1;                // in
        u32 unk2;                // in
        u32 unk3;                // in
    } data;

    memset(&data, 0, sizeof(data));
    data.num_entries = num_entries;
    data.flags = flags;
    data.unk0 = unk0;
    data.unk1 = unk1;
    data.unk2 = unk2;
    data.unk3 = unk3;

    rc = nvIoctl(fd, _IOWR(0x48, 0x1A, data), &data);
    if (R_FAILED(rc)) return rc;

    if(fence_out) memcpy(fence_out, &data.fence_out, sizeof(data.fence_out));

    return rc;
}
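
The fence returned by AllocGPFIFOEx2 is just the id/value pair declared in the header. A hedged sketch of reading it back, assuming fd is an open channel fd; how the fence is actually waited on is outside this commit, and the syncpoint interpretation below is an assumption:

// Sketch only: capture the GPFIFO allocation fence.
nvioctl_fence fence;
Result rc = nvioctlChannel_AllocGPFIFOEx2(fd, 0x800, 0x1, 0, 0, 0, 0, &fence);
if (R_SUCCEEDED(rc)) {
    u32 syncpt_id = fence.id;     // assumption: id refers to a syncpoint
    u32 threshold = fence.value;  // assumption: value is the wait threshold
    (void)syncpt_id; (void)threshold;
}
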
Result nvioctlChannel_SetUserData(u32 fd, void* addr) {
    struct {
        u64 addr;
    } data;

    memset(&data, 0, sizeof(data));
    data.addr = (u64)addr;

    return nvIoctl(fd, _IOW(0x47, 0x14, data), &data);
}