Mirror of https://github.com/switchbrew/libnx.git (synced 2025-06-21 12:32:40 +02:00)

Fix NV ioctl name clashes with bsd ioctls (coming soon) & C++ stdlib

parent 7ef73113cb
commit 507bf71a7b
@@ -2,46 +2,46 @@
 //The below defines are from Linux kernel ioctl.h.
 
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
+#define _NV_IOC_NRBITS 8
+#define _NV_IOC_TYPEBITS 8
+#define _NV_IOC_SIZEBITS 14
+#define _NV_IOC_DIRBITS 2
 
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+#define _NV_IOC_NRMASK ((1 << _NV_IOC_NRBITS)-1)
+#define _NV_IOC_TYPEMASK ((1 << _NV_IOC_TYPEBITS)-1)
+#define _NV_IOC_SIZEMASK ((1 << _NV_IOC_SIZEBITS)-1)
+#define _NV_IOC_DIRMASK ((1 << _NV_IOC_DIRBITS)-1)
 
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+#define _NV_IOC_NRSHIFT 0
+#define _NV_IOC_TYPESHIFT (_NV_IOC_NRSHIFT+_NV_IOC_NRBITS)
+#define _NV_IOC_SIZESHIFT (_NV_IOC_TYPESHIFT+_NV_IOC_TYPEBITS)
+#define _NV_IOC_DIRSHIFT (_NV_IOC_SIZESHIFT+_NV_IOC_SIZEBITS)
 
 /*
  * Direction bits.
  */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
+#define _NV_IOC_NONE 0U
+#define _NV_IOC_WRITE 1U
+#define _NV_IOC_READ 2U
 
-#define _IOC(dir,type,nr,size) \
-    (((dir) << _IOC_DIRSHIFT) | \
-    ((type) << _IOC_TYPESHIFT) | \
-    ((nr) << _IOC_NRSHIFT) | \
-    ((size) << _IOC_SIZESHIFT))
+#define _NV_IOC(dir,type,nr,size) \
+    (((dir) << _NV_IOC_DIRSHIFT) | \
+    ((type) << _NV_IOC_TYPESHIFT) | \
+    ((nr) << _NV_IOC_NRSHIFT) | \
+    ((size) << _NV_IOC_SIZESHIFT))
 
 /* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+#define _NV_IO(type,nr) _NV_IOC(_NV_IOC_NONE,(type),(nr),0)
+#define _NV_IOR(type,nr,size) _NV_IOC(_NV_IOC_READ,(type),(nr),sizeof(size))
+#define _NV_IOW(type,nr,size) _NV_IOC(_NV_IOC_WRITE,(type),(nr),sizeof(size))
+#define _NV_IOWR(type,nr,size) _NV_IOC(_NV_IOC_READ|_NV_IOC_WRITE,(type),(nr),sizeof(size))
 
 /* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+#define _NV_IOC_DIR(nr) (((nr) >> _NV_IOC_DIRSHIFT) & _NV_IOC_DIRMASK)
+#define _NV_IOC_TYPE(nr) (((nr) >> _NV_IOC_TYPESHIFT) & _NV_IOC_TYPEMASK)
+#define _NV_IOC_NR(nr) (((nr) >> _NV_IOC_NRSHIFT) & _NV_IOC_NRMASK)
+#define _NV_IOC_SIZE(nr) (((nr) >> _NV_IOC_SIZESHIFT) & _NV_IOC_SIZEMASK)
 
-#define _in_
-#define _out_
-#define _inout_
+#define __nv_in
+#define __nv_out
+#define __nv_inout
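For reference, a minimal standalone sketch (not part of the commit) of how the renamed macros pack and unpack a request word. It assumes the _NV_IOC_*/_NV_IOWR definitions above are in scope; the 0x48/0x08 numbers and the example_args struct are illustrative values borrowed from the channel ioctls further down.

// Sketch only: assumes the _NV_IOC_* / _NV_IOWR macros above are in scope.
// example_args stands in for one of the per-ioctl argument structs (16 bytes).
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t gpfifo;
    uint32_t num_entries;
    uint32_t flags;
} example_args;

int main(void) {
    unsigned req = _NV_IOWR(0x48, 0x08, example_args);

    // The decode macros recover exactly what the encode macro packed:
    // dir in bits 30-31, size in bits 16-29, type in bits 8-15, nr in bits 0-7.
    assert(_NV_IOC_DIR(req)  == (_NV_IOC_READ | _NV_IOC_WRITE));
    assert(_NV_IOC_TYPE(req) == 0x48);
    assert(_NV_IOC_NR(req)   == 0x08);
    assert(_NV_IOC_SIZE(req) == sizeof(example_args));

    printf("request = 0x%08X\n", req);
    return 0;
}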
@@ -7,13 +7,13 @@
 
 Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd) {
     struct {
-        _in_ u32 fd;
+        __nv_in u32 fd;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.fd = nvmap_fd;
 
-    return nvIoctl(fd, _IOW(0x48, 0x01, data), &data);
+    return nvIoctl(fd, _NV_IOW(0x48, 0x01, data), &data);
 }
 
 Result nvioctlChannel_SubmitGpfifo(u32 fd, nvioctl_gpfifo_entry *entries, u32 num_entries, u32 flags, nvioctl_fence *fence_out) {
@@ -24,11 +24,11 @@ Result nvioctlChannel_SubmitGpfifo(u32 fd, nvioctl_gpfifo_entry *entries, u32 nu
         return MAKERESULT(Module_Libnx, LibnxError_OutOfMemory);
 
     struct {
-        _in_ u64 gpfifo; // (ignored) pointer to gpfifo entry structs
-        _in_ u32 num_entries; // number of entries being submitted
-        _in_ u32 flags;
-        _out_ nvioctl_fence fence_out; // returned new fence object for others to wait on
-        _in_ nvioctl_gpfifo_entry entries[num_entries]; // depends on num_entries
+        __nv_in u64 gpfifo; // (ignored) pointer to gpfifo entry structs
+        __nv_in u32 num_entries; // number of entries being submitted
+        __nv_in u32 flags;
+        __nv_out nvioctl_fence fence_out; // returned new fence object for others to wait on
+        __nv_in nvioctl_gpfifo_entry entries[num_entries]; // depends on num_entries
     } data;
 
@@ -38,7 +38,7 @@ Result nvioctlChannel_SubmitGpfifo(u32 fd, nvioctl_gpfifo_entry *entries, u32 nu
     data.flags = flags;
     memcpy(data.entries, entries, sizeof(data.entries));
 
-    rc = nvIoctl(fd, _IOWR(0x48, 0x08, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x48, 0x08, data), &data);
 
     if (R_SUCCEEDED(rc) && fence_out) {
         memcpy(fence_out, &data.fence_out, sizeof(data.fence_out));
@@ -49,9 +49,9 @@ Result nvioctlChannel_SubmitGpfifo(u32 fd, nvioctl_gpfifo_entry *entries, u32 nu
 
 Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags) {
     struct {
-        _in_ u32 class_num;
-        _in_ u32 flags;
-        _in_ u64 obj_id; // (ignored) used for FREE_OBJ_CTX ioctl, which is not supported
+        __nv_in u32 class_num;
+        __nv_in u32 flags;
+        __nv_in u64 obj_id; // (ignored) used for FREE_OBJ_CTX ioctl, which is not supported
     } data;
 
     memset(&data, 0, sizeof(data));
@@ -59,28 +59,28 @@ Result nvioctlChannel_AllocObjCtx(u32 fd, u32 class_num, u32 flags) {
     data.flags = flags;
     data.obj_id = 0xDEADBEEF;
 
-    return nvIoctl(fd, _IOWR(0x48, 0x09, data), &data);
+    return nvIoctl(fd, _NV_IOWR(0x48, 0x09, data), &data);
 }
 
 Result nvioctlChannel_ZCullBind(u32 fd, u64 gpu_va, u32 mode) {
     struct {
-        _in_ u64 gpu_va;
-        _in_ u32 mode;
-        _in_ u32 padding;
+        __nv_in u64 gpu_va;
+        __nv_in u32 mode;
+        __nv_in u32 padding;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.gpu_va = gpu_va;
     data.mode = mode;
 
-    return nvIoctl(fd, _IOWR(0x48, 0x0B, data), &data);
+    return nvIoctl(fd, _NV_IOWR(0x48, 0x0B, data), &data);
 }
 
 Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_handle) {
     struct {
-        _in_ u64 offset;
-        _in_ u64 size;
-        _in_ u32 nvmap_handle;
+        __nv_in u64 offset;
+        __nv_in u64 size;
+        __nv_in u32 nvmap_handle;
         u32 padding;
     } data;
 
@@ -89,31 +89,31 @@ Result nvioctlChannel_SetErrorNotifier(u32 fd, u64 offset, u64 size, u32 nvmap_h
     data.size = size;
     data.nvmap_handle = nvmap_handle;
 
-    return nvIoctl(fd, _IOWR(0x48, 0x0C, data), &data);
+    return nvIoctl(fd, _NV_IOWR(0x48, 0x0C, data), &data);
 }
 
 Result nvioctlChannel_SetPriority(u32 fd, u32 priority) {
     struct {
-        _in_ u32 priority; // 0x32 is low, 0x64 is medium and 0x96 is high
+        __nv_in u32 priority; // 0x32 is low, 0x64 is medium and 0x96 is high
     } data;
 
     memset(&data, 0, sizeof(data));
     data.priority = priority;
 
-    return nvIoctl(fd, _IOW(0x48, 0x0D, data), &data);
+    return nvIoctl(fd, _NV_IOW(0x48, 0x0D, data), &data);
 }
 
 Result nvioctlChannel_AllocGpfifoEx2(u32 fd, u32 num_entries, u32 flags, u32 unk0, u32 unk1, u32 unk2, u32 unk3, nvioctl_fence *fence_out) {
     Result rc=0;
 
     struct {
-        _in_ u32 num_entries;
-        _in_ u32 flags;
-        _in_ u32 unk0; // (1 works)
-        _out_ nvioctl_fence fence_out;
-        _in_ u32 unk1;
-        _in_ u32 unk2;
-        _in_ u32 unk3;
+        __nv_in u32 num_entries;
+        __nv_in u32 flags;
+        __nv_in u32 unk0; // (1 works)
+        __nv_out nvioctl_fence fence_out;
+        __nv_in u32 unk1;
+        __nv_in u32 unk2;
+        __nv_in u32 unk3;
     } data;
 
     memset(&data, 0, sizeof(data));
@@ -124,7 +124,7 @@ Result nvioctlChannel_AllocGpfifoEx2(u32 fd, u32 num_entries, u32 flags, u32 unk
     data.unk2 = unk2;
     data.unk3 = unk3;
 
-    rc = nvIoctl(fd, _IOWR(0x48, 0x1A, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x48, 0x1A, data), &data);
 
     if (R_SUCCEEDED(rc) && fence_out) {
         memcpy(fence_out, &data.fence_out, sizeof(data.fence_out));
@@ -135,11 +135,11 @@ Result nvioctlChannel_AllocGpfifoEx2(u32 fd, u32 num_entries, u32 flags, u32 unk
 
 Result nvioctlChannel_SetUserData(u32 fd, void* addr) {
     struct {
-        _in_ u64 addr;
+        __nv_in u64 addr;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.addr = (u64)addr;
 
-    return nvIoctl(fd, _IOW(0x47, 0x14, data), &data);
+    return nvIoctl(fd, _NV_IOW(0x47, 0x14, data), &data);
 }
@@ -7,26 +7,26 @@
 
 Result nvioctlNvhostAsGpu_BindChannel(u32 fd, u32 channel_fd) {
     struct {
-        _in_ u32 fd;
+        __nv_in u32 fd;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.fd = channel_fd;
 
-    return nvIoctl(fd, _IOW(0x41, 0x01, data), &data);
+    return nvIoctl(fd, _NV_IOW(0x41, 0x01, data), &data);
 }
 
 Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags, u64 align, u64 *offset) {
     Result rc = 0;
 
     struct {
-        _in_ u32 pages;
-        _in_ u32 page_size;
-        _in_ u32 flags;
+        __nv_in u32 pages;
+        __nv_in u32 page_size;
+        __nv_in u32 flags;
         u32 pad;
         union {
-            _out_ u64 offset;
-            _in_ u64 align;
+            __nv_out u64 offset;
+            __nv_in u64 align;
        };
     } data;
 
@@ -36,7 +36,7 @@ Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags
     data.flags = flags;
     data.align = align;
 
-    rc = nvIoctl(fd, _IOWR(0x41, 0x02, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x41, 0x02, data), &data);
     if (R_FAILED(rc)) return rc;
 
     *offset = data.offset;
@@ -51,13 +51,13 @@ Result nvioctlNvhostAsGpu_MapBufferEx(
     Result rc = 0;
 
     struct {
-        _in_ u32 flags; // bit0: fixed_offset, bit2: cacheable
-        _in_ u32 kind; // -1 is default
-        _in_ u32 nvmap_handle;
-        _inout_ u32 page_size; // 0 means don't care
-        _in_ u64 buffer_offset;
-        _in_ u64 mapping_size;
-        _inout_ u64 offset;
+        __nv_in u32 flags; // bit0: fixed_offset, bit2: cacheable
+        __nv_in u32 kind; // -1 is default
+        __nv_in u32 nvmap_handle;
+        __nv_inout u32 page_size; // 0 means don't care
+        __nv_in u64 buffer_offset;
+        __nv_in u64 mapping_size;
+        __nv_inout u64 offset;
     } data;
 
     memset(&data, 0, sizeof(data));
@@ -69,7 +69,7 @@ Result nvioctlNvhostAsGpu_MapBufferEx(
     data.mapping_size = mapping_size;
     data.offset = input_offset;
 
-    rc = nvIoctl(fd, _IOWR(0x41, 0x06, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x41, 0x06, data), &data);
 
     if (R_SUCCEEDED(rc) && offset) {
         *offset = data.offset;
@@ -83,15 +83,15 @@ Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]) {
 
     struct {
         u64 not_used; // contained output user ptr on linux, ignored
-        _inout_ u32 bufsize; // forced to 2*sizeof(struct va_region)
+        __nv_inout u32 bufsize; // forced to 2*sizeof(struct va_region)
         u32 pad;
-        _out_ nvioctl_va_region regions[2];
+        __nv_out nvioctl_va_region regions[2];
     } data;
 
     memset(&data, 0, sizeof(data));
     data.bufsize = sizeof(data.regions);
 
-    rc = nvIoctl(fd, _IOWR(0x41, 0x08, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x41, 0x08, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         memcpy(regions, data.regions, sizeof(data.regions));
@@ -102,18 +102,18 @@ Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]) {
 
 Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size, u32 flags) {
     struct {
-        _in_ u32 big_page_size; // depends on GPU's available_big_page_sizes; 0=default
-        _in_ s32 as_fd; // ignored; passes 0
-        _in_ u32 flags; // ignored; passes 0
-        _in_ u32 reserved; // ignored; passes 0
-        _in_ u64 unk0;
-        _in_ u64 unk1;
-        _in_ u64 unk2;
+        __nv_in u32 big_page_size; // depends on GPU's available_big_page_sizes; 0=default
+        __nv_in s32 as_fd; // ignored; passes 0
+        __nv_in u32 flags; // ignored; passes 0
+        __nv_in u32 reserved; // ignored; passes 0
+        __nv_in u64 unk0;
+        __nv_in u64 unk1;
+        __nv_in u64 unk2;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.big_page_size = big_page_size;
     data.flags = flags;
 
-    return nvIoctl(fd, _IOW(0x41, 0x09, data), &data);
+    return nvIoctl(fd, _NV_IOW(0x41, 0x09, data), &data);
 }
@@ -9,12 +9,12 @@ Result nvioctlNvhostCtrlGpu_ZCullGetCtxSize(u32 fd, u32 *out) {
     Result rc = 0;
 
     struct {
-        _out_ u32 out;
+        __nv_out u32 out;
     } data;
 
     memset(&data, 0, sizeof(data));
 
-    rc = nvIoctl(fd, _IOR(0x47, 0x01, data), &data);
+    rc = nvIoctl(fd, _NV_IOR(0x47, 0x01, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         *out = data.out;
@@ -27,12 +27,12 @@ Result nvioctlNvhostCtrlGpu_ZCullGetInfo(u32 fd, u32 out[40>>2]) {
     Result rc = 0;
 
     struct {
-        _out_ u32 out[40>>2];
+        __nv_out u32 out[40>>2];
     } data;
 
     memset(&data, 0, sizeof(data));
 
-    rc = nvIoctl(fd, _IOR(0x47, 0x02, data), &data);
+    rc = nvIoctl(fd, _NV_IOR(0x47, 0x02, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         memcpy(out, data.out, sizeof(data.out));
@@ -45,16 +45,16 @@ Result nvioctlNvhostCtrlGpu_GetCharacteristics(u32 fd, gpu_characteristics *out)
     Result rc = 0;
 
     struct {
-        _in_ u64 gc_buf_size; // must not be NULL, but gets overwritten with 0xA0=max_size
-        _in_ u64 gc_buf_addr; // ignored, but must not be NULL
-        _out_ gpu_characteristics gc;
+        __nv_in u64 gc_buf_size; // must not be NULL, but gets overwritten with 0xA0=max_size
+        __nv_in u64 gc_buf_addr; // ignored, but must not be NULL
+        __nv_out gpu_characteristics gc;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.gc_buf_size = sizeof(gpu_characteristics);
     data.gc_buf_addr = 1;
 
-    rc = nvIoctl(fd, _IOWR(0x47, 0x05, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x47, 0x05, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         memcpy(out, &data.gc, sizeof(gpu_characteristics));
@@ -68,14 +68,14 @@ Result nvioctlNvhostCtrlGpu_GetTpcMasks(u32 fd, u32 inval, u32 out[24>>2]) {
 
     // Fixme: This one is wrong.
     struct {
-        _inout_ u32 unk[24>>2];
+        __nv_inout u32 unk[24>>2];
     } data;
 
     memset(&data, 0, sizeof(data));
     data.unk[0] = inval;
     data.unk[2] = 1; //addr?
 
-    rc = nvIoctl(fd, _IOWR(0x47, 0x06, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x47, 0x06, data), &data);
     if (R_FAILED(rc)) return rc;
 
     memcpy(out, &data.unk, sizeof(data.unk));
@@ -87,12 +87,12 @@ Result nvioctlNvhostCtrlGpu_GetL2State(u32 fd, nvioctl_l2_state *out) {
     Result rc = 0;
 
     struct {
-        _out_ nvioctl_l2_state out;
+        __nv_out nvioctl_l2_state out;
     } data;
 
     memset(&data, 0, sizeof(data));
 
-    rc = nvIoctl(fd, _IOR(0x47, 0x14, data), &data);
+    rc = nvIoctl(fd, _NV_IOR(0x47, 0x14, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         memcpy(out, &data.out, sizeof(data.out));
@@ -7,13 +7,13 @@
 
 Result nvioctlNvhostCtrl_EventSignal(u32 fd, u32 event_id) {
     struct {
-        _in_ u32 event_id;
+        __nv_in u32 event_id;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.event_id = event_id;
 
-    return nvIoctl(fd, _IOWR(0x00, 0x1C, data), &data);
+    return nvIoctl(fd, _NV_IOWR(0x00, 0x1C, data), &data);
 }
 
 Result nvioctlNvhostCtrl_EventWait(u32 fd, u32 syncpt_id, u32 threshold, s32 timeout, u32 event_id, u32 *out)
@@ -21,10 +21,10 @@ Result nvioctlNvhostCtrl_EventWait(u32 fd, u32 syncpt_id, u32 threshold, s32 tim
     Result rc = 0;
 
     struct {
-        _in_ u32 syncpt_id;
-        _in_ u32 threshold;
-        _in_ s32 timeout;
-        _inout_ u32 value;
+        __nv_in u32 syncpt_id;
+        __nv_in u32 threshold;
+        __nv_in s32 timeout;
+        __nv_inout u32 value;
     } data;
 
     memset(&data, 0, sizeof(data));
@@ -33,7 +33,7 @@ Result nvioctlNvhostCtrl_EventWait(u32 fd, u32 syncpt_id, u32 threshold, s32 tim
     data.timeout = timeout;
     data.value = event_id;
 
-    rc = nvIoctl(fd, _IOWR(0x00, 0x1D, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x00, 0x1D, data), &data);
 
     if (R_SUCCEEDED(rc))
         *out = data.value;
@@ -43,11 +43,11 @@ Result nvioctlNvhostCtrl_EventWait(u32 fd, u32 syncpt_id, u32 threshold, s32 tim
 
 Result nvioctlNvhostCtrl_EventRegister(u32 fd, u32 event_id) {
     struct {
-        _in_ u32 event_id;
+        __nv_in u32 event_id;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.event_id = event_id;
 
-    return nvIoctl(fd, _IOWR(0x40, 0x1F, data), &data);
+    return nvIoctl(fd, _NV_IOWR(0x40, 0x1F, data), &data);
 }
@@ -9,14 +9,14 @@ Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle) {
     Result rc=0;
 
     struct {
-        _in_ u32 size;
-        _out_ u32 handle;
+        __nv_in u32 size;
+        __nv_out u32 handle;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.size = size;
 
-    rc = nvIoctl(fd, _IOWR(0x01, 0x01, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x01, 0x01, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         *nvmap_handle = data.handle;
@@ -29,14 +29,14 @@ Result nvioctlNvmap_FromId(u32 fd, u32 id, u32 *nvmap_handle) {
     Result rc=0;
 
     struct {
-        _in_ u32 id;
-        _out_ u32 handle;
+        __nv_in u32 id;
+        __nv_out u32 handle;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.id = id;
 
-    rc = nvIoctl(fd, _IOWR(0x01, 0x03, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x01, 0x03, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         *nvmap_handle = data.handle;
@@ -47,13 +47,13 @@ Result nvioctlNvmap_FromId(u32 fd, u32 id, u32 *nvmap_handle) {
 
 Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr) {
     struct {
-        _in_ u32 handle;
-        _in_ u32 heapmask;
-        _in_ u32 flags; // (0=read-only, 1=read-write)
-        _in_ u32 align;
-        _in_ u8 kind;
+        __nv_in u32 handle;
+        __nv_in u32 heapmask;
+        __nv_in u32 flags; // (0=read-only, 1=read-write)
+        __nv_in u32 align;
+        __nv_in u8 kind;
         u8 pad[7];
-        _in_ u64 addr;
+        __nv_in u64 addr;
     } data;
 
     memset(&data, 0, sizeof(data));
@@ -64,21 +64,21 @@ Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32
     data.kind = kind;
     data.addr = (u64)addr;
 
-    return nvIoctl(fd, _IOWR(0x01, 0x04, data), &data);
+    return nvIoctl(fd, _NV_IOWR(0x01, 0x04, data), &data);
 }
 
 Result nvioctlNvmap_GetId(u32 fd, u32 nvmap_handle, u32 *id) {
     Result rc=0;
 
     struct {
-        _out_ u32 id;
-        _in_ u32 handle;
+        __nv_out u32 id;
+        __nv_in u32 handle;
     } data;
 
     memset(&data, 0, sizeof(data));
     data.handle = nvmap_handle;
 
-    rc = nvIoctl(fd, _IOWR(0x01, 0x0E, data), &data);
+    rc = nvIoctl(fd, _NV_IOWR(0x01, 0x0E, data), &data);
 
     if (R_SUCCEEDED(rc)) {
         *id = data.id;
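For illustration, a rough caller-side sketch of the nvmap wrappers above (not from this commit): nvmap_fd is assumed to be obtained from the nvdrv service elsewhere, and the heapmask/flags/align/kind values are placeholders rather than authoritative choices.

// Sketch only: create an nvmap handle and back it with caller-provided memory
// using the wrappers shown above. Parameter values are illustrative.
Result example_nvmap_alloc(u32 nvmap_fd, void* mem, u32 size, u32 *handle_out) {
    Result rc = nvioctlNvmap_Create(nvmap_fd, size, handle_out);
    if (R_FAILED(rc)) return rc;

    // heapmask=0, flags=1 (read-write), align=0x1000, kind=0 (all placeholders)
    return nvioctlNvmap_Alloc(nvmap_fd, *handle_out, 0, 1, 0x1000, 0, mem);
}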
@@ -199,18 +199,18 @@ Result nvIoctl(u32 fd, u32 request, void* argp) {
         u32 request;
     } *raw;
 
-    size_t bufsize = _IOC_SIZE(request);
-    u32 dir = _IOC_DIR(request);
+    size_t bufsize = _NV_IOC_SIZE(request);
+    u32 dir = _NV_IOC_DIR(request);
 
     void* buf_send = NULL, *buf_recv = NULL;
     size_t buf_send_size = 0, buf_recv_size = 0;
 
-    if(dir & _IOC_WRITE) {
+    if(dir & _NV_IOC_WRITE) {
         buf_send = argp;
         buf_send_size = bufsize;
     }
 
-    if(dir & _IOC_READ) {
+    if(dir & _NV_IOC_READ) {
         buf_recv = argp;
         buf_recv_size = bufsize;
     }