mirror of https://github.com/switchbrew/libnx.git
NvBuffer: actually unmap and free the buffer in nvBufferFree
This commit is contained in:
parent 23fa46a8ee
commit 1cbc67e650
@@ -19,6 +19,7 @@ Result nvAddressSpaceReserveAtFixedAddr(NvAddressSpace* a, iova_t addr, u32 page
 Result nvAddressSpaceReserveFull(NvAddressSpace* a);
 
 Result nvAddressSpaceMapBuffer(NvAddressSpace* a, u32 fd, NvKind kind, iova_t* iova_out);
+Result nvAddressSpaceUnmapBuffer(NvAddressSpace* a, iova_t iova);
 
 struct NvChannel;
 Result nvAddressSpaceBindToChannel(NvAddressSpace* a, struct NvChannel* channel);
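With the new declaration in place, a GPU mapping obtained through nvAddressSpaceMapBuffer can finally be released symmetrically. A minimal usage sketch, assuming `as` and `nvmap_fd` are set up elsewhere; the NvKind value is only illustrative:

// Sketch: map an nvmap handle into the GPU address space, then unmap it.
Result map_then_unmap(NvAddressSpace* as, u32 nvmap_fd)
{
    iova_t iova = 0;
    Result rc = nvAddressSpaceMapBuffer(as, nvmap_fd, NvKind_Pitch, &iova);
    if (R_FAILED(rc))
        return rc;

    // ... hand `iova` to the GPU and wait for the work to finish ...

    return nvAddressSpaceUnmapBuffer(as, iova); // new in this commit
}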
@@ -170,12 +170,14 @@ Result nvioctlNvhostCtrlGpu_GetL2State(u32 fd, nvioctl_l2_state *out);
 Result nvioctlNvhostAsGpu_BindChannel(u32 fd, u32 channel_fd);
 Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags, u64 align, u64 *offset);
 Result nvioctlNvhostAsGpu_MapBufferEx(u32 fd, u32 flags, u32 kind, u32 nvmap_handle, u32 page_size, u64 buffer_offset, u64 mapping_size, u64 input_offset, u64 *offset);
+Result nvioctlNvhostAsGpu_UnmapBuffer(u32 fd, u64 offset);
 Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]);
 Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size, u32 flags);
 
 Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle);
 Result nvioctlNvmap_FromId(u32 fd, u32 id, u32 *nvmap_handle);
 Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr);
+Result nvioctlNvmap_Free(u32 fd, u32 nvmap_handle);
 Result nvioctlNvmap_GetId(u32 fd, u32 nvmap_handle, u32 *id);
 
 Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd);
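The same applies one level down: the raw nvmap wrappers now cover the full handle lifecycle. A rough sketch, assuming `nvmap_fd` is the open nvmap device fd and `mem` is a suitably aligned CPU buffer; the heap mask, flags, alignment and kind below are placeholder values:

// Sketch: create an nvmap handle, back it with CPU memory, then free it.
Result nvmap_handle_roundtrip(u32 nvmap_fd, void* mem, u32 size)
{
    u32 handle = 0;
    Result rc = nvioctlNvmap_Create(nvmap_fd, size, &handle);
    if (R_FAILED(rc))
        return rc;

    rc = nvioctlNvmap_Alloc(nvmap_fd, handle, 0, 0, 0x1000, 0, mem);

    // New in this commit: the handle no longer has to be leaked.
    nvioctlNvmap_Free(nvmap_fd, handle);
    return rc;
}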
@@ -62,6 +62,10 @@ Result nvAddressSpaceMapBuffer(
         a->fd, NvMapBufferFlags_IsCachable, kind, fd, 0x10000, 0, 0, 0, iova_out);
 }
 
+Result nvAddressSpaceUnmapBuffer(NvAddressSpace* a, iova_t iova) {
+    return nvioctlNvhostAsGpu_UnmapBuffer(a->fd, iova);
+}
+
 Result nvAddressSpaceBindToChannel(NvAddressSpace* a, NvChannel* channel) {
     return nvioctlNvhostAsGpu_BindChannel(a->fd, channel->fd);
 }
@@ -63,9 +63,6 @@ static Result _nvBufferCreate(
 
     rc = nvioctlNvmap_Create(g_nvmap_fd, size, &m->fd);
 
-    if (R_FAILED(rc))
-        m->fd = -1;
-
     if (R_SUCCEEDED(rc))
         rc = nvioctlNvmap_Alloc(
             g_nvmap_fd, m->fd, 0, flags, align, kind, m->cpu_addr);
@@ -102,17 +99,28 @@ void nvBufferFree(NvBuffer* m)
     if (!m->has_init)
         return;
 
-    // todo: nvAddressSpaceUnmapBuffer(m->gpu_addr)
-    // todo: nvAddressSpaceUnmapBuffer(m->gpu_addr_texture)
-    nvBufferMakeCpuCached(m);
-    free(m->cpu_addr);
-    m->cpu_addr = NULL;
+    if (m->gpu_addr_texture) {
+        nvAddressSpaceUnmapBuffer(m->addr_space, m->gpu_addr_texture);
+        m->gpu_addr_texture = 0;
+    }
+
+    if (m->gpu_addr) {
+        nvAddressSpaceUnmapBuffer(m->addr_space, m->gpu_addr);
+        m->gpu_addr = 0;
+    }
+
+    if (m->fd != -1) {
+        nvioctlNvmap_Free(g_nvmap_fd, m->fd);
+        m->fd = -1;
+    }
+
+    if (m->cpu_addr) {
+        nvBufferMakeCpuCached(m);
+        free(m->cpu_addr);
+        m->cpu_addr = NULL;
+    }
 
-    if (m->fd != -1)
-        nvClose(m->fd);
-
-    m->fd = -1;
+    m->has_init = false;
 }
 
 void* nvBufferGetCpuAddr(NvBuffer* m) {
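Taken together with the header changes, nvBufferFree now undoes everything the buffer owns: both GPU mappings, the nvmap handle, and the CPU-side allocation, and it marks the buffer as uninitialized. A hypothetical caller-side sketch; the creation call and its argument list are assumed and may not match the actual API at this commit, only nvBufferGetCpuAddr and nvBufferFree appear in this diff:

void buffer_lifecycle(NvAddressSpace* as)
{
    NvBuffer buf;
    // Hypothetical creation call (size, alignment, kind, address space) -- assumed signature.
    Result rc = nvBufferCreate(&buf, 0x10000, 0x1000, NvKind_Pitch, as);
    if (R_FAILED(rc))
        return;

    void* cpu = nvBufferGetCpuAddr(&buf);
    (void)cpu; // ... fill the buffer, map it for the GPU, submit work ...

    // With this commit, this unmaps gpu_addr/gpu_addr_texture, frees the nvmap
    // handle, releases the CPU allocation, and clears has_init.
    nvBufferFree(&buf);
}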
@@ -78,6 +78,17 @@ Result nvioctlNvhostAsGpu_MapBufferEx(
     return rc;
 }
 
+Result nvioctlNvhostAsGpu_UnmapBuffer(u32 fd, u64 offset) {
+    struct {
+        __nv_in u64 offset;
+    } data;
+
+    memset(&data, 0, sizeof(data));
+    data.offset = offset;
+
+    return nvIoctl(fd, _NV_IOWR(0x41, 0x05, data), &data);
+}
+
 Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]) {
     Result rc=0;
 
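The offset handed back by nvioctlNvhostAsGpu_MapBufferEx is exactly what the new ioctl expects as its input, so the two calls pair up directly. A minimal sketch, assuming `as_fd` is the nvhost-as-gpu fd and using placeholder flags/kind/page-size values:

// Sketch: the GPU VA returned by MapBufferEx is passed back to UnmapBuffer.
Result map_unmap_raw(u32 as_fd, u32 nvmap_handle)
{
    u64 gpu_va = 0;
    Result rc = nvioctlNvhostAsGpu_MapBufferEx(as_fd, 0, 0, nvmap_handle, 0x10000, 0, 0, 0, &gpu_va);
    if (R_FAILED(rc))
        return rc;

    return nvioctlNvhostAsGpu_UnmapBuffer(as_fd, gpu_va);
}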
@@ -66,6 +66,21 @@ Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32
     return nvIoctl(fd, _NV_IOWR(0x01, 0x04, data), &data);
 }
 
+Result nvioctlNvmap_Free(u32 fd, u32 nvmap_handle) {
+    struct {
+        __nv_in u32 handle;
+        u32 pad;
+        __nv_out u64 refcount;
+        __nv_out u32 size;
+        __nv_out u32 flags; // 1=NOT_FREED_YET
+    } data;
+
+    memset(&data, 0, sizeof(data));
+    data.handle = nvmap_handle;
+
+    return nvIoctl(fd, _NV_IOWR(0x01, 0x05, data), &data);
+}
+
 Result nvioctlNvmap_GetId(u32 fd, u32 nvmap_handle, u32 *id) {
     Result rc=0;
 
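nvioctlNvmap_Free discards the kernel's outputs. If a caller wanted to know whether the backing memory was actually released (the flags output, bit 0 = still referenced elsewhere, per the comment above), a hypothetical variant using the same struct layout could look like this; it is not part of libnx:

// Hypothetical helper (not in libnx): same ioctl, but reports the flags output.
Result nvmapFreeChecked(u32 fd, u32 nvmap_handle, bool* out_still_referenced)
{
    struct {
        __nv_in u32 handle;
        u32 pad;
        __nv_out u64 refcount;
        __nv_out u32 size;
        __nv_out u32 flags; // 1=NOT_FREED_YET
    } data;

    memset(&data, 0, sizeof(data));
    data.handle = nvmap_handle;

    Result rc = nvIoctl(fd, _NV_IOWR(0x01, 0x05, data), &data);
    if (R_SUCCEEDED(rc) && out_still_referenced)
        *out_still_referenced = (data.flags & 1) != 0;
    return rc;
}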