diff --git a/nx/include/switch/nvidia/address_space.h b/nx/include/switch/nvidia/address_space.h
index 0a28fc39..e16e5077 100644
--- a/nx/include/switch/nvidia/address_space.h
+++ b/nx/include/switch/nvidia/address_space.h
@@ -19,6 +19,7 @@ Result nvAddressSpaceReserveAtFixedAddr(NvAddressSpace* a, iova_t addr, u32 page
 Result nvAddressSpaceReserveFull(NvAddressSpace* a);
 
 Result nvAddressSpaceMapBuffer(NvAddressSpace* a, u32 fd, NvKind kind, iova_t* iova_out);
+Result nvAddressSpaceUnmapBuffer(NvAddressSpace* a, iova_t iova);
 
 struct NvChannel;
 Result nvAddressSpaceBindToChannel(NvAddressSpace* a, struct NvChannel* channel);
diff --git a/nx/include/switch/nvidia/ioctl.h b/nx/include/switch/nvidia/ioctl.h
index 67f6941c..663d0008 100644
--- a/nx/include/switch/nvidia/ioctl.h
+++ b/nx/include/switch/nvidia/ioctl.h
@@ -170,12 +170,14 @@ Result nvioctlNvhostCtrlGpu_GetL2State(u32 fd, nvioctl_l2_state *out);
 Result nvioctlNvhostAsGpu_BindChannel(u32 fd, u32 channel_fd);
 Result nvioctlNvhostAsGpu_AllocSpace(u32 fd, u32 pages, u32 page_size, u32 flags, u64 align, u64 *offset);
 Result nvioctlNvhostAsGpu_MapBufferEx(u32 fd, u32 flags, u32 kind, u32 nvmap_handle, u32 page_size, u64 buffer_offset, u64 mapping_size, u64 input_offset, u64 *offset);
+Result nvioctlNvhostAsGpu_UnmapBuffer(u32 fd, u64 offset);
 Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]);
 Result nvioctlNvhostAsGpu_InitializeEx(u32 fd, u32 big_page_size, u32 flags);
 
 Result nvioctlNvmap_Create(u32 fd, u32 size, u32 *nvmap_handle);
 Result nvioctlNvmap_FromId(u32 fd, u32 id, u32 *nvmap_handle);
 Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32 align, u8 kind, void* addr);
+Result nvioctlNvmap_Free(u32 fd, u32 nvmap_handle);
 Result nvioctlNvmap_GetId(u32 fd, u32 nvmap_handle, u32 *id);
 
 Result nvioctlChannel_SetNvmapFd(u32 fd, u32 nvmap_fd);
diff --git a/nx/source/nvidia/address_space.c b/nx/source/nvidia/address_space.c
index 1efdb405..772a1e03 100644
--- a/nx/source/nvidia/address_space.c
+++ b/nx/source/nvidia/address_space.c
@@ -62,6 +62,10 @@ Result nvAddressSpaceMapBuffer(
         a->fd, NvMapBufferFlags_IsCachable, kind, fd, 0x10000, 0, 0, 0, iova_out);
 }
 
+Result nvAddressSpaceUnmapBuffer(NvAddressSpace* a, iova_t iova) {
+    return nvioctlNvhostAsGpu_UnmapBuffer(a->fd, iova);
+}
+
 Result nvAddressSpaceBindToChannel(NvAddressSpace* a, NvChannel* channel) {
     return nvioctlNvhostAsGpu_BindChannel(a->fd, channel->fd);
 }
diff --git a/nx/source/nvidia/buffer.c b/nx/source/nvidia/buffer.c
index 8cb74c0e..e1425737 100644
--- a/nx/source/nvidia/buffer.c
+++ b/nx/source/nvidia/buffer.c
@@ -63,9 +63,6 @@ static Result _nvBufferCreate(
     rc = nvioctlNvmap_Create(g_nvmap_fd, size, &m->fd);
 
-    if (R_FAILED(rc))
-        m->fd = -1;
-
     if (R_SUCCEEDED(rc))
         rc = nvioctlNvmap_Alloc(
             g_nvmap_fd, m->fd, 0, flags, align, kind, m->cpu_addr);
 
@@ -102,17 +99,28 @@ void nvBufferFree(NvBuffer* m)
     if (!m->has_init)
         return;
 
-    // todo: nvAddressSpaceUnmapBuffer(m->gpu_addr)
-    // todo: nvAddressSpaceUnmapBuffer(m->gpu_addr_texture)
-    nvBufferMakeCpuCached(m);
+    if (m->gpu_addr_texture) {
+        nvAddressSpaceUnmapBuffer(m->addr_space, m->gpu_addr_texture);
+        m->gpu_addr_texture = 0;
+    }
 
-    free(m->cpu_addr);
-    m->cpu_addr = NULL;
+    if (m->gpu_addr) {
+        nvAddressSpaceUnmapBuffer(m->addr_space, m->gpu_addr);
+        m->gpu_addr = 0;
+    }
 
-    if (m->fd != -1)
-        nvClose(m->fd);
+    if (m->fd != -1) {
+        nvioctlNvmap_Free(g_nvmap_fd, m->fd);
+        m->fd = -1;
+    }
 
-    m->fd = -1;
+    if (m->cpu_addr) {
+        nvBufferMakeCpuCached(m);
+        free(m->cpu_addr);
+        m->cpu_addr = NULL;
+    }
+
+    m->has_init = false;
 }
 
 void* nvBufferGetCpuAddr(NvBuffer* m) {
diff --git a/nx/source/nvidia/ioctl/nvhost-as-gpu.c b/nx/source/nvidia/ioctl/nvhost-as-gpu.c
index af26cc02..d1f0bc7e 100644
--- a/nx/source/nvidia/ioctl/nvhost-as-gpu.c
+++ b/nx/source/nvidia/ioctl/nvhost-as-gpu.c
@@ -78,6 +78,17 @@ Result nvioctlNvhostAsGpu_MapBufferEx(
     return rc;
 }
 
+Result nvioctlNvhostAsGpu_UnmapBuffer(u32 fd, u64 offset) {
+    struct {
+        __nv_in u64 offset;
+    } data;
+
+    memset(&data, 0, sizeof(data));
+    data.offset = offset;
+
+    return nvIoctl(fd, _NV_IOWR(0x41, 0x05, data), &data);
+}
+
 Result nvioctlNvhostAsGpu_GetVARegions(u32 fd, nvioctl_va_region regions[2]) {
     Result rc=0;
 
diff --git a/nx/source/nvidia/ioctl/nvmap.c b/nx/source/nvidia/ioctl/nvmap.c
index 078e4eac..c5f84405 100644
--- a/nx/source/nvidia/ioctl/nvmap.c
+++ b/nx/source/nvidia/ioctl/nvmap.c
@@ -66,6 +66,21 @@ Result nvioctlNvmap_Alloc(u32 fd, u32 nvmap_handle, u32 heapmask, u32 flags, u32
     return nvIoctl(fd, _NV_IOWR(0x01, 0x04, data), &data);
 }
 
+Result nvioctlNvmap_Free(u32 fd, u32 nvmap_handle) {
+    struct {
+        __nv_in  u32 handle;
+        u32 pad;
+        __nv_out u64 refcount;
+        __nv_out u32 size;
+        __nv_out u32 flags; // 1=NOT_FREED_YET
+    } data;
+
+    memset(&data, 0, sizeof(data));
+    data.handle = nvmap_handle;
+
+    return nvIoctl(fd, _NV_IOWR(0x01, 0x05, data), &data);
+}
+
 Result nvioctlNvmap_GetId(u32 fd, u32 nvmap_handle, u32 *id) {
     Result rc=0;
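
For context, a rough usage sketch of how the new unmap/free calls pair with the existing create/alloc/map path (not part of the patch). The helper name map_and_release, the alloc flags/alignment, and how the nvmap fd and address space are obtained are illustrative assumptions, not libnx API guarantees; only the function signatures come from the headers touched above.

// Usage sketch only -- not part of this patch.
#include <switch.h>

// Maps an nvmap-backed buffer into a GPU address space, then tears it
// down again with the unmap/free calls added by this patch.
static Result map_and_release(NvAddressSpace* as, u32 nvmap_fd,
                              void* cpu_addr, u32 size, NvKind kind)
{
    u32 handle = 0;
    iova_t iova = 0;

    // Create and back the nvmap handle (heapmask/flags/align/kind here
    // are placeholders; cpu_addr is assumed suitably aligned).
    Result rc = nvioctlNvmap_Create(nvmap_fd, size, &handle);

    if (R_SUCCEEDED(rc))
        rc = nvioctlNvmap_Alloc(nvmap_fd, handle, 0, 0, 0x1000, 0, cpu_addr);

    // Map the handle into the GPU address space.
    if (R_SUCCEEDED(rc))
        rc = nvAddressSpaceMapBuffer(as, handle, kind, &iova);

    if (R_SUCCEEDED(rc)) {
        // ... use the buffer on the GPU ...

        // Tear down in reverse order: drop the GPU mapping first,
        // then release the nvmap handle.
        nvAddressSpaceUnmapBuffer(as, iova);
        nvioctlNvmap_Free(nvmap_fd, handle);
    }

    return rc;
}

This mirrors the order nvBufferFree now uses: GPU mappings are unmapped before the backing nvmap handle is freed, and the CPU allocation is released last.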