forked from eden-emu/eden

Merge pull request #6525 from ameerj/nvdec-fixes
nvdec: Fix Submit Ioctl data source, vic frame dimension computations

Fernando S
commit da4ca4f2f9

2 changed files with 50 additions and 56 deletions
nvhost_nvdec_common.cpp
@@ -19,26 +19,29 @@
 namespace Service::Nvidia::Devices {
 
 namespace {
-// Splice vectors will copy count amount of type T from the input vector into the dst vector.
+// Copies count amount of type T from the input vector into the dst vector.
+// Returns the number of bytes written into dst.
 template <typename T>
-std::size_t SpliceVectors(const std::vector<u8>& input, std::vector<T>& dst, std::size_t count,
-                          std::size_t offset) {
-    if (!dst.empty()) {
-        std::memcpy(dst.data(), input.data() + offset, count * sizeof(T));
+std::size_t SliceVectors(const std::vector<u8>& input, std::vector<T>& dst, std::size_t count,
+                         std::size_t offset) {
+    if (dst.empty()) {
+        return 0;
     }
-    return 0;
+    const size_t bytes_copied = count * sizeof(T);
+    std::memcpy(dst.data(), input.data() + offset, bytes_copied);
+    return bytes_copied;
 }
 
-// Write vectors will write data to the output buffer
+// Writes the data in src to an offset into the dst vector. The offset is specified in bytes
+// Returns the number of bytes written into dst.
 template <typename T>
 std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::size_t offset) {
     if (src.empty()) {
         return 0;
-    } else {
-        std::memcpy(dst.data() + offset, src.data(), src.size() * sizeof(T));
-        offset += src.size() * sizeof(T);
-        return offset;
     }
+    const size_t bytes_copied = src.size() * sizeof(T);
+    std::memcpy(dst.data() + offset, src.data(), bytes_copied);
+    return bytes_copied;
 }
 } // Anonymous namespace
 
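Note: the renamed SliceVectors, like the reworked WriteVectors, now returns the number of bytes it copied instead of an updated offset, so call sites accumulate the running offset themselves and an empty section simply contributes zero. A minimal standalone sketch of that calling pattern, assuming only the standard library (the u8 alias and the example payload are illustrative, not part of the driver):

#include <cstdint>
#include <cstring>
#include <vector>

using u8 = std::uint8_t;

// Copies count elements of type T out of input at the given byte offset.
// Returns the number of bytes consumed (zero when dst is empty).
template <typename T>
std::size_t SliceVectors(const std::vector<u8>& input, std::vector<T>& dst, std::size_t count,
                         std::size_t offset) {
    if (dst.empty()) {
        return 0;
    }
    const std::size_t bytes_copied = count * sizeof(T);
    std::memcpy(dst.data(), input.data() + offset, bytes_copied);
    return bytes_copied;
}

int main() {
    // Illustrative packed payload: a 4-byte header, two u32 values, then one u16 value
    // (byte order shown assumes a little-endian host).
    const std::vector<u8> input{0xAA, 0xBB, 0xCC, 0xDD, 0x01, 0x00, 0x00, 0x00,
                                0x02, 0x00, 0x00, 0x00, 0x03, 0x00};
    std::vector<std::uint32_t> words(2);
    std::vector<std::uint16_t> tail(1);
    std::vector<std::uint64_t> unused; // empty section contributes zero bytes

    std::size_t offset = 4; // skip the header
    offset += SliceVectors(input, words, words.size(), offset);
    offset += SliceVectors(input, unused, unused.size(), offset);
    offset += SliceVectors(input, tail, tail.size(), offset);
    // offset == 14: the whole payload was walked in order, empty sections included.
    return 0;
}

Returning the byte count rather than a new offset keeps an empty section from resetting the caller's position, which is what the offset += call sites in Submit below rely on.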
@@ -62,7 +65,6 @@ NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u
     LOG_DEBUG(Service_NVDRV, "called NVDEC Submit, cmd_buffer_count={}", params.cmd_buffer_count);
 
     // Instantiate param buffers
-    std::size_t offset = sizeof(IoctlSubmit);
     std::vector<CommandBuffer> command_buffers(params.cmd_buffer_count);
     std::vector<Reloc> relocs(params.relocation_count);
     std::vector<u32> reloc_shifts(params.relocation_count);
@@ -70,13 +72,14 @@ NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u
     std::vector<SyncptIncr> wait_checks(params.syncpoint_count);
     std::vector<Fence> fences(params.fence_count);
 
-    // Splice input into their respective buffers
-    offset = SpliceVectors(input, command_buffers, params.cmd_buffer_count, offset);
-    offset = SpliceVectors(input, relocs, params.relocation_count, offset);
-    offset = SpliceVectors(input, reloc_shifts, params.relocation_count, offset);
-    offset = SpliceVectors(input, syncpt_increments, params.syncpoint_count, offset);
-    offset = SpliceVectors(input, wait_checks, params.syncpoint_count, offset);
-    offset = SpliceVectors(input, fences, params.fence_count, offset);
+    // Slice input into their respective buffers
+    std::size_t offset = sizeof(IoctlSubmit);
+    offset += SliceVectors(input, command_buffers, params.cmd_buffer_count, offset);
+    offset += SliceVectors(input, relocs, params.relocation_count, offset);
+    offset += SliceVectors(input, reloc_shifts, params.relocation_count, offset);
+    offset += SliceVectors(input, syncpt_increments, params.syncpoint_count, offset);
+    offset += SliceVectors(input, wait_checks, params.syncpoint_count, offset);
+    offset += SliceVectors(input, fences, params.fence_count, offset);
 
     auto& gpu = system.GPU();
     if (gpu.UseNvdec()) {
@@ -88,35 +91,27 @@ NvResult nvhost_nvdec_common::Submit(const std::vector<u8>& input, std::vector<u
         }
     }
     for (const auto& cmd_buffer : command_buffers) {
-        auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
+        const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
         ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
-        const auto map = FindBufferMap(object->dma_map_addr);
-        if (!map) {
-            LOG_ERROR(Service_NVDRV, "Tried to submit an invalid offset 0x{:X} dma 0x{:X}",
-                      object->addr, object->dma_map_addr);
-            return NvResult::Success;
-        }
         Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
-        gpu.MemoryManager().ReadBlock(map->StartAddr() + cmd_buffer.offset, cmdlist.data(),
-                                      cmdlist.size() * sizeof(u32));
+        system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(),
+                                  cmdlist.size() * sizeof(u32));
         gpu.PushCommandBuffer(cmdlist);
     }
     if (gpu.UseNvdec()) {
-
         fences[0].value = syncpoint_manager.IncreaseSyncpoint(fences[0].id, 1);
-
         Tegra::ChCommandHeaderList cmdlist{{(4 << 28) | fences[0].id}};
         gpu.PushCommandBuffer(cmdlist);
     }
     std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
     // Some games expect command_buffers to be written back
     offset = sizeof(IoctlSubmit);
-    offset = WriteVectors(output, command_buffers, offset);
-    offset = WriteVectors(output, relocs, offset);
-    offset = WriteVectors(output, reloc_shifts, offset);
-    offset = WriteVectors(output, syncpt_increments, offset);
-    offset = WriteVectors(output, wait_checks, offset);
-    offset = WriteVectors(output, fences, offset);
+    offset += WriteVectors(output, command_buffers, offset);
+    offset += WriteVectors(output, relocs, offset);
+    offset += WriteVectors(output, reloc_shifts, offset);
+    offset += WriteVectors(output, syncpt_increments, offset);
+    offset += WriteVectors(output, wait_checks, offset);
+    offset += WriteVectors(output, fences, offset);
 
     return NvResult::Success;
 }
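For context, Submit treats the ioctl input as an IoctlSubmit header followed by six packed arrays whose lengths come from that header, and it mirrors the same layout back into output after the "Some games expect command_buffers to be written back" comment. A rough sketch of the traversal, with hypothetical placeholder element types standing in for the real CommandBuffer, Reloc, SyncptIncr and Fence definitions from the nvdrv headers:

#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins; the real structs are defined elsewhere in the driver code.
struct CommandBuffer { std::uint32_t memory_id, offset, word_count; };
struct Fence { std::uint32_t id, value; };

// Counts as carried in the IoctlSubmit header (field names follow the diff above).
struct SubmitCounts {
    std::uint32_t cmd_buffer_count;
    std::uint32_t relocation_count;
    std::uint32_t syncpoint_count;
    std::uint32_t fence_count;
};

// Total bytes the six SliceVectors calls walk through, in submission order.
std::size_t PayloadSize(const SubmitCounts& c, std::size_t header_size) {
    std::size_t offset = header_size;                     // sizeof(IoctlSubmit) in the driver
    offset += c.cmd_buffer_count * sizeof(CommandBuffer); // command_buffers
    offset += c.relocation_count * sizeof(std::uint32_t); // relocs (placeholder element size)
    offset += c.relocation_count * sizeof(std::uint32_t); // reloc_shifts
    offset += c.syncpoint_count * sizeof(std::uint32_t);  // syncpt_increments (placeholder)
    offset += c.syncpoint_count * sizeof(std::uint32_t);  // wait_checks (placeholder)
    offset += c.fence_count * sizeof(Fence);              // fences
    return offset;
}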
@@ -148,14 +143,14 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
     std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
     std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
 
-    SpliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
+    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
 
     auto& gpu = system.GPU();
 
-    for (auto& cmf_buff : cmd_buffer_handles) {
-        auto object{nvmap_dev->GetObject(cmf_buff.map_handle)};
+    for (auto& cmd_buffer : cmd_buffer_handles) {
+        auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)};
         if (!object) {
-            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmf_buff.map_handle);
+            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
             std::memcpy(output.data(), &params, output.size());
             return NvResult::InvalidState;
         }
@@ -170,7 +165,7 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
         if (!object->dma_map_addr) {
            LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
         } else {
-            cmf_buff.map_address = object->dma_map_addr;
+            cmd_buffer.map_address = object->dma_map_addr;
             AddBufferMap(object->dma_map_addr, object->size, object->addr,
                          object->status == nvmap::Object::Status::Allocated);
         }
@@ -186,14 +181,14 @@ NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vec
     IoctlMapBuffer params{};
     std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
     std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
-    SpliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
+    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
 
     auto& gpu = system.GPU();
 
-    for (auto& cmf_buff : cmd_buffer_handles) {
-        const auto object{nvmap_dev->GetObject(cmf_buff.map_handle)};
+    for (auto& cmd_buffer : cmd_buffer_handles) {
+        const auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)};
         if (!object) {
-            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmf_buff.map_handle);
+            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
             std::memcpy(output.data(), &params, output.size());
             return NvResult::InvalidState;
         }
vic.cpp
@@ -129,28 +129,27 @@ void Vic::Execute() {
 
         const std::size_t surface_width = config.surface_width_minus1 + 1;
         const std::size_t surface_height = config.surface_height_minus1 + 1;
-        const std::size_t half_width = surface_width / 2;
-        const std::size_t half_height = config.surface_height_minus1 / 2;
+        const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
+        const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));
+        const std::size_t half_width = frame_width / 2;
+        const std::size_t half_height = frame_height / 2;
         const std::size_t aligned_width = (surface_width + 0xff) & ~0xff;
 
         const auto* luma_ptr = frame->data[0];
         const auto* chroma_b_ptr = frame->data[1];
         const auto* chroma_r_ptr = frame->data[2];
-        const auto stride = frame->linesize[0];
-        const auto half_stride = frame->linesize[1];
+        const auto stride = static_cast<size_t>(frame->linesize[0]);
+        const auto half_stride = static_cast<size_t>(frame->linesize[1]);
 
         luma_buffer.resize(aligned_width * surface_height);
-        chroma_buffer.resize(aligned_width * half_height);
+        chroma_buffer.resize(aligned_width * surface_height / 2);
 
         // Populate luma buffer
-        for (std::size_t y = 0; y < surface_height - 1; ++y) {
+        for (std::size_t y = 0; y < frame_height; ++y) {
             const std::size_t src = y * stride;
             const std::size_t dst = y * aligned_width;
-
-            const std::size_t size = surface_width;
-
-            for (std::size_t offset = 0; offset < size; ++offset) {
-                luma_buffer[dst + offset] = luma_ptr[src + offset];
+            for (std::size_t x = 0; x < frame_width; ++x) {
+                luma_buffer[dst + x] = luma_ptr[src + x];
             }
         }
         gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
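The vic.cpp hunk clamps the copy to the dimensions the decoder actually produced (frame_width/frame_height) while still sizing the destination for the configured surface with a 256-byte-aligned pitch, and the chroma buffer is sized for the half-height planes of 4:2:0 video. A standalone sketch of the luma copy pattern, using plain byte buffers in place of the frame data and the GPU-visible surface (CopyLumaPlane is an illustrative helper name, not part of the codebase):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Copy a width x height 8-bit plane with source pitch `stride` into a destination
// whose rows are padded out to a 256-byte-aligned pitch, clamping to the smaller
// of the configured surface size and the decoded frame size.
std::vector<std::uint8_t> CopyLumaPlane(const std::uint8_t* luma, std::size_t stride,
                                        std::size_t frame_w, std::size_t frame_h,
                                        std::size_t surface_w, std::size_t surface_h) {
    const std::size_t copy_w = std::min(surface_w, frame_w);
    const std::size_t copy_h = std::min(surface_h, frame_h);
    const std::size_t aligned_w = (surface_w + 0xff) & ~std::size_t{0xff};

    std::vector<std::uint8_t> out(aligned_w * surface_h);
    for (std::size_t y = 0; y < copy_h; ++y) {
        const std::size_t src = y * stride;    // source rows use the decoder's pitch
        const std::size_t dst = y * aligned_w; // destination rows use the aligned pitch
        for (std::size_t x = 0; x < copy_w; ++x) {
            out[dst + x] = luma[src + x];
        }
    }
    return out;
}

Reading only the clamped region avoids walking past the decoder's buffer when the configured surface is larger than the decoded frame, while the padded rows of the destination are simply left zeroed.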