	Merge pull request #5136 from lioncash/video-shadow3
video_core: Resolve more variable shadowing scenarios pt.3
commit 69af6ada2f
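The recurring fix throughout the diff below is a rename: constructor parameters that shared a name with the data members they initialize gain a trailing underscore, so the parameter no longer shadows the member inside the constructor. A minimal standalone sketch of the convention, using an invented class rather than anything from this commit:

#include <string>
#include <utility>

class Sample {
public:
    // Before the rename the parameters were simply `name` and `value`,
    // hiding the members of the same name for the body of the constructor.
    // The trailing underscore keeps the member names unchanged while
    // silencing member-shadowing warnings such as MSVC /we4458 and
    // GCC -Wshadow.
    Sample(std::string name_, int value_) : name{std::move(name_)}, value{value_} {}

    const std::string& Name() const {
        return name;
    }

    int Value() const {
        return value;
    }

private:
    std::string name;
    int value;
};

The same idea covers locals and parameters that collided with other names in scope: the inner declaration gets a more specific name (data_size, copy_size, new_name, host_layer_size) instead of changing the outer one.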
				| @ -10,7 +10,7 @@ | ||||
| namespace detail { | ||||
| template <typename Func> | ||||
| struct ScopeExitHelper { | ||||
|     explicit ScopeExitHelper(Func&& func) : func(std::move(func)) {} | ||||
|     explicit ScopeExitHelper(Func&& func_) : func(std::move(func_)) {} | ||||
|     ~ScopeExitHelper() { | ||||
|         if (active) { | ||||
|             func(); | ||||
|  | ||||
| @ -52,8 +52,8 @@ public: | ||||
| template <typename T> | ||||
| class Field : public FieldInterface { | ||||
| public: | ||||
|     Field(FieldType type, std::string name, T value) | ||||
|         : name(std::move(name)), type(type), value(std::move(value)) {} | ||||
|     Field(FieldType type_, std::string name_, T value_) | ||||
|         : name(std::move(name_)), type(type_), value(std::move(value_)) {} | ||||
| 
 | ||||
|     Field(const Field&) = default; | ||||
|     Field& operator=(const Field&) = default; | ||||
|  | ||||
| @ -17,9 +17,9 @@ namespace FileSys { | ||||
| template <std::size_t size> | ||||
| class ArrayVfsFile : public VfsFile { | ||||
| public: | ||||
|     explicit ArrayVfsFile(const std::array<u8, size>& data, std::string name = "", | ||||
|                           VirtualDir parent = nullptr) | ||||
|         : data(data), name(std::move(name)), parent(std::move(parent)) {} | ||||
|     explicit ArrayVfsFile(const std::array<u8, size>& data_, std::string name_ = "", | ||||
|                           VirtualDir parent_ = nullptr) | ||||
|         : data(data_), name(std::move(name_)), parent(std::move(parent_)) {} | ||||
| 
 | ||||
|     std::string GetName() const override { | ||||
|         return name; | ||||
| @ -51,12 +51,12 @@ public: | ||||
|         return read; | ||||
|     } | ||||
| 
 | ||||
|     std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override { | ||||
|     std::size_t Write(const u8* data_, std::size_t length, std::size_t offset) override { | ||||
|         return 0; | ||||
|     } | ||||
| 
 | ||||
|     bool Rename(std::string_view name) override { | ||||
|         this->name = name; | ||||
|     bool Rename(std::string_view new_name) override { | ||||
|         name = new_name; | ||||
|         return true; | ||||
|     } | ||||
| 
 | ||||
|  | ||||
| @ -222,9 +222,9 @@ public: | ||||
| 
 | ||||
| public: | ||||
|     constexpr MemoryBlock() = default; | ||||
|     constexpr MemoryBlock(VAddr addr, std::size_t num_pages, MemoryState state, | ||||
|                           MemoryPermission perm, MemoryAttribute attribute) | ||||
|         : addr{addr}, num_pages(num_pages), state{state}, perm{perm}, attribute{attribute} {} | ||||
|     constexpr MemoryBlock(VAddr addr_, std::size_t num_pages_, MemoryState state_, | ||||
|                           MemoryPermission perm_, MemoryAttribute attribute_) | ||||
|         : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {} | ||||
| 
 | ||||
|     constexpr VAddr GetAddress() const { | ||||
|         return addr; | ||||
|  | ||||
| @ -297,13 +297,20 @@ if (ENABLE_NSIGHT_AFTERMATH) | ||||
| endif() | ||||
| 
 | ||||
| if (MSVC) | ||||
|     target_compile_options(video_core PRIVATE /we4267) | ||||
|     target_compile_options(video_core PRIVATE | ||||
|         /we4267 # 'var' : conversion from 'size_t' to 'type', possible loss of data | ||||
|         /we4456 # Declaration of 'identifier' hides previous local declaration | ||||
|         /we4457 # Declaration of 'identifier' hides function parameter | ||||
|         /we4458 # Declaration of 'identifier' hides class member | ||||
|         /we4459 # Declaration of 'identifier' hides global declaration | ||||
|     ) | ||||
| else() | ||||
|     target_compile_options(video_core PRIVATE | ||||
|         -Werror=conversion | ||||
|         -Wno-error=sign-conversion | ||||
|         -Werror=pessimizing-move | ||||
|         -Werror=redundant-move | ||||
|         -Werror=shadow | ||||
|         -Werror=switch | ||||
|         -Werror=type-limits | ||||
|         -Werror=unused-variable | ||||
|  | ||||
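For context on the warnings promoted above, here is a small sketch, not taken from the yuzu sources, of the kind of code they reject once treated as errors: an inner declaration that reuses the name of a function parameter.

#include <cstddef>

std::size_t SumPositive(const int* values, std::size_t size) {
    std::size_t total = 0;
    for (std::size_t i = 0; i < size; ++i) {
        // Rejected under the flags above: redeclaring `size` here would hide
        // the function parameter (MSVC C4457, GCC/Clang -Wshadow).
        //     const int size = values[i];
        const int value = values[i]; // a distinct name keeps both visible
        if (value > 0) {
            total += static_cast<std::size_t>(value);
        }
    }
    return total;
}

MSVC splits the diagnostic by what is hidden (C4456 for locals, C4457 for parameters, C4458 for class members, C4459 for globals); GCC and Clang report the case above under -Wshadow, which the non-MSVC branch escalates with -Werror=shadow.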
| @ -28,8 +28,8 @@ namespace VideoCommon { | ||||
| template <class QueryCache, class HostCounter> | ||||
| class CounterStreamBase { | ||||
| public: | ||||
|     explicit CounterStreamBase(QueryCache& cache, VideoCore::QueryType type) | ||||
|         : cache{cache}, type{type} {} | ||||
|     explicit CounterStreamBase(QueryCache& cache_, VideoCore::QueryType type_) | ||||
|         : cache{cache_}, type{type_} {} | ||||
| 
 | ||||
|     /// Updates the state of the stream, enabling or disabling as needed.
 | ||||
|     void Update(bool enabled) { | ||||
| @ -334,8 +334,8 @@ private: | ||||
| template <class HostCounter> | ||||
| class CachedQueryBase { | ||||
| public: | ||||
|     explicit CachedQueryBase(VAddr cpu_addr, u8* host_ptr) | ||||
|         : cpu_addr{cpu_addr}, host_ptr{host_ptr} {} | ||||
|     explicit CachedQueryBase(VAddr cpu_addr_, u8* host_ptr_) | ||||
|         : cpu_addr{cpu_addr_}, host_ptr{host_ptr_} {} | ||||
|     virtual ~CachedQueryBase() = default; | ||||
| 
 | ||||
|     CachedQueryBase(CachedQueryBase&&) noexcept = default; | ||||
|  | ||||
| @ -22,11 +22,11 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs; | ||||
| 
 | ||||
| MICROPROFILE_DEFINE(OpenGL_Buffer_Download, "OpenGL", "Buffer Download", MP_RGB(192, 192, 128)); | ||||
| 
 | ||||
| Buffer::Buffer(const Device& device, VAddr cpu_addr, std::size_t size) | ||||
|     : VideoCommon::BufferBlock{cpu_addr, size} { | ||||
| Buffer::Buffer(const Device& device_, VAddr cpu_addr_, std::size_t size_) | ||||
|     : BufferBlock{cpu_addr_, size_} { | ||||
|     gl_buffer.Create(); | ||||
|     glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size), nullptr, GL_DYNAMIC_DRAW); | ||||
|     if (device.UseAssemblyShaders() || device.HasVertexBufferUnifiedMemory()) { | ||||
|     glNamedBufferData(gl_buffer.handle, static_cast<GLsizeiptr>(size_), nullptr, GL_DYNAMIC_DRAW); | ||||
|     if (device_.UseAssemblyShaders() || device_.HasVertexBufferUnifiedMemory()) { | ||||
|         glMakeNamedBufferResidentNV(gl_buffer.handle, GL_READ_WRITE); | ||||
|         glGetNamedBufferParameterui64vNV(gl_buffer.handle, GL_BUFFER_GPU_ADDRESS_NV, &gpu_address); | ||||
|     } | ||||
| @ -34,14 +34,14 @@ Buffer::Buffer(const Device& device, VAddr cpu_addr, std::size_t size) | ||||
| 
 | ||||
| Buffer::~Buffer() = default; | ||||
| 
 | ||||
| void Buffer::Upload(std::size_t offset, std::size_t size, const u8* data) { | ||||
|     glNamedBufferSubData(Handle(), static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size), | ||||
|                          data); | ||||
| void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { | ||||
|     glNamedBufferSubData(Handle(), static_cast<GLintptr>(offset), | ||||
|                          static_cast<GLsizeiptr>(data_size), data); | ||||
| } | ||||
| 
 | ||||
| void Buffer::Download(std::size_t offset, std::size_t size, u8* data) { | ||||
| void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { | ||||
|     MICROPROFILE_SCOPE(OpenGL_Buffer_Download); | ||||
|     const GLsizeiptr gl_size = static_cast<GLsizeiptr>(size); | ||||
|     const GLsizeiptr gl_size = static_cast<GLsizeiptr>(data_size); | ||||
|     const GLintptr gl_offset = static_cast<GLintptr>(offset); | ||||
|     if (read_buffer.handle == 0) { | ||||
|         read_buffer.Create(); | ||||
| @ -54,16 +54,16 @@ void Buffer::Download(std::size_t offset, std::size_t size, u8* data) { | ||||
| } | ||||
| 
 | ||||
| void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, | ||||
|                       std::size_t size) { | ||||
|                       std::size_t copy_size) { | ||||
|     glCopyNamedBufferSubData(src.Handle(), Handle(), static_cast<GLintptr>(src_offset), | ||||
|                              static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(size)); | ||||
|                              static_cast<GLintptr>(dst_offset), static_cast<GLsizeiptr>(copy_size)); | ||||
| } | ||||
| 
 | ||||
| OGLBufferCache::OGLBufferCache(VideoCore::RasterizerInterface& rasterizer, | ||||
|                                Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, | ||||
|                                const Device& device_, std::size_t stream_size) | ||||
|     : GenericBufferCache{rasterizer, gpu_memory, cpu_memory, | ||||
|                          std::make_unique<OGLStreamBuffer>(device_, stream_size, true)}, | ||||
| OGLBufferCache::OGLBufferCache(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                                Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | ||||
|                                const Device& device_, std::size_t stream_size_) | ||||
|     : GenericBufferCache{rasterizer_, gpu_memory_, cpu_memory_, | ||||
|                          std::make_unique<OGLStreamBuffer>(device_, stream_size_, true)}, | ||||
|       device{device_} { | ||||
|     if (!device.HasFastBufferSubData()) { | ||||
|         return; | ||||
|  | ||||
| @ -25,15 +25,15 @@ class RasterizerOpenGL; | ||||
| 
 | ||||
| class Buffer : public VideoCommon::BufferBlock { | ||||
| public: | ||||
|     explicit Buffer(const Device& device, VAddr cpu_addr, std::size_t size); | ||||
|     explicit Buffer(const Device& device_, VAddr cpu_addr_, std::size_t size_); | ||||
|     ~Buffer(); | ||||
| 
 | ||||
|     void Upload(std::size_t offset, std::size_t size, const u8* data); | ||||
|     void Upload(std::size_t offset, std::size_t data_size, const u8* data); | ||||
| 
 | ||||
|     void Download(std::size_t offset, std::size_t size, u8* data); | ||||
|     void Download(std::size_t offset, std::size_t data_size, u8* data); | ||||
| 
 | ||||
|     void CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, | ||||
|                   std::size_t size); | ||||
|                   std::size_t copy_size); | ||||
| 
 | ||||
|     GLuint Handle() const noexcept { | ||||
|         return gl_buffer.handle; | ||||
| @ -52,9 +52,9 @@ private: | ||||
| using GenericBufferCache = VideoCommon::BufferCache<Buffer, GLuint, OGLStreamBuffer>; | ||||
| class OGLBufferCache final : public GenericBufferCache { | ||||
| public: | ||||
|     explicit OGLBufferCache(VideoCore::RasterizerInterface& rasterizer, | ||||
|                             Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, | ||||
|                             const Device& device, std::size_t stream_size); | ||||
|     explicit OGLBufferCache(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                             Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | ||||
|                             const Device& device_, std::size_t stream_size_); | ||||
|     ~OGLBufferCache(); | ||||
| 
 | ||||
|     BufferInfo GetEmptyBuffer(std::size_t) override; | ||||
|  | ||||
| @ -30,11 +30,9 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) { | ||||
| 
 | ||||
| } // Anonymous namespace
 | ||||
| 
 | ||||
| QueryCache::QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, | ||||
|                        Tegra::MemoryManager& gpu_memory) | ||||
|     : VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter>( | ||||
|           rasterizer, maxwell3d, gpu_memory), | ||||
|       gl_rasterizer{rasterizer} {} | ||||
| QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                        Tegra::MemoryManager& gpu_memory_) | ||||
|     : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {} | ||||
| 
 | ||||
| QueryCache::~QueryCache() = default; | ||||
| 
 | ||||
| @ -59,10 +57,11 @@ bool QueryCache::AnyCommandQueued() const noexcept { | ||||
|     return gl_rasterizer.AnyCommandQueued(); | ||||
| } | ||||
| 
 | ||||
| HostCounter::HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency, | ||||
| HostCounter::HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency_, | ||||
|                          VideoCore::QueryType type_) | ||||
|     : HostCounterBase<QueryCache, HostCounter>{std::move(dependency)}, cache{cache_}, type{type_}, | ||||
|       query{cache.AllocateQuery(type)} { | ||||
|     : HostCounterBase{std::move(dependency_)}, cache{cache_}, type{type_}, query{ | ||||
|                                                                                cache.AllocateQuery( | ||||
|                                                                                    type)} { | ||||
|     glBeginQuery(GetTarget(type), query.handle); | ||||
| } | ||||
| 
 | ||||
| @ -86,14 +85,14 @@ u64 HostCounter::BlockingQuery() const { | ||||
|     return static_cast<u64>(value); | ||||
| } | ||||
| 
 | ||||
| CachedQuery::CachedQuery(QueryCache& cache_, VideoCore::QueryType type_, VAddr cpu_addr, | ||||
|                          u8* host_ptr) | ||||
|     : CachedQueryBase<HostCounter>{cpu_addr, host_ptr}, cache{&cache_}, type{type_} {} | ||||
| CachedQuery::CachedQuery(QueryCache& cache_, VideoCore::QueryType type_, VAddr cpu_addr_, | ||||
|                          u8* host_ptr_) | ||||
|     : CachedQueryBase{cpu_addr_, host_ptr_}, cache{&cache_}, type{type_} {} | ||||
| 
 | ||||
| CachedQuery::~CachedQuery() = default; | ||||
| 
 | ||||
| CachedQuery::CachedQuery(CachedQuery&& rhs) noexcept | ||||
|     : CachedQueryBase<HostCounter>(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {} | ||||
|     : CachedQueryBase(std::move(rhs)), cache{rhs.cache}, type{rhs.type} {} | ||||
| 
 | ||||
| CachedQuery& CachedQuery::operator=(CachedQuery&& rhs) noexcept { | ||||
|     cache = rhs.cache; | ||||
|  | ||||
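Another pattern visible in the query-cache, shader-cache, and buffer-cache constructors in this diff is purely cosmetic: member-initializer lists that previously spelled out the full base-class specialization now rely on the base's injected-class-name, which a derived class can use without repeating the template arguments. A standalone sketch with invented types:

#include <cstddef>

template <typename Derived, typename Entry>
class CacheBase {
public:
    explicit CacheBase(std::size_t capacity_) : capacity{capacity_} {}

    std::size_t Capacity() const {
        return capacity;
    }

private:
    std::size_t capacity;
};

class MyCache : public CacheBase<MyCache, int> {
public:
    // Equivalent to : CacheBase<MyCache, int>{capacity_}, just shorter;
    // the injected-class-name CacheBase already names the specialization.
    explicit MyCache(std::size_t capacity_) : CacheBase{capacity_} {}
};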
| @ -29,8 +29,8 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>; | ||||
| class QueryCache final | ||||
|     : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { | ||||
| public: | ||||
|     explicit QueryCache(RasterizerOpenGL& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, | ||||
|                         Tegra::MemoryManager& gpu_memory); | ||||
|     explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                         Tegra::MemoryManager& gpu_memory_); | ||||
|     ~QueryCache(); | ||||
| 
 | ||||
|     OGLQuery AllocateQuery(VideoCore::QueryType type); | ||||
| @ -46,7 +46,7 @@ private: | ||||
| 
 | ||||
| class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> { | ||||
| public: | ||||
|     explicit HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency, | ||||
|     explicit HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency_, | ||||
|                          VideoCore::QueryType type_); | ||||
|     ~HostCounter(); | ||||
| 
 | ||||
| @ -62,8 +62,8 @@ private: | ||||
| 
 | ||||
| class CachedQuery final : public VideoCommon::CachedQueryBase<HostCounter> { | ||||
| public: | ||||
|     explicit CachedQuery(QueryCache& cache_, VideoCore::QueryType type_, VAddr cpu_addr, | ||||
|                          u8* host_ptr); | ||||
|     explicit CachedQuery(QueryCache& cache_, VideoCore::QueryType type_, VAddr cpu_addr_, | ||||
|                          u8* host_ptr_); | ||||
|     ~CachedQuery() override; | ||||
| 
 | ||||
|     CachedQuery(CachedQuery&& rhs) noexcept; | ||||
|  | ||||
| @ -149,19 +149,19 @@ void UpdateBindlessSSBOs(GLenum target, const BindlessSSBO* ssbos, size_t num_ss | ||||
| 
 | ||||
| } // Anonymous namespace
 | ||||
| 
 | ||||
| RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_, | ||||
|                                    Core::Memory::Memory& cpu_memory, const Device& device_, | ||||
| RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | ||||
|                                    Core::Memory::Memory& cpu_memory_, const Device& device_, | ||||
|                                    ScreenInfo& screen_info_, ProgramManager& program_manager_, | ||||
|                                    StateTracker& state_tracker_) | ||||
|     : RasterizerAccelerated{cpu_memory}, gpu(gpu_), maxwell3d(gpu.Maxwell3D()), | ||||
|     : RasterizerAccelerated{cpu_memory_}, gpu(gpu_), maxwell3d(gpu.Maxwell3D()), | ||||
|       kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), | ||||
|       screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_), | ||||
|       texture_cache(*this, maxwell3d, gpu_memory, device, state_tracker), | ||||
|       shader_cache(*this, emu_window, gpu, maxwell3d, kepler_compute, gpu_memory, device), | ||||
|       shader_cache(*this, emu_window_, gpu, maxwell3d, kepler_compute, gpu_memory, device), | ||||
|       query_cache(*this, maxwell3d, gpu_memory), | ||||
|       buffer_cache(*this, gpu_memory, cpu_memory, device, STREAM_BUFFER_SIZE), | ||||
|       buffer_cache(*this, gpu_memory, cpu_memory_, device, STREAM_BUFFER_SIZE), | ||||
|       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache), | ||||
|       async_shaders(emu_window) { | ||||
|       async_shaders(emu_window_) { | ||||
|     CheckExtensions(); | ||||
| 
 | ||||
|     unified_uniform_buffer.Create(); | ||||
|  | ||||
| @ -62,10 +62,10 @@ static_assert(sizeof(BindlessSSBO) * CHAR_BIT == 128); | ||||
| 
 | ||||
| class RasterizerOpenGL : public VideoCore::RasterizerAccelerated { | ||||
| public: | ||||
|     explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu, | ||||
|                               Core::Memory::Memory& cpu_memory, const Device& device, | ||||
|                               ScreenInfo& screen_info, ProgramManager& program_manager, | ||||
|                               StateTracker& state_tracker); | ||||
|     explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | ||||
|                               Core::Memory::Memory& cpu_memory_, const Device& device_, | ||||
|                               ScreenInfo& screen_info_, ProgramManager& program_manager_, | ||||
|                               StateTracker& state_tracker_); | ||||
|     ~RasterizerOpenGL() override; | ||||
| 
 | ||||
|     void Draw(bool is_indexed, bool is_instanced) override; | ||||
|  | ||||
| @ -318,14 +318,13 @@ std::unique_ptr<Shader> Shader::CreateFromCache(const ShaderParameters& params, | ||||
|         precompiled_shader.registry, precompiled_shader.entries, precompiled_shader.program)); | ||||
| } | ||||
| 
 | ||||
| ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, | ||||
| ShaderCacheOpenGL::ShaderCacheOpenGL(RasterizerOpenGL& rasterizer_, | ||||
|                                      Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | ||||
|                                      Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                                      Tegra::Engines::KeplerCompute& kepler_compute_, | ||||
|                                      Tegra::MemoryManager& gpu_memory_, const Device& device_) | ||||
|     : VideoCommon::ShaderCache<Shader>{rasterizer}, emu_window{emu_window_}, gpu{gpu_}, | ||||
|       gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, | ||||
|       kepler_compute{kepler_compute_}, device{device_} {} | ||||
|     : ShaderCache{rasterizer_}, emu_window{emu_window_}, gpu{gpu_}, gpu_memory{gpu_memory_}, | ||||
|       maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_}, device{device_} {} | ||||
| 
 | ||||
| ShaderCacheOpenGL::~ShaderCacheOpenGL() = default; | ||||
| 
 | ||||
|  | ||||
| @ -119,10 +119,11 @@ private: | ||||
| 
 | ||||
| class ShaderCacheOpenGL final : public VideoCommon::ShaderCache<Shader> { | ||||
| public: | ||||
|     explicit ShaderCacheOpenGL(RasterizerOpenGL& rasterizer, Core::Frontend::EmuWindow& emu_window, | ||||
|                                Tegra::GPU& gpu, Tegra::Engines::Maxwell3D& maxwell3d, | ||||
|                                Tegra::Engines::KeplerCompute& kepler_compute, | ||||
|                                Tegra::MemoryManager& gpu_memory, const Device& device); | ||||
|     explicit ShaderCacheOpenGL(RasterizerOpenGL& rasterizer_, | ||||
|                                Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu, | ||||
|                                Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                                Tegra::Engines::KeplerCompute& kepler_compute_, | ||||
|                                Tegra::MemoryManager& gpu_memory_, const Device& device_); | ||||
|     ~ShaderCacheOpenGL() override; | ||||
| 
 | ||||
|     /// Loads disk cache for the current game
 | ||||
|  | ||||
| @ -25,8 +25,8 @@ using ImageEntry = VideoCommon::Shader::Image; | ||||
| 
 | ||||
| class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer { | ||||
| public: | ||||
|     explicit ConstBufferEntry(u32 max_offset, bool is_indirect, u32 index_) | ||||
|         : ConstBuffer{max_offset, is_indirect}, index{index_} {} | ||||
|     explicit ConstBufferEntry(u32 max_offset_, bool is_indirect_, u32 index_) | ||||
|         : ConstBuffer{max_offset_, is_indirect_}, index{index_} {} | ||||
| 
 | ||||
|     u32 GetIndex() const { | ||||
|         return index; | ||||
|  | ||||
| @ -347,14 +347,14 @@ void CachedSurface::UploadTextureMipmap(u32 level, const std::vector<u8>& stagin | ||||
|                                           internal_format, image_size, buffer); | ||||
|             break; | ||||
|         case SurfaceTarget::TextureCubemap: { | ||||
|             const std::size_t layer_size{params.GetHostLayerSize(level)}; | ||||
|             const std::size_t host_layer_size{params.GetHostLayerSize(level)}; | ||||
|             for (std::size_t face = 0; face < params.depth; ++face) { | ||||
|                 glCompressedTextureSubImage3D(texture.handle, level, 0, 0, static_cast<GLint>(face), | ||||
|                                               static_cast<GLsizei>(params.GetMipWidth(level)), | ||||
|                                               static_cast<GLsizei>(params.GetMipHeight(level)), 1, | ||||
|                                               internal_format, static_cast<GLsizei>(layer_size), | ||||
|                                               buffer); | ||||
|                 buffer += layer_size; | ||||
|                                               internal_format, | ||||
|                                               static_cast<GLsizei>(host_layer_size), buffer); | ||||
|                 buffer += host_layer_size; | ||||
|             } | ||||
|             break; | ||||
|         } | ||||
| @ -532,12 +532,12 @@ OGLTextureView CachedSurfaceView::CreateTextureView() const { | ||||
|     return texture_view; | ||||
| } | ||||
| 
 | ||||
| TextureCacheOpenGL::TextureCacheOpenGL(VideoCore::RasterizerInterface& rasterizer, | ||||
|                                        Tegra::Engines::Maxwell3D& maxwell3d, | ||||
|                                        Tegra::MemoryManager& gpu_memory, const Device& device, | ||||
| TextureCacheOpenGL::TextureCacheOpenGL(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                                        Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                                        Tegra::MemoryManager& gpu_memory_, const Device& device_, | ||||
|                                        StateTracker& state_tracker_) | ||||
|     : TextureCacheBase{rasterizer, maxwell3d, gpu_memory, device.HasASTC()}, state_tracker{ | ||||
|                                                                                  state_tracker_} { | ||||
|     : TextureCacheBase{rasterizer_, maxwell3d_, gpu_memory_, device_.HasASTC()}, | ||||
|       state_tracker{state_tracker_} { | ||||
|     src_framebuffer.Create(); | ||||
|     dst_framebuffer.Create(); | ||||
| } | ||||
|  | ||||
| @ -130,9 +130,9 @@ private: | ||||
| 
 | ||||
| class TextureCacheOpenGL final : public TextureCacheBase { | ||||
| public: | ||||
|     explicit TextureCacheOpenGL(VideoCore::RasterizerInterface& rasterizer, | ||||
|                                 Tegra::Engines::Maxwell3D& maxwell3d, | ||||
|                                 Tegra::MemoryManager& gpu_memory, const Device& device, | ||||
|     explicit TextureCacheOpenGL(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                                 Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                                 Tegra::MemoryManager& gpu_memory_, const Device& device_, | ||||
|                                 StateTracker& state_tracker); | ||||
|     ~TextureCacheOpenGL(); | ||||
| 
 | ||||
|  | ||||
| @ -130,8 +130,8 @@ void APIENTRY DebugHandler(GLenum source, GLenum type, GLuint id, GLenum severit | ||||
| RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_, | ||||
|                                Core::Frontend::EmuWindow& emu_window_, | ||||
|                                Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, | ||||
|                                std::unique_ptr<Core::Frontend::GraphicsContext> context) | ||||
|     : RendererBase{emu_window_, std::move(context)}, telemetry_session{telemetry_session_}, | ||||
|                                std::unique_ptr<Core::Frontend::GraphicsContext> context_) | ||||
|     : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_}, | ||||
|       emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, program_manager{device} {} | ||||
| 
 | ||||
| RendererOpenGL::~RendererOpenGL() = default; | ||||
|  | ||||
| @ -57,10 +57,10 @@ struct ScreenInfo { | ||||
| 
 | ||||
| class RendererOpenGL final : public VideoCore::RendererBase { | ||||
| public: | ||||
|     explicit RendererOpenGL(Core::TelemetrySession& telemetry_session, | ||||
|                             Core::Frontend::EmuWindow& emu_window, Core::Memory::Memory& cpu_memory, | ||||
|                             Tegra::GPU& gpu, | ||||
|                             std::unique_ptr<Core::Frontend::GraphicsContext> context); | ||||
|     explicit RendererOpenGL(Core::TelemetrySession& telemetry_session_, | ||||
|                             Core::Frontend::EmuWindow& emu_window_, | ||||
|                             Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_, | ||||
|                             std::unique_ptr<Core::Frontend::GraphicsContext> context_); | ||||
|     ~RendererOpenGL() override; | ||||
| 
 | ||||
|     bool Init() override; | ||||
|  | ||||
| @ -38,13 +38,13 @@ std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKSch | ||||
| } // Anonymous namespace
 | ||||
| 
 | ||||
| Buffer::Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler_, | ||||
|                VKStagingBufferPool& staging_pool_, VAddr cpu_addr, std::size_t size) | ||||
|     : BufferBlock{cpu_addr, size}, scheduler{scheduler_}, staging_pool{staging_pool_} { | ||||
|                VKStagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_) | ||||
|     : BufferBlock{cpu_addr_, size_}, scheduler{scheduler_}, staging_pool{staging_pool_} { | ||||
|     const VkBufferCreateInfo ci{ | ||||
|         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, | ||||
|         .pNext = nullptr, | ||||
|         .flags = 0, | ||||
|         .size = static_cast<VkDeviceSize>(size), | ||||
|         .size = static_cast<VkDeviceSize>(size_), | ||||
|         .usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, | ||||
|         .sharingMode = VK_SHARING_MODE_EXCLUSIVE, | ||||
|         .queueFamilyIndexCount = 0, | ||||
| @ -57,69 +57,71 @@ Buffer::Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VKSchedu | ||||
| 
 | ||||
| Buffer::~Buffer() = default; | ||||
| 
 | ||||
| void Buffer::Upload(std::size_t offset, std::size_t size, const u8* data) { | ||||
|     const auto& staging = staging_pool.GetUnusedBuffer(size, true); | ||||
|     std::memcpy(staging.commit->Map(size), data, size); | ||||
| void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { | ||||
|     const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); | ||||
|     std::memcpy(staging.commit->Map(data_size), data, data_size); | ||||
| 
 | ||||
|     scheduler.RequestOutsideRenderPassOperationContext(); | ||||
| 
 | ||||
|     const VkBuffer handle = Handle(); | ||||
|     scheduler.Record([staging = *staging.handle, handle, offset, size](vk::CommandBuffer cmdbuf) { | ||||
|         cmdbuf.CopyBuffer(staging, handle, VkBufferCopy{0, offset, size}); | ||||
|     scheduler.Record( | ||||
|         [staging = *staging.handle, handle, offset, data_size](vk::CommandBuffer cmdbuf) { | ||||
|             cmdbuf.CopyBuffer(staging, handle, VkBufferCopy{0, offset, data_size}); | ||||
| 
 | ||||
|         const VkBufferMemoryBarrier barrier{ | ||||
|             .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, | ||||
|             .pNext = nullptr, | ||||
|             .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, | ||||
|             .dstAccessMask = UPLOAD_ACCESS_BARRIERS, | ||||
|             .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|             .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|             .buffer = handle, | ||||
|             .offset = offset, | ||||
|             .size = size, | ||||
|         }; | ||||
|         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {}, | ||||
|                                barrier, {}); | ||||
|     }); | ||||
|             const VkBufferMemoryBarrier barrier{ | ||||
|                 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, | ||||
|                 .pNext = nullptr, | ||||
|                 .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT, | ||||
|                 .dstAccessMask = UPLOAD_ACCESS_BARRIERS, | ||||
|                 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|                 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|                 .buffer = handle, | ||||
|                 .offset = offset, | ||||
|                 .size = data_size, | ||||
|             }; | ||||
|             cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {}, | ||||
|                                    barrier, {}); | ||||
|         }); | ||||
| } | ||||
| 
 | ||||
| void Buffer::Download(std::size_t offset, std::size_t size, u8* data) { | ||||
|     const auto& staging = staging_pool.GetUnusedBuffer(size, true); | ||||
| void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { | ||||
|     const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); | ||||
|     scheduler.RequestOutsideRenderPassOperationContext(); | ||||
| 
 | ||||
|     const VkBuffer handle = Handle(); | ||||
|     scheduler.Record([staging = *staging.handle, handle, offset, size](vk::CommandBuffer cmdbuf) { | ||||
|         const VkBufferMemoryBarrier barrier{ | ||||
|             .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, | ||||
|             .pNext = nullptr, | ||||
|             .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, | ||||
|             .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, | ||||
|             .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|             .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|             .buffer = handle, | ||||
|             .offset = offset, | ||||
|             .size = size, | ||||
|         }; | ||||
|     scheduler.Record( | ||||
|         [staging = *staging.handle, handle, offset, data_size](vk::CommandBuffer cmdbuf) { | ||||
|             const VkBufferMemoryBarrier barrier{ | ||||
|                 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, | ||||
|                 .pNext = nullptr, | ||||
|                 .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT, | ||||
|                 .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT, | ||||
|                 .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|                 .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, | ||||
|                 .buffer = handle, | ||||
|                 .offset = offset, | ||||
|                 .size = data_size, | ||||
|             }; | ||||
| 
 | ||||
|         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | | ||||
|                                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | | ||||
|                                    VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, | ||||
|                                VK_PIPELINE_STAGE_TRANSFER_BIT, 0, {}, barrier, {}); | ||||
|         cmdbuf.CopyBuffer(handle, staging, VkBufferCopy{offset, 0, size}); | ||||
|     }); | ||||
|             cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | | ||||
|                                        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | | ||||
|                                        VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, | ||||
|                                    VK_PIPELINE_STAGE_TRANSFER_BIT, 0, {}, barrier, {}); | ||||
|             cmdbuf.CopyBuffer(handle, staging, VkBufferCopy{offset, 0, data_size}); | ||||
|         }); | ||||
|     scheduler.Finish(); | ||||
| 
 | ||||
|     std::memcpy(data, staging.commit->Map(size), size); | ||||
|     std::memcpy(data, staging.commit->Map(data_size), data_size); | ||||
| } | ||||
| 
 | ||||
| void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, | ||||
|                       std::size_t size) { | ||||
|                       std::size_t copy_size) { | ||||
|     scheduler.RequestOutsideRenderPassOperationContext(); | ||||
| 
 | ||||
|     const VkBuffer dst_buffer = Handle(); | ||||
|     scheduler.Record([src_buffer = src.Handle(), dst_buffer, src_offset, dst_offset, | ||||
|                       size](vk::CommandBuffer cmdbuf) { | ||||
|         cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size}); | ||||
|                       copy_size](vk::CommandBuffer cmdbuf) { | ||||
|         cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, copy_size}); | ||||
| 
 | ||||
|         std::array<VkBufferMemoryBarrier, 2> barriers; | ||||
|         barriers[0].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; | ||||
| @ -130,7 +132,7 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst | ||||
|         barriers[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; | ||||
|         barriers[0].buffer = src_buffer; | ||||
|         barriers[0].offset = src_offset; | ||||
|         barriers[0].size = size; | ||||
|         barriers[0].size = copy_size; | ||||
|         barriers[1].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; | ||||
|         barriers[1].pNext = nullptr; | ||||
|         barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; | ||||
| @ -139,19 +141,17 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst | ||||
|         barriers[1].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; | ||||
|         barriers[1].buffer = dst_buffer; | ||||
|         barriers[1].offset = dst_offset; | ||||
|         barriers[1].size = size; | ||||
|         barriers[1].size = copy_size; | ||||
|         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {}, | ||||
|                                barriers, {}); | ||||
|     }); | ||||
| } | ||||
| 
 | ||||
| VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, | ||||
|                              Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, | ||||
| VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                              Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | ||||
|                              const VKDevice& device_, VKMemoryManager& memory_manager_, | ||||
|                              VKScheduler& scheduler_, VKStagingBufferPool& staging_pool_) | ||||
|     : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, gpu_memory, cpu_memory, | ||||
|                                                                  CreateStreamBuffer(device_, | ||||
|                                                                                     scheduler_)}, | ||||
|     : BufferCache{rasterizer_, gpu_memory_, cpu_memory_, CreateStreamBuffer(device_, scheduler_)}, | ||||
|       device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{ | ||||
|                                                                                    staging_pool_} {} | ||||
| 
 | ||||
|  | ||||
| @ -22,15 +22,15 @@ class VKScheduler; | ||||
| class Buffer final : public VideoCommon::BufferBlock { | ||||
| public: | ||||
|     explicit Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler, | ||||
|                     VKStagingBufferPool& staging_pool, VAddr cpu_addr, std::size_t size); | ||||
|                     VKStagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_); | ||||
|     ~Buffer(); | ||||
| 
 | ||||
|     void Upload(std::size_t offset, std::size_t size, const u8* data); | ||||
|     void Upload(std::size_t offset, std::size_t data_size, const u8* data); | ||||
| 
 | ||||
|     void Download(std::size_t offset, std::size_t size, u8* data); | ||||
|     void Download(std::size_t offset, std::size_t data_size, u8* data); | ||||
| 
 | ||||
|     void CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, | ||||
|                   std::size_t size); | ||||
|                   std::size_t copy_size); | ||||
| 
 | ||||
|     VkBuffer Handle() const { | ||||
|         return *buffer.handle; | ||||
| @ -49,10 +49,10 @@ private: | ||||
| 
 | ||||
| class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> { | ||||
| public: | ||||
|     explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, | ||||
|                            Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, | ||||
|                            const VKDevice& device, VKMemoryManager& memory_manager, | ||||
|                            VKScheduler& scheduler, VKStagingBufferPool& staging_pool); | ||||
|     explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                            Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | ||||
|                            const VKDevice& device_, VKMemoryManager& memory_manager_, | ||||
|                            VKScheduler& scheduler_, VKStagingBufferPool& staging_pool_); | ||||
|     ~VKBufferCache(); | ||||
| 
 | ||||
|     BufferInfo GetEmptyBuffer(std::size_t size) override; | ||||
|  | ||||
| @ -17,8 +17,8 @@ struct CommandPool::Pool { | ||||
|     vk::CommandBuffers cmdbufs; | ||||
| }; | ||||
| 
 | ||||
| CommandPool::CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device_) | ||||
|     : ResourcePool(master_semaphore, COMMAND_BUFFER_POOL_SIZE), device{device_} {} | ||||
| CommandPool::CommandPool(MasterSemaphore& master_semaphore_, const VKDevice& device_) | ||||
|     : ResourcePool(master_semaphore_, COMMAND_BUFFER_POOL_SIZE), device{device_} {} | ||||
| 
 | ||||
| CommandPool::~CommandPool() = default; | ||||
| 
 | ||||
|  | ||||
| @ -17,7 +17,7 @@ class VKDevice; | ||||
| 
 | ||||
| class CommandPool final : public ResourcePool { | ||||
| public: | ||||
|     explicit CommandPool(MasterSemaphore& master_semaphore, const VKDevice& device_); | ||||
|     explicit CommandPool(MasterSemaphore& master_semaphore_, const VKDevice& device_); | ||||
|     ~CommandPool() override; | ||||
| 
 | ||||
|     void Allocate(size_t begin, size_t end) override; | ||||
|  | ||||
| @ -136,26 +136,25 @@ bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) con | ||||
|     return std::memcmp(&rhs, this, sizeof *this) == 0; | ||||
| } | ||||
| 
 | ||||
| Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine, Tegra::Engines::ShaderType stage, | ||||
|                GPUVAddr gpu_addr_, VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code_, | ||||
|                u32 main_offset) | ||||
|     : gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage, engine), | ||||
|       shader_ir(program_code, main_offset, compiler_settings, registry), | ||||
| Shader::Shader(Tegra::Engines::ConstBufferEngineInterface& engine_, ShaderType stage_, | ||||
|                GPUVAddr gpu_addr_, VAddr cpu_addr_, ProgramCode program_code_, u32 main_offset_) | ||||
|     : gpu_addr(gpu_addr_), program_code(std::move(program_code_)), registry(stage_, engine_), | ||||
|       shader_ir(program_code, main_offset_, compiler_settings, registry), | ||||
|       entries(GenerateShaderEntries(shader_ir)) {} | ||||
| 
 | ||||
| Shader::~Shader() = default; | ||||
| 
 | ||||
| VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu_, | ||||
| VKPipelineCache::VKPipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_, | ||||
|                                  Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                                  Tegra::Engines::KeplerCompute& kepler_compute_, | ||||
|                                  Tegra::MemoryManager& gpu_memory_, const VKDevice& device_, | ||||
|                                  VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_, | ||||
|                                  VKUpdateDescriptorQueue& update_descriptor_queue_, | ||||
|                                  VKRenderPassCache& renderpass_cache_) | ||||
|     : VideoCommon::ShaderCache<Shader>{rasterizer}, gpu{gpu_}, maxwell3d{maxwell3d_}, | ||||
|       kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, device{device_}, | ||||
|       scheduler{scheduler_}, descriptor_pool{descriptor_pool_}, | ||||
|       update_descriptor_queue{update_descriptor_queue_}, renderpass_cache{renderpass_cache_} {} | ||||
|     : ShaderCache{rasterizer_}, gpu{gpu_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_}, | ||||
|       gpu_memory{gpu_memory_}, device{device_}, scheduler{scheduler_}, | ||||
|       descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_}, | ||||
|       renderpass_cache{renderpass_cache_} {} | ||||
| 
 | ||||
| VKPipelineCache::~VKPipelineCache() = default; | ||||
| 
 | ||||
|  | ||||
| @ -84,9 +84,9 @@ namespace Vulkan { | ||||
| 
 | ||||
| class Shader { | ||||
| public: | ||||
|     explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine, | ||||
|                     Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, VAddr cpu_addr, | ||||
|                     VideoCommon::Shader::ProgramCode program_code, u32 main_offset); | ||||
|     explicit Shader(Tegra::Engines::ConstBufferEngineInterface& engine_, | ||||
|                     Tegra::Engines::ShaderType stage_, GPUVAddr gpu_addr, VAddr cpu_addr_, | ||||
|                     VideoCommon::Shader::ProgramCode program_code, u32 main_offset_); | ||||
|     ~Shader(); | ||||
| 
 | ||||
|     GPUVAddr GetGpuAddr() const { | ||||
| @ -119,13 +119,13 @@ private: | ||||
| 
 | ||||
| class VKPipelineCache final : public VideoCommon::ShaderCache<Shader> { | ||||
| public: | ||||
|     explicit VKPipelineCache(RasterizerVulkan& rasterizer, Tegra::GPU& gpu, | ||||
|                              Tegra::Engines::Maxwell3D& maxwell3d, | ||||
|                              Tegra::Engines::KeplerCompute& kepler_compute, | ||||
|                              Tegra::MemoryManager& gpu_memory, const VKDevice& device, | ||||
|                              VKScheduler& scheduler, VKDescriptorPool& descriptor_pool, | ||||
|                              VKUpdateDescriptorQueue& update_descriptor_queue, | ||||
|                              VKRenderPassCache& renderpass_cache); | ||||
|     explicit VKPipelineCache(RasterizerVulkan& rasterizer_, Tegra::GPU& gpu_, | ||||
|                              Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                              Tegra::Engines::KeplerCompute& kepler_compute_, | ||||
|                              Tegra::MemoryManager& gpu_memory_, const VKDevice& device_, | ||||
|                              VKScheduler& scheduler_, VKDescriptorPool& descriptor_pool_, | ||||
|                              VKUpdateDescriptorQueue& update_descriptor_queue_, | ||||
|                              VKRenderPassCache& renderpass_cache_); | ||||
|     ~VKPipelineCache() override; | ||||
| 
 | ||||
|     std::array<Shader*, Maxwell::MaxShaderProgram> GetShaders(); | ||||
|  | ||||
| @ -69,12 +69,10 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { | ||||
| VKQueryCache::VKQueryCache(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                            Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, | ||||
|                            const VKDevice& device_, VKScheduler& scheduler_) | ||||
|     : QueryCacheBase<VKQueryCache, CachedQuery, CounterStream, HostCounter>{rasterizer_, maxwell3d_, | ||||
|                                                                             gpu_memory_}, | ||||
|       device{device_}, scheduler{scheduler_}, query_pools{ | ||||
|                                                   QueryPool{device_, scheduler_, | ||||
|                                                             QueryType::SamplesPassed}, | ||||
|                                               } {} | ||||
|     : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_}, | ||||
|       query_pools{ | ||||
|           QueryPool{device_, scheduler_, QueryType::SamplesPassed}, | ||||
|       } {} | ||||
| 
 | ||||
| VKQueryCache::~VKQueryCache() { | ||||
|     // TODO(Rodrigo): This is a hack to destroy all HostCounter instances before the base class
 | ||||
| @ -97,8 +95,8 @@ void VKQueryCache::Reserve(QueryType type, std::pair<VkQueryPool, u32> query) { | ||||
| 
 | ||||
| HostCounter::HostCounter(VKQueryCache& cache_, std::shared_ptr<HostCounter> dependency_, | ||||
|                          QueryType type_) | ||||
|     : HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency_)}, cache{cache_}, | ||||
|       type{type_}, query{cache_.AllocateQuery(type_)}, tick{cache_.Scheduler().CurrentTick()} { | ||||
|     : HostCounterBase{std::move(dependency_)}, cache{cache_}, type{type_}, | ||||
|       query{cache_.AllocateQuery(type_)}, tick{cache_.Scheduler().CurrentTick()} { | ||||
|     const vk::Device* logical = &cache_.Device().GetLogical(); | ||||
|     cache_.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) { | ||||
|         logical->ResetQueryPoolEXT(query.first, query.second, 1); | ||||
| @ -119,18 +117,20 @@ u64 HostCounter::BlockingQuery() const { | ||||
|     if (tick >= cache.Scheduler().CurrentTick()) { | ||||
|         cache.Scheduler().Flush(); | ||||
|     } | ||||
| 
 | ||||
|     u64 data; | ||||
|     const VkResult result = cache.Device().GetLogical().GetQueryResults( | ||||
|     const VkResult query_result = cache.Device().GetLogical().GetQueryResults( | ||||
|         query.first, query.second, 1, sizeof(data), &data, sizeof(data), | ||||
|         VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); | ||||
|     switch (result) { | ||||
| 
 | ||||
|     switch (query_result) { | ||||
|     case VK_SUCCESS: | ||||
|         return data; | ||||
|     case VK_ERROR_DEVICE_LOST: | ||||
|         cache.Device().ReportLoss(); | ||||
|         [[fallthrough]]; | ||||
|     default: | ||||
|         throw vk::Exception(result); | ||||
|         throw vk::Exception(query_result); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -95,8 +95,8 @@ private: | ||||
| 
 | ||||
| class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> { | ||||
| public: | ||||
|     explicit CachedQuery(VKQueryCache&, VideoCore::QueryType, VAddr cpu_addr, u8* host_ptr) | ||||
|         : VideoCommon::CachedQueryBase<HostCounter>{cpu_addr, host_ptr} {} | ||||
|     explicit CachedQuery(VKQueryCache&, VideoCore::QueryType, VAddr cpu_addr_, u8* host_ptr_) | ||||
|         : CachedQueryBase{cpu_addr_, host_ptr_} {} | ||||
| }; | ||||
| 
 | ||||
| } // namespace Vulkan
 | ||||
|  | ||||
| @ -128,12 +128,12 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry | ||||
|             const u32 offset_2 = entry.secondary_offset; | ||||
|             const u32 handle_1 = engine.AccessConstBuffer32(stage_type, buffer_1, offset_1); | ||||
|             const u32 handle_2 = engine.AccessConstBuffer32(stage_type, buffer_2, offset_2); | ||||
|             return engine.GetTextureInfo(handle_1 | handle_2); | ||||
|             return engine.GetTextureInfo(Tegra::Texture::TextureHandle{handle_1 | handle_2}); | ||||
|         } | ||||
|     } | ||||
|     if (entry.is_bindless) { | ||||
|         const auto tex_handle = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset); | ||||
|         return engine.GetTextureInfo(tex_handle); | ||||
|         return engine.GetTextureInfo(Tegra::Texture::TextureHandle{tex_handle}); | ||||
|     } | ||||
|     const auto& gpu_profile = engine.AccessGuestDriverProfile(); | ||||
|     const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize()); | ||||
| @ -380,12 +380,12 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu_, | ||||
| RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | ||||
|                                    Tegra::MemoryManager& gpu_memory_, | ||||
|                                    Core::Memory::Memory& cpu_memory, VKScreenInfo& screen_info_, | ||||
|                                    Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_, | ||||
|                                    const VKDevice& device_, VKMemoryManager& memory_manager_, | ||||
|                                    StateTracker& state_tracker_, VKScheduler& scheduler_) | ||||
|     : RasterizerAccelerated(cpu_memory), gpu(gpu_), gpu_memory(gpu_memory_), | ||||
|     : RasterizerAccelerated(cpu_memory_), gpu(gpu_), gpu_memory(gpu_memory_), | ||||
|       maxwell3d(gpu.Maxwell3D()), kepler_compute(gpu.KeplerCompute()), screen_info(screen_info_), | ||||
|       device(device_), memory_manager(memory_manager_), state_tracker(state_tracker_), | ||||
|       scheduler(scheduler_), staging_pool(device, memory_manager, scheduler), | ||||
| @ -397,11 +397,11 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra: | ||||
|       texture_cache(*this, maxwell3d, gpu_memory, device, memory_manager, scheduler, staging_pool), | ||||
|       pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler, | ||||
|                      descriptor_pool, update_descriptor_queue, renderpass_cache), | ||||
|       buffer_cache(*this, gpu_memory, cpu_memory, device, memory_manager, scheduler, staging_pool), | ||||
|       buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_manager, scheduler, staging_pool), | ||||
|       sampler_cache(device), query_cache(*this, maxwell3d, gpu_memory, device, scheduler), | ||||
|       fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device, | ||||
|                     scheduler), | ||||
|       wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window) { | ||||
|       wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) { | ||||
|     scheduler.SetQueryCache(query_cache); | ||||
|     if (device.UseAsynchronousShaders()) { | ||||
|         async_shaders.AllocateWorkers(); | ||||
|  | ||||
| @ -105,11 +105,11 @@ struct ImageView { | ||||
| 
 | ||||
| class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { | ||||
| public: | ||||
|     explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window, Tegra::GPU& gpu, | ||||
|                               Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, | ||||
|                               VKScreenInfo& screen_info, const VKDevice& device, | ||||
|                               VKMemoryManager& memory_manager, StateTracker& state_tracker, | ||||
|                               VKScheduler& scheduler); | ||||
|     explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, | ||||
|                               Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, | ||||
|                               VKScreenInfo& screen_info_, const VKDevice& device_, | ||||
|                               VKMemoryManager& memory_manager_, StateTracker& state_tracker_, | ||||
|                               VKScheduler& scheduler_); | ||||
|     ~RasterizerVulkan() override; | ||||
| 
 | ||||
|     void Draw(bool is_indexed, bool is_instanced) override; | ||||
|  | ||||
| @ -489,12 +489,12 @@ VkImageView CachedSurfaceView::GetAttachment() { | ||||
|     return *render_target; | ||||
| } | ||||
| 
 | ||||
| VKTextureCache::VKTextureCache(VideoCore::RasterizerInterface& rasterizer, | ||||
|                                Tegra::Engines::Maxwell3D& maxwell3d, | ||||
|                                Tegra::MemoryManager& gpu_memory, const VKDevice& device_, | ||||
| VKTextureCache::VKTextureCache(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                                Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                                Tegra::MemoryManager& gpu_memory_, const VKDevice& device_, | ||||
|                                VKMemoryManager& memory_manager_, VKScheduler& scheduler_, | ||||
|                                VKStagingBufferPool& staging_pool_) | ||||
|     : TextureCache(rasterizer, maxwell3d, gpu_memory, device_.IsOptimalAstcSupported()), | ||||
|     : TextureCache(rasterizer_, maxwell3d_, gpu_memory_, device_.IsOptimalAstcSupported()), | ||||
|       device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{ | ||||
|                                                                                    staging_pool_} {} | ||||
| 
 | ||||
|  | ||||
| @ -193,10 +193,11 @@ private: | ||||
| 
 | ||||
| class VKTextureCache final : public TextureCacheBase { | ||||
| public: | ||||
|     explicit VKTextureCache(VideoCore::RasterizerInterface& rasterizer, | ||||
|                             Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory, | ||||
|                             const VKDevice& device, VKMemoryManager& memory_manager, | ||||
|                             VKScheduler& scheduler, VKStagingBufferPool& staging_pool); | ||||
|     explicit VKTextureCache(VideoCore::RasterizerInterface& rasterizer_, | ||||
|                             Tegra::Engines::Maxwell3D& maxwell3d_, | ||||
|                             Tegra::MemoryManager& gpu_memory_, const VKDevice& device_, | ||||
|                             VKMemoryManager& memory_manager_, VKScheduler& scheduler_, | ||||
|                             VKStagingBufferPool& staging_pool_); | ||||
|     ~VKTextureCache(); | ||||
| 
 | ||||
| private: | ||||
|  | ||||
| @ -374,8 +374,8 @@ std::string ASTManager::Print() const { | ||||
|     return printer.GetResult(); | ||||
| } | ||||
| 
 | ||||
| ASTManager::ASTManager(bool full_decompile, bool disable_else_derivation) | ||||
|     : full_decompile{full_decompile}, disable_else_derivation{disable_else_derivation} {}; | ||||
| ASTManager::ASTManager(bool do_full_decompile, bool disable_else_derivation_) | ||||
|     : full_decompile{do_full_decompile}, disable_else_derivation{disable_else_derivation_} {} | ||||
| 
 | ||||
| ASTManager::~ASTManager() { | ||||
|     Clear(); | ||||
|  | ||||
| @ -76,7 +76,7 @@ public: | ||||
| 
 | ||||
| class ASTIfThen { | ||||
| public: | ||||
|     explicit ASTIfThen(Expr condition) : condition{std::move(condition)} {} | ||||
|     explicit ASTIfThen(Expr condition_) : condition{std::move(condition_)} {} | ||||
|     Expr condition; | ||||
|     ASTZipper nodes{}; | ||||
| }; | ||||
| @ -88,63 +88,68 @@ public: | ||||
| 
 | ||||
| class ASTBlockEncoded { | ||||
| public: | ||||
|     explicit ASTBlockEncoded(u32 start, u32 end) : start{start}, end{end} {} | ||||
|     explicit ASTBlockEncoded(u32 start_, u32 end_) : start{start_}, end{end_} {} | ||||
|     u32 start; | ||||
|     u32 end; | ||||
| }; | ||||
| 
 | ||||
| class ASTBlockDecoded { | ||||
| public: | ||||
|     explicit ASTBlockDecoded(NodeBlock&& new_nodes) : nodes(std::move(new_nodes)) {} | ||||
|     explicit ASTBlockDecoded(NodeBlock&& new_nodes_) : nodes(std::move(new_nodes_)) {} | ||||
|     NodeBlock nodes; | ||||
| }; | ||||
| 
 | ||||
| class ASTVarSet { | ||||
| public: | ||||
|     explicit ASTVarSet(u32 index, Expr condition) : index{index}, condition{std::move(condition)} {} | ||||
|     explicit ASTVarSet(u32 index_, Expr condition_) | ||||
|         : index{index_}, condition{std::move(condition_)} {} | ||||
| 
 | ||||
|     u32 index; | ||||
|     Expr condition; | ||||
| }; | ||||
| 
 | ||||
| class ASTLabel { | ||||
| public: | ||||
|     explicit ASTLabel(u32 index) : index{index} {} | ||||
|     explicit ASTLabel(u32 index_) : index{index_} {} | ||||
|     u32 index; | ||||
|     bool unused{}; | ||||
| }; | ||||
| 
 | ||||
| class ASTGoto { | ||||
| public: | ||||
|     explicit ASTGoto(Expr condition, u32 label) : condition{std::move(condition)}, label{label} {} | ||||
|     explicit ASTGoto(Expr condition_, u32 label_) | ||||
|         : condition{std::move(condition_)}, label{label_} {} | ||||
| 
 | ||||
|     Expr condition; | ||||
|     u32 label; | ||||
| }; | ||||
| 
 | ||||
| class ASTDoWhile { | ||||
| public: | ||||
|     explicit ASTDoWhile(Expr condition) : condition{std::move(condition)} {} | ||||
|     explicit ASTDoWhile(Expr condition_) : condition{std::move(condition_)} {} | ||||
|     Expr condition; | ||||
|     ASTZipper nodes{}; | ||||
| }; | ||||
| 
 | ||||
| class ASTReturn { | ||||
| public: | ||||
|     explicit ASTReturn(Expr condition, bool kills) | ||||
|         : condition{std::move(condition)}, kills{kills} {} | ||||
|     explicit ASTReturn(Expr condition_, bool kills_) | ||||
|         : condition{std::move(condition_)}, kills{kills_} {} | ||||
| 
 | ||||
|     Expr condition; | ||||
|     bool kills; | ||||
| }; | ||||
| 
 | ||||
| class ASTBreak { | ||||
| public: | ||||
|     explicit ASTBreak(Expr condition) : condition{std::move(condition)} {} | ||||
|     explicit ASTBreak(Expr condition_) : condition{std::move(condition_)} {} | ||||
|     Expr condition; | ||||
| }; | ||||
| 
 | ||||
| class ASTBase { | ||||
| public: | ||||
|     explicit ASTBase(ASTNode parent, ASTData data) | ||||
|         : data{std::move(data)}, parent{std::move(parent)} {} | ||||
|     explicit ASTBase(ASTNode parent_, ASTData data_) | ||||
|         : data{std::move(data_)}, parent{std::move(parent_)} {} | ||||
| 
 | ||||
|     template <class U, class... Args> | ||||
|     static ASTNode Make(ASTNode parent, Args&&... args) { | ||||
| @ -300,7 +305,7 @@ private: | ||||
| 
 | ||||
| class ASTManager final { | ||||
| public: | ||||
|     ASTManager(bool full_decompile, bool disable_else_derivation); | ||||
|     explicit ASTManager(bool do_full_decompile, bool disable_else_derivation_); | ||||
|     ~ASTManager(); | ||||
| 
 | ||||
|     ASTManager(const ASTManager& o) = delete; | ||||
|  | ||||
| @ -13,7 +13,7 @@ | ||||
| 
 | ||||
| namespace VideoCommon::Shader { | ||||
| 
 | ||||
| AsyncShaders::AsyncShaders(Core::Frontend::EmuWindow& emu_window) : emu_window(emu_window) {} | ||||
| AsyncShaders::AsyncShaders(Core::Frontend::EmuWindow& emu_window_) : emu_window(emu_window_) {} | ||||
| 
 | ||||
| AsyncShaders::~AsyncShaders() { | ||||
|     KillWorkers(); | ||||
|  | ||||
| @ -66,7 +66,7 @@ public: | ||||
|         Tegra::Engines::ShaderType shader_type; | ||||
|     }; | ||||
| 
 | ||||
|     explicit AsyncShaders(Core::Frontend::EmuWindow& emu_window); | ||||
|     explicit AsyncShaders(Core::Frontend::EmuWindow& emu_window_); | ||||
|     ~AsyncShaders(); | ||||
| 
 | ||||
|     /// Start up shader worker threads
 | ||||
|  | ||||
| @ -66,8 +66,8 @@ struct BlockInfo { | ||||
| }; | ||||
| 
 | ||||
| struct CFGRebuildState { | ||||
|     explicit CFGRebuildState(const ProgramCode& program_code, u32 start, Registry& registry) | ||||
|         : program_code{program_code}, registry{registry}, start{start} {} | ||||
|     explicit CFGRebuildState(const ProgramCode& program_code_, u32 start_, Registry& registry_) | ||||
|         : program_code{program_code_}, registry{registry_}, start{start_} {} | ||||
| 
 | ||||
|     const ProgramCode& program_code; | ||||
|     Registry& registry; | ||||
|  | ||||
| @ -42,10 +42,10 @@ struct Condition { | ||||
| class SingleBranch { | ||||
| public: | ||||
|     SingleBranch() = default; | ||||
|     SingleBranch(Condition condition, s32 address, bool kill, bool is_sync, bool is_brk, | ||||
|                  bool ignore) | ||||
|         : condition{condition}, address{address}, kill{kill}, is_sync{is_sync}, is_brk{is_brk}, | ||||
|           ignore{ignore} {} | ||||
|     explicit SingleBranch(Condition condition_, s32 address_, bool kill_, bool is_sync_, | ||||
|                           bool is_brk_, bool ignore_) | ||||
|         : condition{condition_}, address{address_}, kill{kill_}, is_sync{is_sync_}, is_brk{is_brk_}, | ||||
|           ignore{ignore_} {} | ||||
| 
 | ||||
|     bool operator==(const SingleBranch& b) const { | ||||
|         return std::tie(condition, address, kill, is_sync, is_brk, ignore) == | ||||
| @ -65,15 +65,15 @@ public: | ||||
| }; | ||||
| 
 | ||||
| struct CaseBranch { | ||||
|     CaseBranch(u32 cmp_value, u32 address) : cmp_value{cmp_value}, address{address} {} | ||||
|     explicit CaseBranch(u32 cmp_value_, u32 address_) : cmp_value{cmp_value_}, address{address_} {} | ||||
|     u32 cmp_value; | ||||
|     u32 address; | ||||
| }; | ||||
| 
 | ||||
| class MultiBranch { | ||||
| public: | ||||
|     MultiBranch(u32 gpr, std::vector<CaseBranch>&& branches) | ||||
|         : gpr{gpr}, branches{std::move(branches)} {} | ||||
|     explicit MultiBranch(u32 gpr_, std::vector<CaseBranch>&& branches_) | ||||
|         : gpr{gpr_}, branches{std::move(branches_)} {} | ||||
| 
 | ||||
|     u32 gpr{}; | ||||
|     std::vector<CaseBranch> branches{}; | ||||
|  | ||||
| @ -66,7 +66,7 @@ std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce, | ||||
| 
 | ||||
| class ASTDecoder { | ||||
| public: | ||||
|     ASTDecoder(ShaderIR& ir) : ir(ir) {} | ||||
|     explicit ASTDecoder(ShaderIR& ir_) : ir(ir_) {} | ||||
| 
 | ||||
|     void operator()(ASTProgram& ast) { | ||||
|         ASTNode current = ast.nodes.GetFirst(); | ||||
|  | ||||
| @ -258,7 +258,7 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) { | ||||
|     case OpCode::Id::LEA_IMM: | ||||
|     case OpCode::Id::LEA_RZ: | ||||
|     case OpCode::Id::LEA_HI: { | ||||
|         auto [op_a, op_b, op_c] = [&]() -> std::tuple<Node, Node, Node> { | ||||
|         auto [op_a_, op_b_, op_c_] = [&]() -> std::tuple<Node, Node, Node> { | ||||
|             switch (opcode->get().GetId()) { | ||||
|             case OpCode::Id::LEA_R2: { | ||||
|                 return {GetRegister(instr.gpr20), GetRegister(instr.gpr39), | ||||
| @ -294,8 +294,9 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) { | ||||
|         UNIMPLEMENTED_IF_MSG(instr.lea.pred48 != static_cast<u64>(Pred::UnusedIndex), | ||||
|                              "Unhandled LEA Predicate"); | ||||
| 
 | ||||
|         Node value = Operation(OperationCode::ILogicalShiftLeft, std::move(op_a), std::move(op_c)); | ||||
|         value = Operation(OperationCode::IAdd, std::move(op_b), std::move(value)); | ||||
|         Node value = | ||||
|             Operation(OperationCode::ILogicalShiftLeft, std::move(op_a_), std::move(op_c_)); | ||||
|         value = Operation(OperationCode::IAdd, std::move(op_b_), std::move(value)); | ||||
|         SetRegister(bb, instr.gpr0, std::move(value)); | ||||
| 
 | ||||
|         break; | ||||
|  | ||||
| @ -76,7 +76,7 @@ public: | ||||
| 
 | ||||
| class ExprPredicate final { | ||||
| public: | ||||
|     explicit ExprPredicate(u32 predicate) : predicate{predicate} {} | ||||
|     explicit ExprPredicate(u32 predicate_) : predicate{predicate_} {} | ||||
| 
 | ||||
|     bool operator==(const ExprPredicate& b) const { | ||||
|         return predicate == b.predicate; | ||||
| @ -91,7 +91,7 @@ public: | ||||
| 
 | ||||
| class ExprCondCode final { | ||||
| public: | ||||
|     explicit ExprCondCode(ConditionCode cc) : cc{cc} {} | ||||
|     explicit ExprCondCode(ConditionCode condition_code) : cc{condition_code} {} | ||||
| 
 | ||||
|     bool operator==(const ExprCondCode& b) const { | ||||
|         return cc == b.cc; | ||||
| @ -121,7 +121,7 @@ public: | ||||
| 
 | ||||
| class ExprGprEqual final { | ||||
| public: | ||||
|     ExprGprEqual(u32 gpr, u32 value) : gpr{gpr}, value{value} {} | ||||
|     explicit ExprGprEqual(u32 gpr_, u32 value_) : gpr{gpr_}, value{value_} {} | ||||
| 
 | ||||
|     bool operator==(const ExprGprEqual& b) const { | ||||
|         return gpr == b.gpr && value == b.value; | ||||
|  | ||||
| @ -290,18 +290,18 @@ struct Sampler { | ||||
|           is_buffer{is_buffer_}, is_indexed{is_indexed_} {} | ||||
| 
 | ||||
|     /// Separate sampler constructor
 | ||||
|     constexpr explicit Sampler(u32 index_, std::pair<u32, u32> offsets, std::pair<u32, u32> buffers, | ||||
|                                Tegra::Shader::TextureType type, bool is_array_, bool is_shadow_, | ||||
|                                bool is_buffer_) | ||||
|         : index{index_}, offset{offsets.first}, secondary_offset{offsets.second}, | ||||
|           buffer{buffers.first}, secondary_buffer{buffers.second}, type{type}, is_array{is_array_}, | ||||
|           is_shadow{is_shadow_}, is_buffer{is_buffer_}, is_separated{true} {} | ||||
|     constexpr explicit Sampler(u32 index_, std::pair<u32, u32> offsets_, | ||||
|                                std::pair<u32, u32> buffers_, Tegra::Shader::TextureType type_, | ||||
|                                bool is_array_, bool is_shadow_, bool is_buffer_) | ||||
|         : index{index_}, offset{offsets_.first}, secondary_offset{offsets_.second}, | ||||
|           buffer{buffers_.first}, secondary_buffer{buffers_.second}, type{type_}, | ||||
|           is_array{is_array_}, is_shadow{is_shadow_}, is_buffer{is_buffer_}, is_separated{true} {} | ||||
| 
 | ||||
|     /// Bindless samplers constructor
 | ||||
|     constexpr explicit Sampler(u32 index_, u32 offset_, u32 buffer_, | ||||
|                                Tegra::Shader::TextureType type, bool is_array_, bool is_shadow_, | ||||
|                                Tegra::Shader::TextureType type_, bool is_array_, bool is_shadow_, | ||||
|                                bool is_buffer_, bool is_indexed_) | ||||
|         : index{index_}, offset{offset_}, buffer{buffer_}, type{type}, is_array{is_array_}, | ||||
|         : index{index_}, offset{offset_}, buffer{buffer_}, type{type_}, is_array{is_array_}, | ||||
|           is_shadow{is_shadow_}, is_buffer{is_buffer_}, is_bindless{true}, is_indexed{is_indexed_} { | ||||
|     } | ||||
| 
 | ||||
|  | ||||
| @ -25,9 +25,10 @@ using Tegra::Shader::PredCondition; | ||||
| using Tegra::Shader::PredOperation; | ||||
| using Tegra::Shader::Register; | ||||
| 
 | ||||
| ShaderIR::ShaderIR(const ProgramCode& program_code, u32 main_offset, CompilerSettings settings, | ||||
|                    Registry& registry) | ||||
|     : program_code{program_code}, main_offset{main_offset}, settings{settings}, registry{registry} { | ||||
| ShaderIR::ShaderIR(const ProgramCode& program_code_, u32 main_offset_, CompilerSettings settings_, | ||||
|                    Registry& registry_) | ||||
|     : program_code{program_code_}, main_offset{main_offset_}, settings{settings_}, registry{ | ||||
|                                                                                        registry_} { | ||||
|     Decode(); | ||||
|     PostDecode(); | ||||
| } | ||||
|  | ||||
| @ -29,8 +29,8 @@ struct ShaderBlock; | ||||
| constexpr u32 MAX_PROGRAM_LENGTH = 0x1000; | ||||
| 
 | ||||
| struct ConstBuffer { | ||||
|     constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect) | ||||
|         : max_offset{max_offset}, is_indirect{is_indirect} {} | ||||
|     constexpr explicit ConstBuffer(u32 max_offset_, bool is_indirect_) | ||||
|         : max_offset{max_offset_}, is_indirect{is_indirect_} {} | ||||
| 
 | ||||
|     constexpr ConstBuffer() = default; | ||||
| 
 | ||||
| @ -66,8 +66,8 @@ struct GlobalMemoryUsage { | ||||
| 
 | ||||
| class ShaderIR final { | ||||
| public: | ||||
|     explicit ShaderIR(const ProgramCode& program_code, u32 main_offset, CompilerSettings settings, | ||||
|                       Registry& registry); | ||||
|     explicit ShaderIR(const ProgramCode& program_code_, u32 main_offset_, | ||||
|                       CompilerSettings settings_, Registry& registry_); | ||||
|     ~ShaderIR(); | ||||
| 
 | ||||
|     const std::map<u32, NodeBlock>& GetBasicBlocks() const { | ||||
|  | ||||
| @ -9,16 +9,16 @@ | ||||
| namespace VideoCommon { | ||||
| 
 | ||||
| struct CopyParams { | ||||
|     constexpr CopyParams(u32 source_x, u32 source_y, u32 source_z, u32 dest_x, u32 dest_y, | ||||
|                          u32 dest_z, u32 source_level, u32 dest_level, u32 width, u32 height, | ||||
|                          u32 depth) | ||||
|         : source_x{source_x}, source_y{source_y}, source_z{source_z}, dest_x{dest_x}, | ||||
|           dest_y{dest_y}, dest_z{dest_z}, source_level{source_level}, | ||||
|           dest_level{dest_level}, width{width}, height{height}, depth{depth} {} | ||||
|     constexpr CopyParams(u32 source_x_, u32 source_y_, u32 source_z_, u32 dest_x_, u32 dest_y_, | ||||
|                          u32 dest_z_, u32 source_level_, u32 dest_level_, u32 width_, u32 height_, | ||||
|                          u32 depth_) | ||||
|         : source_x{source_x_}, source_y{source_y_}, source_z{source_z_}, dest_x{dest_x_}, | ||||
|           dest_y{dest_y_}, dest_z{dest_z_}, source_level{source_level_}, | ||||
|           dest_level{dest_level_}, width{width_}, height{height_}, depth{depth_} {} | ||||
| 
 | ||||
|     constexpr CopyParams(u32 width, u32 height, u32 depth, u32 level) | ||||
|         : source_x{}, source_y{}, source_z{}, dest_x{}, dest_y{}, dest_z{}, source_level{level}, | ||||
|           dest_level{level}, width{width}, height{height}, depth{depth} {} | ||||
|     constexpr CopyParams(u32 width_, u32 height_, u32 depth_, u32 level_) | ||||
|         : source_x{}, source_y{}, source_z{}, dest_x{}, dest_y{}, dest_z{}, source_level{level_}, | ||||
|           dest_level{level_}, width{width_}, height{height_}, depth{depth_} {} | ||||
| 
 | ||||
|     u32 source_x; | ||||
|     u32 source_y; | ||||
|  | ||||
| @ -24,12 +24,12 @@ constexpr bool C = false; // Normal color | ||||
| constexpr bool S = true;  // Srgb
 | ||||
| 
 | ||||
| struct Table { | ||||
|     constexpr Table(TextureFormat texture_format, bool is_srgb, ComponentType red_component, | ||||
|                     ComponentType green_component, ComponentType blue_component, | ||||
|                     ComponentType alpha_component, PixelFormat pixel_format) | ||||
|         : texture_format{texture_format}, pixel_format{pixel_format}, red_component{red_component}, | ||||
|           green_component{green_component}, blue_component{blue_component}, | ||||
|           alpha_component{alpha_component}, is_srgb{is_srgb} {} | ||||
|     constexpr Table(TextureFormat texture_format_, bool is_srgb_, ComponentType red_component_, | ||||
|                     ComponentType green_component_, ComponentType blue_component_, | ||||
|                     ComponentType alpha_component_, PixelFormat pixel_format_) | ||||
|         : texture_format{texture_format_}, pixel_format{pixel_format_}, | ||||
|           red_component{red_component_}, green_component{green_component_}, | ||||
|           blue_component{blue_component_}, alpha_component{alpha_component_}, is_srgb{is_srgb_} {} | ||||
| 
 | ||||
|     TextureFormat texture_format; | ||||
|     PixelFormat pixel_format; | ||||
|  | ||||
| @ -25,11 +25,11 @@ StagingCache::StagingCache() = default; | ||||
| 
 | ||||
| StagingCache::~StagingCache() = default; | ||||
| 
 | ||||
| SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params, | ||||
|                                  bool is_astc_supported) | ||||
|     : params{params}, gpu_addr{gpu_addr}, mipmap_sizes(params.num_levels), | ||||
| SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr_, const SurfaceParams& params_, | ||||
|                                  bool is_astc_supported_) | ||||
|     : params{params_}, gpu_addr{gpu_addr_}, mipmap_sizes(params_.num_levels), | ||||
|       mipmap_offsets(params.num_levels) { | ||||
|     is_converted = IsPixelFormatASTC(params.pixel_format) && !is_astc_supported; | ||||
|     is_converted = IsPixelFormatASTC(params.pixel_format) && !is_astc_supported_; | ||||
|     host_memory_size = params.GetHostSizeInBytes(is_converted); | ||||
| 
 | ||||
|     std::size_t offset = 0; | ||||
|  | ||||
| @ -148,8 +148,8 @@ public: | ||||
|     } | ||||
| 
 | ||||
| protected: | ||||
|     explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params, | ||||
|                              bool is_astc_supported); | ||||
|     explicit SurfaceBaseImpl(GPUVAddr gpu_addr_, const SurfaceParams& params_, | ||||
|                              bool is_astc_supported_); | ||||
|     ~SurfaceBaseImpl() = default; | ||||
| 
 | ||||
|     virtual void DecorateSurfaceName() = 0; | ||||
| @ -297,9 +297,9 @@ public: | ||||
|     } | ||||
| 
 | ||||
| protected: | ||||
|     explicit SurfaceBase(const GPUVAddr gpu_addr, const SurfaceParams& params, | ||||
|                          bool is_astc_supported) | ||||
|         : SurfaceBaseImpl(gpu_addr, params, is_astc_supported) {} | ||||
|     explicit SurfaceBase(const GPUVAddr gpu_addr_, const SurfaceParams& params_, | ||||
|                          bool is_astc_supported_) | ||||
|         : SurfaceBaseImpl{gpu_addr_, params_, is_astc_supported_} {} | ||||
| 
 | ||||
|     ~SurfaceBase() = default; | ||||
| 
 | ||||
|  | ||||
| @ -13,10 +13,10 @@ | ||||
| namespace VideoCommon { | ||||
| 
 | ||||
| struct ViewParams { | ||||
|     constexpr explicit ViewParams(VideoCore::Surface::SurfaceTarget target, u32 base_layer, | ||||
|                                   u32 num_layers, u32 base_level, u32 num_levels) | ||||
|         : target{target}, base_layer{base_layer}, num_layers{num_layers}, base_level{base_level}, | ||||
|           num_levels{num_levels} {} | ||||
|     constexpr explicit ViewParams(VideoCore::Surface::SurfaceTarget target_, u32 base_layer_, | ||||
|                                   u32 num_layers_, u32 base_level_, u32 num_levels_) | ||||
|         : target{target_}, base_layer{base_layer_}, num_layers{num_layers_}, | ||||
|           base_level{base_level_}, num_levels{num_levels_} {} | ||||
| 
 | ||||
|     std::size_t Hash() const; | ||||
| 
 | ||||
| @ -44,7 +44,7 @@ struct ViewParams { | ||||
| 
 | ||||
| class ViewBase { | ||||
| public: | ||||
|     constexpr explicit ViewBase(const ViewParams& params) : params{params} {} | ||||
|     constexpr explicit ViewBase(const ViewParams& view_params) : params{view_params} {} | ||||
| 
 | ||||
|     constexpr const ViewParams& GetViewParams() const { | ||||
|         return params; | ||||
|  | ||||
| @ -146,7 +146,7 @@ enum class MsaaMode : u32 { | ||||
| }; | ||||
| 
 | ||||
| union TextureHandle { | ||||
|     TextureHandle(u32 raw) : raw{raw} {} | ||||
|     /* implicit */ TextureHandle(u32 raw_) : raw{raw_} {} | ||||
| 
 | ||||
|     u32 raw; | ||||
|     BitField<0, 20, u32> tic_id; | ||||
|  | ||||
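The diff above applies one pattern throughout: a constructor parameter that shares its name with the member it initializes is renamed with a trailing underscore so the parameter no longer shadows the member. A minimal, hypothetical sketch of that pattern follows; the Widget class is illustrative only and not part of the yuzu sources.

#include <string>
#include <utility>

// Hypothetical class, not taken from the yuzu sources.
class Widget {
public:
    // Before the rename, the parameter `name` shadowed the member `name`,
    // which trips shadowing diagnostics such as GCC's -Wshadow:
    //     explicit Widget(std::string name) : name(std::move(name)) {}
    // After the rename, nothing is shadowed:
    explicit Widget(std::string name_) : name(std::move(name_)) {}

private:
    std::string name;
};

The shadowed form is legal C++ (the member initializer list still resolves correctly), but it is easy to misread inside the constructor body, which is what the wholesale rename in this pull request avoids.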