Mirror of https://github.com/hrydgard/ppsspp.git
Don't load the shader cache on a separate thread - all it does is already async
commit d31ba393af
parent 1c58617392
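
Why this is safe: the expensive part of "loading" the shader cache - actually creating the Vulkan pipelines - was already asynchronous, queued up for the render manager's compile thread (see DrainAndBlockCompileQueue in the destructor hunk below). The dedicated VulkanLoadCache thread only hid the cheap parsing step, while costing a readiness flag, a cancel path and a join in the destructor. Below is a minimal sketch of that kind of compile queue, purely illustrative - CompileQueue, Enqueue and DrainAndBlock are hypothetical names, not PPSSPP's actual VulkanRenderManager API:

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

// Hypothetical, simplified compile queue. Enqueue() is cheap and returns
// immediately; the expensive work runs on the queue's own worker thread.
// That is why a separate "cache loader" thread on top of it is redundant.
class CompileQueue {
public:
    CompileQueue() : worker_([this] { Run(); }) {}
    ~CompileQueue() {
        DrainAndBlock();
        worker_.join();
    }

    // Called from the (now synchronous) cache load.
    void Enqueue(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(mutex_);
        tasks_.push_back(std::move(task));
        cond_.notify_one();
    }

    // Waits until every queued task has finished, then lets Run() exit.
    void DrainAndBlock() {
        std::unique_lock<std::mutex> lock(mutex_);
        done_ = true;
        cond_.notify_one();
        drained_.wait(lock, [this] { return tasks_.empty() && !inFlight_; });
    }

private:
    void Run() {
        std::unique_lock<std::mutex> lock(mutex_);
        while (true) {
            cond_.wait(lock, [this] { return done_ || !tasks_.empty(); });
            if (tasks_.empty())  // done_ is set and nothing is left.
                return;
            auto task = std::move(tasks_.front());
            tasks_.pop_front();
            inFlight_ = true;
            lock.unlock();
            task();  // The expensive part, e.g. pipeline creation.
            lock.lock();
            inFlight_ = false;
            if (tasks_.empty())
                drained_.notify_all();
        }
    }

    std::mutex mutex_;
    std::condition_variable cond_;
    std::condition_variable drained_;
    std::deque<std::function<void()>> tasks_;
    bool done_ = false;
    bool inFlight_ = false;
    std::thread worker_;
};

With something like this underneath, the GPU_Vulkan constructor can simply call LoadCache() inline, as the second hunk below does.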
@@ -72,7 +72,7 @@ public:
     }
 
     bool ContainsKey(const Key &key) const {
-        // Slightly wasteful.
+        // Slightly wasteful, though compiler might optimize it.
         Value value;
         return Get(key, &value);
     }
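A side note on the hunk above: ContainsKey() is "slightly wasteful" because it default-constructs a scratch Value just to give Get() somewhere to write; for the pointer-sized values these caches store, a compiler can typically optimize the copy away, which is what the updated comment hedges on. A toy illustration - TinyMap is a hypothetical stand-in for the real DenseHashMap:

#include <cstdio>
#include <unordered_map>

// Hypothetical stand-in for the DenseHashMap in the diff.
template <typename Key, typename Value>
class TinyMap {
public:
    void Insert(const Key &key, const Value &value) { map_[key] = value; }

    bool Get(const Key &key, Value *value) const {
        auto it = map_.find(key);
        if (it == map_.end())
            return false;
        *value = it->second;
        return true;
    }

    // The pattern from the diff: a scratch Value is created and copied
    // into purely to probe for the key.
    bool ContainsKey(const Key &key) const {
        Value value;
        return Get(key, &value);
    }

    // Pointer-returning variant (the caches elsewhere in this commit use
    // GetOrNull): no scratch Value, nullptr doubles as "not found".
    Value GetOrNull(const Key &key) const {
        auto it = map_.find(key);
        return it == map_.end() ? Value{} : it->second;
    }

private:
    std::unordered_map<Key, Value> map_;
};

int main() {
    TinyMap<int, const char *> m;
    m.Insert(1, "shader");
    printf("%d %s\n", m.ContainsKey(1), m.GetOrNull(1));
}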
@@ -93,25 +93,12 @@ GPU_Vulkan::GPU_Vulkan(GraphicsContext *gfxCtx, Draw::DrawContext *draw)
     if (discID.size()) {
         File::CreateFullPath(GetSysDirectory(DIRECTORY_APP_CACHE));
         shaderCachePath_ = GetSysDirectory(DIRECTORY_APP_CACHE) / (discID + ".vkshadercache");
-        shaderCacheLoaded_ = false;
-
-        shaderCacheLoadThread_ = std::thread([&] {
-            SetCurrentThreadName("VulkanLoadCache");
-            AndroidJNIThreadContext ctx;
-            LoadCache(shaderCachePath_);
-            shaderCacheLoaded_ = true;
-        });
-    } else {
-        shaderCacheLoaded_ = true;
+        LoadCache(shaderCachePath_);
     }
 }
 
 bool GPU_Vulkan::IsReady() {
-    return shaderCacheLoaded_;
-}
-
-void GPU_Vulkan::CancelReady() {
-    pipelineManager_->CancelCache();
+    return true;
 }
 
 void GPU_Vulkan::LoadCache(const Path &filename) {
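For reference, what the deleted lines implemented is the classic background-loader pattern: a worker thread performs the load and flips an atomic flag when finished, while the main thread polls IsReady(). A stripped-down sketch of that removed shape - the LoadCache below is a placeholder, not the real function:

#include <atomic>
#include <chrono>
#include <thread>

static std::atomic<bool> g_cacheLoaded{false};

// Placeholder for the real cache load; in PPSSPP this parsed the shader
// cache file and queued up pipeline compiles.
static void LoadCache() {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
}

int main() {
    // The removed pattern: spawn a thread, set the flag when done...
    std::thread loader([] {
        LoadCache();
        g_cacheLoaded.store(true, std::memory_order_release);
    });

    // ...and have the caller poll IsReady() until it flips.
    while (!g_cacheLoaded.load(std::memory_order_acquire)) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }

    loader.join();
    // After this commit, LoadCache() is just called synchronously in the
    // constructor, since the expensive part was already async anyway.
}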
@@ -182,10 +169,6 @@ void GPU_Vulkan::SaveCache(const Path &filename) {
 }
 
 GPU_Vulkan::~GPU_Vulkan() {
-    if (shaderCacheLoadThread_.joinable()) {
-        shaderCacheLoadThread_.join();
-    }
-
     if (draw_) {
         VulkanRenderManager *rm = (VulkanRenderManager *)draw_->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);
         rm->DrainAndBlockCompileQueue();
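The joinable()/join() pair deleted above existed because destroying a std::thread that is still joinable calls std::terminate(); with the thread member gone, the guard goes too. In miniature:

#include <thread>

int main() {
    std::thread t([] { /* background work */ });
    // If t went out of scope here while still joinable(), std::terminate()
    // would be called - the only reason the destructor above joined
    // shaderCacheLoadThread_ before this commit removed the member.
    if (t.joinable())
        t.join();
}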
@@ -42,7 +42,6 @@ public:
     u32 CheckGPUFeatures() const override;
 
     bool IsReady() override;
-    void CancelReady() override;
 
     // These are where we can reset command buffers etc.
     void BeginHostFrame() override;

@@ -84,7 +83,4 @@ private:
     PipelineManagerVulkan *pipelineManager_;
 
     Path shaderCachePath_;
-    std::atomic<bool> shaderCacheLoaded_{};
-
-    std::thread shaderCacheLoadThread_;
 };
@@ -719,8 +719,6 @@ bool PipelineManagerVulkan::LoadPipelineCache(FILE *file, bool loadRawPipelineCa
     VulkanRenderManager *rm = (VulkanRenderManager *)drawContext->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);
     VulkanQueueRunner *queueRunner = rm->GetQueueRunner();
 
-    cancelCache_ = false;
-
     uint32_t size = 0;
     if (loadRawPipelineCache) {
         NOTICE_LOG(G3D, "WARNING: Using the badly tested raw pipeline cache path!!!!");

@@ -779,7 +777,7 @@ bool PipelineManagerVulkan::LoadPipelineCache(FILE *file, bool loadRawPipelineCa
     int pipelineCreateFailCount = 0;
     int shaderFailCount = 0;
     for (uint32_t i = 0; i < size; i++) {
-        if (failed || cancelCache_) {
+        if (failed) {
             break;
         }
         StoredVulkanPipelineKey key;

@@ -824,7 +822,3 @@ bool PipelineManagerVulkan::LoadPipelineCache(FILE *file, bool loadRawPipelineCa
     // We just ignore any failures.
     return true;
 }
-
-void PipelineManagerVulkan::CancelCache() {
-    cancelCache_ = true;
-}
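cancelCache_ was a cooperative-cancellation flag: CancelReady() set it from another thread, and the pipeline-loading loop re-checked it once per iteration. With loading back on a single thread there is nothing to interrupt mid-loop, so the flag, its reset, the loop check and CancelCache() all go. The general pattern, sketched generically - note the removed member was a plain bool, while an atomic is the safe choice when the flag really is set cross-thread:

#include <atomic>
#include <cstdio>
#include <thread>

static std::atomic<bool> g_cancel{false};

// Stand-in for LoadPipelineCache's loop over stored pipeline keys.
static void LoadLoop(int count) {
    for (int i = 0; i < count; i++) {
        if (g_cancel.load(std::memory_order_relaxed)) {
            printf("cancelled at %d\n", i);
            return;  // like the removed `if (failed || cancelCache_)` exit
        }
        // ... create one pipeline ...
    }
    printf("finished\n");
}

int main() {
    std::thread loader(LoadLoop, 1000000);
    g_cancel.store(true);  // like the removed CancelCache()
    loader.join();
}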
@@ -101,11 +101,9 @@ public:
     // Saves data for faster creation next time.
     void SavePipelineCache(FILE *file, bool saveRawPipelineCache, ShaderManagerVulkan *shaderManager, Draw::DrawContext *drawContext);
     bool LoadPipelineCache(FILE *file, bool loadRawPipelineCache, ShaderManagerVulkan *shaderManager, Draw::DrawContext *drawContext, VkPipelineLayout layout, int multiSampleLevel);
-    void CancelCache();
 
 private:
     DenseHashMap<VulkanPipelineKey, VulkanPipeline *> pipelines_;
     VkPipelineCache pipelineCache_ = VK_NULL_HANDLE;
     VulkanContext *vulkan_;
-    bool cancelCache_ = false;
 };
@@ -239,8 +239,6 @@ void ShaderManagerVulkan::DeviceRestore(Draw::DrawContext *draw) {
 }
 
 void ShaderManagerVulkan::Clear() {
-    std::lock_guard<std::mutex> guard(cacheLock_);
-
     fsCache_.Iterate([&](const FShaderID &key, VulkanFragmentShader *shader) {
         delete shader;
     });

@@ -335,8 +333,6 @@ void ShaderManagerVulkan::GetShaders(int prim, VertexDecoder *decoder, VulkanVer
         return;
     }
 
-    std::lock_guard<std::mutex> guard(cacheLock_);
-
     VulkanContext *vulkan = (VulkanContext *)draw_->GetNativeObject(Draw::NativeObject::CONTEXT);
     VulkanVertexShader *vs = nullptr;
     if (!vsCache_.Get(VSID, &vs)) {

@@ -399,7 +395,6 @@ void ShaderManagerVulkan::GetShaders(int prim, VertexDecoder *decoder, VulkanVer
 }
 
 std::vector<std::string> ShaderManagerVulkan::DebugGetShaderIDs(DebugShaderType type) {
-    std::lock_guard<std::mutex> guard(cacheLock_);
     std::vector<std::string> ids;
     switch (type) {
     case SHADER_TYPE_VERTEX:
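All these cacheLock_ guards existed solely because the loader thread used to insert into vsCache_/fsCache_/gsCache_ while the render thread read them; once loading happens on one thread, each guard is pure overhead, so the mutex is deleted here, from the LoadCache hunks that follow, and from the header's getters further below. The rule in sketch form - SimpleCache and its names are illustrative, not PPSSPP's types:

#include <map>
#include <mutex>
#include <string>

// Illustrative cache: the mutex is only required while two threads can
// touch map_ concurrently - exactly the situation this commit removes.
class SimpleCache {
public:
    void Insert(int id, const std::string &code) {
        std::lock_guard<std::mutex> guard(lock_);  // needed pre-commit only
        map_[id] = code;
    }
    bool ContainsKey(int id) const {
        std::lock_guard<std::mutex> guard(lock_);  // needed pre-commit only
        return map_.count(id) != 0;
    }

private:
    mutable std::mutex lock_;
    std::map<int, std::string> map_;
};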
@@ -586,8 +581,7 @@ bool ShaderManagerVulkan::LoadCache(FILE *f) {
             continue;
         }
         _assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "VS length error: %d", (int)strlen(codeBuffer_));
-        // Don't add the new shader if already compiled (can happen since this is a background thread).
-        std::lock_guard<std::mutex> guard(cacheLock_);
+        // Don't add the new shader if already compiled - though this should no longer happen.
         if (!vsCache_.ContainsKey(id)) {
             VulkanVertexShader *vs = new VulkanVertexShader(vulkan, id, flags, codeBuffer_, useHWTransform);
             vsCache_.Insert(id, vs);

@@ -611,7 +605,6 @@ bool ShaderManagerVulkan::LoadCache(FILE *f) {
             continue;
         }
         _assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "FS length error: %d", (int)strlen(codeBuffer_));
-        std::lock_guard<std::mutex> guard(cacheLock_);
         if (!fsCache_.ContainsKey(id)) {
             VulkanFragmentShader *fs = new VulkanFragmentShader(vulkan, id, flags, codeBuffer_);
             fsCache_.Insert(id, fs);

@@ -634,7 +627,6 @@ bool ShaderManagerVulkan::LoadCache(FILE *f) {
             continue;
         }
         _assert_msg_(strlen(codeBuffer_) < CODE_BUFFER_SIZE, "GS length error: %d", (int)strlen(codeBuffer_));
-        std::lock_guard<std::mutex> guard(cacheLock_);
         if (!gsCache_.ContainsKey(id)) {
             VulkanGeometryShader *gs = new VulkanGeometryShader(vulkan, id, codeBuffer_);
             gsCache_.Insert(id, gs);
@@ -121,9 +121,9 @@ public:
     int GetNumGeometryShaders() const { return (int)gsCache_.size(); }
 
     // Used for saving/loading the cache. Don't need to be particularly fast.
-    VulkanVertexShader *GetVertexShaderFromID(VShaderID id) { std::lock_guard<std::mutex> guard(cacheLock_); return vsCache_.GetOrNull(id); }
-    VulkanFragmentShader *GetFragmentShaderFromID(FShaderID id) { std::lock_guard<std::mutex> guard(cacheLock_); return fsCache_.GetOrNull(id); }
-    VulkanGeometryShader *GetGeometryShaderFromID(GShaderID id) { std::lock_guard<std::mutex> guard(cacheLock_); return gsCache_.GetOrNull(id); }
+    VulkanVertexShader *GetVertexShaderFromID(VShaderID id) { return vsCache_.GetOrNull(id); }
+    VulkanFragmentShader *GetFragmentShaderFromID(FShaderID id) { return fsCache_.GetOrNull(id); }
+    VulkanGeometryShader *GetGeometryShaderFromID(GShaderID id) { return gsCache_.GetOrNull(id); }
 
     VulkanVertexShader *GetVertexShaderFromModule(VkShaderModule module);
     VulkanFragmentShader *GetFragmentShaderFromModule(VkShaderModule module);

@@ -170,7 +170,6 @@ private:
     GSCache gsCache_;
 
     char *codeBuffer_;
-    std::mutex cacheLock_;
 
     uint64_t uboAlignment_;
     // Uniform block scratchpad. These (the relevant ones) are copied to the current pushbuffer at draw time.
|
Loading…
x
Reference in New Issue
Block a user