Vulkan GraphicsPipeline: Fix an old and likely very rare race condition.

This commit is contained in:
Henrik Rydgård 2024-10-18 14:15:05 +02:00
parent bcb83a2ad1
commit d161388b21
3 changed files with 29 additions and 24 deletions

View File

@@ -1061,17 +1061,21 @@ void VulkanQueueRunner::PerformRenderPass(const VKRStep &step, VkCommandBuffer c
"expected %d sample count, got %d", fbSampleCount, graphicsPipeline->SampleCount()); "expected %d sample count, got %d", fbSampleCount, graphicsPipeline->SampleCount());
} }
if (!graphicsPipeline->pipeline[(size_t)rpType]) { VkPipeline pipeline;
// NOTE: If render steps got merged, it can happen that, as they ended during recording,
// they didn't know their final render pass type so they created the wrong pipelines in EndCurRenderStep().
// Unfortunately I don't know if we can fix it in any more sensible place than here.
// Maybe a middle pass. But let's try to just block and compile here for now, this doesn't
// happen all that much.
graphicsPipeline->pipeline[(size_t)rpType] = Promise<VkPipeline>::CreateEmpty();
graphicsPipeline->Create(vulkan_, renderPass->Get(vulkan_, rpType, fbSampleCount), rpType, fbSampleCount, time_now_d(), -1);
}
VkPipeline pipeline = graphicsPipeline->pipeline[(size_t)rpType]->BlockUntilReady(); {
std::lock_guard<std::mutex> lock(graphicsPipeline->mutex_);
if (!graphicsPipeline->pipeline[(size_t)rpType]) {
// NOTE: If render steps got merged, it can happen that, as they ended during recording,
// they didn't know their final render pass type so they created the wrong pipelines in EndCurRenderStep().
// Unfortunately I don't know if we can fix it in any more sensible place than here.
// Maybe a middle pass. But let's try to just block and compile here for now, this doesn't
// happen all that much.
graphicsPipeline->pipeline[(size_t)rpType] = Promise<VkPipeline>::CreateEmpty();
graphicsPipeline->Create(vulkan_, renderPass->Get(vulkan_, rpType, fbSampleCount), rpType, fbSampleCount, time_now_d(), -1);
}
pipeline = graphicsPipeline->pipeline[(size_t)rpType]->BlockUntilReady();
}
if (pipeline != VK_NULL_HANDLE) { if (pipeline != VK_NULL_HANDLE) {
vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline); vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);

View File

@@ -349,7 +349,7 @@ bool VulkanRenderManager::CreateBackbuffers() {
void VulkanRenderManager::StartThreads() { void VulkanRenderManager::StartThreads() {
{ {
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
_assert_(compileQueue_.empty()); _assert_(compileQueue_.empty());
} }
@@ -395,7 +395,7 @@ void VulkanRenderManager::StopThreads() {
} }
{ {
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
runCompileThread_ = false; // Compiler and present thread both look at this bool. runCompileThread_ = false; // Compiler and present thread both look at this bool.
_assert_(compileThread_.joinable()); _assert_(compileThread_.joinable());
compileCond_.notify_one(); compileCond_.notify_one();
@@ -410,7 +410,7 @@ void VulkanRenderManager::StopThreads() {
CreateMultiPipelinesTask::WaitForAll(); CreateMultiPipelinesTask::WaitForAll();
{ {
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
_assert_(compileQueue_.empty()); _assert_(compileQueue_.empty());
} }
} }
@@ -425,7 +425,7 @@ void VulkanRenderManager::DestroyBackbuffers() {
void VulkanRenderManager::CheckNothingPending() { void VulkanRenderManager::CheckNothingPending() {
_assert_(pipelinesToCheck_.empty()); _assert_(pipelinesToCheck_.empty());
{ {
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
_assert_(compileQueue_.empty()); _assert_(compileQueue_.empty());
} }
} }
@@ -434,7 +434,7 @@ VulkanRenderManager::~VulkanRenderManager() {
INFO_LOG(Log::G3D, "VulkanRenderManager destructor"); INFO_LOG(Log::G3D, "VulkanRenderManager destructor");
{ {
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
_assert_(compileQueue_.empty()); _assert_(compileQueue_.empty());
} }
@@ -462,7 +462,7 @@ void VulkanRenderManager::CompileThreadFunc() {
bool exitAfterCompile = false; bool exitAfterCompile = false;
std::vector<CompileQueueEntry> toCompile; std::vector<CompileQueueEntry> toCompile;
{ {
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
while (compileQueue_.empty() && runCompileThread_) { while (compileQueue_.empty() && runCompileThread_) {
compileCond_.wait(lock); compileCond_.wait(lock);
} }
@@ -521,7 +521,7 @@ void VulkanRenderManager::CompileThreadFunc() {
sleep_ms(1); sleep_ms(1);
} }
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
_assert_(compileQueue_.empty()); _assert_(compileQueue_.empty());
} }
@@ -794,7 +794,7 @@ VKRGraphicsPipeline *VulkanRenderManager::CreateGraphicsPipeline(VKRGraphicsPipe
VKRRenderPassStoreAction::STORE, VKRRenderPassStoreAction::DONT_CARE, VKRRenderPassStoreAction::DONT_CARE, VKRRenderPassStoreAction::STORE, VKRRenderPassStoreAction::DONT_CARE, VKRRenderPassStoreAction::DONT_CARE,
}; };
VKRRenderPass *compatibleRenderPass = queueRunner_.GetRenderPass(key); VKRRenderPass *compatibleRenderPass = queueRunner_.GetRenderPass(key);
std::unique_lock<std::mutex> lock(compileMutex_); std::unique_lock<std::mutex> lock(compileQueueMutex_);
bool needsCompile = false; bool needsCompile = false;
for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) { for (size_t i = 0; i < (size_t)RenderPassType::TYPE_COUNT; i++) {
if (!(variantBitmask & (1 << i))) if (!(variantBitmask & (1 << i)))
@@ -865,13 +865,14 @@ void VulkanRenderManager::EndCurRenderStep() {
VkSampleCountFlagBits sampleCount = curRenderStep_->render.framebuffer ? curRenderStep_->render.framebuffer->sampleCount : VK_SAMPLE_COUNT_1_BIT; VkSampleCountFlagBits sampleCount = curRenderStep_->render.framebuffer ? curRenderStep_->render.framebuffer->sampleCount : VK_SAMPLE_COUNT_1_BIT;
compileMutex_.lock(); compileQueueMutex_.lock();
bool needsCompile = false; bool needsCompile = false;
for (VKRGraphicsPipeline *pipeline : pipelinesToCheck_) { for (VKRGraphicsPipeline *pipeline : pipelinesToCheck_) {
if (!pipeline) { if (!pipeline) {
// Not good, but let's try not to crash. // Not good, but let's try not to crash.
continue; continue;
} }
std::lock_guard<std::mutex> lock(pipeline->mutex_);
if (!pipeline->pipeline[(size_t)rpType]) { if (!pipeline->pipeline[(size_t)rpType]) {
pipeline->pipeline[(size_t)rpType] = Promise<VkPipeline>::CreateEmpty(); pipeline->pipeline[(size_t)rpType] = Promise<VkPipeline>::CreateEmpty();
_assert_(renderPass); _assert_(renderPass);
@@ -881,7 +882,7 @@ void VulkanRenderManager::EndCurRenderStep() {
} }
if (needsCompile) if (needsCompile)
compileCond_.notify_one(); compileCond_.notify_one();
compileMutex_.unlock(); compileQueueMutex_.unlock();
pipelinesToCheck_.clear(); pipelinesToCheck_.clear();
// We don't do this optimization for very small targets, probably not worth it. // We don't do this optimization for very small targets, probably not worth it.

View File

@@ -137,11 +137,11 @@ struct VKRGraphicsPipeline {
VKRGraphicsPipelineDesc *desc = nullptr; VKRGraphicsPipelineDesc *desc = nullptr;
Promise<VkPipeline> *pipeline[(size_t)RenderPassType::TYPE_COUNT]{}; Promise<VkPipeline> *pipeline[(size_t)RenderPassType::TYPE_COUNT]{};
std::mutex mutex_; // protects the pipeline array
VkSampleCountFlagBits SampleCount() const { return sampleCount_; } VkSampleCountFlagBits SampleCount() const { return sampleCount_; }
const char *Tag() const { return tag_.c_str(); } const char *Tag() const { return tag_.c_str(); }
private: private:
void DestroyVariantsInstant(VkDevice device); void DestroyVariantsInstant(VkDevice device);
@@ -285,9 +285,9 @@ public:
void ReportBadStateForDraw(); void ReportBadStateForDraw();
void NudgeCompilerThread() { void NudgeCompilerThread() {
compileMutex_.lock(); compileQueueMutex_.lock();
compileCond_.notify_one(); compileCond_.notify_one();
compileMutex_.unlock(); compileQueueMutex_.unlock();
} }
void AssertInRenderPass() const { void AssertInRenderPass() const {
@@ -615,7 +615,7 @@ private:
std::thread compileThread_; std::thread compileThread_;
// Sync // Sync
std::condition_variable compileCond_; std::condition_variable compileCond_;
std::mutex compileMutex_; std::mutex compileQueueMutex_;
std::vector<CompileQueueEntry> compileQueue_; std::vector<CompileQueueEntry> compileQueue_;
// Thread for measuring presentation delay. // Thread for measuring presentation delay.