@@ -187,6 +187,18 @@ GPUUploadManagerImpl::Page::SealStatus GPUUploadManagerImpl::Page::TrySeal()
187187 }
188188}
189189
190+ void GPUUploadManagerImpl::Page::Unseal ()
191+ {
192+ VERIFY_EXPR (DbgGetWriterCount () == 0 );
193+ m_State.fetch_and (WRITER_MASK, std::memory_order_release);
194+ }
195+
196+ void GPUUploadManagerImpl::Page::Seal ()
197+ {
198+ VERIFY_EXPR (DbgGetWriterCount () == 0 );
199+ m_State.fetch_or (SEALED_BIT, std::memory_order_release);
200+ }
201+
190202bool GPUUploadManagerImpl::Page::ScheduleBufferUpdate (const ScheduleBufferUpdateInfo& UpdateInfo)
191203{
192204 VERIFY_EXPR (DbgGetWriterCount () > 0 );
@@ -286,7 +298,7 @@ void GPUUploadManagerImpl::Page::Reset(IDeviceContext* pContext)
286298 VERIFY (m_PendingOps.IsEmpty (), " All pending operations must be executed before resetting the page" );
287299
288300 m_Offset.store (0 );
289- m_State.store (0 );
301+ m_State.store (SEALED_BIT );
290302 m_NumPendingOps.store (0 );
291303 m_Enqueued.store (false );
292304 m_FenceValue = 0 ;
@@ -396,6 +408,7 @@ GPUUploadManagerImpl::GPUUploadManagerImpl(IReferenceCounters* pRefCounters, con
396408 // Create at least one page.
397409 if (Page* pPage = CreatePage (CI.pContext ))
398410 {
411+ pPage->Unseal ();
399412 m_pCurrentPage.store (pPage, std::memory_order_release);
400413 }
401414 else
@@ -558,14 +571,7 @@ void GPUUploadManagerImpl::ScheduleBufferUpdate(const ScheduleBufferUpdateInfo&
558571 }
559572 };
560573
561- // Prevent ABA-style bug when pages are reset and reused
562- // * Thread T1 loads P0 as current, then gets descheduled before TryBeginWriting().
563- // * Render thread swaps current to P1, seals P0, sees it has 0 ops, calls Reset(nullptr)
564- // which clears SEALED_BIT, and pushes P0 to FreePages
565- // * T1 resumes and calls P0->TryBeginWriting() and succeeds (because SEALED_BIT is now cleared).
566- // * T1 schedules an update into a page that is not current and is simultaneously sitting in the free list,
567- // potentially being popped/installed elsewhere.
568- // Validate that the page is still current to avoid this scenario.
574+ // Prevent an ABA-style bug when pages are reset and reused by validating that the page is still current.
569575 if (P != m_pCurrentPage.load (std::memory_order_acquire))
570576 {
571577 EndWriting ();
@@ -646,6 +652,7 @@ bool GPUUploadManagerImpl::TryRotatePage(IDeviceContext* pContext, Page* Expecte
646652 if (!m_pCurrentPage.compare_exchange_strong (Cur, Fresh, std::memory_order_acq_rel))
647653 {
648654 // Lost the race: put Fresh back
655+ Fresh->Seal ();
649656 m_FreePages.Push (Fresh);
650657 return true ; // Rotation happened by someone else
651658 }
@@ -813,6 +820,19 @@ GPUUploadManagerImpl::Page* GPUUploadManagerImpl::AcquireFreePage(IDeviceContext
813820
814821 if (P != nullptr )
815822 {
823+ // Unseal the page only when we make it current to prevent any accidental writes in a scenario like this:
824+ // * Thread T1 runs GPUUploadManagerImpl::ScheduleBufferUpdate:
825+ // * loads P0 as current, then gets descheduled before TryBeginWriting().
826+ // * Render thread swaps current to P1, seals P0, sees it has 0 ops, calls Reset(nullptr), and pushes P0 to FreePages
827+ // * T1 resumes and calls P0->TryBeginWriting() and succeeds (if SEALED_BIT is cleared).
828+ // * T1 schedules an update into a page that is not current and is simultaneously sitting in the free list,
829+ // potentially being popped/installed elsewhere.
830+ P->Unseal ();
831+
832+ // Note that we MUST unseal the page BEFORE we make it current because otherwise
833+ // GPUUploadManagerImpl::ScheduleBufferUpdate may find a sealed page and try to
834+ // rotate it.
835+
816836 const Uint32 PageSize = P->GetSize ();
817837 // Clear if the page is larger than the max pending update
818838 for (;;)
0 commit comments