@@ -187,6 +187,18 @@ GPUUploadManagerImpl::Page::SealStatus GPUUploadManagerImpl::Page::TrySeal()
187187 }
188188}
189189
// Re-opens a sealed page for writers by clearing every state bit outside the
// writer-count field (which necessarily drops SEALED_BIT, set by Seal()/Reset()).
// Caller contract: no writer may be active -- asserted below via DbgGetWriterCount().
// NOTE(review): fetch_and(WRITER_MASK) assumes SEALED_BIT (and any other flag bits)
// lie outside the writer-count bits -- confirm against the m_State bit layout,
// which is not visible in this chunk. The release ordering publishes the unsealed
// state to threads that subsequently acquire-load m_State (e.g. TryBeginWriting).
190+ void GPUUploadManagerImpl::Page::Unseal ()
191+ {
192+ VERIFY_EXPR (DbgGetWriterCount () == 0 );
193+ m_State.fetch_and (WRITER_MASK, std::memory_order_release);
194+ }
195+
// Marks the page as sealed so later TryBeginWriting() attempts fail, without
// disturbing the writer-count bits (fetch_or touches only SEALED_BIT).
// Caller contract: no writer may be active -- asserted below via DbgGetWriterCount().
// Release ordering pairs with acquire loads of m_State elsewhere, so a thread that
// observes SEALED_BIT also observes all writes made before sealing.
196+ void GPUUploadManagerImpl::Page::Seal ()
197+ {
198+ VERIFY_EXPR (DbgGetWriterCount () == 0 );
199+ m_State.fetch_or (SEALED_BIT, std::memory_order_release);
200+ }
201+
190202bool GPUUploadManagerImpl::Page::ScheduleBufferUpdate (const ScheduleBufferUpdateInfo& UpdateInfo)
191203{
192204 VERIFY_EXPR (DbgGetWriterCount () > 0 );
@@ -286,7 +298,8 @@ void GPUUploadManagerImpl::Page::Reset(IDeviceContext* pContext)
286298 VERIFY (m_PendingOps.IsEmpty (), " All pending operations must be executed before resetting the page" );
287299
288300 m_Offset.store (0 );
289- m_State.store (0 );
301+ // Keep the page sealed until we make it current to prevent any accidental writes.
302+ m_State.store (SEALED_BIT);
290303 m_NumPendingOps.store (0 );
291304 m_Enqueued.store (false );
292305 m_FenceValue = 0 ;
@@ -396,6 +409,7 @@ GPUUploadManagerImpl::GPUUploadManagerImpl(IReferenceCounters* pRefCounters, con
396409 // Create at least one page.
397410 if (Page* pPage = CreatePage (CI.pContext ))
398411 {
412+ pPage->Unseal ();
399413 m_pCurrentPage.store (pPage, std::memory_order_release);
400414 }
401415 else
@@ -558,14 +572,7 @@ void GPUUploadManagerImpl::ScheduleBufferUpdate(const ScheduleBufferUpdateInfo&
558572 }
559573 };
560574
561- // Prevent ABA-style bug when pages are reset and reused
562- // * Thread T1 loads P0 as current, then gets descheduled before TryBeginWriting().
563- // * Render thread swaps current to P1, seals P0, sees it has 0 ops, calls Reset(nullptr)
564- // which clears SEALED_BIT, and pushes P0 to FreePages
565- // * T1 resumes and calls P0->TryBeginWriting() and succeeds (because SEALED_BIT is now cleared).
566- // * T1 schedules an update into a page that is not current and is simultaneously sitting in the free list,
567- // potentially being popped/installed elsewhere.
568- // Validate that the page is still current to avoid this scenario.
575+ // Prevent ABA-style bug when pages are reset and reused by validating that the page is still current.
569576 if (P != m_pCurrentPage.load (std::memory_order_acquire))
570577 {
571578 EndWriting ();
@@ -646,6 +653,7 @@ bool GPUUploadManagerImpl::TryRotatePage(IDeviceContext* pContext, Page* Expecte
646653 if (!m_pCurrentPage.compare_exchange_strong (Cur, Fresh, std::memory_order_acq_rel))
647654 {
648655 // Lost the race: put Fresh back
656+ Fresh->Seal ();
649657 m_FreePages.Push (Fresh);
650658 return true ; // Rotation happened by someone else
651659 }
@@ -813,6 +821,19 @@ GPUUploadManagerImpl::Page* GPUUploadManagerImpl::AcquireFreePage(IDeviceContext
813821
814822 if (P != nullptr )
815823 {
824+ // Unseal the page only when we make it current, to prevent accidental writes in a scenario like this:
825+ // * Thread T1 runs GPUUploadManagerImpl::ScheduleBufferUpdate:
826+ // * loads P0 as current, then gets descheduled before TryBeginWriting().
827+ // * Render thread swaps current to P1, seals P0, sees it has 0 ops, calls Reset(nullptr), and pushes P0 to FreePages
828+ // * T1 resumes and calls P0->TryBeginWriting() and succeeds (if SEALED_BIT is cleared).
829+ // * T1 schedules an update into a page that is not current and is simultaneously sitting in the free list,
830+ // potentially being popped/installed elsewhere.
831+ P->Unseal ();
832+
833+ // Note that we MUST unseal the page BEFORE we make it current because otherwise
834+ // GPUUploadManagerImpl::ScheduleBufferUpdate may find a sealed page and try to
835+ // rotate it.
836+
816837 const Uint32 PageSize = P->GetSize ();
817838 // Clear if the page is larger than the max pending update
818839 for (;;)
0 commit comments