@@ -179,14 +179,20 @@ pub struct SyncDownloadProgress {
179179 buckets : BTreeMap < String , BucketProgress > ,
180180}
181181
182+ pub struct SyncProgressFromCheckpoint {
183+ pub progress : SyncDownloadProgress ,
184+ pub needs_counter_reset : bool ,
185+ }
186+
182187impl SyncDownloadProgress {
183188 pub fn for_checkpoint < ' a > (
184189 checkpoint : & OwnedCheckpoint ,
185190 mut local_progress : impl StreamingIterator <
186191 Item = Result < PersistedBucketProgress < ' a > , ResultCode > ,
187192 > ,
188- ) -> Result < Self , ResultCode > {
193+ ) -> Result < SyncProgressFromCheckpoint , ResultCode > {
189194 let mut buckets = BTreeMap :: < String , BucketProgress > :: new ( ) ;
195+ let mut needs_reset = false ;
190196 for bucket in checkpoint. buckets . values ( ) {
191197 buckets. insert (
192198 bucket. bucket . clone ( ) ,
@@ -212,9 +218,24 @@ impl SyncDownloadProgress {
212218
213219 progress. at_last = row. count_at_last ;
214220 progress. since_last = row. count_since_last ;
221+
222+ if progress. target_count < row. count_at_last + row. count_since_last {
223+ needs_reset = true ;
224+ // Either due to a defrag / sync rule deploy or a compaction operation, the size
225+ // of the bucket shrank so much that the local ops exceed the ops in the updated
226+ // bucket. We can't possibly report progress in this case (it would overshoot 100%).
227+ for ( _, progress) in & mut buckets {
228+ progress. at_last = 0 ;
229+ progress. since_last = 0 ;
230+ }
231+ break ;
232+ }
215233 }
216234
217- Ok ( Self { buckets } )
235+ Ok ( SyncProgressFromCheckpoint {
236+ progress : Self { buckets } ,
237+ needs_counter_reset : needs_reset,
238+ } )
218239 }
219240
220241 pub fn increment_download_count ( & mut self , line : & DataLine ) {
0 commit comments