@@ -96,8 +96,10 @@ export class RepoIndexManager {
9696 } ) ;
9797 }
9898
99- public startScheduler ( ) {
99+ public async startScheduler ( ) {
100100 logger . debug ( 'Starting scheduler' ) ;
101+ // Cleanup any orphaned disk resources on startup
102+ await this . cleanupOrphanedDiskResources ( ) ;
101103 this . interval = setIntervalAsync ( async ( ) => {
102104 await this . scheduleIndexJobs ( ) ;
103105 await this . scheduleCleanupJobs ( ) ;
@@ -170,8 +172,6 @@ export class RepoIndexManager {
170172 }
171173
172174 private async scheduleCleanupJobs ( ) {
173- await this . cleanupOrphanedDiskResources ( ) ;
174-
175175 const gcGracePeriodMs = new Date ( Date . now ( ) - this . settings . repoGarbageCollectionGracePeriodMs ) ;
176176 const timeoutDate = new Date ( Date . now ( ) - this . settings . repoIndexTimeoutMs ) ;
177177
@@ -647,17 +647,26 @@ export class RepoIndexManager {
647647 // Dirs are named by repoId: DATA_CACHE_DIR/repos/<repoId>/
648648 if ( existsSync ( REPOS_CACHE_DIR ) ) {
649649 const entries = await readdir ( REPOS_CACHE_DIR ) ;
650+ const repoIdToPath = new Map < number , string > ( ) ;
650651 for ( const entry of entries ) {
651652 const repoPath = `${ REPOS_CACHE_DIR } /${ entry } ` ;
652653 const repoId = getRepoIdFromPath ( repoPath ) ;
653- if ( repoId === undefined ) {
654- continue ;
654+ if ( repoId !== undefined ) {
655+ repoIdToPath . set ( repoId , repoPath ) ;
655656 }
657+ }
656658
657- const repo = await this . db . repo . findUnique ( { where : { id : repoId } } ) ;
658- if ( ! repo ) {
659- logger . info ( `Removing orphaned repo directory with no DB record: ${ repoPath } ` ) ;
660- await rm ( repoPath , { recursive : true , force : true } ) ;
659+ if ( repoIdToPath . size > 0 ) {
660+ const existingRepos = await this . db . repo . findMany ( {
661+ where : { id : { in : [ ...repoIdToPath . keys ( ) ] } } ,
662+ select : { id : true } ,
663+ } ) ;
664+ const existingIds = new Set ( existingRepos . map ( r => r . id ) ) ;
665+ for ( const [ repoId , repoPath ] of repoIdToPath ) {
666+ if ( ! existingIds . has ( repoId ) ) {
667+ logger . info ( `Removing orphaned repo directory with no DB record: ${ repoPath } ` ) ;
668+ await rm ( repoPath , { recursive : true , force : true } ) ;
669+ }
661670 }
662671 }
663672 }
@@ -666,16 +675,30 @@ export class RepoIndexManager {
666675 // Shard files are prefixed with <orgId>_<repoId>: DATA_CACHE_DIR/index/<orgId>_<repoId>_*.zoekt
667676 if ( existsSync ( INDEX_CACHE_DIR ) ) {
668677 const entries = await readdir ( INDEX_CACHE_DIR ) ;
678+ const repoIdToShards = new Map < number , string [ ] > ( ) ;
669679 for ( const entry of entries ) {
670680 const repoId = getRepoIdFromShardFileName ( entry ) ;
671- if ( repoId === undefined ) {
672- continue ;
681+ if ( repoId !== undefined ) {
682+ const shards = repoIdToShards . get ( repoId ) ?? [ ] ;
683+ shards . push ( entry ) ;
684+ repoIdToShards . set ( repoId , shards ) ;
673685 }
674- const repo = await this . db . repo . findUnique ( { where : { id : repoId } } ) ;
675- if ( ! repo ) {
676- const shardPath = `${ INDEX_CACHE_DIR } /${ entry } ` ;
677- logger . info ( `Removing orphaned index shard with no DB record: ${ shardPath } ` ) ;
678- await rm ( shardPath , { force : true } ) ;
686+ }
687+
688+ if ( repoIdToShards . size > 0 ) {
689+ const existingRepos = await this . db . repo . findMany ( {
690+ where : { id : { in : [ ...repoIdToShards . keys ( ) ] } } ,
691+ select : { id : true } ,
692+ } ) ;
693+ const existingIds = new Set ( existingRepos . map ( r => r . id ) ) ;
694+ for ( const [ repoId , shards ] of repoIdToShards ) {
695+ if ( ! existingIds . has ( repoId ) ) {
696+ for ( const entry of shards ) {
697+ const shardPath = `${ INDEX_CACHE_DIR } /${ entry } ` ;
698+ logger . info ( `Removing orphaned index shard with no DB record: ${ shardPath } ` ) ;
699+ await rm ( shardPath , { force : true } ) ;
700+ }
701+ }
679702 }
680703 }
681704 }
0 commit comments