30 commits
a5f7bc5
feat: implement fast FQN prefix-based bulk deletion to fix race condi…
mohityadav766 Apr 6, 2026
b9eb147
fix: backfill entity_relationship FQN hashes for all entity types inc…
mohityadav766 Apr 9, 2026
5a0d7d2
refactor: move entity_relationship FQN hash migrations from v1.14.1 t…
mohityadav766 Apr 9, 2026
4c2c690
fix: replace paginated backfill with direct correlated-subquery UPDATE
mohityadav766 Apr 9, 2026
6b266b8
test: add unit tests for v1.13.0 FQN hash backfill migration
mohityadav766 Apr 9, 2026
40583bc
test: add integration and benchmark tests for FQN prefix deletion
mohityadav766 Apr 9, 2026
7fe9212
feat: expose prefix hard delete at database and schema hierarchy levels
mohityadav766 Apr 9, 2026
9cd10af
refactor: move prefix hard delete endpoint to EntityResource base class
mohityadav766 Apr 9, 2026
8343288
fix: await async deletion before asserting in PrefixDeletionIT
mohityadav766 Apr 9, 2026
29cb761
fix: initialize LockManagerInitializer on startup so PrefixDeletionSe…
mohityadav766 Apr 9, 2026
9970d96
fix: use unique names in PrefixDeletionIT/BenchmarkIT to avoid 409 co…
mohityadav766 Apr 9, 2026
670e3d2
feat: scale benchmark to 10k entities and measure actual async deleti…
mohityadav766 Apr 9, 2026
7181840
perf: parallelize entity seeding in PrefixDeletionBenchmarkIT
mohityadav766 Apr 9, 2026
64735da
fix: use VARCHAR(36) for UUID columns in entity_deletion_lock on Post…
mohityadav766 Apr 9, 2026
da2f672
Revert "fix: use VARCHAR(36) for UUID columns in entity_deletion_lock…
mohityadav766 Apr 9, 2026
170e0ed
fix: use VARCHAR(36) for UUID columns in entity_deletion_lock on Post…
mohityadav766 Apr 9, 2026
a5625ec
fix: use VARCHAR(36) for UUID columns in entity_deletion_lock on Post…
mohityadav766 Apr 9, 2026
f13f96a
fix: batch IN-clause deletes to avoid PostgreSQL 65535 parameter limit
mohityadav766 Apr 9, 2026
ca4183b
refactor: move prefix hard delete endpoint to DatabaseServiceResource
mohityadav766 Apr 9, 2026
9321871
feat: expose DELETE /prefix/{id} endpoint on all EntityResource subcl…
mohityadav766 Apr 9, 2026
a570eb1
fix: add missing UUID and Response imports in two resource classes
mohityadav766 Apr 9, 2026
01be3c6
fix: call super.deletePrefixHardById to prevent StackOverflowError
mohityadav766 Apr 10, 2026
7bd398b
fix: chunk FeedDAO.findByEntityIds to avoid PostgreSQL 65535 paramete…
mohityadav766 Apr 10, 2026
a70140b
feat: clean up Elasticsearch index during prefix hard deletion
mohityadav766 Apr 10, 2026
16db11d
feat: run pre/post delete hooks for all entity types during prefix de…
mohityadav766 Apr 10, 2026
682f0f6
feat: run pre/post hooks for all entity types during prefix deletion
mohityadav766 Apr 10, 2026
9227427
fix: use information_schema conditional for ADD COLUMN in MySQL migra…
mohityadav766 Apr 10, 2026
9380afa
fix: drop IF NOT EXISTS from CREATE INDEX in MySQL migration
mohityadav766 Apr 11, 2026
9c3e4d6
fix: simplify MySQL migration to plain ALTER TABLE and CREATE INDEX
mohityadav766 Apr 11, 2026
e13ce2e
Merge branch 'main' into mohit/35dc-improve-deletion
mohityadav766 Apr 13, 2026
10 changes: 10 additions & 0 deletions bootstrap/sql/migrations/native/1.13.0/mysql/schemaChanges.sql
@@ -129,3 +129,13 @@ SELECT ue.id, re.id, 'user', 'role', 10
FROM user_entity ue, role_entity re
WHERE ue.name = 'mcpapplicationbot'
AND re.name = 'ApplicationBotImpersonationRole';

-- Add FQN hash columns to entity_relationship to enable fast prefix-based bulk deletion.
-- This allows deleting all relationships for an entire entity subtree in a single indexed query
-- instead of walking the tree entity-by-entity.
ALTER TABLE entity_relationship
ADD COLUMN fromFQNHash VARCHAR(768) DEFAULT NULL,
ADD COLUMN toFQNHash VARCHAR(768) DEFAULT NULL;

CREATE INDEX idx_er_from_fqn_hash ON entity_relationship (fromFQNHash(768));
CREATE INDEX idx_er_to_fqn_hash ON entity_relationship (toFQNHash(768));
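
The migration comment above explains the goal; here is a minimal JDBI-style sketch of the kind of query the new columns and indexes enable (the interface, method, and bindings are illustrative assumptions, not the PR's actual DAO):

import org.jdbi.v3.sqlobject.customizer.Bind;
import org.jdbi.v3.sqlobject.statement.SqlUpdate;

// Hypothetical DAO sketch: with fromFQNHash/toFQNHash indexed, all relationships
// under an entity subtree are removed in one statement instead of a per-entity walk.
public interface EntityRelationshipPrefixDao {

  @SqlUpdate(
      "DELETE FROM entity_relationship "
          + "WHERE fromFQNHash = :hash OR fromFQNHash LIKE :prefix "
          + "OR toFQNHash = :hash OR toFQNHash LIKE :prefix")
  int deleteByFqnHashPrefix(@Bind("hash") String hash, @Bind("prefix") String prefix);
}

Assuming FQN hashes are dot-separated segments, a caller would pass the root entity's hash plus hash + ".%" so both the root and its whole subtree match; the wildcard stays at the end of the pattern, which prefix index scans can exploit.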
17 changes: 17 additions & 0 deletions bootstrap/sql/migrations/native/1.13.0/postgres/schemaChanges.sql
@@ -150,3 +150,20 @@ FROM user_entity ue, role_entity re
WHERE ue.name = 'mcpapplicationbot'
AND re.name = 'ApplicationBotImpersonationRole'
ON CONFLICT DO NOTHING;

-- Add FQN hash columns to entity_relationship to enable fast prefix-based bulk deletion.
-- This allows deleting all relationships for an entire entity subtree in a single indexed query
-- instead of walking the tree entity-by-entity.
ALTER TABLE entity_relationship
ADD COLUMN IF NOT EXISTS fromFQNHash VARCHAR(768) DEFAULT NULL,
ADD COLUMN IF NOT EXISTS toFQNHash VARCHAR(768) DEFAULT NULL;

CREATE INDEX IF NOT EXISTS idx_er_from_fqn_hash ON entity_relationship (fromFQNHash);
CREATE INDEX IF NOT EXISTS idx_er_to_fqn_hash ON entity_relationship (toFQNHash);
Comment on lines +153 to +162
Copilot AI Apr 9, 2026

These schema changes are being appended to the existing 1.13.0 migration. If a database was already upgraded past 1.13.0, the migration framework will not re-run 1.13.0, so the new columns/indexes will never be applied.

To ensure upgrades work correctly, the DDL should be added to the current/new migration version (e.g. 1.14.1) rather than modifying an older migration script.

Suggested change
-- Add FQN hash columns to entity_relationship to enable fast prefix-based bulk deletion.
-- This allows deleting all relationships for an entire entity subtree in a single indexed query
-- instead of walking the tree entity-by-entity.
ALTER TABLE entity_relationship
ADD COLUMN IF NOT EXISTS fromFQNHash VARCHAR(768) DEFAULT NULL,
ADD COLUMN IF NOT EXISTS toFQNHash VARCHAR(768) DEFAULT NULL;
CREATE INDEX IF NOT EXISTS idx_er_from_fqn_hash ON entity_relationship (fromFQNHash);
CREATE INDEX IF NOT EXISTS idx_er_to_fqn_hash ON entity_relationship (toFQNHash);

Comment on lines +154 to +162
Copilot AI Apr 9, 2026

These new columns/indexes are being added to the 1.13.0 migration. Existing installations that already applied 1.13.0 will NOT re-run it, so they will miss fromFQNHash/toFQNHash entirely. The DDL needs to live in the current release migration (e.g., 1.14.1) instead of retroactively modifying an old migration.

Suggested change
-- Add FQN hash columns to entity_relationship to enable fast prefix-based bulk deletion.
-- This allows deleting all relationships for an entire entity subtree in a single indexed query
-- instead of walking the tree entity-by-entity.
ALTER TABLE entity_relationship
ADD COLUMN IF NOT EXISTS fromFQNHash VARCHAR(768) DEFAULT NULL,
ADD COLUMN IF NOT EXISTS toFQNHash VARCHAR(768) DEFAULT NULL;
CREATE INDEX IF NOT EXISTS idx_er_from_fqn_hash ON entity_relationship (fromFQNHash);
CREATE INDEX IF NOT EXISTS idx_er_to_fqn_hash ON entity_relationship (toFQNHash);
-- NOTE:
-- Do not add new entity_relationship schema changes to this historical 1.13.0 migration.
-- The fromFQNHash/toFQNHash columns and their indexes must be added in the current
-- release migration so existing installations receive them during upgrade.


Comment on lines +154 to +163
Copilot AI Apr 11, 2026

Adding new DDL to an older, already-shipped migration version (1.13.0) is unsafe for upgrades: databases that have already applied 1.13.0 will not re-run this file, so they’ll never get the new fromFQNHash/toFQNHash columns/indexes required by prefix deletion. Please introduce this DDL in the latest migration version (and keep 1.13.0 immutable) so upgrades are deterministic.

Suggested change
-- Add FQN hash columns to entity_relationship to enable fast prefix-based bulk deletion.
-- This allows deleting all relationships for an entire entity subtree in a single indexed query
-- instead of walking the tree entity-by-entity.
ALTER TABLE entity_relationship
ADD COLUMN IF NOT EXISTS fromFQNHash VARCHAR(768) DEFAULT NULL,
ADD COLUMN IF NOT EXISTS toFQNHash VARCHAR(768) DEFAULT NULL;
CREATE INDEX IF NOT EXISTS idx_er_from_fqn_hash ON entity_relationship (fromFQNHash);
CREATE INDEX IF NOT EXISTS idx_er_to_fqn_hash ON entity_relationship (toFQNHash);

-- Fix entity_deletion_lock column types: id and entityId were created as native UUID
-- in 1.9.0 but the rest of the codebase uses VARCHAR(36) for UUID columns so that
-- BindUUID (which binds via UUID.toString()) can compare them without an explicit cast.
ALTER TABLE entity_deletion_lock
ALTER COLUMN id TYPE VARCHAR(36) USING id::VARCHAR,
ALTER COLUMN entityId TYPE VARCHAR(36) USING entityId::VARCHAR;
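
To make the rationale in the comment above concrete, here is a minimal sketch of the comparison it describes, assuming plain JDBI (class, query, and method are illustrative, not the PR's code):

import java.util.UUID;
import org.jdbi.v3.core.Jdbi;

public class DeletionLockLookup {
  // BindUUID-style parameters arrive as UUID.toString(), i.e. text. A native Postgres
  // UUID column rejects "uuid = varchar" comparisons without an explicit ::uuid cast,
  // while a VARCHAR(36) column compares directly, hence the column-type realignment.
  static boolean lockExists(Jdbi jdbi, UUID entityId) {
    return jdbi.withHandle(
            handle ->
                handle
                    .createQuery("SELECT COUNT(*) FROM entity_deletion_lock WHERE entityId = :id")
                    .bind("id", entityId.toString()) // text parameter, matches VARCHAR(36)
                    .mapTo(Integer.class)
                    .one())
        > 0;
  }
}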
@@ -0,0 +1 @@
-- No schema changes in this version.
Copilot AI Apr 9, 2026

The PR description indicates the new entity_relationship columns/indexes are introduced in 1.14.1, but this 1.14.1 migration is marked as having no schema changes. As written, upgrading from 1.14.0 → 1.14.1 would not apply the required DDL.

Move the ALTER TABLE ... ADD COLUMN fromFQNHash/toFQNHash + index creation into the 1.14.1 schemaChanges files (or whichever version is intended to ship this feature).

Copilot AI Apr 9, 2026

PR description states 1.14.1 adds fromFQNHash/toFQNHash, but this migration is empty. Ensure the schema changes are applied in the current migration version, not only in bootstrap/sql/schema/*.sql or older migrations.

@@ -0,0 +1 @@
-- No schema changes in this version.
Copilot AI Apr 9, 2026

The PR description indicates the new entity_relationship columns/indexes are introduced in 1.14.1, but this 1.14.1 migration is marked as having no schema changes. As written, upgrading from 1.14.0 → 1.14.1 would not apply the required DDL.

Move the ALTER TABLE ... ADD COLUMN fromFQNHash/toFQNHash + index creation into the 1.14.1 schemaChanges files (or whichever version is intended to ship this feature).

Copilot AI Apr 9, 2026

PR description states 1.14.1 adds fromFQNHash/toFQNHash, but this migration is empty. If 1.14.1 is the intended release vehicle, the DDL must be present here (or in the correct target version), otherwise upgrades from already-migrated installs won't get the schema changes.

6 changes: 5 additions & 1 deletion bootstrap/sql/schema/mysql.sql
@@ -352,9 +352,13 @@ CREATE TABLE `entity_relationship` (
`jsonSchema` varchar(256) DEFAULT NULL,
`json` json DEFAULT NULL,
`deleted` tinyint(1) NOT NULL DEFAULT '0',
`fromFQNHash` varchar(768) DEFAULT NULL,
`toFQNHash` varchar(768) DEFAULT NULL,
PRIMARY KEY (`fromId`,`toId`,`relation`),
KEY `from_index` (`fromId`,`relation`),
KEY `to_index` (`toId`,`relation`)
KEY `to_index` (`toId`,`relation`),
KEY `idx_er_from_fqn_hash` (`fromFQNHash`(768)),
KEY `idx_er_to_fqn_hash` (`toFQNHash`(768))
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
/*!40101 SET character_set_client = @saved_cs_client */;

18 changes: 17 additions & 1 deletion bootstrap/sql/schema/postgres.sql
@@ -325,7 +325,9 @@ CREATE TABLE public.entity_relationship (
relation smallint NOT NULL,
jsonschema character varying(256),
json jsonb,
deleted boolean DEFAULT false NOT NULL
deleted boolean DEFAULT false NOT NULL,
fromfqnhash character varying(768),
tofqnhash character varying(768)
);


@@ -1921,6 +1923,20 @@ CREATE INDEX entity_relationship_from_index ON public.entity_relationship USING
CREATE INDEX entity_relationship_to_index ON public.entity_relationship USING btree (toid, relation);


--
-- Name: idx_er_from_fqn_hash; Type: INDEX; Schema: public; Owner: openmetadata_user
--

CREATE INDEX idx_er_from_fqn_hash ON public.entity_relationship USING btree (fromfqnhash);


--
-- Name: idx_er_to_fqn_hash; Type: INDEX; Schema: public; Owner: openmetadata_user
--

CREATE INDEX idx_er_to_fqn_hash ON public.entity_relationship USING btree (tofqnhash);


--
-- Name: field_relationship_from_index; Type: INDEX; Schema: public; Owner: openmetadata_user
--
@@ -49,6 +49,14 @@ public static DatabaseSchema createSimple(TestNamespace ns) {
return createSimple(ns, service);
}

/**
* Create a schema with a namespaced base name using fluent API.
*/
public static DatabaseSchema createWithName(
TestNamespace ns, String databaseFqn, String baseName) {
return DatabaseSchemas.create().name(ns.prefix(baseName)).in(databaseFqn).execute();
}

/**
* Create a schema with a custom name using fluent API.
* Useful for tests that need short names to avoid FQN length limits.
@@ -29,4 +29,15 @@ public static Database create(TestNamespace ns, String serviceFqn) {
public static Database createWithName(String serviceFqn, String name) {
return Databases.create().name(name).in(serviceFqn).execute();
}

/**
* Create database with a namespaced base name using fluent API.
*/
public static Database createWithName(TestNamespace ns, String serviceFqn, String baseName) {
return Databases.create()
.name(ns.prefix(baseName))
.in(serviceFqn)
.withDescription("Test database created by integration test")
.execute();
}
}
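
For reference, a hedged usage sketch combining the two new factory overloads (grounded in the benchmark that follows; the class, method, and literals here are illustrative):

import org.openmetadata.it.factories.DatabaseSchemaTestFactory;
import org.openmetadata.it.factories.DatabaseTestFactory;
import org.openmetadata.it.util.TestNamespace;
import org.openmetadata.schema.entity.data.Database;
import org.openmetadata.schema.entity.data.DatabaseSchema;
import org.openmetadata.schema.entity.services.DatabaseService;

class SeedExample {
  // Seeds one namespaced database and schema under an existing service, the same
  // pattern PrefixDeletionBenchmarkIT below applies in parallel at 10k-entity scale.
  static DatabaseSchema seedOne(TestNamespace ns, DatabaseService service) {
    Database db = DatabaseTestFactory.createWithName(ns, service.getFullyQualifiedName(), "db0");
    return DatabaseSchemaTestFactory.createWithName(ns, db.getFullyQualifiedName(), "sc0");
  }
}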
@@ -0,0 +1,257 @@
/*
* Copyright 2021 Collate
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.openmetadata.it.tests;

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.awaitility.Awaitility;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.openmetadata.it.factories.DatabaseSchemaTestFactory;
import org.openmetadata.it.factories.DatabaseServiceTestFactory;
import org.openmetadata.it.factories.DatabaseTestFactory;
import org.openmetadata.it.factories.TableTestFactory;
import org.openmetadata.it.util.SdkClients;
import org.openmetadata.it.util.TestNamespace;
import org.openmetadata.it.util.TestNamespaceExtension;
import org.openmetadata.schema.entity.data.Database;
import org.openmetadata.schema.entity.data.DatabaseSchema;
import org.openmetadata.schema.entity.services.DatabaseService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Benchmark comparing old recursive hard delete vs new FQN prefix hard delete.
*
* <p>Default topology: 5 databases × 5 schemas × 400 tables = 10,000 tables per service
* (~10,031 total entities including service, databases, schemas).
*
* <p>Run manually against a local stack:
*
* <pre>
* mvn verify -pl openmetadata-integration-tests \
*     -Dgroups=benchmark \
*     -Dit.test=PrefixDeletionBenchmarkIT \
*     -Dtest.databases=5 -Dtest.schemas=5 -Dtest.tables=400 -Dtest.seedThreads=32
* </pre>
*
* <p>Tunables: {@code test.databases} (databases per service, default 5), {@code test.schemas}
* (schemas per database, default 5), {@code test.tables} (tables per schema, default 400),
* {@code test.seedThreads} (parallel seed threads, default 32).
*
* <p>NOTE: Setup creates entities in parallel (default 32 threads, tunable via
* -Dtest.seedThreads). At ~50ms/call and 32 threads, 10k tables seed in ~20 s.
*
* <p>Both deletions are timed end-to-end: the old delete is synchronous; the new prefix
* delete is async (202), so we poll until the service is gone before recording elapsed time.
*/
@Tag("benchmark")
@Disabled("Manual benchmark — run explicitly against a local mysql/postgres stack")
@ExtendWith(TestNamespaceExtension.class)
class PrefixDeletionBenchmarkIT {

private static final Logger LOG = LoggerFactory.getLogger(PrefixDeletionBenchmarkIT.class);

private static final int DATABASES_PER_SERVICE = Integer.getInteger("test.databases", 5);
private static final int SCHEMAS_PER_DATABASE = Integer.getInteger("test.schemas", 5);
private static final int TABLES_PER_SCHEMA = Integer.getInteger("test.tables", 400);
private static final int SEED_THREADS = Integer.getInteger("test.seedThreads", 32);

private static final Duration DELETE_POLL_TIMEOUT = Duration.ofMinutes(10);
private static final Duration DELETE_POLL_INTERVAL = Duration.ofSeconds(2);

@BeforeAll
static void setup() {
SdkClients.adminClient();
}

@Test
void benchmark_oldRecursiveHardDelete_vs_newPrefixDelete(TestNamespace ns) throws Exception {
int totalTables = DATABASES_PER_SERVICE * SCHEMAS_PER_DATABASE * TABLES_PER_SCHEMA;
int totalEntities =
1 + DATABASES_PER_SERVICE + DATABASES_PER_SERVICE * SCHEMAS_PER_DATABASE + totalTables;
LOG.info(
"Benchmark topology: {} databases × {} schemas × {} tables = {} tables, {} total entities per service",
DATABASES_PER_SERVICE,
SCHEMAS_PER_DATABASE,
TABLES_PER_SCHEMA,
totalTables,
totalEntities);

DatabaseService oldService = buildHierarchy(ns, "old");
long oldMs = timeOldDelete(oldService);

DatabaseService newService = buildHierarchy(ns, "new");
long newMs = timeNewDelete(newService);

double speedup = (double) oldMs / Math.max(newMs, 1);
LOG.info("=== Deletion Benchmark Results ({} entities per service) ===", totalEntities);
LOG.info(" Old recursive hard delete : {} ms", oldMs);
LOG.info(" New FQN prefix hard delete: {} ms", newMs);
LOG.info(" Speedup : {}x", String.format("%.2f", speedup));
}

private DatabaseService buildHierarchy(TestNamespace ns, String tag) throws Exception {
DatabaseService service = DatabaseServiceTestFactory.createPostgres(ns);
int totalEntities =
1
+ DATABASES_PER_SERVICE
+ DATABASES_PER_SERVICE * SCHEMAS_PER_DATABASE
+ DATABASES_PER_SERVICE * SCHEMAS_PER_DATABASE * TABLES_PER_SCHEMA;
LOG.info(
"[{}] Seeding {} entities under service {} using {} threads ...",
tag,
totalEntities,
service.getName(),
SEED_THREADS);
long seedStart = System.currentTimeMillis();

ExecutorService pool = Executors.newFixedThreadPool(SEED_THREADS);
try {
List<Future<Database>> dbFutures = new ArrayList<>();
for (int d = 0; d < DATABASES_PER_SERVICE; d++) {
final int dIdx = d;
dbFutures.add(
pool.submit(
() ->
DatabaseTestFactory.createWithName(
ns, service.getFullyQualifiedName(), tag + "db" + dIdx)));
}
List<Database> databases = new ArrayList<>();
for (Future<Database> f : dbFutures) {
databases.add(f.get());
}

List<Future<DatabaseSchema>> schemaFutures = new ArrayList<>();
for (int d = 0; d < databases.size(); d++) {
final Database database = databases.get(d);
final int dIdx = d;
for (int s = 0; s < SCHEMAS_PER_DATABASE; s++) {
final int sIdx = s;
schemaFutures.add(
pool.submit(
() ->
DatabaseSchemaTestFactory.createWithName(
ns, database.getFullyQualifiedName(), tag + "sc" + dIdx + "x" + sIdx)));
}
}
List<DatabaseSchema> schemas = new ArrayList<>();
for (Future<DatabaseSchema> f : schemaFutures) {
schemas.add(f.get());
}

List<Future<?>> tableFutures = new ArrayList<>();
for (int s = 0; s < schemas.size(); s++) {
final DatabaseSchema schema = schemas.get(s);
final int sIdx = s;
for (int t = 0; t < TABLES_PER_SCHEMA; t++) {
final int tIdx = t;
tableFutures.add(
pool.submit(
() -> {
TableTestFactory.createWithName(
ns, schema.getFullyQualifiedName(), tag + "tbl" + sIdx + "x" + tIdx);
return null;
}));
}
}
for (Future<?> f : tableFutures) {
f.get();
}
} finally {
pool.shutdown();
pool.awaitTermination(30, TimeUnit.MINUTES);
}

long seedMs = System.currentTimeMillis() - seedStart;
LOG.info(
"[{}] Hierarchy seeded in {} ms ({} ms/entity avg)",
tag,
seedMs,
seedMs / Math.max(totalEntities, 1));
return service;
}

private long timeOldDelete(DatabaseService service) throws Exception {
LOG.info("Timing OLD recursive hard delete for service {} ...", service.getName());
long start = System.currentTimeMillis();

String url =
SdkClients.getServerUrl()
+ "/v1/services/databaseServices/"
+ service.getId()
+ "?hardDelete=true&recursive=true";
sendDelete(url);

long elapsed = System.currentTimeMillis() - start;
LOG.info("OLD recursive hard delete completed in {} ms", elapsed);
return elapsed;
}

private long timeNewDelete(DatabaseService service) throws Exception {
LOG.info("Timing NEW FQN prefix hard delete for service {} ...", service.getName());
long start = System.currentTimeMillis();

String url =
SdkClients.getServerUrl() + "/v1/services/databaseServices/prefix/" + service.getId();
sendDelete(url);

// Prefix delete is async — poll until the service is actually gone so we measure
// real deletion time, not just the time to hand off the job to the executor.
UUID serviceId = service.getId();
Awaitility.await("Wait for prefix deletion of " + service.getName() + " to complete")
.atMost(DELETE_POLL_TIMEOUT)
.pollInterval(DELETE_POLL_INTERVAL)
.until(
() -> {
try {
SdkClients.adminClient().databaseServices().get(serviceId.toString());
return false;
} catch (Exception e) {
return true;
}
});

long elapsed = System.currentTimeMillis() - start;
LOG.info("NEW FQN prefix hard delete completed in {} ms", elapsed);
return elapsed;
}
Comment on lines +198 to +241
Copilot AI Apr 9, 2026

This benchmark measures only HTTP request round-trip time; it doesn't wait for the asynchronous deletion to complete before stopping the timer, so the results will be misleading. Consider polling until the service is actually deleted (or consuming the websocket completion) before recording elapsed time.


private void sendDelete(String url) throws Exception {
HttpRequest request =
HttpRequest.newBuilder()
.uri(URI.create(url))
.header("Authorization", "Bearer " + SdkClients.getAdminToken())
.DELETE()
.build();
HttpResponse<String> response =
HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
if (response.statusCode() >= 300) {
throw new RuntimeException(
"Delete failed with status " + response.statusCode() + ": " + response.body());
}
}
}