diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 109aeea24..a21ae8d76 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -65,7 +65,8 @@ jobs:
done
- name: Generate llms-full.txt
- run: bash website/scripts/generate-llms-full.sh
+ run: bash scripts/generate-llms-full.sh
+ working-directory: website
- name: Build Docusaurus
run: npm run build
diff --git a/.gitignore b/.gitignore
index c8a0d3a1e..7ef8a6b74 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,7 +42,7 @@ build/
.DS_Store
### Maven Flatten Plugin ###
-.flattened-pom.xml
+**/.flattened-pom.xml
### Docusaurus ###
website/node_modules/
diff --git a/README.md b/README.md
index 50b980055..f5b189384 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
**Storm** is a modern, high-performance ORM for Kotlin 2.0+ and Java 21+, built around a powerful SQL template engine. It focuses on simplicity, type safety, and predictable performance through immutable models and compile-time metadata.
-**Key benefits:**
+**Core ORM benefits:**
- **Minimal code**: Define entities with simple records/data classes and query with concise, readable syntax; no boilerplate.
- **Parameterized by default**: String interpolations are automatically converted to bind variables, making queries SQL injection safe by design.
@@ -21,6 +21,17 @@
- **Performance**: Template caching, transaction-scoped entity caching, and zero-overhead dirty checking (thanks to immutability) ensure efficient database interactions. Batch processing, lazy streams, and upserts are built in.
- **Universal Database Compatibility**: Fully compatible with all SQL databases, it offers flexibility and broad applicability across various database systems.
+Storm also includes an AI-assisted workflow for database development. It gives AI tools full schema awareness through a local MCP server, guides them with Storm-specific skills, and closes the loop with automated tests. The AI generates the code, but the final checks run that code against the schema and captured SQL rather than trusting LLM reasoning.
+
+To set up the AI workflow in your project:
+
+```bash
+npm install -g @storm-orm/cli
+storm init
+```
+
+This installs Storm's rules, skills, and optional local MCP setup for supported AI coding tools. See [AI-Assisted Development](docs/ai.md) for the full workflow.
+
## Why Storm?
Storm draws inspiration from established ORMs such as Hibernate, but is built from scratch around a clear design philosophy: capturing exactly what you want to do using the minimum amount of code, optimized for Kotlin and modern Java.
@@ -42,6 +53,25 @@ Storm embraces SQL rather than abstracting it away. It simplifies database inter
**Storm is ideal for** developers who understand that the best solutions emerge when object model and database model work in harmony. If you value a database-first approach where records naturally mirror your schema, Storm is built for you. Custom mappings are supported when needed, but the real elegance comes from alignment, not abstraction.
+## AI Workflow
+
+AI tools can write a lot of database code quickly, but subtle mistakes are still common: wrong joins, missing constraints, stale schema assumptions, or queries that compile but do the wrong thing.
+
+Storm addresses that in two ways.
+
+First, it improves generation quality. A local MCP server gives the AI full schema awareness without exposing credentials or data, and Storm skills teach the AI how to create entities, queries, repositories, and migrations that follow Storm's conventions.
+
+Second, it verifies the result with automated tests. Storm can validate that generated entities still match the schema and that generated queries behave as intended. These checks run in unit tests, so the final gate is actual code execution rather than model self-evaluation.
+
+The workflow is simple:
+
+1. You prompt the AI.
+2. The AI uses Storm skills and local schema context to generate code.
+3. Storm verifies the generated entities and queries in tests.
+4. You review the result and keep moving with more confidence.
+
+This is the core idea: AI generates database code, and Storm closes the loop with context and verification.
+
## Choose Your Language
Both Kotlin and Java support SQL Templates for powerful query composition. Kotlin additionally provides a type-safe DSL with infix operators for a more idiomatic experience.
@@ -143,7 +173,7 @@ Storm provides a Bill of Materials (BOM) for centralized version management. Imp
st.orm
storm-bom
- 1.11.0
+ @@STORM_VERSION@@
pom
import
@@ -155,7 +185,7 @@ Storm provides a Bill of Materials (BOM) for centralized version management. Imp
```kotlin
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
}
```
@@ -165,7 +195,7 @@ With the BOM imported, add Storm modules without specifying versions:
```kotlin
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
implementation("st.orm:storm-kotlin")
runtimeOnly("st.orm:storm-core")
// Use storm-compiler-plugin-2.0 for Kotlin 2.0.x, -2.1 for 2.1.x, etc.
diff --git a/docs/api-java.md b/docs/api-java.md
index 2cbf6389d..e1b6828f2 100644
--- a/docs/api-java.md
+++ b/docs/api-java.md
@@ -16,7 +16,7 @@ The main Java API module. It provides the `ORMTemplate` entry point, repository
st.orm
storm-java21
- 1.11.0
+ @@STORM_VERSION@@
```
@@ -44,7 +44,7 @@ Spring Framework integration for Java. Provides `RepositoryBeanFactoryPostProces
st.orm
storm-spring
- 1.11.0
+ @@STORM_VERSION@@
```
@@ -58,7 +58,7 @@ Spring Boot auto-configuration for Java. Automatically creates an `ORMTemplate`
st.orm
storm-spring-boot-starter
- 1.11.0
+ @@STORM_VERSION@@
```
@@ -83,7 +83,7 @@ The `storm-metamodel-processor` annotation processor generates type-safe metamod
st.orm
storm-metamodel-processor
- 1.11.0
+ @@STORM_VERSION@@
provided
```
diff --git a/docs/api-kotlin.md b/docs/api-kotlin.md
index 5f860b873..643c35cc5 100644
--- a/docs/api-kotlin.md
+++ b/docs/api-kotlin.md
@@ -14,7 +14,7 @@ The main Kotlin API module. It provides the `ORMTemplate` interface, extension f
```kotlin
// Gradle (Kotlin DSL)
-implementation("st.orm:storm-kotlin:1.11.0")
+implementation("st.orm:storm-kotlin:@@STORM_VERSION@@")
```
```xml
@@ -22,7 +22,7 @@ implementation("st.orm:storm-kotlin:1.11.0")
st.orm
storm-kotlin
- 1.11.0
+ @@STORM_VERSION@@
```
@@ -33,7 +33,7 @@ The Kotlin API does not depend on any preview features. All APIs are stable and
Spring Framework integration for Kotlin. Provides `RepositoryBeanFactoryPostProcessor` for repository auto-discovery and injection, `@EnableTransactionIntegration` for bridging Storm's programmatic transactions with Spring's `@Transactional`, and transaction-aware coroutine support. Add this module when you use Spring Framework without Spring Boot.
```kotlin
-implementation("st.orm:storm-kotlin-spring:1.11.0")
+implementation("st.orm:storm-kotlin-spring:@@STORM_VERSION@@")
```
See [Spring Integration](spring-integration.md) for configuration details.
@@ -43,7 +43,7 @@ See [Spring Integration](spring-integration.md) for configuration details.
Spring Boot auto-configuration for Kotlin. Automatically creates an `ORMTemplate` bean from the `DataSource`, discovers repositories, enables transaction integration, and binds `storm.*` properties from `application.yml`. This is the recommended dependency for Spring Boot applications.
```kotlin
-implementation("st.orm:storm-kotlin-spring-boot-starter:1.11.0")
+implementation("st.orm:storm-kotlin-spring-boot-starter:@@STORM_VERSION@@")
```
See [Spring Integration: Spring Boot Starter](spring-integration.md#spring-boot-starter) for what the starter provides and how to override its defaults.
@@ -96,7 +96,7 @@ plugins {
}
dependencies {
- ksp("st.orm:storm-metamodel-ksp:1.11.0")
+ ksp("st.orm:storm-metamodel-ksp:@@STORM_VERSION@@")
}
```
@@ -115,7 +115,7 @@ dependencies {
st.orm
storm-metamodel-processor
- 1.11.0
+ @@STORM_VERSION@@
diff --git a/docs/batch-streaming.md b/docs/batch-streaming.md
index 7fa5faf5a..58d865012 100644
--- a/docs/batch-streaming.md
+++ b/docs/batch-streaming.md
@@ -12,7 +12,7 @@ Database performance often degrades when applications issue many individual SQL
## Batch Processing
-When you pass a list of entities to Storm's insert, update, delete, or upsert methods, Storm automatically uses JDBC batch statements. The framework groups rows together and sends them to the database in a single round-trip, rather than issuing one statement per entity.
+When you pass a list of entities to Storm's insert, update, remove, or upsert methods, Storm automatically uses JDBC batch statements. The framework groups rows together and sends them to the database in a single round-trip, rather than issuing one statement per entity.
### Batch Insert
@@ -73,25 +73,25 @@ orm.entity(User.class).update(updatedUsers);
-### Batch Delete
+### Batch Remove
-Batch deletes remove multiple entities in a single round-trip. Storm generates a batched DELETE using each entity's primary key.
+A batch remove deletes multiple entities in a single round-trip. Storm generates a batched DELETE using each entity's primary key.
```kotlin
-orm delete users
+orm remove users
-// Or delete all entities of a type
-orm.deleteAll()
+// Or remove all entities of a type
+orm.removeAll()
```
```java
-orm.entity(User.class).delete(users);
+orm.entity(User.class).remove(users);
```
diff --git a/docs/common-patterns.md b/docs/common-patterns.md
index 6c013887c..fd2c74077 100644
--- a/docs/common-patterns.md
+++ b/docs/common-patterns.md
@@ -454,13 +454,13 @@ Use the `scroll()` method on any entity repository with a `Scrollable` that capt
// First page of 20 users ordered by ID
val window: Window = userRepository.scroll(Scrollable.of(User_.id, 20))
-// Navigate forward: nextScrollable() is non-null whenever the window has content.
+// Navigate forward: next() is non-null whenever the window has content.
// hasNext() is an informational flag indicating whether more rows existed at
// query time, but the developer decides whether to follow the cursor.
-val next: Window = userRepository.scroll(window.nextScrollable())
+val next: Window = userRepository.scroll(window.next())
// Navigate backward
-val previous: Window = userRepository.scroll(window.previousScrollable())
+val previous: Window = userRepository.scroll(window.previous())
```
@@ -470,19 +470,19 @@ val previous: Window = userRepository.scroll(window.previousScrollable())
// First page of 20 users ordered by ID
Window window = userRepository.scroll(Scrollable.of(User_.id, 20));
-// Navigate forward: nextScrollable() is non-null whenever the window has content.
+// Navigate forward: next() is non-null whenever the window has content.
// hasNext() is an informational flag indicating whether more rows existed at
// query time, but the developer decides whether to follow the cursor.
-Window next = userRepository.scroll(window.nextScrollable());
+Window next = userRepository.scroll(window.next());
// Navigate backward
-Window previous = userRepository.scroll(window.previousScrollable());
+Window previous = userRepository.scroll(window.previous());
```
-Each method returns a `Window` containing the page content and navigation cursors for sequential traversal. The `hasNext()` and `hasPrevious()` flags reflect whether additional rows existed at query time, but they are not prerequisites for calling `nextScrollable()` or `previousScrollable()`. Both methods return a non-null `Scrollable` whenever the window contains at least one element, and return `null` only when the window is empty. This means you can always follow the cursor if you choose to; for example, new rows may have been inserted after the original query. For REST APIs, `Window` also provides `nextCursor()` and `previousCursor()` to serialize the scroll position as an opaque string, and `Scrollable.fromCursor(key, cursor)` to reconstruct a `Scrollable` from a cursor string. See [Repositories: Scrolling](repositories.md#scrolling) for the full API, including sort overloads, filtering, and Ref variants.
+Each method returns a `Window` containing the page content and navigation cursors for sequential traversal. The `hasNext()` and `hasPrevious()` flags reflect whether additional rows existed at query time, but they are not prerequisites for calling `next()` or `previous()`. Both methods return a non-null `Scrollable` whenever the window contains at least one element, and return `null` only when the window is empty. This means you can always follow the cursor if you choose to; for example, new rows may have been inserted after the original query. For REST APIs, `Window` also provides `nextCursor()` and `previousCursor()` to serialize the scroll position as an opaque string, and `Scrollable.fromCursor(key, cursor)` to reconstruct a `Scrollable` from a cursor string. See [Repositories: Scrolling](repositories.md#scrolling) for the full API, including sort overloads, filtering, and Ref variants.
### Choosing Between the Two
@@ -496,8 +496,8 @@ Each method returns a `Window` containing the page content and navigation cursor
| Performance at page 1 | Good | Good |
| Performance at page 1,000 | Degrades (database must skip rows) | Consistent (index seek) |
| Handles concurrent inserts | Rows may shift between pages | Stable cursor |
-| Navigate forward | `page.nextPageable()` | `window.nextScrollable()` |
-| Navigate backward | `page.previousPageable()` | `window.previousScrollable()` |
+| Navigate forward | `page.nextPageable()` | `window.next()` |
+| Navigate backward | `page.previousPageable()` | `window.previous()` |
Use pagination when you need random page access or a total count (for example, displaying "Page 3 of 12" in a UI). Use scrolling when you need consistent performance over deep result sets or when the data changes frequently between requests.
diff --git a/docs/cursors.md b/docs/cursors.md
index db5f0a7a3..b8abff7ad 100644
--- a/docs/cursors.md
+++ b/docs/cursors.md
@@ -85,7 +85,7 @@ The following Java types can be used as cursor values (key or sort fields) out o
If your key or sort field uses a type not in this list, serialization via `toCursor()` will throw an `IllegalStateException`. You can either use one of the supported types for your key/sort columns, or register a custom codec.
-Note that in-memory navigation (using `nextScrollable()` and `previousScrollable()` directly, without serializing to a cursor string) works with any type, including inline records and other composite types. The type restriction only applies to `toCursor()` serialization.
+Note that in-memory navigation (using `next()` and `previous()` directly, without serializing to a cursor string) works with any type, including inline records and other composite types. The type restriction only applies to `toCursor()` serialization.
## Custom cursor codecs
diff --git a/docs/dialects.md b/docs/dialects.md
index 097e3d0ae..1c7650a7b 100644
--- a/docs/dialects.md
+++ b/docs/dialects.md
@@ -28,7 +28,7 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
st.orm
storm-oracle
- 1.11.0
+ @@STORM_VERSION@@
runtime
@@ -36,7 +36,7 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
st.orm
storm-mssqlserver
- 1.11.0
+ @@STORM_VERSION@@
runtime
@@ -44,7 +44,7 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
st.orm
storm-postgresql
- 1.11.0
+ @@STORM_VERSION@@
runtime
@@ -52,7 +52,7 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
st.orm
storm-mysql
- 1.11.0
+ @@STORM_VERSION@@
runtime
@@ -60,7 +60,7 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
st.orm
storm-mariadb
- 1.11.0
+ @@STORM_VERSION@@
runtime
@@ -68,7 +68,7 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
st.orm
storm-sqlite
- 1.11.0
+ @@STORM_VERSION@@
runtime
@@ -76,7 +76,7 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
st.orm
storm-h2
- 1.11.0
+ @@STORM_VERSION@@
runtime
```
@@ -85,50 +85,50 @@ Add the dialect dependency for your database. Dialects are runtime-only dependen
```groovy
// Oracle
-runtimeOnly 'st.orm:storm-oracle:1.11.0'
+runtimeOnly 'st.orm:storm-oracle:@@STORM_VERSION@@'
// MS SQL Server
-runtimeOnly 'st.orm:storm-mssqlserver:1.11.0'
+runtimeOnly 'st.orm:storm-mssqlserver:@@STORM_VERSION@@'
// PostgreSQL
-runtimeOnly 'st.orm:storm-postgresql:1.11.0'
+runtimeOnly 'st.orm:storm-postgresql:@@STORM_VERSION@@'
// MySQL
-runtimeOnly 'st.orm:storm-mysql:1.11.0'
+runtimeOnly 'st.orm:storm-mysql:@@STORM_VERSION@@'
// MariaDB
-runtimeOnly 'st.orm:storm-mariadb:1.11.0'
+runtimeOnly 'st.orm:storm-mariadb:@@STORM_VERSION@@'
// SQLite
-runtimeOnly 'st.orm:storm-sqlite:1.11.0'
+runtimeOnly 'st.orm:storm-sqlite:@@STORM_VERSION@@'
// H2
-runtimeOnly 'st.orm:storm-h2:1.11.0'
+runtimeOnly 'st.orm:storm-h2:@@STORM_VERSION@@'
```
### Gradle (Kotlin DSL)
```kotlin
// Oracle
-runtimeOnly("st.orm:storm-oracle:1.11.0")
+runtimeOnly("st.orm:storm-oracle:@@STORM_VERSION@@")
// MS SQL Server
-runtimeOnly("st.orm:storm-mssqlserver:1.11.0")
+runtimeOnly("st.orm:storm-mssqlserver:@@STORM_VERSION@@")
// PostgreSQL
-runtimeOnly("st.orm:storm-postgresql:1.11.0")
+runtimeOnly("st.orm:storm-postgresql:@@STORM_VERSION@@")
// MySQL
-runtimeOnly("st.orm:storm-mysql:1.11.0")
+runtimeOnly("st.orm:storm-mysql:@@STORM_VERSION@@")
// MariaDB
-runtimeOnly("st.orm:storm-mariadb:1.11.0")
+runtimeOnly("st.orm:storm-mariadb:@@STORM_VERSION@@")
// SQLite
-runtimeOnly("st.orm:storm-sqlite:1.11.0")
+runtimeOnly("st.orm:storm-sqlite:@@STORM_VERSION@@")
// H2
-runtimeOnly("st.orm:storm-h2:1.11.0")
+runtimeOnly("st.orm:storm-h2:@@STORM_VERSION@@")
```
## Automatic Detection
diff --git a/docs/entities.md b/docs/entities.md
index 9b8866823..185e60151 100644
--- a/docs/entities.md
+++ b/docs/entities.md
@@ -58,7 +58,7 @@ record User(@PK Integer id,
## Entity Interface
-Implementing the `Entity` interface is optional but required for using `EntityRepository` with built-in CRUD operations. The type parameter specifies the primary key type. Without this interface, you can still use Storm's SQL template features and query builder, but you lose the convenience methods like `findById`, `insert`, `update`, and `delete`. If you only need read access, consider using `Projection` instead (see [Projections](projections.md)).
+Implementing the `Entity` interface is optional but required for using `EntityRepository` with built-in CRUD operations. The type parameter specifies the primary key type. Without this interface, you can still use Storm's SQL template features and query builder, but you lose the convenience methods like `findById`, `insert`, `update`, and `remove`. If you only need read access, consider using `Projection` instead (see [Projections](projections.md)).
Storm also supports polymorphic entity hierarchies using sealed interfaces. A sealed interface extending `Entity` can define multiple record subtypes, enabling Single-Table or Joined Table inheritance with compile-time exhaustive pattern matching. See [Polymorphism](polymorphism.md) for details.
diff --git a/docs/faq.md b/docs/faq.md
index 23d048549..eb6bbf037 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -286,9 +286,9 @@ For large tables where users scroll through results sequentially, prefer **scrol
```kotlin
val window = userRepository.scroll(Scrollable.of(User_.id, 20))
-// nextScrollable() is non-null when the window has content.
+// next() is non-null when the window has content.
// hasNext() is informational; the developer decides whether to follow the cursor.
-val next = userRepository.scroll(window.nextScrollable())
+val next = userRepository.scroll(window.next())
```
@@ -296,9 +296,9 @@ val next = userRepository.scroll(window.nextScrollable())
```java
Window window = userRepository.scroll(Scrollable.of(User_.id, 20));
-// nextScrollable() is non-null when the window has content.
+// next() is non-null when the window has content.
// hasNext() is informational; the developer decides whether to follow the cursor.
-Window next = userRepository.scroll(window.nextScrollable());
+Window next = userRepository.scroll(window.next());
```
@@ -345,20 +345,20 @@ userRepository.delete()
-If you genuinely need to delete all rows from a table, use the `deleteAll()` convenience method:
+If you genuinely need to delete all rows from a table, use the `removeAll()` convenience method:
```kotlin
-userRepository.deleteAll()
+userRepository.removeAll()
```
```java
-userRepository.deleteAll();
+userRepository.removeAll();
```
@@ -383,7 +383,7 @@ userRepository.delete().unsafe().executeUpdate();
-The `unsafe()` method signals that the absence of a WHERE clause is intentional. Without it, Storm assumes the missing WHERE clause is a mistake. The `deleteAll()` convenience method calls `unsafe()` internally.
+The `unsafe()` method signals that the absence of a WHERE clause is intentional. Without it, Storm assumes the missing WHERE clause is a mistake. The `removeAll()` convenience method calls `unsafe()` internally.
### Can I use database-specific functions?
diff --git a/docs/first-entity.md b/docs/first-entity.md
index d1d05c058..1d8e2acf0 100644
--- a/docs/first-entity.md
+++ b/docs/first-entity.md
@@ -204,20 +204,20 @@ users.update(new User(user.id(), user.email(), "Alice Johnson", user.city()));
-## Delete a Record
+## Remove a Record
```kotlin
-orm delete user
+orm remove user
```
```java
-users.delete(user);
+users.remove(user);
```
@@ -267,7 +267,7 @@ You have now seen the core workflow:
1. Define entities as data classes or records with `@PK` and `@FK` annotations
2. Create an `ORMTemplate` from a `DataSource`
-3. Use `insert`, `findById`, `update`, and `delete` for basic CRUD
+3. Use `insert`, `findById`, `update`, and `remove` for basic CRUD
## Next Steps
diff --git a/docs/first-query.md b/docs/first-query.md
index 075f1fe1f..4765c38b6 100644
--- a/docs/first-query.md
+++ b/docs/first-query.md
@@ -95,7 +95,7 @@ val user = userRepository.findByEmail("alice@example.com")
val usersInCity = userRepository.findByNameInCity("Alice", city)
```
-Custom repositories inherit all built-in CRUD operations (`insert`, `findById`, `update`, `delete`, etc.) from `EntityRepository`. You only add methods for domain-specific queries.
+Custom repositories inherit all built-in CRUD operations (`insert`, `findById`, `update`, `remove`, etc.) from `EntityRepository`. You only add methods for domain-specific queries.
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 707b6fee6..6ccf83984 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -71,7 +71,7 @@ Set up your project with the right dependencies, build flags, and optional modul
**2. First Entity**
-Define your first entity, create an ORM template, and perform insert, read, update, and delete operations.
+Define your first entity, create an ORM template, and perform insert, read, update, and remove operations.
**[Go to First Entity](first-entity.md)**
diff --git a/docs/glossary.md b/docs/glossary.md
index 020289775..78c624d3d 100644
--- a/docs/glossary.md
+++ b/docs/glossary.md
@@ -8,7 +8,7 @@ This page defines key terms used throughout the Storm documentation.
The process of determining which fields of an entity have changed since it was last read from the database. Storm compares the current entity state against the observed state stored in the transaction context. Only changed columns are included in the UPDATE statement. Because entities are immutable, dirty checking is fast and requires no bytecode manipulation. See [Dirty Checking](dirty-checking.md).
**Entity**
-A Kotlin data class or Java record that implements the `Entity` interface and maps to a database table. Entities support full CRUD operations (insert, update, delete) through repositories. They are stateless and immutable, with no proxies or hidden state. See [Entities](entities.md).
+A Kotlin data class or Java record that implements the `Entity` interface and maps to a database table. Entities support full CRUD operations (insert, update, remove) through repositories. They are stateless and immutable, with no proxies or hidden state. See [Entities](entities.md).
**Entity Cache**
A transaction-scoped cache that stores entities by primary key during a transaction. It avoids redundant database round-trips, skips repeated object construction during hydration, preserves object identity within a transaction, and tracks observed state for dirty checking. The cache is automatically cleared on commit or rollback. See [Entity Cache](entity-cache.md).
@@ -41,7 +41,7 @@ A lightweight identifier (`Ref`) that carries only the record type and primar
An interface that provides database access methods for an entity or projection type. `EntityRepository` offers built-in CRUD operations; `ProjectionRepository` offers read-only operations. Custom repositories extend these interfaces with domain-specific query methods. See [Repositories](repositories.md).
**Scrollable**
-A scroll request that captures cursor state for fetching a window of results. The scrolling counterpart of `Pageable`. Created via `Scrollable.of(key, size)` or obtained from `Window.nextScrollable()` / `Window.previousScrollable()`, which are always non-null when the window has content. Supports cursor serialization for REST APIs via `toCursor()` / `Scrollable.fromCursor(key, cursor)`. See [Pagination and Scrolling: Scrolling](pagination-and-scrolling.md#scrolling).
+A scroll request that captures cursor state for fetching a window of results. The scrolling counterpart of `Pageable`. Created via `Scrollable.of(key, size)` or obtained from `Window.next()` / `Window.previous()`, which are always non-null when the window has content. Supports cursor serialization for REST APIs via `toCursor()` / `Scrollable.fromCursor(key, cursor)`. See [Pagination and Scrolling: Scrolling](pagination-and-scrolling.md#scrolling).
**SQL Template**
Storm's template engine that uses string interpolation to embed entity types, metamodel fields, and parameter values into SQL text. Types expand to column lists, metamodel fields to column names, and values to parameterized placeholders. SQL Templates are the foundation of all Storm queries, including those generated by repositories. See [SQL Templates](sql-templates.md).
@@ -53,4 +53,4 @@ See [Metamodel](#metamodel) above.
A configuration object (`StormConfig`) that controls runtime behavior for features like dirty checking mode, entity cache retention, and template cache size. All settings have sensible defaults, so configuration is optional. See [Configuration](configuration.md).
**Window**
-A window of query results from a scrolling operation. A `Window` contains the result list (`content`), informational `hasNext` and `hasPrevious` flags (a snapshot at query time), and navigation tokens (`nextScrollable()`, `previousScrollable()`) for sequential traversal. The navigation tokens are always non-null when the window has content; `hasNext` and `hasPrevious` are not prerequisites for accessing them, since new data may appear after the query. Also provides `nextCursor()` / `previousCursor()` for REST API cursor strings. See [Pagination and Scrolling: Scrolling](pagination-and-scrolling.md#scrolling).
+A window of query results from a scrolling operation. A `Window` contains the result list (`content`), informational `hasNext` and `hasPrevious` flags (a snapshot at query time), and navigation tokens (`next()`, `previous()`) for sequential traversal. The navigation tokens are always non-null when the window has content; `hasNext` and `hasPrevious` are not prerequisites for accessing them, since new data may appear after the query. Also provides `nextCursor()` / `previousCursor()` for REST API cursor strings. See [Pagination and Scrolling: Scrolling](pagination-and-scrolling.md#scrolling).
diff --git a/docs/index.md b/docs/index.md
index 62aa9dc5c..bed1a501d 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -25,6 +25,20 @@ Storm's concise API, strict conventions, and absence of hidden complexity make i
- **Performance**: Template caching, transaction-scoped entity caching, and zero-overhead dirty checking (thanks to immutability) ensure efficient database interactions. Batch processing, lazy streams, and upserts are built in.
- **Universal Database Compatibility**: Fully compatible with all SQL databases, it offers flexibility and broad applicability across various database systems.
+## Built for the AI Era
+
+Storm is the ORM that AI coding assistants get right. Its stateless, immutable entities mean what you see in the source code is exactly what exists at runtime: no hidden proxies, no lazy loading surprises, no persistence context rules that trip up AI-generated code. When you ask your AI tool to write a query, define an entity, or build a repository, the output is straightforward data classes and explicit SQL, the same code a senior developer would write by hand.
+
+Traditional ORMs carry invisible complexity (managed entity state, implicit flushes, bytecode-enhanced proxies) that AI tools have no reliable way to reason about. Storm eliminates these failure modes entirely. Combined with its compile-time metamodel that catches errors before runtime, Storm and AI coding tools form a natural partnership.
+
+**Get started in seconds:**
+
+```bash
+npx @storm-orm/cli
+```
+
+This configures your AI tool (Claude Code, Cursor, Copilot, Windsurf) with Storm's patterns, conventions, and slash commands. See [AI-Assisted Development](ai.md) for details.
+
## Why Storm?
Storm draws inspiration from established ORMs such as Hibernate, but is built from scratch around a clear design philosophy: capture intent using the minimum amount of code, optimized for Kotlin and modern Java.
@@ -145,7 +159,7 @@ Storm provides a Bill of Materials (BOM) for centralized version management. Imp
```kotlin
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
implementation("st.orm:storm-kotlin")
runtimeOnly("st.orm:storm-core")
// Use storm-compiler-plugin-2.0 for Kotlin 2.0.x, -2.1 for 2.1.x, etc.
@@ -162,7 +176,7 @@ dependencies {
st.orm
storm-bom
- 1.11.0
+ @@STORM_VERSION@@
pom
import
diff --git a/docs/installation.md b/docs/installation.md
index 0fe3261ee..b282436f1 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -25,7 +25,7 @@ Storm provides a Bill of Materials (BOM) for centralized version management. Imp
```kotlin
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
}
```
@@ -40,7 +40,7 @@ dependencies {
st.orm
storm-bom
- 1.11.0
+ @@STORM_VERSION@@
pom
import
@@ -52,7 +52,7 @@ dependencies {
```kotlin
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
}
```
@@ -70,7 +70,7 @@ plugins {
}
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
implementation("st.orm:storm-kotlin")
runtimeOnly("st.orm:storm-core")
@@ -88,7 +88,7 @@ The `storm-metamodel-ksp` dependency generates type-safe metamodel classes (e.g.
```kotlin
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
implementation("st.orm:storm-java21")
runtimeOnly("st.orm:storm-core")
diff --git a/docs/json.md b/docs/json.md
index 5e772d153..777c153a8 100644
--- a/docs/json.md
+++ b/docs/json.md
@@ -19,12 +19,12 @@ Works with both Kotlin and Java projects. Two variants are available, matching t
st.orm
storm-jackson2
- 1.11.0
+ @@STORM_VERSION@@
```
```groovy
-implementation 'st.orm:storm-jackson2:1.11.0'
+implementation 'st.orm:storm-jackson2:@@STORM_VERSION@@'
```
**Jackson 3** (requires Jackson 3.0+):
@@ -33,12 +33,12 @@ implementation 'st.orm:storm-jackson2:1.11.0'
st.orm
storm-jackson3
- 1.11.0
+ @@STORM_VERSION@@
```
```groovy
-implementation 'st.orm:storm-jackson3:1.11.0'
+implementation 'st.orm:storm-jackson3:@@STORM_VERSION@@'
```
The two modules are mutually exclusive on the classpath. Both provide the same public API (`st.orm.jackson` package), so switching between them requires only changing the Maven dependency.
@@ -53,7 +53,7 @@ plugins {
}
dependencies {
- implementation("st.orm:storm-kotlinx-serialization:1.11.0")
+ implementation("st.orm:storm-kotlinx-serialization:@@STORM_VERSION@@")
}
```
diff --git a/docs/ktor-integration.md b/docs/ktor-integration.md
index 17f6e355d..ee994ca75 100644
--- a/docs/ktor-integration.md
+++ b/docs/ktor-integration.md
@@ -13,7 +13,7 @@ Add the Storm Ktor module alongside your core Storm dependencies:
```kotlin
dependencies {
- implementation(platform("st.orm:storm-bom:1.11.0"))
+ implementation(platform("st.orm:storm-bom:@@STORM_VERSION@@"))
implementation("st.orm:storm-kotlin")
implementation("st.orm:storm-ktor")
@@ -637,7 +637,7 @@ fun Application.module() {
delete("/users/{id}") {
val id = call.parameters.getOrFail("id").toInt()
transaction {
- call.orm.entity().deleteById(id)
+ call.orm.entity().removeById(id)
}
call.respond(HttpStatusCode.NoContent)
}
diff --git a/docs/metamodel.md b/docs/metamodel.md
index 4c3e47c33..ac9727daf 100644
--- a/docs/metamodel.md
+++ b/docs/metamodel.md
@@ -49,14 +49,14 @@ plugins {
}
dependencies {
- ksp("st.orm:storm-metamodel-processor:1.11.0")
+ ksp("st.orm:storm-metamodel-processor:@@STORM_VERSION@@")
}
```
### Gradle (Java)
```kotlin
-annotationProcessor("st.orm:storm-metamodel-processor:1.11.0")
+annotationProcessor("st.orm:storm-metamodel-processor:@@STORM_VERSION@@")
```
### Maven (Java)
@@ -65,7 +65,7 @@ annotationProcessor("st.orm:storm-metamodel-processor:1.11.0")
st.orm
storm-metamodel-processor
- 1.11.0
+ @@STORM_VERSION@@
provided
```
@@ -457,16 +457,16 @@ User user = userRepository.getBy(User_.email, "alice@example.com"); // throws i
```kotlin
val window: Window = userRepository.scroll(Scrollable.of(User_.id, 20))
-// nextScrollable() is non-null when the window has content.
+// next() is non-null when the window has content.
// hasNext() is informational; the developer decides whether to follow the cursor.
-val nextWindow: Window = userRepository.scroll(window.nextScrollable())
+val nextWindow: Window = userRepository.scroll(window.next())
```
Compound unique keys work the same way. The inline record is used as the cursor value:
```kotlin
val window: Window = repository.scroll(Scrollable.of(SomeEntity_.uniqueKey, 20))
-val nextWindow: Window = repository.scroll(window.nextScrollable())
+val nextWindow: Window = repository.scroll(window.next())
```
@@ -474,16 +474,16 @@ val nextWindow: Window = repository.scroll(window.nextScrollable())
```java
Window window = userRepository.scroll(Scrollable.of(User_.id, 20));
-// nextScrollable() is non-null when the window has content.
+// next() is non-null when the window has content.
// hasNext() is informational; the developer decides whether to follow the cursor.
-Window next = userRepository.scroll(window.nextScrollable());
+Window next = userRepository.scroll(window.next());
```
Compound unique keys work the same way:
```java
Window window = repository.scroll(Scrollable.of(SomeEntity_.uniqueKey, 20));
-Window next = repository.scroll(window.nextScrollable());
+Window next = repository.scroll(window.next());
```
diff --git a/docs/pagination-and-scrolling.md b/docs/pagination-and-scrolling.md
index fa90ee85d..3cf5df34e 100644
--- a/docs/pagination-and-scrolling.md
+++ b/docs/pagination-and-scrolling.md
@@ -14,10 +14,10 @@ Storm provides three ways to retrieve a subset of query results. The right choic
| Feature | Offset and Limit | Pagination | Scrolling |
|---------|-----------------|------------|-----------|
| Navigation | manual | page number | cursor |
-| Result type | `List` | `Page` | `Window` |
+| Result type | `List` | `Page` | `Window` |
| Count query | no | yes | no |
| Random access | yes | yes | no |
-| Navigation tokens | no | `nextPageable()` / `previousPageable()` | `nextScrollable()` / `previousScrollable()` |
+| Navigation tokens | no | `nextPageable()` / `previousPageable()` | `next()` / `previous()` |
| Performance on large datasets | degrades with offset | degrades with offset | constant |
**Offset and Limit** gives raw control with `offset()` and `limit()` on the query builder. Both pagination and offset/limit use SQL `OFFSET` under the hood, which degrades on large tables because the database must scan and discard all skipped rows.
@@ -159,7 +159,7 @@ For the full `Page` and `Pageable` API reference, see [Repositories: Offset-Base
## Scrolling
-Scrolling navigates sequentially using a cursor and returns a `Window`. A `Window` represents a portion of the result set: it contains the data, informational flags (`hasNext`, `hasPrevious`) that indicate whether adjacent results existed at query time, and `Scrollable` navigation tokens for sequential traversal, but no total count or page number. The navigation tokens `nextScrollable()` and `previousScrollable()` are always available when the window has content, regardless of whether `hasNext` or `hasPrevious` is `true`. This allows the developer to decide whether to follow a cursor, since new data may appear after the query was executed.
+Scrolling navigates sequentially using a cursor and returns a `Window`. A `Window` represents a portion of the result set: it contains the data, informational flags (`hasNext`, `hasPrevious`) that indicate whether adjacent results existed at query time, and navigation tokens for sequential traversal, but no total count or page number. The typed navigation methods `next()` and `previous()` are always available when the window has content, regardless of whether `hasNext` or `hasPrevious` is `true`. This allows the developer to decide whether to follow a cursor, since new data may appear after the query was executed.
Under the hood, scrolling uses keyset pagination: it remembers the last value seen on the current page and asks the database for rows after (or before) that value. This avoids the performance cliff of `OFFSET` on large tables, because the database can seek directly to the cursor position using an index.
@@ -167,15 +167,17 @@ Under the hood, scrolling uses keyset pagination: it remembers the last value se
Scrolling requires a stable sort order. The final sort column must be unique (typically the primary key). Using a non-unique sort column like `createdAt` without a tiebreaker will produce duplicate or missing rows at page boundaries. Use the [sort overload](#sorting-by-non-unique-columns) (`Scrollable.of(key, sort, size)`) when sorting by a non-unique column.
:::
-The `scroll` method is available directly on repositories and on the query builder. It accepts a `Scrollable` that captures the cursor state and returns a `Window` containing:
+The `scroll` method is available directly on repositories and on the query builder. It accepts a `Scrollable` that captures the cursor state and returns a `Window` containing:
| Field / Method | Description |
|-------|-------------|
| `content()` | The list of results for this window. |
| `hasNext()` | `true` if more results existed beyond this window at query time. |
| `hasPrevious()` | `true` if this window was fetched with a cursor position (i.e., not the first page). |
-| `nextScrollable()` | Returns a `Scrollable` for the next window, or `null` if the window is empty. |
-| `previousScrollable()` | Returns a `Scrollable` for the previous window, or `null` if the window is empty. |
+| `next()` | Returns a typed `Scrollable` for the next window, or `null` if the window is empty. |
+| `previous()` | Returns a typed `Scrollable` for the previous window, or `null` if the window is empty. |
+
+The `nextScrollable()` and `previousScrollable()` raw record component accessors also exist, returning `Scrollable`. The typed `next()` and `previous()` methods are preferred for programmatic navigation.
Create a `Scrollable` using the factory methods, or obtain one from a `Window`:
@@ -183,8 +185,8 @@ Create a `Scrollable` using the factory methods, or obtain one from a `Window`:
|--------|---------|------------|
| `Scrollable.of(key, size)` | Request for the first page (ascending). | `ORDER BY key ASC LIMIT size+1` |
| `Scrollable.of(key, size).backward()` | Request for the first page (descending). | `ORDER BY key DESC LIMIT size+1` |
-| `window.nextScrollable()` | Request for the next page after the current window. | `WHERE key > cursor ORDER BY key ASC LIMIT size+1` |
-| `window.previousScrollable()` | Request for the previous page before the current window. | `WHERE key < cursor ORDER BY key DESC LIMIT size+1` |
+| `window.next()` | Request for the next page after the current window. | `WHERE key > cursor ORDER BY key ASC LIMIT size+1` |
+| `window.previous()` | Request for the previous page before the current window. | `WHERE key < cursor ORDER BY key DESC LIMIT size+1` |
The extra row (`size+1`) is used internally to determine the value of `hasNext`, then discarded from the returned content.
@@ -192,7 +194,7 @@ The extra row (`size+1`) is used internally to determine the value of `hasNext`,
**No total count.** Unlike pagination, scrolling does not include a total element count. A separate `COUNT(*)` query must execute the same joins, filters, and conditions as the main query, which can be expensive on large or complex result sets. Total counts are also inherently unstable: rows may be inserted or deleted while a user navigates through pages, so the count can become stale between requests. Scrolling is designed for sequential "load more" or infinite-scroll patterns where a total is rarely needed. If you do need a total count (for example, for a UI label like "showing 10 of 4,827 results"), call the `count` (Kotlin) or `getCount()` (Java) method on the query builder separately, keeping in mind that the value is a snapshot that may drift as the underlying data changes.
-**REST cursor support.** For REST APIs that need to pass scroll state as an opaque string (for example, as a query parameter), `Window` provides `nextCursor()` and `previousCursor()` methods that serialize the scroll position to a cursor string. These convenience methods are gated by the informational flags: `nextCursor()` returns `null` when `hasNext()` is `false`, and `previousCursor()` returns `null` when `hasPrevious()` is `false`. This makes them safe to use directly in REST responses without additional checks. The underlying `nextScrollable()` and `previousScrollable()` methods remain available whenever the window has content, so server-side code can still follow a cursor even when the flags indicate no more results were seen at query time. To reconstruct a `Scrollable` from a cursor string, use `Scrollable.fromCursor(key, cursor)`. For details on supported cursor types, security considerations, and custom codec registration, see [Cursor Serialization](cursors.md).
+**REST cursor support.** For REST APIs that need to pass scroll state as an opaque string (for example, as a query parameter), `Window` provides `nextCursor()` and `previousCursor()` methods that serialize the scroll position to a cursor string. These convenience methods are gated by the informational flags: `nextCursor()` returns `null` when `hasNext()` is `false`, and `previousCursor()` returns `null` when `hasPrevious()` is `false`. This makes them safe to use directly in REST responses without additional checks. The underlying `next()` and `previous()` methods remain available whenever the window has content, so server-side code can still follow a cursor even when the flags indicate no more results were seen at query time. To reconstruct a `Scrollable` from a cursor string, use `Scrollable.fromCursor(key, cursor)`. For details on supported cursor types, security considerations, and custom codec registration, see [Cursor Serialization](cursors.md).
@@ -298,10 +300,10 @@ val window = postRepository.select()
.scroll(Scrollable.of(Post_.id, Post_.createdAt, 20))
// Next page (cursor values are captured in the Scrollable automatically).
-// nextScrollable() is non-null whenever the window has content.
+// next() is non-null whenever the window has content.
// hasNext() is informational; the developer decides whether to follow the cursor.
val next = postRepository.select()
- .scroll(window.nextScrollable())
+ .scroll(window.next())
// First page sorted by creation date descending (most recent first)
val latest = postRepository.select()
@@ -309,7 +311,7 @@ val latest = postRepository.select()
// Previous page
val prev = postRepository.select()
- .scroll(window.previousScrollable())
+ .scroll(window.previous())
```
@@ -321,28 +323,28 @@ var window = postRepository.select()
.scroll(Scrollable.of(Post_.id, Post_.createdAt, 20));
// Next page (cursor values are captured in the Scrollable automatically).
-// nextScrollable() is non-null whenever the window has content.
+// next() is non-null whenever the window has content.
// You can check hasNext() if you only want to proceed when more results
// were known to exist at query time, or follow the cursor unconditionally
// to pick up data that may have arrived after the query.
-var nextScrollable = window.nextScrollable();
-if (nextScrollable != null) {
- var next = postRepository.select()
- .scroll(nextScrollable);
+var next = window.next();
+if (next != null) {
+ var nextWindow = postRepository.select()
+ .scroll(next);
}
// Previous page
-var previousScrollable = window.previousScrollable();
-if (previousScrollable != null) {
+var previous = window.previous();
+if (previous != null) {
var prev = postRepository.select()
- .scroll(previousScrollable);
+ .scroll(previous);
}
```
-The `Window` carries navigation tokens (`nextScrollable()`, `previousScrollable()`) that encode the cursor values internally, so the client does not need to extract cursor values manually.
+The `Window` carries navigation tokens (`next()`, `previous()`) that encode the cursor values internally, so the client does not need to extract cursor values manually.
The generated SQL uses a composite WHERE condition that maintains correct ordering even when `sort` values repeat:
@@ -391,11 +393,13 @@ var window = orm.query(Order.class)
See [Manual Key Wrapping](metamodel.md#manual-key-wrapping) for more details.
-### Window vs MappedWindow
+### Window Type Parameters
+
+`Window` is a record with a single type parameter: `R` is the result type. It provides result content, cursor-based string navigation (`nextCursor()`, `previousCursor()`), and typed `Scrollable` navigation via the generic `next()` and `previous()` convenience methods for programmatic traversal. The raw record component accessors `nextScrollable()` and `previousScrollable()` return `Scrollable`.
-When calling `scroll` on the query builder directly (rather than through a repository), the return type is `MappedWindow` where `R` is the result type and `T` is the entity type from the FROM clause. For entity queries where `R` and `T` are the same type, `MappedWindow` carries `Scrollable` navigation tokens and works the same as `Window`. Repository convenience methods return `Window` directly.
+The repository convenience method `scroll()` returns `Window`. The query builder `scroll()` also returns `Window`. For entity queries, `Window` carries `Scrollable` navigation tokens, and the `next()` / `previous()` methods provide typed access.
-For queries where the result type differs from the entity type (for example, selecting into a data class that combines columns from multiple sources), `MappedWindow` does not carry navigation tokens because Storm cannot extract cursor values from a result type it does not know how to navigate. In this case, `nextScrollable()` and `previousScrollable()` return `null` (even when the window has content), and `hasNext()` still works correctly as an informational flag. To continue scrolling, check `hasNext()` and construct the next `Scrollable` manually using cursor values from your result:
+For queries where the result type differs from the entity type (for example, selecting into a data class that combines columns from multiple sources), `Window` does not carry navigation tokens because Storm cannot extract cursor values from a result type it does not know how to navigate. In this case, `next()` and `previous()` return `null` (even when the window has content), and `hasNext()` still works correctly as an informational flag. To continue scrolling, check `hasNext()` and construct the next `Scrollable` manually using cursor values from your result:
@@ -403,7 +407,7 @@ For queries where the result type differs from the entity type (for example, sel
```kotlin
data class OrderSummary(val city: Ref, val orderCount: Long) : Data
-val window: MappedWindow = orm.selectFrom(Order::class, OrderSummary::class) {
+val window: Window = orm.selectFrom(Order::class, OrderSummary::class) {
"""${Order_.city.id}, COUNT(*)"""
}
.groupBy(Order_.city)
@@ -413,7 +417,7 @@ val window: MappedWindow = orm.selectFrom(Order::class, Ord
// Construct the next scrollable manually from the last result.
// hasNext() is informational; the developer decides whether to follow the cursor.
val lastCity = window.content.last().city.id()
-val next: MappedWindow = orm.selectFrom(Order::class, OrderSummary::class) { ... }
+val next: Window = orm.selectFrom(Order::class, OrderSummary::class) { ... }
.groupBy(Order_.city)
.scroll(Scrollable.of(Order_.city.key(), lastCity, 20))
```
@@ -424,7 +428,7 @@ val next: MappedWindow = orm.selectFrom(Order::class, Order
```java
record OrderSummary(Ref city, long orderCount) implements Data {}
-MappedWindow window = orm.selectFrom(Order.class, OrderSummary.class,
+Window window = orm.selectFrom(Order.class, OrderSummary.class,
RAW."""SELECT \{Order_.city.id}, COUNT(*)""")
.groupBy(Order_.city)
.scroll(Scrollable.of(Metamodel.key(Order_.city), 20));
@@ -433,7 +437,7 @@ MappedWindow window = orm.selectFrom(Order.class, OrderSumm
// Construct the next scrollable manually from the last result.
// hasNext() is informational; the developer decides whether to follow the cursor.
var lastCity = window.content().getLast().city().id();
-MappedWindow next = orm.selectFrom(Order.class, OrderSummary.class, ...)
+Window next = orm.selectFrom(Order.class, OrderSummary.class, ...)
.groupBy(Order_.city)
.scroll(Scrollable.of(Metamodel.key(Order_.city), lastCity, 20));
```
@@ -448,5 +452,5 @@ MappedWindow next = orm.selectFrom(Order.class, OrderSummar
| Request | `Pageable` | `Scrollable` |
| Result | `Page` | `Window` |
| Method | `page(pageable)` | `scroll(scrollable)` |
-| Navigate forward | `page.nextPageable()` | `window.nextScrollable()` |
-| Navigate backward | `page.previousPageable()` | `window.previousScrollable()` |
+| Navigate forward | `page.nextPageable()` | `window.next()` |
+| Navigate backward | `page.previousPageable()` | `window.previous()` |
diff --git a/docs/performance.md b/docs/performance.md
index 573171e1a..d615c7d29 100644
--- a/docs/performance.md
+++ b/docs/performance.md
@@ -159,7 +159,7 @@ storm:
## Batch Operations
-Batch operations group multiple SQL statements into a single JDBC round-trip. Storm automatically uses JDBC batching when you pass collections to `insert`, `update`, `delete`, or `upsert`.
+Batch operations group multiple SQL statements into a single JDBC round-trip. Storm automatically uses JDBC batching when you pass collections to `insert`, `update`, `remove`, or `upsert`.
### Performance Characteristics
diff --git a/docs/polymorphism.md b/docs/polymorphism.md
index 987ea8459..3e3d68ce1 100644
--- a/docs/polymorphism.md
+++ b/docs/polymorphism.md
@@ -420,8 +420,8 @@ pets.insert(Cat(name = "Bella", indoor = true))
// Update
pets.update(Cat(id = 1, name = "Sir Whiskers", indoor = true))
-// Delete
-pets.delete(somePet)
+// Remove
+pets.remove(somePet)
```
@@ -445,8 +445,8 @@ pets.insert(new Cat(null, "Bella", true));
// Update
pets.update(new Cat(1, "Sir Whiskers", true));
-// Delete
-pets.delete(somePet);
+// Remove
+pets.remove(somePet);
```
@@ -761,8 +761,8 @@ pets.insert(Cat(name = "Bella", indoor = true))
// Update a Cat - updates both base and extension tables
pets.update(Cat(id = 1, name = "Sir Whiskers", indoor = true))
-// Delete - deletes from extension table first, then base table
-pets.delete(somePet)
+// Remove - deletes from extension table first, then base table
+pets.remove(somePet)
```
@@ -780,8 +780,8 @@ pets.insert(new Cat(null, "Bella", true));
// Update a Cat - updates both base and extension tables
pets.update(new Cat(1, "Sir Whiskers", true));
-// Delete - deletes from extension table first, then base table
-pets.delete(somePet);
+// Remove - deletes from extension table first, then base table
+pets.remove(somePet);
```
@@ -955,7 +955,7 @@ Type changes require a transactional context for atomicity, since the operation
### Batch Operations
-Storm supports batch operations with mixed subtypes. You can pass a list containing different concrete subtypes to `insert()`, `update()`, or `delete()`, and Storm handles them correctly.
+Storm supports batch operations with mixed subtypes. You can pass a list containing different concrete subtypes to `insert()`, `update()`, or `remove()`, and Storm handles them correctly.
@@ -971,8 +971,8 @@ pets.insert(listOf(
// Update mixed subtypes
pets.update(listOf(updatedCat, updatedDog))
-// Delete mixed subtypes
-pets.delete(listOf(someCat, someDog))
+// Remove mixed subtypes
+pets.remove(listOf(someCat, someDog))
```
@@ -989,8 +989,8 @@ pets.insert(List.of(
// Update mixed subtypes
pets.update(List.of(updatedCat, updatedDog));
-// Delete mixed subtypes
-pets.delete(List.of(someCat, someDog));
+// Remove mixed subtypes
+pets.remove(List.of(someCat, someDog));
```
diff --git a/docs/projections.md b/docs/projections.md
index 23fdf2c14..4f83f60e5 100644
--- a/docs/projections.md
+++ b/docs/projections.md
@@ -5,7 +5,7 @@ import TabItem from '@theme/TabItem';
## What Are Projections?
-Projections are **read-only** data structures that represent database views or complex queries defined via `@ProjectionQuery`. Like entities, they are plain Kotlin data classes or Java records with no proxies and no bytecode manipulation. Unlike entities, projections support only read operations: no insert, update, or delete.
+Projections are **read-only** data structures that represent database views or complex queries defined via `@ProjectionQuery`. Like entities, they are plain Kotlin data classes or Java records with no proxies and no bytecode manipulation. Unlike entities, projections support only read operations: no insert, update, or remove.
```
┌─────────────────────────────────────────────────────────────────────┐
@@ -177,7 +177,7 @@ This is useful for aggregations, complex joins, or mapping database views.
### Getting a ProjectionRepository
-Obtain a `ProjectionRepository` from the ORM template. This is the read-only counterpart to `EntityRepository`. It provides find, select, count, and existence-check operations, but no insert, update, or delete.
+Obtain a `ProjectionRepository` from the ORM template. This is the read-only counterpart to `EntityRepository`. It provides find, select, count, and existence-check operations, but no insert, update, or remove.
@@ -494,7 +494,7 @@ Use `@DbColumn` to map fields to columns with different names.
| `select()` | Query builder for filtering |
| `selectCount()` | Query builder for counting |
-Note: Unlike `EntityRepository`, there are no `insert`, `update`, `delete`, or `upsert` methods. Projections are read-only.
+Note: Unlike `EntityRepository`, there are no `insert`, `update`, `remove`, or `upsert` methods. Projections are read-only.
---
diff --git a/docs/queries.md b/docs/queries.md
index b913c5833..64bfdaa2a 100644
--- a/docs/queries.md
+++ b/docs/queries.md
@@ -463,7 +463,7 @@ When working with large result sets, Storm supports three strategies for retriev
**Pagination** navigates by page number and includes a total count. It uses SQL `OFFSET` under the hood, which degrades on large tables. **Scrolling** uses keyset pagination for constant-time performance regardless of depth, but only supports sequential forward/backward navigation.
-For detailed usage, sorting, composite scrolling, `MappedWindow` vs `Window`, GROUP BY with scrolling, and REST cursor support, see [Pagination and Scrolling](pagination-and-scrolling.md).
+For detailed usage, sorting, composite scrolling, `Window` type parameters, GROUP BY with scrolling, and REST cursor support, see [Pagination and Scrolling](pagination-and-scrolling.md).
### Quick examples
@@ -485,9 +485,9 @@ val page: Page = orm.entity(User::class).select()
// Scrolling
val window: Window = userRepository.scroll(Scrollable.of(User_.id, 20))
-// nextScrollable() is non-null when the window has content.
+// next() is non-null when the window has content.
// hasNext is informational; the developer decides whether to follow the cursor.
-val next = userRepository.scroll(window.nextScrollable())
+val next = userRepository.scroll(window.next())
```
@@ -507,9 +507,9 @@ Page page = orm.entity(User.class).select()
// Scrolling
Window window = userRepository.scroll(Scrollable.of(User_.id, 20));
-// nextScrollable() is non-null when the window has content.
+// next() is non-null when the window has content.
// hasNext() is informational; the developer decides whether to follow the cursor.
-var next = userRepository.scroll(window.nextScrollable());
+var next = userRepository.scroll(window.next());
```
diff --git a/docs/repositories.md b/docs/repositories.md
index 24f1cb812..c7179ea23 100644
--- a/docs/repositories.md
+++ b/docs/repositories.md
@@ -46,7 +46,7 @@ EntityRepository userRepository = orm.entity(User.class);
-All CRUD operations use the entity's primary key (marked with `@PK`) for identity. Insert returns the entity with any database-generated fields populated (such as auto-increment IDs). Update and delete match by primary key. Query methods accept metamodel-based filter expressions that compile to parameterized WHERE clauses.
+All CRUD operations use the entity's primary key (marked with `@PK`) for identity. Insert returns the entity with any database-generated fields populated (such as auto-increment IDs). Update and remove match by primary key. Query methods accept metamodel-based filter expressions that compile to parameterized WHERE clauses.
```kotlin
// Create
@@ -64,14 +64,17 @@ val all: List = orm.findAll(User_.city eq city)
// Update
orm update user.copy(name = "Alice Johnson")
-// Delete
-orm delete user
+// Remove
+orm remove user
-// Delete by condition
-orm.delete(User_.city eq city)
+// Remove by condition
+orm.removeBy(User_.city, city)
-// Delete all
-orm.deleteAll()
+// Remove by predicate
+orm.removeAll(User_.active eq false)
+
+// Remove all
+orm.removeAll()
// Delete all (builder approach, requires unsafe() to confirm intent)
orm.entity(User::class).delete().unsafe().executeUpdate()
@@ -102,11 +105,11 @@ userRepository.update(new User(
user.id(), "alice@example.com", "Alice Johnson", user.birthDate(), user.city()
));
-// Delete
-userRepository.delete(user);
+// Remove
+userRepository.remove(user);
-// Delete all
-userRepository.deleteAll();
+// Remove all
+userRepository.removeAll();
// Delete all (builder approach, requires unsafe() to confirm intent)
userRepository.delete().unsafe().executeUpdate();
@@ -116,7 +119,7 @@ userRepository.delete().unsafe().executeUpdate();
:::warning Safety Check
-Storm rejects DELETE and UPDATE queries that have no WHERE clause, throwing a `PersistenceException`. This prevents accidental bulk deletions, which is especially important because `QueryBuilder` is immutable and a lost `where()` return value would silently drop the filter. Call `unsafe()` to opt out of this check when you intentionally want to affect all rows. The `deleteAll()` convenience method calls `unsafe()` internally.
+Storm rejects DELETE and UPDATE queries that have no WHERE clause, throwing a `PersistenceException`. This prevents accidental bulk deletions, which is especially important because `QueryBuilder` is immutable and a lost `where()` return value would silently drop the filter. Call `unsafe()` to opt out of this check when you intentionally want to affect all rows. The `removeAll()` convenience method calls `unsafe()` internally.
:::
Storm uses dirty checking to determine which columns to include in the UPDATE statement. See [Dirty Checking](dirty-checking.md) for configuration details.
@@ -279,7 +282,7 @@ Repositories provide convenience methods for scrolling through result sets, wher
The key parameter must be a `Metamodel.Key`, which is generated for fields annotated with `@UK` or `@PK`. See [Metamodel](metamodel.md#unique-keys-uk-and-metamodelkey) for details.
-The `scroll` method accepts a `Scrollable` that captures the cursor state (key, page size, direction, and cursor values) and returns a `Window` containing the page content, informational `hasNext`/`hasPrevious` flags, and `Scrollable` navigation tokens for fetching the adjacent window. Navigation tokens (`nextScrollable()`, `previousScrollable()`) are always present when the window has content; they are only `null` when the window is empty. The `hasNext` and `hasPrevious` flags indicate whether more results existed at query time, but they do not gate access to the navigation tokens. Since new data may appear after the query, the developer decides whether to follow a cursor.
+The `scroll` method accepts a `Scrollable` that captures the cursor state (key, page size, direction, and cursor values) and returns a `Window` containing the page content, informational `hasNext`/`hasPrevious` flags, and `Scrollable` navigation tokens for fetching the adjacent window. Navigation tokens (`next()`, `previous()`) are always present when the window has content; they are only `null` when the window is empty. The `hasNext` and `hasPrevious` flags indicate whether more results existed at query time, but they do not gate access to the navigation tokens. Since new data may appear after the query, the developer decides whether to follow a cursor.
Create a `Scrollable` using the factory methods, then use the navigation tokens on the returned `Window` to move forward or backward:
@@ -290,11 +293,11 @@ Create a `Scrollable` using the factory methods, then use the navigation tokens
// First page of 20 users ordered by ID
val window: Window = userRepository.scroll(Scrollable.of(User_.id, 20))
-// Next page (nextScrollable() is non-null whenever the window has content)
-val next: Window = userRepository.scroll(window.nextScrollable())
+// Next page (next() is non-null whenever the window has content)
+val next: Window = userRepository.scroll(window.next())
// Previous page
-val previous: Window = userRepository.scroll(window.previousScrollable())
+val previous: Window = userRepository.scroll(window.previous())
// Optionally check hasNext/hasPrevious to decide whether to follow the cursor.
// These flags reflect a snapshot at query time; new data may appear afterward.
@@ -311,7 +314,7 @@ val activeWindow = userRepository.select()
.scroll(Scrollable.of(User_.id, 20))
val nextActive = userRepository.select()
.where(User_.active, EQUALS, true)
- .scroll(activeWindow.nextScrollable())
+ .scroll(activeWindow.next())
```
For backward scrolling (starting from the end of the result set), use `.backward()`:
@@ -325,17 +328,17 @@ The scroll methods handle ordering internally and reject explicit `orderBy()` ca
-The same scrolling methods described in the Kotlin section are available on Java repositories. The `scroll` method accepts a `Scrollable` and returns a `Window` containing the page `content()`, informational `hasNext()`/`hasPrevious()` flags, and `Scrollable` navigation tokens that are always present when the window has content.
+The same scrolling methods described in the Kotlin section are available on Java repositories. The `scroll` method accepts a `Scrollable` and returns a `Window` containing the page `content()`, informational `hasNext()`/`hasPrevious()` flags, and `Scrollable` navigation tokens (`next()`, `previous()`) that are always present when the window has content.
```java
// First page of 20 users ordered by ID
Window window = userRepository.scroll(Scrollable.of(User_.id, 20));
-// Next page (nextScrollable() is non-null whenever the window has content)
-Window next = userRepository.scroll(window.nextScrollable());
+// Next page (next() is non-null whenever the window has content)
+Window next = userRepository.scroll(window.next());
// Previous page
-Window previous = userRepository.scroll(window.previousScrollable());
+Window previous = userRepository.scroll(window.previous());
// Optionally check hasNext/hasPrevious to decide whether to follow the cursor.
// These flags reflect a snapshot at query time; new data may appear afterward.
@@ -375,7 +378,7 @@ When you need to sort by a non-unique column (for example, a date or status), us
val window: Window = postRepository.scroll(Scrollable.of(Post_.id, Post_.createdAt, 20))
// Next page
-val next: Window = postRepository.scroll(window.nextScrollable())
+val next: Window = postRepository.scroll(window.next())
// With filter (use query builder)
val activeWindow = postRepository.select()
@@ -391,13 +394,13 @@ val activeWindow = postRepository.select()
Window window = postRepository.scroll(Scrollable.of(Post_.id, Post_.createdAt, 20));
// Next page
-Window next = postRepository.scroll(window.nextScrollable());
+Window next = postRepository.scroll(window.next());
```
-The `Window` carries navigation tokens (`nextScrollable()`, `previousScrollable()`) that encode the cursor values internally, so the client does not need to extract cursor values manually. These tokens are always non-null when the window contains content. For REST APIs, `nextCursor()` and `previousCursor()` provide a convenient serialized form: `nextCursor()` returns `null` when `hasNext` is false, and `previousCursor()` returns `null` when `hasPrevious` is false.
+The `Window` carries navigation tokens (`next()`, `previous()`) that encode the cursor values internally, so the client does not need to extract cursor values manually. These tokens are always non-null when the window contains content. For REST APIs, `nextCursor()` and `previousCursor()` provide a convenient serialized form: `nextCursor()` returns `null` when `hasNext` is false, and `previousCursor()` returns `null` when `hasPrevious` is false.
For queries that need joins, projections, or more complex filtering, use the query builder and call `scroll` as a terminal operation. See [Pagination and Scrolling: Scrolling](pagination-and-scrolling.md#scrolling) for full details on how scrolling composes with WHERE and ORDER BY clauses, including indexing recommendations.
@@ -415,8 +418,8 @@ Storm supports two strategies for traversing large result sets. The table below
| Performance at page 1 | Good | Good |
| Performance at page 1,000 | Degrades (database must skip rows) | Consistent (index seek) |
| Handles concurrent inserts | Rows may shift between pages | Stable cursor |
-| Navigate forward | `page.nextPageable()` | `window.nextScrollable()` |
-| Navigate backward | `page.previousPageable()` | `window.previousScrollable()` |
+| Navigate forward | `page.nextPageable()` | `window.next()` |
+| Navigate backward | `page.previousPageable()` | `window.previous()` |
Use pagination when you need random page access or a total count (for example, displaying "Page 3 of 12" in a UI). Use scrolling when you need consistent performance over deep result sets or when the data changes frequently between requests.
diff --git a/docs/security.md b/docs/security.md
index f29ee965c..8e806684d 100644
--- a/docs/security.md
+++ b/docs/security.md
@@ -46,7 +46,7 @@ SELECT ... FROM "user" WHERE "email" = ?
This applies to all Storm APIs, including:
-- Repository methods (`find`, `findAll`, `select`, `insert`, `update`, `delete`)
+- Repository methods (`find`, `findAll`, `select`, `insert`, `update`, `remove`, `delete`)
- Query builder operations (`.where()`, `.set()`, `.values()`)
- SQL templates with embedded expressions
diff --git a/docs/spring-integration.md b/docs/spring-integration.md
index c80c16835..5f20b907e 100644
--- a/docs/spring-integration.md
+++ b/docs/spring-integration.md
@@ -18,7 +18,7 @@ The starter modules provide zero-configuration setup: an `ORMTemplate` bean is c
```kotlin
// Gradle (Kotlin DSL)
-implementation("st.orm:storm-kotlin-spring-boot-starter:1.11.0")
+implementation("st.orm:storm-kotlin-spring-boot-starter:@@STORM_VERSION@@")
```
```xml
@@ -26,7 +26,7 @@ implementation("st.orm:storm-kotlin-spring-boot-starter:1.11.0")
st.orm
storm-kotlin-spring-boot-starter
- 1.11.0
+ @@STORM_VERSION@@
```
@@ -38,13 +38,13 @@ implementation("st.orm:storm-kotlin-spring-boot-starter:1.11.0")
st.orm
storm-spring-boot-starter
- 1.11.0
+ @@STORM_VERSION@@
```
```kotlin
// Gradle (Kotlin DSL)
-implementation("st.orm:storm-spring-boot-starter:1.11.0")
+implementation("st.orm:storm-spring-boot-starter:@@STORM_VERSION@@")
```
@@ -59,7 +59,7 @@ If you prefer manual configuration, or need to customize the setup beyond what t
```kotlin
// Gradle (Kotlin DSL)
-implementation("st.orm:storm-kotlin-spring:1.11.0")
+implementation("st.orm:storm-kotlin-spring:@@STORM_VERSION@@")
```
```xml
@@ -67,7 +67,7 @@ implementation("st.orm:storm-kotlin-spring:1.11.0")
st.orm
storm-kotlin-spring
- 1.11.0
+ @@STORM_VERSION@@
```
@@ -79,13 +79,13 @@ implementation("st.orm:storm-kotlin-spring:1.11.0")
st.orm
storm-spring
- 1.11.0
+ @@STORM_VERSION@@
```
```kotlin
// Gradle (Kotlin DSL)
-implementation("st.orm:storm-spring:1.11.0")
+implementation("st.orm:storm-spring:@@STORM_VERSION@@")
```
@@ -135,7 +135,7 @@ fun processUsers() {
transactionBlocking {
// Participates in Spring transaction
- orm.deleteAll()
+ orm.removeAll()
}
}
```
diff --git a/docs/sql-templates.md b/docs/sql-templates.md
index 66a55364b..916e2ea4e 100644
--- a/docs/sql-templates.md
+++ b/docs/sql-templates.md
@@ -55,7 +55,7 @@ orm.query(RAW."""
The `Data` interface marks a record or data class as eligible for Storm's SQL generation. Without this marker, Storm treats the type as a plain container and expects you to write all SQL manually. With it, template expressions like `${MyType::class}` in a SELECT clause expand into the full column list, and the same expression in a FROM clause generates the table name with appropriate joins for `@FK` fields.
-Use `Data` for query-specific result types that do not need full repository support (insert, update, delete). If you need CRUD operations, use `Entity` or `Projection` instead, which extend `Data`.
+Use `Data` for query-specific result types that do not need full repository support (insert, update, remove). If you need CRUD operations, use `Entity` or `Projection` instead, which extend `Data`.
diff --git a/docs/string-templates.md b/docs/string-templates.md
index 792ea8acc..66a713efe 100644
--- a/docs/string-templates.md
+++ b/docs/string-templates.md
@@ -55,7 +55,7 @@ Add the Storm compiler plugin to your Kotlin compiler configuration. The plugin
| 2.2.x | `storm-compiler-plugin-2.2` |
| 2.3.x | `storm-compiler-plugin-2.3` |
-The artifact version matches the Storm version (e.g., `1.11.0`).
+The artifact version matches the Storm version (e.g., `@@STORM_VERSION@@`).
diff --git a/docs/transactions.md b/docs/transactions.md
index d7b1384cb..8958830ed 100644
--- a/docs/transactions.md
+++ b/docs/transactions.md
@@ -31,7 +31,7 @@ Use `transaction` for coroutine code:
```kotlin
transaction {
- orm.deleteAll()
+ orm.removeAll()
orm insert User(email = "alice@example.com", name = "Alice")
// Commits automatically on success, rolls back on exception
}
@@ -59,7 +59,7 @@ Use `transactionBlocking` for synchronous code:
```kotlin
transactionBlocking {
- orm.deleteAll()
+ orm.removeAll()
orm insert User(email = "alice@example.com", name = "Alice")
// Commits automatically on success, rolls back on exception
}
@@ -584,7 +584,7 @@ Long-running transactions hold database locks and consume connection pool resour
```kotlin
transaction(timeoutSeconds = 30) {
- orm.deleteAll()
+ orm.removeAll()
delay(35_000) // Will cause timeout
}
```
@@ -606,7 +606,7 @@ Sometimes you need to abort a transaction based on a runtime condition rather th
```kotlin
transaction {
- orm.deleteAll()
+ orm.removeAll()
if (someCondition) {
setRollbackOnly() // Mark for rollback
@@ -959,14 +959,14 @@ When you need different transaction settings for a specific section of code with
withTransactionOptions(timeoutSeconds = 60) {
transaction {
// Uses 60 second timeout
- orm.deleteAll()
+ orm.removeAll()
}
}
withTransactionOptionsBlocking(isolation = SERIALIZABLE) {
transactionBlocking {
// Uses SERIALIZABLE isolation
- orm.deleteAll()
+ orm.removeAll()
}
}
```
diff --git a/pom.xml b/pom.xml
index fa86bfc8d..c85ad3d10 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,6 +3,7 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4.0.0
+ 1.11.1
21
${java.version}
${java.version}
@@ -18,7 +19,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
pom
Storm Framework
A SQL Template and ORM framework, focusing on modernizing and simplifying database programming.
@@ -171,9 +172,34 @@
jacoco-maven-plugin
${jacoco.version}
+
+ org.codehaus.mojo
+ flatten-maven-plugin
+ 1.6.0
+
+ true
+ resolveCiFriendliesOnly
+
+
+
+ flatten
+ process-resources
+ flatten
+
+
+ flatten.clean
+ clean
+ clean
+
+
+
+
+ org.codehaus.mojo
+ flatten-maven-plugin
+
org.jacoco
jacoco-maven-plugin
diff --git a/storm-bom/pom.xml b/storm-bom/pom.xml
index db7303409..6da08aafc 100644
--- a/storm-bom/pom.xml
+++ b/storm-bom/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-bom
diff --git a/storm-cli/storm.mjs b/storm-cli/storm.mjs
index 6f32ac342..6f7960c9c 100644
--- a/storm-cli/storm.mjs
+++ b/storm-cli/storm.mjs
@@ -1862,6 +1862,7 @@ async function demo() {
{ name: 'Cursor', value: 'cursor' },
{ name: 'GitHub Copilot', value: 'copilot' },
{ name: 'Windsurf', value: 'windsurf' },
+ { name: 'Codex', value: 'codex' },
],
});
@@ -2040,6 +2041,10 @@ async function demo() {
console.log(` Start ${boltYellow('Claude Code')} in this directory and type:`);
console.log();
console.log(` ${bold('/storm-demo')}`);
+ } else if (tool === 'codex') {
+ console.log(` Start ${boltYellow('Codex')} in this directory and ask:`);
+ console.log();
+ console.log(` ${bold('Run the Storm demo')}`);
} else {
console.log(` Open this directory in ${boltYellow(config.name)} and ask:`);
console.log();
diff --git a/storm-compiler-plugin/kotlin-2.0/pom.xml b/storm-compiler-plugin/kotlin-2.0/pom.xml
index 2eece00b4..4f0771759 100644
--- a/storm-compiler-plugin/kotlin-2.0/pom.xml
+++ b/storm-compiler-plugin/kotlin-2.0/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-compiler-plugin
- 1.11.0
+ ${revision}
../pom.xml
storm-compiler-plugin-2.0
diff --git a/storm-compiler-plugin/kotlin-2.1/pom.xml b/storm-compiler-plugin/kotlin-2.1/pom.xml
index 6e47bcb86..d40d43193 100644
--- a/storm-compiler-plugin/kotlin-2.1/pom.xml
+++ b/storm-compiler-plugin/kotlin-2.1/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-compiler-plugin
- 1.11.0
+ ${revision}
../pom.xml
storm-compiler-plugin-2.1
diff --git a/storm-compiler-plugin/kotlin-2.2/pom.xml b/storm-compiler-plugin/kotlin-2.2/pom.xml
index 0cdfa4367..451fb90cd 100644
--- a/storm-compiler-plugin/kotlin-2.2/pom.xml
+++ b/storm-compiler-plugin/kotlin-2.2/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-compiler-plugin
- 1.11.0
+ ${revision}
../pom.xml
storm-compiler-plugin-2.2
diff --git a/storm-compiler-plugin/kotlin-2.3/pom.xml b/storm-compiler-plugin/kotlin-2.3/pom.xml
index cc681f569..dddc4df1e 100644
--- a/storm-compiler-plugin/kotlin-2.3/pom.xml
+++ b/storm-compiler-plugin/kotlin-2.3/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-compiler-plugin
- 1.11.0
+ ${revision}
../pom.xml
storm-compiler-plugin-2.3
diff --git a/storm-compiler-plugin/pom.xml b/storm-compiler-plugin/pom.xml
index 34fc9f6a9..b09603131 100644
--- a/storm-compiler-plugin/pom.xml
+++ b/storm-compiler-plugin/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-compiler-plugin
diff --git a/storm-core/pom.xml b/storm-core/pom.xml
index 307bba652..0e23cc541 100644
--- a/storm-core/pom.xml
+++ b/storm-core/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-core
diff --git a/storm-core/src/main/java/st/orm/core/repository/EntityRepository.java b/storm-core/src/main/java/st/orm/core/repository/EntityRepository.java
index 31a2e5a06..19dacdbea 100644
--- a/storm-core/src/main/java/st/orm/core/repository/EntityRepository.java
+++ b/storm-core/src/main/java/st/orm/core/repository/EntityRepository.java
@@ -147,11 +147,11 @@
*
* Delete
*
- * Delete user in the database. The repository also supports updates for multiple entries in batch mode by passing a
- * list entities or primary keys. Alternatively, deletion can be executed in using a stream of entities.
+ * <p>Remove user from the database. The repository also supports removals for multiple entries in batch mode by passing a
+ * list of entities or primary keys. Alternatively, removal can be executed using a stream of entities.
*
{@code
* User user = ...;
- * userRepository.delete(user);
+ * userRepository.remove(user);
* }
*
* Also here, the QueryBuilder can be used to create specialized statement, for instance, to delete all users where
@@ -491,55 +491,55 @@ public interface EntityRepository, ID> extends Repository {
E upsertAndFetch(@Nonnull E entity);
/**
- * Deletes an entity from the database.
+ * Removes an entity from the database.
*
* This method removes an existing entity from the database. The entity must exist in the database; if it does
- * not, a {@link PersistenceException} is thrown. Unlike {@link #deleteById} and {@link #deleteByRef}, this method
+ * not, a {@link PersistenceException} is thrown. Unlike {@link #removeById} and {@link #removeByRef}, this method
* is strict rather than idempotent, because possessing the full entity implies the caller expects it to exist.
*
- * @param entity the entity to delete. The entity must exist in the database and should be correctly identified by
+ * @param entity the entity to remove. The entity must exist in the database and should be correctly identified by
* its primary key.
- * @throws PersistenceException if the deletion operation fails. Reasons for failure might include the entity not
+ * @throws PersistenceException if the removal operation fails. Reasons for failure might include the entity not
* being found in the database, violations of database constraints, connectivity
* issues, or if the entity parameter is null.
*/
- void delete(@Nonnull E entity);
+ void remove(@Nonnull E entity);
/**
- * Deletes an entity from the database based on its primary key.
+ * Removes an entity from the database based on its primary key.
*
* This method ensures the entity with the given primary key is removed from the database. If the entity does
* not exist, the operation completes successfully without error (idempotent behavior).
*
- * @param id the primary key of the entity to delete.
- * @throws PersistenceException if the deletion operation fails due to violations of database constraints,
+ * @param id the primary key of the entity to remove.
+ * @throws PersistenceException if the removal operation fails due to violations of database constraints,
* connectivity issues, or if the id parameter is null.
*/
- void deleteById(@Nonnull ID id);
+ void removeById(@Nonnull ID id);
/**
- * Deletes an entity from the database by its reference.
+ * Removes an entity from the database by its reference.
*
* This method ensures the entity identified by the given reference is removed from the database. If the entity
* does not exist, the operation completes successfully without error (idempotent behavior).
*
- * @param ref the reference to the entity to delete.
- * @throws PersistenceException if the deletion operation fails due to violations of database constraints,
+ * @param ref the reference to the entity to remove.
+ * @throws PersistenceException if the removal operation fails due to violations of database constraints,
* connectivity issues, or if the ref parameter is null.
*/
- void deleteByRef(@Nonnull Ref ref);
+ void removeByRef(@Nonnull Ref ref);
/**
- * Deletes all entities from the database.
+ * Removes all entities from the database.
*
- * This method performs a bulk deletion operation, removing all instances of the entities managed by this
+ * <p>This method performs a bulk removal operation, removing all instances of the entities managed by this
* repository from the database.
*
- * @throws PersistenceException if the bulk deletion operation fails. Failure can occur for several reasons,
+ * @throws PersistenceException if the bulk removal operation fails. Failure can occur for several reasons,
* including but not limited to database access issues, transaction failures, or
- * underlying database constraints that prevent the deletion of certain records.
+ * underlying database constraints that prevent the removal of certain records.
*/
- void deleteAll();
+ void removeAll();
// Singular findBy methods.
@@ -721,8 +721,7 @@ default Page[> pageRef(@Nonnull Pageable pageable) {
* Scrolls through entities using the given scrollable request.
*
* <p>This is a convenience method that delegates to {@code select().scroll(scrollable)}. It is typically used
- * with a {@link Scrollable} obtained from {@link Window#nextScrollable()} or
- * {@link Window#previousScrollable()}.
+ * with a {@link Scrollable} obtained from {@link Window#next()} or {@link Window#previous()}.
*
* @param scrollable the scroll request describing cursor position and page size.
* @return a window containing the results.
@@ -730,7 +729,7 @@ default Page[> pageRef(@Nonnull Pageable pageable) {
* @since 1.11
*/
default Window scroll(@Nonnull Scrollable scrollable) {
- return Window.of(select().scroll(scrollable));
+ return select().scroll(scrollable);
}
// List based methods.
@@ -748,6 +747,23 @@ default Window scroll(@Nonnull Scrollable scrollable) {
*/
List findAll();
+ /**
+ * Returns a list of refs to all entities of the type supported by this repository. Each element in the list
+ * represents a lightweight reference to an entity in the database, containing only the primary key.
+ *
+ * <p>This method is useful when you need to retrieve all entity identifiers without loading the full entity data.
+ * The complete entity can be fetched on demand by calling {@link Ref#fetch()} on any of the returned refs.
+ *
+ * Note: While this method is more memory-efficient than {@link #findAll()} since it only
+ * loads primary keys, loading all refs into memory at once can still be memory-intensive for very large tables.
+ *
+ * @return a list of refs to all entities of the type supported by this repository.
+ * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
+ * connectivity.
+ * @since 1.3
+ */
+ List<Ref<E>> findAllRef();
+
/**
* Retrieves a list of entities based on their primary keys.
*
@@ -961,149 +977,35 @@ default Window scroll(@Nonnull Scrollable scrollable) {
List upsertAndFetch(@Nonnull Iterable entities);
/**
- * Deletes a collection of entities from the database in batches.
+ * Removes a collection of entities from the database in batches.
*
* <p>This method processes the provided entities in batches to optimize performance when handling larger collections,
* reducing database overhead. For each entity in the collection, the method removes the corresponding record from
- * the database, if it exists. Batch processing ensures efficient handling of deletions, particularly for large data sets.
+ * the database, if it exists. Batch processing ensures efficient handling of removals, particularly for large data sets.
*
- * @param entities an iterable collection of entities to be deleted. Each entity in the collection must be non-null
- * and represent a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param entities an iterable collection of entities to be removed. Each entity in the collection must be non-null
+ * and represent a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void delete(@Nonnull Iterable entities);
+ void remove(@Nonnull Iterable entities);
/**
- * Deletes a collection of entities from the database in batches.
+ * Removes a collection of entities from the database in batches.
*
* This method processes the provided entities in batches to optimize performance when handling larger collections,
* reducing database overhead. For each entity in the collection, the method removes the corresponding record from
- * the database, if it exists. Batch processing ensures efficient handling of deletions, particularly for large data sets.
+ * the database, if it exists. Batch processing ensures efficient handling of removals, particularly for large data sets.
*
- * @param refs an iterable collection of entities to be deleted. Each entity in the collection must be non-null
- * and represent a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param refs an iterable collection of refs to the entities to be removed. Each ref in the collection must be non-null
+ * and identify a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void deleteByRef(@Nonnull Iterable[> refs);
+ void removeByRef(@Nonnull Iterable<Ref<E>> refs);
// Stream based methods.
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
- * ]This method executes queries in batches, depending on the number of primary keys in the specified ids stream.
- * This optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities.
- * The batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param ids a stream of entity IDs to retrieve from the repository.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of ids in the input stream. If an id does not correspond to any entity
- * in the database, it will simply be skipped, and no corresponding entity will be included in the returned
- * stream. If the same entity is requested multiple times, it may be included in the stream multiple times
- * if it is part of a separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectById(@Nonnull Stream ids);
-
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
- * This method executes queries in batches, depending on the number of primary keys in the specified ids stream.
- * This optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities.
- * The batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param refs a stream of refs to retrieve from the repository.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of ids in the input stream. If an id does not correspond to any entity
- * in the database, it will simply be skipped, and no corresponding entity will be included in the returned
- * stream. If the same entity is requested multiple times, it may be included in the stream multiple times
- * if it is part of a separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectByRef(@Nonnull Stream[> refs);
-
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
- * ]This method executes queries in batches, with the batch size determined by the provided parameter. This
- * optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities. The
- * batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param ids a stream of entity IDs to retrieve from the repository.
- * @param chunkSize the number of primary keys to include in each batch. This parameter determines the size of the
- * batches used to execute the selection operation. A larger batch size can improve performance, especially when
- * dealing with large sets of primary keys.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of refs in the input stream. If an id does not correspond to any entity in the
- * database, it will simply be skipped, and no corresponding entity will be included in the returned stream. If the
- * same entity is requested multiple times, it may be included in the stream multiple times if it is part of a
- * separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectById(@Nonnull Stream ids, int chunkSize);
-
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
- * This method executes queries in batches, with the batch size determined by the provided parameter. This
- * optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities. The
- * batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param refs a stream of refs to retrieve from the repository.
- * @param chunkSize the number of primary keys to include in each batch. This parameter determines the size of the
- * batches used to execute the selection operation. A larger batch size can improve performance, especially when
- * dealing with large sets of primary keys.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of refs in the input stream. If an id does not correspond to any entity in the
- * database, it will simply be skipped, and no corresponding entity will be included in the returned stream. If the
- * same entity is requested multiple times, it may be included in the stream multiple times if it is part of a
- * separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectByRef(@Nonnull Stream[> refs, int chunkSize);
-
/**
* Counts the number of entities identified by the provided stream of IDs using the default batch size.
*
@@ -1292,68 +1194,68 @@ default Window scroll(@Nonnull Scrollable scrollable) {
void upsert(@Nonnull Stream entities, int batchSize);
/**
- * Deletes a stream of entities from the database in batches.
+ * Removes a stream of entities from the database in batches.
*
* <p>This method processes the provided stream of entities in batches to optimize performance for larger
- * data sets, reducing database overhead during deletion. For each entity in the stream, the method removes
+ * data sets, reducing database overhead during removal. For each entity in the stream, the method removes
* the corresponding record from the database, if it exists. Batch processing allows efficient handling
- * of deletions, particularly for large collections of entities.
+ * of removals, particularly for large collections of entities.
*
- * @param entities a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param entities a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void delete(@Nonnull Stream entities);
+ void remove(@Nonnull Stream entities);
/**
- * Deletes a stream of entities from the database in configurable batch sizes.
+ * Removes a stream of entities from the database in configurable batch sizes.
*
* This method processes the provided stream of entities in batches, with the size of each batch specified
- * by the `batchSize` parameter. This allows for control over the number of entities deleted in each database
+ * by the `batchSize` parameter. This allows for control over the number of entities removed in each database
* operation, optimizing performance and memory usage based on system requirements. For each entity in the
* stream, the method removes the corresponding record from the database, if it exists.
*
- * @param entities a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
+ * @param entities a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * a valid database record for removal.
* @param batchSize the number of entities to process in each batch. Larger batch sizes may improve performance
* but require more memory, while smaller batch sizes may reduce memory usage but increase
* the number of database operations.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void delete(@Nonnull Stream entities, int batchSize);
+ void remove(@Nonnull Stream entities, int batchSize);
/**
- * Deletes a stream of entities from the database in batches.
+ * Removes a stream of entities from the database in batches.
*
* This method processes the provided stream of entities in batches to optimize performance for larger
- * data sets, reducing database overhead during deletion. For each entity in the stream, the method removes
+ * data sets, reducing database overhead during removal. For each entity in the stream, the method removes
* the corresponding record from the database, if it exists. Batch processing allows efficient handling
- * of deletions, particularly for large collections of entities.
+ * of removals, particularly for large collections of entities.
*
- * @param refs a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param refs a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void deleteByRef(@Nonnull Stream<Ref<E>> refs);
+ void removeByRef(@Nonnull Stream<Ref<E>> refs);
/**
- * Deletes a stream of entities from the database in configurable batch sizes.
+ * Removes a stream of entities from the database in configurable batch sizes.
*
* <p>This method processes the provided stream of entities in batches, with the size of each batch specified
- * by the `batchSize` parameter. This allows for control over the number of entities deleted in each database
+ * by the `batchSize` parameter. This allows for control over the number of entities removed in each database
* operation, optimizing performance and memory usage based on system requirements. For each entity in the
* stream, the method removes the corresponding record from the database, if it exists.
*
- * @param refs a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * valid database record for deletion.
+ * @param refs a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * valid database record for removal.
* @param batchSize the number of entities to process in each batch. Larger batch sizes may improve performance
* but require more memory, while smaller batch sizes may reduce memory usage but increase
* the number of database operations.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void deleteByRef(@Nonnull Stream<Ref<E>> refs, int batchSize);
+ void removeByRef(@Nonnull Stream<Ref<E>> refs, int batchSize);
}
diff --git a/storm-core/src/main/java/st/orm/core/repository/ProjectionRepository.java b/storm-core/src/main/java/st/orm/core/repository/ProjectionRepository.java
index 2841233a7..513414b8c 100644
--- a/storm-core/src/main/java/st/orm/core/repository/ProjectionRepository.java
+++ b/storm-core/src/main/java/st/orm/core/repository/ProjectionRepository.java
@@ -362,8 +362,7 @@ default Page<Ref<P>> pageRef(@Nonnull Pageable pageable) {
* Scrolls through projections using the given scrollable request.
*
* <p>This is a convenience method that delegates to {@code select().scroll(scrollable)}. It is typically used
- * with a {@link Scrollable} obtained from {@link Window#nextScrollable()} or
- * {@link Window#previousScrollable()}.
+ * with a {@link Scrollable} obtained from {@link Window#next()} or {@link Window#previous()}.
*
* @param scrollable the scroll request describing cursor position and page size.
* @return a window containing the results.
@@ -371,7 +370,7 @@ default Page<Ref<P>> pageRef(@Nonnull Pageable pageable) {
* @since 1.11
*/
default Window<P> scroll(@Nonnull Scrollable<P> scrollable) {
- return Window.of(select().scroll(scrollable));
+ return select().scroll(scrollable);
}
// List based methods.
@@ -389,6 +388,24 @@ default Window<P> scroll(@Nonnull Scrollable<P> scrollable) {
*/
List<P> findAll();
+ /**
+ * Returns a list of refs to all projections of the type supported by this repository. Each element in the list
+ * represents a lightweight reference to a projection in the database, containing only the primary key.
+ *
+ * <p>This method is useful when you need to retrieve all projection identifiers without loading the full
+ * projection data. The complete projection can be fetched on demand by calling {@link Ref#fetch()} on any of
+ * the returned refs.
+ *
+ * Note: While this method is more memory-efficient than {@link #findAll()} since it only
+ * loads primary keys, loading all refs into memory at once can still be memory-intensive for very large tables.
+ *
+ * @return a list of refs to all projections of the type supported by this repository.
+ * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
+ * connectivity.
+ * @since 1.3
+ */
+ List<Ref<P>> findAllRef();
+
/**
* Retrieves a list of projections based on their primary keys.
*
diff --git a/storm-core/src/main/java/st/orm/core/repository/impl/BaseRepositoryImpl.java b/storm-core/src/main/java/st/orm/core/repository/impl/BaseRepositoryImpl.java
index 97d273d4c..750b2707f 100644
--- a/storm-core/src/main/java/st/orm/core/repository/impl/BaseRepositoryImpl.java
+++ b/storm-core/src/main/java/st/orm/core/repository/impl/BaseRepositoryImpl.java
@@ -387,6 +387,10 @@ public List findAll() {
return select().getResultList();
}
+ public List<Ref<E>> findAllRef() {
+ return selectRef().getResultList();
+ }
+
/**
* Retrieves a list of entities based on their primary keys.
*
diff --git a/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java b/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java
index 0f0bc1cab..743b75ff0 100644
--- a/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java
+++ b/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java
@@ -1027,17 +1027,17 @@ public E upsertAndFetch(@Nonnull E entity) {
* Deletes an entity from the database.
*
* <p>This method removes an existing entity from the database. The entity must exist in the database; if it does
- * not, a {@link PersistenceException} is thrown. Unlike {@link #deleteById} and {@link #deleteByRef}, this method
+ * not, a {@link PersistenceException} is thrown. Unlike {@link #removeById} and {@link #removeByRef}, this method
* is strict rather than idempotent, because possessing the full entity implies the caller expects it to exist.
*
- * @param entity the entity to delete. The entity must exist in the database and should be correctly identified by
+ * @param entity the entity to remove. The entity must exist in the database and should be correctly identified by
* its primary key.
- * @throws PersistenceException if the deletion operation fails. Reasons for failure might include the entity not
+ * @throws PersistenceException if the removal operation fails. Reasons for failure might include the entity not
* being found in the database, violations of database constraints, connectivity
* issues, or if the entity parameter is null.
*/
@Override
- public void delete(@Nonnull E entity) {
+ public void remove(@Nonnull E entity) {
validateDelete(entity);
fireBeforeDelete(entity);
entityCache().ifPresent(cache -> {
@@ -1046,7 +1046,7 @@ public void delete(@Nonnull E entity) {
}
});
if (model.isJoinedInheritance()) {
- JoinedEntityHelper.delete(ormTemplate, model, entity);
+ JoinedEntityHelper.remove(ormTemplate, model, entity);
fireAfterDelete(entity);
return;
}
@@ -1057,26 +1057,26 @@ public void delete(@Nonnull E entity) {
.managed()
.executeUpdate();
if (result != 1) {
- throw new PersistenceException("Delete of %s failed. 0 rows were affected, possibly because the entity does not exist or a foreign key constraint prevents deletion.".formatted(model.type().getSimpleName()));
+ throw new PersistenceException("Remove of %s failed. 0 rows were affected, possibly because the entity does not exist or a foreign key constraint prevents deletion.".formatted(model.type().getSimpleName()));
}
fireAfterDelete(entity);
}
/**
- * Deletes an entity from the database based on its primary key.
+ * Removes an entity from the database based on its primary key.
*
* This method ensures the entity with the given primary key is removed from the database. If the entity does
* not exist, the operation completes successfully without error (idempotent behavior).
*
- * @param id the primary key of the entity to delete.
- * @throws PersistenceException if the deletion operation fails due to violations of database constraints,
+ * @param id the primary key of the entity to remove.
+ * @throws PersistenceException if the removal operation fails due to violations of database constraints,
* connectivity issues, or if the id parameter is null.
*/
@Override
- public void deleteById(@Nonnull ID id) {
+ public void removeById(@Nonnull ID id) {
entityCache().ifPresent(cache -> cache.remove(id));
if (model.isJoinedInheritance()) {
- JoinedEntityHelper.deleteById(ormTemplate, model, id);
+ JoinedEntityHelper.removeById(ormTemplate, model, id);
return;
}
// Don't use query builder to prevent WHERE IN clause.
@@ -1088,17 +1088,17 @@ public void deleteById(@Nonnull ID id) {
}
/**
- * Deletes an entity from the database by its reference.
+ * Removes an entity from the database by its reference.
*
* This method ensures the entity identified by the given reference is removed from the database. If the entity
* does not exist, the operation completes successfully without error (idempotent behavior).
*
- * @param ref the reference to the entity to delete.
- * @throws PersistenceException if the deletion operation fails due to violations of database constraints,
+ * @param ref the reference to the entity to remove.
+ * @throws PersistenceException if the removal operation fails due to violations of database constraints,
* connectivity issues, or if the ref parameter is null.
*/
@Override
- public void deleteByRef(@Nonnull Ref ref) {
+ public void removeByRef(@Nonnull Ref ref) {
//noinspection unchecked
entityCache().ifPresent(cache -> cache.remove((ID) ref.id()));
// Don't use query builder to prevent WHERE IN clause.
@@ -1110,17 +1110,17 @@ public void deleteByRef(@Nonnull Ref ref) {
}
/**
- * Deletes all entities from the database.
+ * Removes all entities from the database.
*
- * This method performs a bulk deletion operation, removing all instances of the entities managed by this
+ * <p>This method performs a bulk removal operation, removing all instances of the entities managed by this
* repository from the database.
*
- * @throws PersistenceException if the bulk deletion operation fails. Failure can occur for several reasons,
+ * @throws PersistenceException if the bulk removal operation fails. Failure can occur for several reasons,
* including but not limited to database access issues, transaction failures, or
- * underlying database constraints that prevent the deletion of certain records.
+ * underlying database constraints that prevent the removal of certain records.
*/
@Override
- public void deleteAll() {
+ public void removeAll() {
entityCache().ifPresent(EntityCache::clear);
// Don't use query builder to prevent WHERE IN clause.
ormTemplate.query(TemplateString.raw("DELETE FROM \0", model.type()))
@@ -1393,37 +1393,37 @@ public List upsertAndFetch(@Nonnull Iterable entities) {
}
/**
- * Deletes a collection of entities from the database in batches.
+ * Removes a collection of entities from the database in batches.
*
* This method processes the provided entities in batches to optimize performance when handling larger collections,
* reducing database overhead. For each entity in the collection, the method removes the corresponding record from
- * the database, if it exists. Batch processing ensures efficient handling of deletions, particularly for large data sets.
+ * the database, if it exists. Batch processing ensures efficient handling of removals, particularly for large data sets.
*
- * @param entities an iterable collection of entities to be deleted. Each entity in the collection must be non-null
- * and represent a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param entities an iterable collection of entities to be removed. Each entity in the collection must be non-null
+ * and represent a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
@Override
- public void delete(@Nonnull Iterable entities) {
- delete(toStream(entities), defaultBatchSize);
+ public void remove(@Nonnull Iterable entities) {
+ remove(toStream(entities), defaultBatchSize);
}
/**
- * Deletes a collection of entities from the database in batches.
+ * Removes a collection of entities from the database in batches.
*
* This method processes the provided entities in batches to optimize performance when handling larger collections,
* reducing database overhead. For each entity in the collection, the method removes the corresponding record from
- * the database, if it exists. Batch processing ensures efficient handling of deletions, particularly for large data sets.
+ * the database, if it exists. Batch processing ensures efficient handling of removals, particularly for large data sets.
*
- * @param refs an iterable collection of entities to be deleted. Each entity in the collection must be non-null
- * and represent a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param refs an iterable collection of entities to be removed. Each entity in the collection must be non-null
+ * and represent a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
@Override
- public void deleteByRef(@Nonnull Iterable<Ref<E>> refs) {
- deleteByRef(toStream(refs), defaultBatchSize);
+ public void removeByRef(@Nonnull Iterable<Ref<E>> refs) {
+ removeByRef(toStream(refs), defaultBatchSize);
}
// Stream based methods. These methods operate in multiple batches.
@@ -1894,41 +1894,41 @@ protected List doUpsertAndFetchIdsBatch(@Nonnull List batch, @Nonnull Pre
}
/**
- * Deletes a stream of entities from the database in batches.
+ * Removes a stream of entities from the database in batches.
*
* <p>This method processes the provided stream of entities in batches to optimize performance for larger
- * data sets, reducing database overhead during deletion. For each entity in the stream, the method removes
+ * data sets, reducing database overhead during removal. For each entity in the stream, the method removes
* the corresponding record from the database, if it exists. Batch processing allows efficient handling
- * of deletions, particularly for large collections of entities.
+ * of removals, particularly for large collections of entities.
*
- * @param entities a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param entities a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
@Override
- public void delete(@Nonnull Stream entities) {
- delete(entities, defaultBatchSize);
+ public void remove(@Nonnull Stream entities) {
+ remove(entities, defaultBatchSize);
}
/**
- * Deletes a stream of entities from the database in configurable batch sizes.
+ * Removes a stream of entities from the database in configurable batch sizes.
*
* This method processes the provided stream of entities in batches, with the size of each batch specified
- * by the `batchSize` parameter. This allows for control over the number of entities deleted in each database
+ * by the `batchSize` parameter. This allows for control over the number of entities removed in each database
* operation, optimizing performance and memory usage based on system requirements. For each entity in the
* stream, the method removes the corresponding record from the database, if it exists.
*
- * @param entities a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
+ * @param entities a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * a valid database record for removal.
* @param batchSize the number of entities to process in each batch. Larger batch sizes may improve performance
* but require more memory, while smaller batch sizes may reduce memory usage but increase
* the number of database operations.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
@Override
- public void delete(@Nonnull Stream entities, int batchSize) {
+ public void remove(@Nonnull Stream entities, int batchSize) {
if (model.isJoinedInheritance()) {
var entityCache = entityCache();
chunked(entities, batchSize).forEach(batch -> {
@@ -1939,7 +1939,7 @@ public void delete(@Nonnull Stream entities, int batchSize) {
entityCache.ifPresent(cache -> batch.stream()
.filter(e -> !model.isDefaultPrimaryKey(e.id()))
.forEach(e -> cache.remove(e.id())));
- JoinedEntityHelper.deleteBatch(ormTemplate, model, batch);
+ JoinedEntityHelper.removeBatch(ormTemplate, model, batch);
batch.forEach(this::fireAfterDelete);
});
return;
@@ -1959,7 +1959,7 @@ public void delete(@Nonnull Stream entities, int batchSize) {
.forEach(e -> cache.remove(e.id())));
int[] result = query.executeBatch();
if (IntStream.of(result).anyMatch(r -> r != 1)) {
- throw new PersistenceException("Batch delete of %s failed. One or more rows were not affected.".formatted(model.type().getSimpleName()));
+ throw new PersistenceException("Batch remove of %s failed. One or more rows were not affected.".formatted(model.type().getSimpleName()));
}
chunk.forEach(this::fireAfterDelete);
});
@@ -1967,41 +1967,41 @@ public void delete(@Nonnull Stream entities, int batchSize) {
}
/**
- * Deletes a stream of entities from the database in batches.
+ * Removes a stream of entities from the database in batches.
*
* This method processes the provided stream of entities in batches to optimize performance for larger
- * data sets, reducing database overhead during deletion. For each entity in the stream, the method removes
+ * data sets, reducing database overhead during removal. For each entity in the stream, the method removes
* the corresponding record from the database, if it exists. Batch processing allows efficient handling
- * of deletions, particularly for large collections of entities.
+ * of removals, particularly for large collections of entities.
*
- * @param refs a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param refs a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
@Override
- public void deleteByRef(@Nonnull Stream<Ref<E>> refs) {
- deleteByRef(refs, defaultBatchSize);
+ public void removeByRef(@Nonnull Stream<Ref<E>> refs) {
+ removeByRef(refs, defaultBatchSize);
}
/**
- * Deletes a stream of entities from the database in configurable batch sizes.
+ * Removes a stream of entities from the database in configurable batch sizes.
*
* <p>This method processes the provided stream of entities in batches, with the size of each batch specified
- * by the `batchSize` parameter. This allows for control over the number of entities deleted in each database
+ * by the `batchSize` parameter. This allows for control over the number of entities removed in each database
* operation, optimizing performance and memory usage based on system requirements. For each entity in the
* stream, the method removes the corresponding record from the database, if it exists.
*
- * @param refs a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * valid database record for deletion.
+ * @param refs a stream of entities to be removed. Each entity in the stream must be non-null and represent
+ * valid database record for removal.
* @param batchSize the number of entities to process in each batch. Larger batch sizes may improve performance
* but require more memory, while smaller batch sizes may reduce memory usage but increase
* the number of database operations.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
@Override
- public void deleteByRef(@Nonnull Stream<Ref<E>> refs, int batchSize) {
+ public void removeByRef(@Nonnull Stream<Ref<E>> refs, int batchSize) {
if (model.isJoinedInheritance()) {
var entityCache = entityCache();
chunked(refs, batchSize).forEach(chunk -> {
@@ -2009,7 +2009,7 @@ public void deleteByRef(@Nonnull Stream<Ref<E>> refs, int batchSize) {
entityCache.ifPresent(cache -> chunk.stream()
.filter(r -> !model.isDefaultPrimaryKey((ID) r.id()))
.forEach(r -> cache.remove((ID) r.id())));
- JoinedEntityHelper.deleteBatchByRef(ormTemplate, model, chunk);
+ JoinedEntityHelper.removeBatchByRef(ormTemplate, model, chunk);
});
return;
}
diff --git a/storm-core/src/main/java/st/orm/core/spi/ORMReflection.java b/storm-core/src/main/java/st/orm/core/spi/ORMReflection.java
index bcffc9857..14d317fe4 100644
--- a/storm-core/src/main/java/st/orm/core/spi/ORMReflection.java
+++ b/storm-core/src/main/java/st/orm/core/spi/ORMReflection.java
@@ -21,45 +21,172 @@
import java.util.List;
import java.util.Optional;
import st.orm.Data;
+import st.orm.PK;
import st.orm.PersistenceException;
import st.orm.mapping.RecordField;
import st.orm.mapping.RecordType;
/**
* Provides pluggable reflection support for the ORM to support different JVM languages, such as Java and Kotlin.
+ *
+ * <p>The Java implementation ({@code DefaultORMReflectionImpl}) handles Java records using the standard
+ * {@link java.lang.reflect.RecordComponent} API. The Kotlin implementation ({@code ORMReflectionImpl}) additionally
+ * handles Kotlin data classes using the Kotlin reflection API ({@code KClass}, {@code KProperty1}, etc.), falling back
+ * to the Java implementation for plain Java records.
*/
public interface ORMReflection {
+ /**
+ * Returns the primary key value of the specified {@link Data} instance.
+ *
+ * Locates the first field annotated with {@link PK} via {@link #getRecordType(Class)} and invokes its accessor
+ * to retrieve the value. The result is cached per class for subsequent lookups.
+ *
+ * @param data the data instance to extract the primary key from.
+ * @return the primary key value.
+ * @throws PersistenceException if no {@code @PK}-annotated field is found.
+ */
Object getId(@Nonnull Data data);
+ /**
+ * Returns the value of the record component at the specified index.
+ *
+ * Uses {@link #getRecordType(Class)} to obtain the field list, then invokes the accessor for the field at the
+ * given position. For Java records this is the record component accessor; for Kotlin data classes it is the
+ * Kotlin property getter.
+ *
+ * @param record the record instance (Java record or Kotlin data class).
+ * @param index the zero-based index of the component.
+ * @return the component value.
+ * @throws PersistenceException if the type is not a recognized record type or the index is out of bounds.
+ */
Object getRecordValue(@Nonnull Object record, int index);
+ /**
+ * Returns an {@link Optional} containing the {@link RecordType} descriptor for the specified class if it is a
+ * recognized record type, or an empty {@code Optional} otherwise.
+ *
+ * The Java implementation recognizes Java {@code record} classes and builds the descriptor from
+ * {@link java.lang.reflect.RecordComponent} metadata. The Kotlin implementation additionally recognizes Kotlin
+ * data classes (detected via the {@code @Metadata} annotation and {@code KClass.isData()}) and builds the
+ * descriptor from the primary constructor parameters and corresponding property accessors.
+ *
+ * Results are cached per class.
+ *
+ * @param type the class to inspect.
+ * @return the record type descriptor, or empty if the class is not a recognized record type.
+ */
Optional<RecordType> findRecordType(@Nonnull Class<?> type);
+ /**
+ * Returns the {@link RecordType} descriptor for the specified class, throwing if the class is not a recognized
+ * record type.
+ *
+ * @param type the class to inspect.
+ * @return the record type descriptor.
+ * @throws PersistenceException if the class is not a recognized record type.
+ */
default RecordType getRecordType(@Nonnull Class<?> type) {
return findRecordType(type)
.orElseThrow(() -> new PersistenceException("Record type expected: %s.".formatted(type.getName())));
}
+ /**
+ * Returns whether the specified object represents a type reference that this reflection implementation can handle.
+ *
+ * The Java implementation returns {@code true} for {@link Class} instances. The Kotlin implementation
+ * additionally returns {@code true} for {@code KClass} instances.
+ *
+ * @param o the object to test.
+ * @return {@code true} if this implementation can resolve the object to a Java class.
+ */
boolean isSupportedType(@Nonnull Object o);
+ /**
+ * Resolves a type reference to its corresponding Java {@link Class}.
+ *
+ * The Java implementation expects a {@link Class} that implements {@link Data} and returns it directly. The
+ * Kotlin implementation additionally handles {@code KClass} instances by mapping them to their Java class via
+ * {@code JvmClassMappingKt.getJavaClass}.
+ *
+ * @param o the type reference ({@link Class} or {@code KClass}).
+ * @return the resolved Java class.
+ * @throws PersistenceException if the object is not a supported type reference or is not a {@link Data} type.
+ */
Class<?> getType(@Nonnull Object o);
+ /**
+ * Resolves a type reference to its corresponding Java {@link Class}, cast to {@code Class<? extends Data>}.
+ *
+ * Behaves like {@link #getType(Object)} but returns a {@code Data}-bounded type. Throws if the resolved class
+ * does not implement {@link Data}.
+ *
+ * @param o the type reference ({@link Class} or {@code KClass}).
+ * @return the resolved Java class as a {@code Data} subtype.
+ * @throws PersistenceException if the object is not a supported type reference or is not a {@link Data} type.
+ */
Class<? extends Data> getDataType(@Nonnull Object o);
+ /**
+ * Returns whether the specified object is a default value for its type.
+ *
+ * Returns {@code true} for: {@code null}; primitive wrappers with their default value ({@code 0},
+ * {@code false}, {@code '\u0000'}); and record instances whose every component is itself a default value
+ * (checked recursively).
+ *
+ * @param o the value to test, may be {@code null}.
+ * @return {@code true} if the value is considered a default.
+ */
boolean isDefaultValue(@Nullable Object o);
/**
* Returns the permitted subclasses of the specified sealed class.
*
+ * The Java implementation delegates to {@link Class#getPermittedSubclasses()}. The Kotlin implementation uses
+ * {@code KClass.getSealedSubclasses()} to also handle Kotlin sealed classes.
+ *
* @param sealedClass the sealed class to get the permitted subclasses for.
* @return a list of permitted subclasses of the specified sealed class.
*/
List<Class<?>> getPermittedSubclasses(@Nonnull Class<?> sealedClass);
+ /**
+ * Returns whether the specified method is a default method that can be invoked through a proxy.
+ *
+ * The Java implementation checks {@link Method#isDefault()}. The Kotlin implementation additionally returns
+ * {@code true} for methods declared on classes annotated with {@code @Metadata} (indicating Kotlin default
+ * implementations compiled into a {@code DefaultImpls} companion class).
+ *
+ * @param method the method to test.
+ * @return {@code true} if the method is a default method.
+ */
boolean isDefaultMethod(@Nonnull Method method);
+ /**
+ * Invokes the accessor for the specified {@link RecordField} on the given record instance and returns the result.
+ *
+ * Uses {@link java.lang.invoke.MethodHandle}-based invocation for performance, falling back to reflective
+ * invocation when the method handle cannot be obtained (e.g., due to module restrictions).
+ *
+ * @param field the record field whose accessor should be invoked.
+ * @param record the record instance.
+ * @return the field value.
+ * @throws PersistenceException if the invocation fails.
+ */
Object invoke(@Nonnull RecordField field, @Nonnull Object record);
+ /**
+ * Invokes a default or Kotlin-compiled default method on a proxy instance.
+ *
+ * The Java implementation uses {@link java.lang.invoke.MethodHandles} to call the default method via
+ * {@code findSpecial}. The Kotlin implementation locates the static {@code DefaultImpls} companion class and
+ * invokes the corresponding static method, passing the proxy as the first argument.
+ *
+ * @param proxy the proxy instance on which the method was called.
+ * @param method the default method to invoke.
+ * @param args the method arguments.
+ * @return the return value of the method.
+ * @throws Throwable if the invocation fails.
+ */
Object execute(@Nonnull Object proxy, @Nonnull Method method, @Nonnull Object... args) throws Throwable;
}
diff --git a/storm-core/src/main/java/st/orm/core/template/QueryBuilder.java b/storm-core/src/main/java/st/orm/core/template/QueryBuilder.java
index 6e665647b..cdbb970f7 100644
--- a/storm-core/src/main/java/st/orm/core/template/QueryBuilder.java
+++ b/storm-core/src/main/java/st/orm/core/template/QueryBuilder.java
@@ -30,7 +30,6 @@
import java.util.stream.Stream;
import st.orm.Data;
import st.orm.JoinType;
-import st.orm.MappedWindow;
import st.orm.Metamodel;
import st.orm.NoResultException;
import st.orm.NonUniqueResultException;
@@ -758,21 +757,21 @@ public final Page page(@Nonnull Pageable pageable, long totalCount) {
* and ORDER BY clauses externally.
*
* Because this method has no key or sort information, the returned window does not carry navigation tokens
- * ({@code nextScrollable} and {@code previousScrollable} are {@code null}).
+ * ({@code next()} and {@code previous()} return {@code null}).
*
* @param size the maximum number of results to include in the window (must be positive).
* @return a window containing the results and a flag indicating whether more results exist.
* @throws IllegalArgumentException if {@code size} is not positive.
* @since 1.11
*/
- public final MappedWindow scroll(int size) {
+ public final Window scroll(int size) {
if (size <= 0) {
throw new IllegalArgumentException("size must be positive.");
}
List results = this.limit(size + 1).getResultList();
boolean hasNext = results.size() > size;
List content = hasNext ? results.subList(0, size) : results;
- return new MappedWindow<>(content, hasNext, false, null, null);
+ return new Window<>(content, hasNext, false, null, null);
}
/**
@@ -791,7 +790,7 @@ public final MappedWindow scroll(int size) {
* @since 1.11
*/
@SuppressWarnings("unchecked")
- private MappedWindow toWindow(@Nonnull MappedWindow raw, @Nonnull Metamodel.Key key,
+ private Window toWindow(@Nonnull Window raw, @Nonnull Metamodel.Key key,
@Nullable Metamodel sort, int size, boolean forward, boolean hasCursor) {
if (raw.content().isEmpty()) {
return raw;
@@ -818,7 +817,7 @@ private MappedWindow toWindow(@Nonnull MappedWindow raw, @Nonnull Me
sort != null ? sort.getValue((T) first) : null,
size, !forward);
}
- return new MappedWindow<>(raw.content(), raw.hasNext(), hasCursor, nextScrollable, previousScrollable);
+ return new Window<>(raw.content(), raw.hasNext(), hasCursor, nextScrollable, previousScrollable);
}
/**
@@ -844,14 +843,14 @@ private static void validateKeyNotNullable(@Nonnull Metamode
/**
* Executes a scroll request from a {@link Scrollable} token, typically obtained from
- * {@link Window#nextScrollable()} or {@link Window#previousScrollable()}.
+ * {@link Window#next()} or {@link Window#previous()}.
*
* @param scrollable the scroll request containing cursor state, key, sort, size, and direction.
* @return a window containing the results and navigation tokens.
* @since 1.11
*/
@SuppressWarnings("unchecked")
- public final MappedWindow scroll(@Nonnull Scrollable scrollable) {
+ public final Window scroll(@Nonnull Scrollable scrollable) {
var key = (Metamodel.Key) scrollable.key();
int size = scrollable.size();
boolean forward = scrollable.isForward();
diff --git a/storm-core/src/main/java/st/orm/core/template/impl/JoinedEntityHelper.java b/storm-core/src/main/java/st/orm/core/template/impl/JoinedEntityHelper.java
index 4f6d351cf..a9d3b4668 100644
--- a/storm-core/src/main/java/st/orm/core/template/impl/JoinedEntityHelper.java
+++ b/storm-core/src/main/java/st/orm/core/template/impl/JoinedEntityHelper.java
@@ -133,16 +133,16 @@ public static , ID> void update(
}
/**
- * Deletes a joined entity from both the extension and base tables.
+ * Removes a joined entity from both the extension and base tables.
*
* @param queryTemplate the query template for executing SQL.
* @param model the model describing the sealed entity.
- * @param entity the entity to delete.
+ * @param entity the entity to remove.
* @param the entity type (sealed interface).
* @param the primary key type.
- * @throws PersistenceException if the delete fails.
+ * @throws PersistenceException if the remove fails.
*/
- public static , ID> void delete(
+ public static , ID> void remove(
@Nonnull QueryTemplate queryTemplate,
@Nonnull Model model,
@Nonnull E entity
@@ -150,21 +150,21 @@ public static , ID> void delete(
try {
deleteJoined(queryTemplate, model, entity.getClass(), entity.id());
} catch (SqlTemplateException e) {
- throw new PersistenceException("Failed to construct delete statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
+ throw new PersistenceException("Failed to construct remove statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
}
}
/**
- * Deletes a joined entity by primary key from all extension and base tables.
+ * Removes a joined entity by primary key from all extension and base tables.
*
* @param queryTemplate the query template for executing SQL.
* @param model the model describing the sealed entity.
- * @param id the primary key of the entity to delete.
+ * @param id the primary key of the entity to remove.
* @param the entity type (sealed interface).
* @param the primary key type.
- * @throws PersistenceException if the delete fails.
+ * @throws PersistenceException if the remove fails.
*/
- public static , ID> void deleteById(
+ public static , ID> void removeById(
@Nonnull QueryTemplate queryTemplate,
@Nonnull Model model,
@Nonnull ID id
@@ -172,7 +172,7 @@ public static , ID> void deleteById(
try {
deleteJoined(queryTemplate, model, null, id);
} catch (SqlTemplateException e) {
- throw new PersistenceException("Failed to construct delete-by-id statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
+ throw new PersistenceException("Failed to construct remove-by-id statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
}
}
@@ -275,20 +275,20 @@ public static , ID> void updateBatch(
}
/**
- * Deletes a batch of joined entities from both extension and base tables.
+ * Removes a batch of joined entities from both extension and base tables.
*
* Phase 1 deletes from extension tables first (FK constraints), partitioned by concrete subtype.
* Phase 2 deletes all entities from the base table.
*
* @param queryTemplate the query template for executing SQL.
* @param model the model describing the sealed entity.
- * @param entities the entities to delete (already validated).
+ * @param entities the entities to remove (already validated).
* @param the entity type (sealed interface).
* @param the primary key type.
- * @throws PersistenceException if the delete fails.
+ * @throws PersistenceException if the remove fails.
* @since 1.9
*/
- public static , ID> void deleteBatch(
+ public static , ID> void removeBatch(
@Nonnull QueryTemplate queryTemplate,
@Nonnull Model model,
@Nonnull List entities
@@ -299,25 +299,25 @@ public static , ID> void deleteBatch(
try {
deleteJoinedBatch(queryTemplate, model, entities);
} catch (SqlTemplateException e) {
- throw new PersistenceException("Failed to construct batch delete statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
+ throw new PersistenceException("Failed to construct batch remove statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
}
}
/**
- * Deletes a batch of joined entities by reference from all extension and base tables.
+ * Removes a batch of joined entities by reference from all extension and base tables.
*
* When the concrete type is unknown, attempts DELETE from all extension tables for all IDs
* (at most one will match per entity), then deletes from the base table.
*
* @param queryTemplate the query template for executing SQL.
* @param model the model describing the sealed entity.
- * @param refs the entity references to delete.
+ * @param refs the entity references to remove.
* @param the entity type (sealed interface).
* @param the primary key type.
- * @throws PersistenceException if the delete fails.
+ * @throws PersistenceException if the remove fails.
* @since 1.9
*/
- public static , ID> void deleteBatchByRef(
+ public static , ID> void removeBatchByRef(
@Nonnull QueryTemplate queryTemplate,
@Nonnull Model model,
+            @Nonnull List<Ref<E>> refs
@@ -328,7 +328,7 @@ public static , ID> void deleteBatchByRef(
try {
deleteJoinedBatchByRef(queryTemplate, model, refs);
} catch (SqlTemplateException e) {
- throw new PersistenceException("Failed to construct batch delete-by-ref statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
+ throw new PersistenceException("Failed to construct batch remove-by-ref statement for joined entity %s.".formatted(model.type().getSimpleName()), e);
}
}
diff --git a/storm-core/src/main/java/st/orm/core/template/impl/ParamProcessor.java b/storm-core/src/main/java/st/orm/core/template/impl/ParamProcessor.java
index 9ce107263..e7fbf9ed8 100644
--- a/storm-core/src/main/java/st/orm/core/template/impl/ParamProcessor.java
+++ b/storm-core/src/main/java/st/orm/core/template/impl/ParamProcessor.java
@@ -16,11 +16,18 @@
package st.orm.core.template.impl;
import jakarta.annotation.Nonnull;
+import jakarta.annotation.Nullable;
+import st.orm.Data;
+import st.orm.Ref;
+import st.orm.core.spi.ORMReflection;
+import st.orm.core.spi.Providers;
import st.orm.core.template.SqlTemplateException;
import st.orm.core.template.impl.Elements.Param;
final class ParamProcessor implements ElementProcessor {
+ private static final ORMReflection REFLECTION = Providers.getORMReflection();
+
/**
* Returns a key that represents the compiled shape of the given element.
*
@@ -53,10 +60,11 @@ public Object getCompilationKey(@Nonnull Param param) {
@Override
public CompiledElement compile(@Nonnull Param param, @Nonnull TemplateCompiler compiler)
throws SqlTemplateException {
+ Object value = resolveParamValue(param.dbValue());
if (param.name() != null) {
- return new CompiledElement(compiler.mapParameter(param.name(), param.dbValue()));
+ return new CompiledElement(compiler.mapParameter(param.name(), value));
}
- return new CompiledElement(compiler.mapParameter(param.dbValue()));
+ return new CompiledElement(compiler.mapParameter(value));
}
/**
@@ -72,10 +80,34 @@ public CompiledElement compile(@Nonnull Param param, @Nonnull TemplateCompiler c
*/
@Override
public void bind(@Nonnull Param param, @Nonnull TemplateBinder binder, @Nonnull BindHint bindHint) throws SqlTemplateException {
+ Object value = resolveParamValue(param.dbValue());
if (param.name() != null) {
- binder.bindParameter(param.name(), param.dbValue());
+ binder.bindParameter(param.name(), value);
} else {
- binder.bindParameter(param.dbValue());
+ binder.bindParameter(value);
+ }
+ }
+
+ /**
+ * Resolves a parameter value for binding. {@link Ref} instances are unwrapped to their primary key value via
+ * {@link Ref#id()}. {@link Data} instances are unwrapped to their primary key value via
+ * {@link ORMReflection#getId(Data)}.
+ *
+     * This allows {@code Ref} and {@code Data} instances (entities, projections, etc.) to be used directly as
+ * bind variables in raw SQL templates (e.g., {@code "WHERE id = $ref"} or {@code "WHERE id = $entity"}) without
+ * requiring the caller to extract the ID manually.
+ *
+ * @param value the parameter value.
+ * @return the resolved value suitable for JDBC binding.
+ */
+ @Nullable
+ private static Object resolveParamValue(@Nullable Object value) throws SqlTemplateException {
+        if (value instanceof Ref<?> ref) {
+ return ref.id();
+ }
+ if (value instanceof Data data) {
+ return REFLECTION.getId(data);
}
+ return value;
}
}
diff --git a/storm-core/src/main/java/st/orm/core/template/impl/QueryModelImpl.java b/storm-core/src/main/java/st/orm/core/template/impl/QueryModelImpl.java
index 42254a42b..ab8bd2a7e 100644
--- a/storm-core/src/main/java/st/orm/core/template/impl/QueryModelImpl.java
+++ b/storm-core/src/main/java/st/orm/core/template/impl/QueryModelImpl.java
@@ -24,7 +24,6 @@
import static st.orm.core.template.impl.RecordReflection.hasDiscriminator;
import static st.orm.core.template.impl.RecordReflection.isJoinedEntity;
import static st.orm.core.template.impl.RecordReflection.isSealedEntity;
-import static st.orm.core.template.impl.SqlParser.removeComments;
import jakarta.annotation.Nonnull;
import jakarta.annotation.Nullable;
@@ -32,7 +31,6 @@
import java.util.LinkedHashMap;
import java.util.List;
import java.util.SequencedMap;
-import java.util.regex.Pattern;
import java.util.stream.Stream;
import st.orm.BindVars;
import st.orm.Data;
@@ -54,6 +52,7 @@
import st.orm.core.template.impl.Elements.ObjectExpression;
import st.orm.core.template.impl.Elements.Subquery;
import st.orm.core.template.impl.Elements.TemplateExpression;
+
/**
* Query model implementation responsible for translating high-level query expressions into SQL fragments and bind
* values.
@@ -265,8 +264,8 @@ private String compileTemplateExpression(@Nonnull TemplateString stringTemplate,
String fragment = fragments.get(i);
parts.add(fragment);
if (i < values.size()) {
- Object value = resolveElements(values.get(i), fragment, i + 1 < fragments.size() ? fragments.get(i + 1) : "");
- switch (value) {
+ Object resolved = resolveElements(values.get(i));
+ switch (resolved) {
case Stream> ignore -> throw new SqlTemplateException("Stream is not supported in expressions. Collect the Stream into a List before passing it.");
case Query ignore -> throw new SqlTemplateException("Query is not supported in expressions. Use a QueryBuilder subquery instead.");
case Expression it -> parts.add(compileExpression(it, compiler));
@@ -275,7 +274,7 @@ private String compileTemplateExpression(@Nonnull TemplateString stringTemplate,
case Class> it -> parts.add(compiler.compile(alias(REFLECTION.getDataType(it))));
case Object it when REFLECTION.isSupportedType(it) -> parts.add(compiler.compile(alias(REFLECTION.getDataType(it))));
case Element it -> parts.add(compiler.compile(it));
- default -> parts.add(compiler.compile(param(value)));
+ default -> parts.add(compiler.compile(param(resolved)));
}
}
}
@@ -292,23 +291,18 @@ private String compileTemplateExpression(@Nonnull TemplateString stringTemplate,
* @throws SqlTemplateException if an unsupported value is encountered.
*/
private void bindTemplateExpression(@Nonnull TemplateString stringTemplate, @Nonnull TemplateBinder binder) throws SqlTemplateException{
- var fragments = stringTemplate.fragments();
- var values = stringTemplate.values();
- for (int i = 0; i < fragments.size(); i++) {
- String fragment = fragments.get(i);
- if (i < values.size()) {
- Object value = resolveElements(values.get(i), fragment, i + 1 < fragments.size() ? fragments.get(i + 1) : "");
- switch (value) {
- case Stream> ignore -> throw new SqlTemplateException("Stream is not supported in expressions. Collect the Stream into a List before passing it.");
- case Query ignore -> throw new SqlTemplateException("Query is not supported in expressions. Use a QueryBuilder subquery instead.");
- case Expression it -> bindExpression(it, binder);
- case Ref> it -> bindExpression(new ObjectExpression(it), binder);
- case Data it -> bindExpression(new ObjectExpression(it), binder);
- case Class> it -> binder.bind(alias(REFLECTION.getDataType(it)));
- case Object it when REFLECTION.isSupportedType(it) -> binder.bind(alias(REFLECTION.getDataType(it)));
- case Element it -> binder.bind(it);
- default -> binder.bind(param(value));
- }
+ for (var value : stringTemplate.values()) {
+ Object resolved = resolveElements(value);
+ switch (resolved) {
+                case Stream<?> ignore -> throw new SqlTemplateException("Stream is not supported in expressions. Collect the Stream into a List before passing it.");
+ case Query ignore -> throw new SqlTemplateException("Query is not supported in expressions. Use a QueryBuilder subquery instead.");
+ case Expression it -> bindExpression(it, binder);
+                case Ref<?> it -> bindExpression(new ObjectExpression(it), binder);
+ case Data it -> bindExpression(new ObjectExpression(it), binder);
+                case Class<?> it -> binder.bind(alias(REFLECTION.getDataType(it)));
+ case Object it when REFLECTION.isSupportedType(it) -> binder.bind(alias(REFLECTION.getDataType(it)));
+ case Element it -> binder.bind(it);
+ default -> binder.bind(param(resolved));
}
}
}
@@ -529,41 +523,29 @@ private static boolean isPrimitiveCompatible(@Nonnull Object o, @Nonnull Class
return false;
}
- private static final Pattern ENDS_WITH_OPERATOR = Pattern.compile(".*[<=>]$");
- private static final Pattern STARTS_WITH_OPERATOR = Pattern.compile("^[<=>].*");
-
/**
* Resolves a template value into a form that can be processed by the compiler or binder.
*
- * This method validates contextual correctness, such as preventing records from being used
- * next to operators.
+ * This method transforms known template value types into their corresponding internal representations
+ * (e.g., {@link Subqueryable} to {@link Subquery}, column-level {@link Metamodel} to {@link Column}), and rejects
+ * invalid types such as {@link TemplateString} and {@link Stream}.
*
- * @param value the value to resolve.
- * @param previousFragment the preceding SQL fragment.
- * @param nextFragment the following SQL fragment.
+ * {@link Data} and {@link Ref} instances pass through unchanged and are handled by the caller's switch
+ * (compiled via {@code ObjectExpression}, which resolves columns through the model). Other values (scalars,
+ * {@link Element} instances, etc.) also pass through unchanged and are compiled or bound by the caller.
+ *
+ * @param value the value to resolve.
* @return the resolved value.
* @throws SqlTemplateException if the value is invalid in this context.
*/
- private Object resolveElements(@Nullable Object value, @Nonnull String previousFragment, @Nonnull String nextFragment) throws SqlTemplateException {
+ private Object resolveElements(@Nullable Object value) throws SqlTemplateException {
return switch (value) {
case TemplateString ignore -> throw new SqlTemplateException("TemplateString is not allowed as a string template value.");
case Stream> ignore -> throw new SqlTemplateException("Stream is not supported as a string template value. Collect the Stream into a List before passing it.");
case Subqueryable t -> new Subquery(t.getSubquery(), true);
case Metamodel, ?> m when m.isColumn() -> new st.orm.core.template.impl.Elements.Column(m, CASCADE);
case Metamodel, ?> ignore -> throw new SqlTemplateException("Metamodel does not reference a column. Use a column-level metamodel (e.g., User_.name) rather than a table-level metamodel.");
- case null, default -> {
- if (!(value instanceof Element) && value instanceof Record) {
- String previous = removeComments(previousFragment, template.dialect()).stripTrailing().toUpperCase();
- if (ENDS_WITH_OPERATOR.matcher(previous).find()) {
- throw new SqlTemplateException("Record is not allowed directly next to a comparison operator in an expression. Use a specific field value or metamodel reference instead.");
- }
- String next = removeComments(nextFragment, template.dialect()).stripLeading().toUpperCase();
- if (STARTS_WITH_OPERATOR.matcher(next).find()) {
- throw new SqlTemplateException("Record is not allowed directly next to a comparison operator in an expression. Use a specific field value or metamodel reference instead.");
- }
- }
- yield value;
- }
+ case null, default -> value;
};
}
diff --git a/storm-core/src/test/java/st/orm/core/BuilderPreparedStatementIntegrationTest.java b/storm-core/src/test/java/st/orm/core/BuilderPreparedStatementIntegrationTest.java
index d48fbd2a9..eb59b24a3 100644
--- a/storm-core/src/test/java/st/orm/core/BuilderPreparedStatementIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/BuilderPreparedStatementIntegrationTest.java
@@ -598,7 +598,7 @@ public void testScrollNavigationForwardThenBackward() {
// Navigate forward: vets 4, 5, 6.
var nextWindow = ORMTemplate.of(dataSource)
.selectFrom(Vet.class)
- .scroll(firstWindow.nextScrollable());
+ .scroll(firstWindow.next());
assertEquals(3, nextWindow.content().size());
assertFalse(nextWindow.hasNext());
assertEquals(4, nextWindow.content().get(0).id());
@@ -608,7 +608,7 @@ public void testScrollNavigationForwardThenBackward() {
// Navigate backward: should return the same vets as the first window, but in descending order (3, 2, 1).
var backWindow = ORMTemplate.of(dataSource)
.selectFrom(Vet.class)
- .scroll(nextWindow.previousScrollable());
+ .scroll(nextWindow.previous());
assertEquals(3, backWindow.content().size());
assertEquals(3, backWindow.content().get(0).id());
assertEquals(2, backWindow.content().get(1).id());
@@ -642,7 +642,7 @@ public void testScrollWindowNextCursorRoundTrip() {
// Scroll using the navigation token for comparison.
var tokenWindow = ORMTemplate.of(dataSource)
.selectFrom(Vet.class)
- .scroll(firstWindow.nextScrollable());
+ .scroll(firstWindow.next());
// Both approaches should yield the same results.
assertEquals(tokenWindow.content().size(), cursorWindow.content().size());
@@ -666,10 +666,10 @@ public void testScrollBackwardNavigation() {
assertEquals(5, firstWindow.content().get(1).id());
assertEquals(4, firstWindow.content().get(2).id());
- // Navigate further back using previousScrollable: vets 3, 2, 1.
+ // Navigate further back using next(): vets 3, 2, 1.
var previousWindow = ORMTemplate.of(dataSource)
.selectFrom(Vet.class)
- .scroll(firstWindow.nextScrollable());
+ .scroll(firstWindow.next());
assertEquals(3, previousWindow.content().size());
assertFalse(previousWindow.hasNext());
assertEquals(3, previousWindow.content().get(0).id());
diff --git a/storm-core/src/test/java/st/orm/core/CursorSerializationTest.java b/storm-core/src/test/java/st/orm/core/CursorSerializationTest.java
index 4a9d562f7..cb8eec189 100644
--- a/storm-core/src/test/java/st/orm/core/CursorSerializationTest.java
+++ b/storm-core/src/test/java/st/orm/core/CursorSerializationTest.java
@@ -16,9 +16,9 @@
import java.util.UUID;
import org.junit.jupiter.api.Test;
import st.orm.Data;
-import st.orm.MappedWindow;
import st.orm.Metamodel;
import st.orm.Scrollable;
+import st.orm.Window;
/**
* Tests cursor serialization (toCursor/fromCursor) which requires storm-core on the classpath.
@@ -244,7 +244,7 @@ void toCursorProducesUrlSafeString() {
void windowNextCursorProducesStringFromScrollable() {
var key = Metamodel.key(Metamodel.of(StubEntity.class, "id"));
var next = new Scrollable<>(key, 42, null, null, 20, true);
- var window = new MappedWindow<>(java.util.List.of("a"), true, false, next, null);
+ var window = new Window<>(java.util.List.of("a"), true, false, next, null);
String cursor = window.nextCursor();
assertNotNull(cursor);
var restored = Scrollable.fromCursor(key, cursor);
@@ -257,7 +257,7 @@ void windowNextCursorProducesStringFromScrollable() {
void windowPreviousCursorProducesStringFromScrollable() {
var key = Metamodel.key(Metamodel.of(StubEntity.class, "id"));
var prev = new Scrollable<>(key, 5, null, null, 10, false);
- var window = new MappedWindow<>(java.util.List.of("a"), false, true, null, prev);
+ var window = new Window<>(java.util.List.of("a"), false, true, null, prev);
String cursor = window.previousCursor();
assertNotNull(cursor);
var restored = Scrollable.fromCursor(key, cursor);
diff --git a/storm-core/src/test/java/st/orm/core/EntityCallbackIntegrationTest.java b/storm-core/src/test/java/st/orm/core/EntityCallbackIntegrationTest.java
index ffb257bc1..b5555ae5d 100644
--- a/storm-core/src/test/java/st/orm/core/EntityCallbackIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/EntityCallbackIntegrationTest.java
@@ -95,7 +95,7 @@ public void beforeDelete(@Nonnull City entity) {
});
// Insert a city with no FK references, then delete it.
var id = orm.entity(City.class).insertAndFetchId(City.builder().name("Deletable").build());
- orm.entity(City.class).delete(City.builder().id(id).name("Deletable").build());
+ orm.entity(City.class).remove(City.builder().id(id).name("Deletable").build());
assertEquals(List.of("before:Deletable"), log);
}
@@ -110,7 +110,7 @@ public void afterDelete(@Nonnull City entity) {
});
// Insert a city with no FK references, then delete it.
var id = orm.entity(City.class).insertAndFetchId(City.builder().name("Deletable").build());
- orm.entity(City.class).delete(City.builder().id(id).name("Deletable").build());
+ orm.entity(City.class).remove(City.builder().id(id).name("Deletable").build());
assertEquals(List.of("after:Deletable"), log);
}
diff --git a/storm-core/src/test/java/st/orm/core/EntityRepositoryAdditionalIntegrationTest.java b/storm-core/src/test/java/st/orm/core/EntityRepositoryAdditionalIntegrationTest.java
index 55f3b471b..71d8e8e01 100644
--- a/storm-core/src/test/java/st/orm/core/EntityRepositoryAdditionalIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/EntityRepositoryAdditionalIntegrationTest.java
@@ -71,12 +71,12 @@ public void testUpdateWithDefaultPrimaryKeyThrows() {
}
@Test
- public void testDeleteWithDefaultPrimaryKeyThrows() {
+ public void testRemoveWithDefaultPrimaryKeyThrows() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
// Deleting with null PK should throw.
assertThrows(PersistenceException.class,
- () -> cities.delete(City.builder().name("NoPK").build()));
+ () -> cities.remove(City.builder().name("NoPK").build()));
}
// insert with ignoreAutoGenerate
@@ -92,7 +92,7 @@ public void testInsertWithIgnoreAutoGenerateExplicitPrimaryKey() {
City fetched = cities.getById(9990);
assertEquals("ExplicitPK", fetched.name());
// Clean up.
- cities.deleteById(9990);
+ cities.removeById(9990);
}
@Test
@@ -107,7 +107,7 @@ public void testInsertWithIgnoreAutoGenerateNullPrimaryKeyThrows() {
// deleteAll
@Test
- public void testDeleteAllRemovesAllEntities() {
+ public void testRemoveAllRemovesAllEntities() {
var orm = ORMTemplate.of(dataSource);
// Use pet_extension to avoid FK constraint issues (it's a leaf table).
// Insert some cities that are not referenced by anything else.
@@ -123,7 +123,7 @@ public void testDeleteAllRemovesAllEntities() {
var visits = orm.entity(Visit.class);
long visitCountBefore = visits.count();
assertTrue(visitCountBefore > 0);
- visits.deleteAll();
+ visits.removeAll();
assertEquals(0, visits.count());
}
@@ -342,7 +342,7 @@ public void testNoCallbacksRegistered() {
Integer insertedId = cities.insertAndFetchId(City.builder().name("NoCb").build());
assertNotNull(insertedId);
cities.update(City.builder().id(insertedId).name("NoCbUpdated").build());
- cities.delete(City.builder().id(insertedId).name("NoCbUpdated").build());
+ cities.remove(City.builder().id(insertedId).name("NoCbUpdated").build());
}
// Batch update with joined entities and callbacks
diff --git a/storm-core/src/test/java/st/orm/core/EntityRepositoryBatchIntegrationTest.java b/storm-core/src/test/java/st/orm/core/EntityRepositoryBatchIntegrationTest.java
index 596e6de48..70fbe3eab 100644
--- a/storm-core/src/test/java/st/orm/core/EntityRepositoryBatchIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/EntityRepositoryBatchIntegrationTest.java
@@ -106,32 +106,32 @@ public void testBatchUpdatePersistsAllChanges() {
// Delete
@Test
- public void testDeleteEntityRemovesIt() {
+ public void testRemoveEntityRemovesIt() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var insertedId = cities.insertAndFetchId(City.builder().name("ToDelete").build());
long countBefore = cities.count();
- cities.delete(City.builder().id(insertedId).name("ToDelete").build());
+ cities.remove(City.builder().id(insertedId).name("ToDelete").build());
assertEquals(countBefore - 1, cities.count());
// Verify the deleted city is actually gone.
assertFalse(cities.findById(insertedId).isPresent());
}
@Test
- public void testDeleteByRefRemovesEntity() {
+ public void testRemoveByRefRemovesEntity() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var insertedId = cities.insertAndFetchId(City.builder().name("RefDelete").build());
- cities.deleteByRef(Ref.of(City.class, insertedId));
+ cities.removeByRef(Ref.of(City.class, insertedId));
assertFalse(cities.findById(insertedId).isPresent());
}
@Test
- public void testBatchDeleteRemovesAllSpecified() {
+ public void testBatchRemoveRemovesAllSpecified() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
@@ -139,7 +139,7 @@ public void testBatchDeleteRemovesAllSpecified() {
var id2 = cities.insertAndFetchId(City.builder().name("BatchDel2").build());
long countBefore = cities.count();
- cities.delete(List.of(
+ cities.remove(List.of(
City.builder().id(id1).name("BatchDel1").build(),
City.builder().id(id2).name("BatchDel2").build()
));
@@ -149,14 +149,14 @@ public void testBatchDeleteRemovesAllSpecified() {
}
@Test
- public void testBatchDeleteByRefRemovesAllSpecified() {
+ public void testBatchRemoveByRefRemovesAllSpecified() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("RefBatchDel1").build());
var id2 = cities.insertAndFetchId(City.builder().name("RefBatchDel2").build());
- cities.deleteByRef(List.of(
+ cities.removeByRef(List.of(
Ref.of(City.class, id1),
Ref.of(City.class, id2)
));
diff --git a/storm-core/src/test/java/st/orm/core/EntityRepositoryIntegrationTest.java b/storm-core/src/test/java/st/orm/core/EntityRepositoryIntegrationTest.java
index 40c4374ee..96804a5da 100644
--- a/storm-core/src/test/java/st/orm/core/EntityRepositoryIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/EntityRepositoryIntegrationTest.java
@@ -51,48 +51,48 @@ public class EntityRepositoryIntegrationTest {
// deleteById
@Test
- public void testDeleteById() {
+ public void testRemoveById() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
// Insert a new city so we can delete it without FK constraint issues.
var id = cities.insertAndFetchId(City.builder().name("ToDelete").build());
long before = cities.count();
- cities.deleteById(id);
+ cities.removeById(id);
assertEquals(before - 1, cities.count());
}
@Test
- public void testDeleteByIdNonExistent() {
+ public void testRemoveByIdNonExistent() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
- assertDoesNotThrow(() -> cities.deleteById(99999));
+ assertDoesNotThrow(() -> cities.removeById(99999));
}
// deleteByRef
@Test
- public void testDeleteByRef() {
+ public void testRemoveByRef() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id = cities.insertAndFetchId(City.builder().name("RefDelete").build());
long before = cities.count();
Ref ref = Ref.of(City.class, id);
- cities.deleteByRef(ref);
+ cities.removeByRef(ref);
assertEquals(before - 1, cities.count());
}
@Test
- public void testDeleteByRefNonExistent() {
+ public void testRemoveByRefNonExistent() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
Ref ref = Ref.of(City.class, 99999);
- assertDoesNotThrow(() -> cities.deleteByRef(ref));
+ assertDoesNotThrow(() -> cities.removeByRef(ref));
}
// deleteAll
@Test
- public void testDeleteAll() {
+ public void testRemoveAll() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
// First remove owners that reference cities to avoid FK constraint violations.
@@ -128,7 +128,7 @@ public void testDeleteAll() {
// Insert cities that have no dependents, then use deleteAll. Since other cities have
// FK dependents, deleteAll will throw. This IS a valid test of the error path.
- assertThrows(PersistenceException.class, () -> cities.deleteAll());
+ assertThrows(PersistenceException.class, () -> cities.removeAll());
}
// ref(E entity)
@@ -431,7 +431,7 @@ public void testGetDefaultBatchSize() {
// deleteByRef(Iterable[>)
@Test
- public void testDeleteByRefIterable() {
+ public void testRemoveByRefIterable() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("RefDelA").build());
@@ -441,14 +441,14 @@ public void testDeleteByRefIterable() {
Ref.of(City.class, id1),
Ref.of(City.class, id2)
);
- cities.deleteByRef(refs);
+ cities.removeByRef(refs);
assertEquals(before - 2, cities.count());
}
// delete(Iterable)
@Test
- public void testDeleteIterable() {
+ public void testRemoveIterable() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("DelIterA").build());
@@ -458,7 +458,7 @@ public void testDeleteIterable() {
City.builder().id(id1).name("DelIterA").build(),
City.builder().id(id2).name("DelIterB").build()
);
- cities.delete(toDelete);
+ cities.remove(toDelete);
assertEquals(before - 2, cities.count());
}
@@ -562,13 +562,13 @@ public void testUpsertStreamWithBatchSize() {
// delete(Stream)
@Test
- public void testDeleteStream() {
+ public void testRemoveStream() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("StreamDelA").build());
var id2 = cities.insertAndFetchId(City.builder().name("StreamDelB").build());
long before = cities.count();
- cities.delete(Stream.of(
+ cities.remove(Stream.of(
City.builder().id(id1).name("StreamDelA").build(),
City.builder().id(id2).name("StreamDelB").build()
));
@@ -578,13 +578,13 @@ public void testDeleteStream() {
// delete(Stream, int batchSize)
@Test
- public void testDeleteStreamWithBatchSize() {
+ public void testRemoveStreamWithBatchSize() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("BatchDelA").build());
var id2 = cities.insertAndFetchId(City.builder().name("BatchDelB").build());
long before = cities.count();
- cities.delete(
+ cities.remove(
Stream.of(
City.builder().id(id1).name("BatchDelA").build(),
City.builder().id(id2).name("BatchDelB").build()
@@ -597,13 +597,13 @@ public void testDeleteStreamWithBatchSize() {
// deleteByRef(Stream<Ref<City>>)
@Test
- public void testDeleteByRefStream() {
+ public void testRemoveByRefStream() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("RefStreamDelA").build());
var id2 = cities.insertAndFetchId(City.builder().name("RefStreamDelB").build());
long before = cities.count();
- cities.deleteByRef(Stream.of(
+ cities.removeByRef(Stream.of(
Ref.of(City.class, id1),
Ref.of(City.class, id2)
));
@@ -613,13 +613,13 @@ public void testDeleteByRefStream() {
// deleteByRef(Stream<Ref<City>>, int batchSize)
@Test
- public void testDeleteByRefStreamWithBatchSize() {
+ public void testRemoveByRefStreamWithBatchSize() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("RefBatchDelA").build());
var id2 = cities.insertAndFetchId(City.builder().name("RefBatchDelB").build());
long before = cities.count();
- cities.deleteByRef(
+ cities.removeByRef(
Stream.of(
Ref.of(City.class, id1),
Ref.of(City.class, id2)
@@ -727,29 +727,6 @@ public void testSelectAll() {
}
}
- @Test
- public void testSelectById() {
- var orm = ORMTemplate.of(dataSource);
- var cities = orm.entity(City.class);
- try (var stream = cities.selectById(Stream.of(1, 2, 3))) {
- List result = stream.toList();
- assertEquals(3, result.size());
- }
- }
-
- @Test
- public void testSelectByRef() {
- var orm = ORMTemplate.of(dataSource);
- var cities = orm.entity(City.class);
- try (var stream = cities.selectByRef(Stream.of(
- Ref.of(City.class, 1),
- Ref.of(City.class, 2)
- ))) {
- List result = stream.toList();
- assertEquals(2, result.size());
- }
- }
-
@Test
public void testCountById() {
var orm = ORMTemplate.of(dataSource);
@@ -852,33 +829,6 @@ public void testFindAll() {
assertEquals(cities.count(), all.size());
}
- // selectById with chunkSize
-
- @Test
- public void testSelectByIdWithChunkSize() {
- var orm = ORMTemplate.of(dataSource);
- var cities = orm.entity(City.class);
- try (var stream = cities.selectById(Stream.of(1, 2, 3, 4), 2)) {
- List result = stream.toList();
- assertEquals(4, result.size());
- }
- }
-
- // selectByRef with chunkSize
-
- @Test
- public void testSelectByRefWithChunkSize() {
- var orm = ORMTemplate.of(dataSource);
- var cities = orm.entity(City.class);
- try (var stream = cities.selectByRef(
- Stream.of(Ref.of(City.class, 1), Ref.of(City.class, 2), Ref.of(City.class, 3)),
- 2
- )) {
- List result = stream.toList();
- assertEquals(3, result.size());
- }
- }
-
// countById with chunkSize
@Test
@@ -992,21 +942,21 @@ public void testUuidUpdate() {
}
@Test
- public void testUuidDelete() {
+ public void testUuidRemove() {
var orm = ORMTemplate.of(dataSource);
var apiKeys = orm.entity(ApiKey.class);
long before = apiKeys.count();
- apiKeys.delete(apiKeys.getById(DEFAULT_KEY_ID));
+ apiKeys.remove(apiKeys.getById(DEFAULT_KEY_ID));
assertEquals(before - 1, apiKeys.count());
assertTrue(apiKeys.findById(DEFAULT_KEY_ID).isEmpty());
}
@Test
- public void testUuidDeleteById() {
+ public void testUuidRemoveById() {
var orm = ORMTemplate.of(dataSource);
var apiKeys = orm.entity(ApiKey.class);
long before = apiKeys.count();
- apiKeys.deleteById(SECONDARY_KEY_ID);
+ apiKeys.removeById(SECONDARY_KEY_ID);
assertEquals(before - 1, apiKeys.count());
}
}
diff --git a/storm-core/src/test/java/st/orm/core/EntityRepositoryValidationIntegrationTest.java b/storm-core/src/test/java/st/orm/core/EntityRepositoryValidationIntegrationTest.java
index a9398136d..c1f8cd49f 100644
--- a/storm-core/src/test/java/st/orm/core/EntityRepositoryValidationIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/EntityRepositoryValidationIntegrationTest.java
@@ -102,18 +102,18 @@ public void testUpdateWithZeroPkThrows() {
// Delete validation: PK must be set
@Test
- public void testDeleteWithDefaultPkThrows() {
+ public void testRemoveWithDefaultPkThrows() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
// Deleting a city with PK=0 (default) should fail validation.
assertThrows(PersistenceException.class,
- () -> cities.delete(City.builder().id(0).name("No PK Delete").build()));
+ () -> cities.remove(City.builder().id(0).name("No PK Delete").build()));
}
// Non-auto-generated PK: VetSpecialty uses @PK(generation = NONE)
@Test
- public void testNonAutoGenPkInsertAndDelete() {
+ public void testNonAutoGenPkInsertAndRemove() {
var orm = ORMTemplate.of(dataSource);
var vetSpecialties = orm.entity(VetSpecialty.class);
// VetSpecialty has @PK(generation = NONE) with compound key.
@@ -124,7 +124,7 @@ public void testNonAutoGenPkInsertAndDelete() {
VetSpecialty fetched = vetSpecialties.getById(pk);
assertNotNull(fetched);
// Delete it.
- vetSpecialties.delete(new VetSpecialty(pk));
+ vetSpecialties.remove(new VetSpecialty(pk));
}
@Test
@@ -164,7 +164,7 @@ public void testOptimisticLockExceptionOnVersionMismatch() {
}
@Test
- public void testDeleteWithStaleVersionThrows() {
+ public void testRemoveWithStaleVersionThrows() {
var orm = ORMTemplate.of(dataSource);
var owners = orm.entity(Owner.class);
Owner owner = owners.getById(1);
@@ -172,7 +172,7 @@ public void testDeleteWithStaleVersionThrows() {
owners.update(owner.toBuilder().firstName("Updated").build());
// Deleting with stale version should throw PersistenceException.
assertThrows(PersistenceException.class,
- () -> owners.delete(owner));
+ () -> owners.remove(owner));
}
// InsertAndFetch
@@ -215,11 +215,11 @@ public void testUpdateAndFetch() {
// Delete non-existent entity throws
@Test
- public void testDeleteNonExistentEntityThrows() {
+ public void testRemoveNonExistentEntityThrows() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
assertThrows(PersistenceException.class,
- () -> cities.delete(City.builder().id(99999).name("NonExistent").build()));
+ () -> cities.remove(City.builder().id(99999).name("NonExistent").build()));
}
// Batch insert with auto-gen PK
@@ -266,13 +266,13 @@ public void testBatchUpdate() {
// Batch delete
@Test
- public void testBatchDelete() {
+ public void testBatchRemove() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("TempDel1").build());
var id2 = cities.insertAndFetchId(City.builder().name("TempDel2").build());
long before = cities.count();
- cities.delete(List.of(
+ cities.remove(List.of(
City.builder().id(id1).name("TempDel1").build(),
City.builder().id(id2).name("TempDel2").build()
));
@@ -306,7 +306,7 @@ public void afterInsert(@Nonnull City entity) {
// Callback on delete operations
@Test
- public void testDeleteCallbacksWithBatch() {
+ public void testRemoveCallbacksWithBatch() {
List beforeLog = new ArrayList<>();
List afterLog = new ArrayList<>();
var orm = ORMTemplate.of(dataSource).withEntityCallback(new EntityCallback() {
@@ -323,7 +323,7 @@ public void afterDelete(@Nonnull City entity) {
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("DelCbA").build());
var id2 = cities.insertAndFetchId(City.builder().name("DelCbB").build());
- cities.delete(Stream.of(
+ cities.remove(Stream.of(
City.builder().id(id1).name("DelCbA").build(),
City.builder().id(id2).name("DelCbB").build()
));
@@ -507,13 +507,13 @@ public void testModelValuesForOwner() throws SqlTemplateException {
// Stream-based delete
@Test
- public void testStreamBasedDelete() {
+ public void testStreamBasedRemove() {
var orm = ORMTemplate.of(dataSource);
var cities = orm.entity(City.class);
var id1 = cities.insertAndFetchId(City.builder().name("StreamDel1").build());
var id2 = cities.insertAndFetchId(City.builder().name("StreamDel2").build());
long before = cities.count();
- cities.delete(Stream.of(
+ cities.remove(Stream.of(
City.builder().id(id1).name("StreamDel1").build(),
City.builder().id(id2).name("StreamDel2").build()
));
@@ -799,21 +799,21 @@ public void testJoinedInheritanceBatchUpdate() {
}
@Test
- public void testJoinedInheritanceDelete() {
+ public void testJoinedInheritanceRemove() {
var orm = ORMTemplate.of(dataSource);
var joinedAnimals = orm.entity(st.orm.core.model.polymorphic.JoinedAnimal.class);
var dog = new st.orm.core.model.polymorphic.JoinedDog(null, "DeleteDog", 30);
Integer id = joinedAnimals.insertAndFetchId(dog);
- joinedAnimals.delete(new st.orm.core.model.polymorphic.JoinedDog(id, "DeleteDog", 30));
+ joinedAnimals.remove(new st.orm.core.model.polymorphic.JoinedDog(id, "DeleteDog", 30));
}
@Test
- public void testJoinedInheritanceBatchDelete() {
+ public void testJoinedInheritanceBatchRemove() {
var orm = ORMTemplate.of(dataSource);
var joinedAnimals = orm.entity(st.orm.core.model.polymorphic.JoinedAnimal.class);
Integer id1 = joinedAnimals.insertAndFetchId(new st.orm.core.model.polymorphic.JoinedCat(null, "BD Cat1", true));
Integer id2 = joinedAnimals.insertAndFetchId(new st.orm.core.model.polymorphic.JoinedDog(null, "BD Dog1", 15));
- joinedAnimals.delete(Stream.of(
+ joinedAnimals.remove(Stream.of(
new st.orm.core.model.polymorphic.JoinedCat(id1, "BD Cat1", true),
new st.orm.core.model.polymorphic.JoinedDog(id2, "BD Dog1", 15)
));
diff --git a/storm-core/src/test/java/st/orm/core/JoinedEntityCallbackIntegrationTest.java b/storm-core/src/test/java/st/orm/core/JoinedEntityCallbackIntegrationTest.java
index a72075362..71bbeec45 100644
--- a/storm-core/src/test/java/st/orm/core/JoinedEntityCallbackIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/JoinedEntityCallbackIntegrationTest.java
@@ -167,7 +167,7 @@ public void afterUpdate(@Nonnull JoinedAnimal entity) {
// Exercises deleteJoined + beforeDelete/afterDelete callback paths
@Test
- public void testJoinedEntityDeleteWithCallbacks() {
+ public void testJoinedEntityRemoveWithCallbacks() {
List log = new ArrayList<>();
var orm = ORMTemplate.of(dataSource).withEntityCallback(new EntityCallback() {
@Override
@@ -184,13 +184,13 @@ public void afterDelete(@Nonnull JoinedAnimal entity) {
// First insert a new entity so we can safely delete it.
Integer newId = animals.insertAndFetchId(new JoinedCat(null, "TempCatDel", false));
JoinedAnimal toDelete = animals.getById(newId);
- animals.delete(toDelete);
+ animals.remove(toDelete);
assertTrue(log.contains("beforeDelete"));
assertTrue(log.contains("afterDelete"));
}
@Test
- public void testJoinedEntityBatchDeleteWithCallbacks() {
+ public void testJoinedEntityBatchRemoveWithCallbacks() {
List log = new ArrayList<>();
var orm = ORMTemplate.of(dataSource).withEntityCallback(new EntityCallback() {
@Override
@@ -208,32 +208,32 @@ public void afterDelete(@Nonnull JoinedAnimal entity) {
Integer id2 = animals.insertAndFetchId(new JoinedDog(null, "DelBatch2", 10));
JoinedAnimal cat = animals.getById(id1);
JoinedAnimal dog = animals.getById(id2);
- animals.delete(List.of(cat, dog));
+ animals.remove(List.of(cat, dog));
assertEquals(4, log.size()); // 2 beforeDelete + 2 afterDelete
}
// Joined entity deleteById (exercises deleteJoined with null concreteType)
@Test
- public void testJoinedEntityDeleteById() {
+ public void testJoinedEntityRemoveById() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
Integer newId = animals.insertAndFetchId(new JoinedDog(null, "TempDogDel", 5));
long countBefore = animals.count();
- animals.deleteById(newId);
+ animals.removeById(newId);
assertEquals(countBefore - 1, animals.count());
}
// Joined entity batch deleteByRef
@Test
- public void testJoinedEntityBatchDeleteByRef() {
+ public void testJoinedEntityBatchRemoveByRef() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
Integer id1 = animals.insertAndFetchId(new JoinedCat(null, "RefDel1", true));
Integer id2 = animals.insertAndFetchId(new JoinedDog(null, "RefDel2", 15));
long countBefore = animals.count();
- animals.deleteByRef(List.of(
+ animals.removeByRef(List.of(
Ref.of(JoinedAnimal.class, id1),
Ref.of(JoinedAnimal.class, id2)
));
@@ -243,14 +243,14 @@ public void testJoinedEntityBatchDeleteByRef() {
// Joined entity batch deleteById
@Test
- public void testJoinedEntityBatchDeleteByIdList() {
+ public void testJoinedEntityBatchRemoveByIdList() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
Integer id1 = animals.insertAndFetchId(new JoinedCat(null, "IdDel1", false));
Integer id2 = animals.insertAndFetchId(new JoinedDog(null, "IdDel2", 8));
long countBefore = animals.count();
- animals.deleteById(id1);
- animals.deleteById(id2);
+ animals.removeById(id1);
+ animals.removeById(id2);
assertEquals(countBefore - 2, animals.count());
}
@@ -286,7 +286,7 @@ public void testJoinedEntityStreamUpdate() {
// Joined entity stream-based batch delete
@Test
- public void testJoinedEntityStreamDelete() {
+ public void testJoinedEntityStreamRemove() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
Integer id1 = animals.insertAndFetchId(new JoinedCat(null, "StreamDel1", true));
@@ -294,7 +294,7 @@ public void testJoinedEntityStreamDelete() {
JoinedAnimal cat = animals.getById(id1);
JoinedAnimal dog = animals.getById(id2);
long countBefore = animals.count();
- animals.delete(Stream.of(cat, dog), 1);
+ animals.remove(Stream.of(cat, dog), 1);
assertEquals(countBefore - 2, animals.count());
}
@@ -497,7 +497,7 @@ public void testJoinedEntityBatchInsertAndFetch() {
// Joined entity: delete with callback transforms nothing (void)
@Test
- public void testJoinedEntityDeleteByIdWithCallbacks() {
+ public void testJoinedEntityRemoveByIdWithCallbacks() {
List log = new ArrayList<>();
var orm = ORMTemplate.of(dataSource).withEntityCallback(new EntityCallback() {
@Override
@@ -508,7 +508,7 @@ public void beforeDelete(@Nonnull JoinedAnimal entity) {
var animals = orm.entity(JoinedAnimal.class);
Integer newId = animals.insertAndFetchId(new JoinedCat(null, "DelByIdCb", true));
// deleteById does NOT fire entity callbacks (entity not loaded).
- animals.deleteById(newId);
+ animals.removeById(newId);
// No callback should fire for deleteById.
assertTrue(log.isEmpty(), "deleteById should not fire entity callbacks");
}
@@ -526,16 +526,6 @@ public void testJoinedEntitySelectAll() {
assertTrue(all.stream().anyMatch(a -> a instanceof JoinedDog));
}
- @Test
- public void testJoinedEntitySelectById() {
- var orm = ORMTemplate.of(dataSource);
- var animals = orm.entity(JoinedAnimal.class);
- try (var stream = animals.selectById(Stream.of(1, 3), 2)) {
- List result = stream.toList();
- assertEquals(2, result.size());
- }
- }
-
// EntityRepositoryImpl: various edge cases
@Test
diff --git a/storm-core/src/test/java/st/orm/core/JoinedEntityCrudIntegrationTest.java b/storm-core/src/test/java/st/orm/core/JoinedEntityCrudIntegrationTest.java
index c293ef425..bc93a96d8 100644
--- a/storm-core/src/test/java/st/orm/core/JoinedEntityCrudIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/JoinedEntityCrudIntegrationTest.java
@@ -160,13 +160,13 @@ public void testBatchUpdateJoinedEntities() {
// Delete joined entity
@Test
- public void testDeleteJoinedEntity() {
+ public void testRemoveJoinedEntity() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert a new entity to delete (avoid FK constraint issues with seed data).
Integer insertedId = animals.insertAndFetchId(new JoinedCat(null, "ToDelete", true));
long countBefore = animals.count();
- animals.delete(new JoinedCat(insertedId, "ToDelete", true));
+ animals.remove(new JoinedCat(insertedId, "ToDelete", true));
assertEquals(countBefore - 1, animals.count());
assertTrue(animals.findById(insertedId).isEmpty());
}
@@ -174,13 +174,13 @@ public void testDeleteJoinedEntity() {
// Batch delete joined entities
@Test
- public void testBatchDeleteJoinedEntities() {
+ public void testBatchRemoveJoinedEntities() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
Integer id1 = animals.insertAndFetchId(new JoinedCat(null, "BatchDel1", true));
Integer id2 = animals.insertAndFetchId(new JoinedDog(null, "BatchDel2", 15));
long countBefore = animals.count();
- animals.delete(List.of(
+ animals.remove(List.of(
new JoinedCat(id1, "BatchDel1", true),
new JoinedDog(id2, "BatchDel2", 15)
));
@@ -228,13 +228,13 @@ public void testGetByIdJoinedDog() {
// Delete joined entity by id (using delete(entity))
@Test
- public void testDeleteJoinedDogByEntity() {
+ public void testRemoveJoinedDogByEntity() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
Integer insertedId = animals.insertAndFetchId(new JoinedDog(null, "DeleteDog", 18));
long countBefore = animals.count();
JoinedAnimal toDelete = animals.getById(insertedId);
- animals.delete(toDelete);
+ animals.remove(toDelete);
assertEquals(countBefore - 1, animals.count());
}
diff --git a/storm-core/src/test/java/st/orm/core/PolymorphicIntegrationTest.java b/storm-core/src/test/java/st/orm/core/PolymorphicIntegrationTest.java
index 22132d942..00ec0fea0 100644
--- a/storm-core/src/test/java/st/orm/core/PolymorphicIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/PolymorphicIntegrationTest.java
@@ -125,7 +125,7 @@ public void testUpdateCat() {
}
@Test
- public void testDeleteAnimal() {
+ public void testRemoveAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(Animal.class);
// Insert a new animal and then delete it (avoids FK constraint from adoption table).
@@ -133,7 +133,7 @@ public void testDeleteAnimal() {
long before = animals.count();
var result = animals.select().getResultList();
var last = result.getLast();
- animals.delete(last);
+ animals.remove(last);
assertEquals(before - 1, animals.count());
}
@@ -318,7 +318,7 @@ public void testUpdateJoinedDogToCat() {
}
@Test
- public void testDeleteJoinedAnimal() {
+ public void testRemoveJoinedAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert a new animal and then delete it (avoids FK constraint from joined_adoption table).
@@ -326,18 +326,18 @@ public void testDeleteJoinedAnimal() {
long before = animals.count();
var result = animals.select().getResultList();
var last = result.getLast();
- animals.delete(last);
+ animals.remove(last);
assertEquals(before - 1, animals.count());
}
@Test
- public void testDeleteByIdJoinedAnimal() {
+ public void testRemoveByIdJoinedAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert a new animal and then delete by ID.
var id = animals.insertAndFetchId(new JoinedDog(null, "TempDog", 10));
long before = animals.count();
- animals.deleteById(id);
+ animals.removeById(id);
assertEquals(before - 1, animals.count());
}
@@ -497,7 +497,7 @@ public void testUpdateNodscBirdToCat() {
}
@Test
- public void testDeleteNodscAnimal() {
+ public void testRemoveNodscAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(NodscAnimal.class);
// Insert a new animal and then delete it.
@@ -505,12 +505,12 @@ public void testDeleteNodscAnimal() {
long before = animals.count();
var result = animals.select().getResultList();
var last = result.getLast();
- animals.delete(last);
+ animals.remove(last);
assertEquals(before - 1, animals.count());
}
@Test
- public void testDeleteNodscBird() {
+ public void testRemoveNodscBird() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(NodscAnimal.class);
// Insert and delete a bird (PK-only extension table).
@@ -519,17 +519,17 @@ public void testDeleteNodscBird() {
var result = animals.select().getResultList();
var last = result.getLast();
assertTrue(last instanceof NodscBird);
- animals.delete(last);
+ animals.remove(last);
assertEquals(before - 1, animals.count());
}
@Test
- public void testDeleteByIdNodscAnimal() {
+ public void testRemoveByIdNodscAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(NodscAnimal.class);
var id = animals.insertAndFetchId(new NodscDog(null, "TempDog", 10));
long before = animals.count();
- animals.deleteById(id);
+ animals.removeById(id);
assertEquals(before - 1, animals.count());
}
@@ -630,7 +630,7 @@ public void testBatchUpdateJoinedAnimalsWithTypeChange() {
}
@Test
- public void testBatchDeleteJoinedAnimals() {
+ public void testBatchRemoveJoinedAnimals() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert entities to delete.
@@ -642,12 +642,12 @@ public void testBatchDeleteJoinedAnimals() {
var result = animals.select().getResultList();
var cat = result.get(result.size() - 2);
var dog = result.get(result.size() - 1);
- animals.delete(List.of(cat, dog));
+ animals.remove(List.of(cat, dog));
assertEquals(before - 2, animals.count());
}
@Test
- public void testBatchDeleteByRefJoinedAnimals() {
+ public void testBatchRemoveByRefJoinedAnimals() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert entities to delete by ref.
@@ -660,7 +660,7 @@ public void testBatchDeleteByRefJoinedAnimals() {
List<Ref<JoinedAnimal>> refs = ids.stream()
.map(id -> Ref.of(JoinedAnimal.class, id))
.toList();
- animals.deleteByRef(refs);
+ animals.removeByRef(refs);
assertEquals(before - 2, animals.count());
}
@@ -742,7 +742,7 @@ public void testBatchUpdateNodscAnimalsWithTypeChange() {
}
@Test
- public void testBatchDeleteNodscAnimals() {
+ public void testBatchRemoveNodscAnimals() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(NodscAnimal.class);
// Insert entities to delete (mixed subtypes).
@@ -756,7 +756,7 @@ public void testBatchDeleteNodscAnimals() {
var cat = result.get(result.size() - 3);
var dog = result.get(result.size() - 2);
var bird = result.get(result.size() - 1);
- animals.delete(List.of(cat, dog, bird));
+ animals.remove(List.of(cat, dog, bird));
assertEquals(before - 3, animals.count());
}
@@ -817,13 +817,13 @@ public void testUpdateIntDiscCat() {
}
@Test
- public void testDeleteIntDiscAnimal() {
+ public void testRemoveIntDiscAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(IntDiscAnimal.class);
animals.insert(new IntDiscCat(null, "Temp", false));
long before = animals.count();
var result = animals.select().getResultList();
- animals.delete(result.getLast());
+ animals.remove(result.getLast());
assertEquals(before - 1, animals.count());
}
@@ -883,13 +883,13 @@ public void testUpdateCharDiscCat() {
}
@Test
- public void testDeleteCharDiscAnimal() {
+ public void testRemoveCharDiscAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(CharDiscAnimal.class);
animals.insert(new CharDiscDog(null, "Temp", 5));
long before = animals.count();
var result = animals.select().getResultList();
- animals.delete(result.getLast());
+ animals.remove(result.getLast());
assertEquals(before - 1, animals.count());
}
@@ -957,12 +957,12 @@ public void testUpdateComment() {
}
@Test
- public void testDeleteComment() {
+ public void testRemoveComment() {
var orm = ORMTemplate.of(dataSource);
var comments = orm.entity(Comment.class);
var insertedId = comments.insertAndFetchId(new Comment(null, "To be deleted", commentableRef(Post.class, 1)));
long before = comments.count();
- comments.deleteById(insertedId);
+ comments.removeById(insertedId);
assertEquals(before - 1, comments.count());
}
@@ -1057,7 +1057,7 @@ public void testBatchUpdateSingleTableAnimals() {
}
@Test
- public void testBatchDeleteSingleTableAnimals() {
+ public void testBatchRemoveSingleTableAnimals() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(Animal.class);
animals.insert(List.of(
@@ -1068,7 +1068,7 @@ public void testBatchDeleteSingleTableAnimals() {
var result = animals.select().getResultList();
var cat = result.get(result.size() - 2);
var dog = result.get(result.size() - 1);
- animals.delete(List.of(cat, dog));
+ animals.remove(List.of(cat, dog));
assertEquals(before - 2, animals.count());
}
diff --git a/storm-core/src/test/java/st/orm/core/QueryModelAndUpsertIntegrationTest.java b/storm-core/src/test/java/st/orm/core/QueryModelAndUpsertIntegrationTest.java
index fd8d8dfcb..75e57c305 100644
--- a/storm-core/src/test/java/st/orm/core/QueryModelAndUpsertIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/QueryModelAndUpsertIntegrationTest.java
@@ -116,24 +116,6 @@ public void testUpsertAndFetchIdWithNonAutoGeneratedPKAndDefaultPKThrows() {
() -> petTypes.upsertAndFetchId(new PetTypeNoneGeneration(null, "TestType")));
}
- // QueryModelImpl.resolveElements: Record next to operator should throw
-
- @Test
- public void testRecordNextToOperatorThrows() {
- var orm = of(dataSource);
- City city = new City(1, "Test");
- assertThrows(PersistenceException.class, () ->
- orm.query(raw("SELECT * FROM city WHERE id = \0", city)).getResultList(City.class));
- }
-
- @Test
- public void testRecordNextToLeadingOperatorThrows() {
- var orm = of(dataSource);
- City city = new City(1, "Test");
- assertThrows(PersistenceException.class, () ->
- orm.query(raw("SELECT * FROM city WHERE \0 = 1", city)).getResultList(City.class));
- }
-
// QueryModelImpl: Stream as value should throw
@Test
diff --git a/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java b/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java
index e841f4653..e74296f77 100644
--- a/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java
@@ -444,13 +444,11 @@ public void testSelectWithInvalidPath() {
@Test
public void testWhereWithOperatorAndRecord() {
- var e = assertThrows(PersistenceException.class, () -> {
- var owner = ORMTemplate.of(dataSource).entity(Owner.class).getById(1);
- ORMTemplate.of(dataSource).entity(Pet.class).select()
- .where(raw("\0 = \0", Pet_.owner, owner))
- .getResultList();
- });
- assertInstanceOf(SqlTemplateException.class, e.getCause());
+ var owner = ORMTemplate.of(dataSource).entity(Owner.class).getById(1);
+ var pets = ORMTemplate.of(dataSource).entity(Pet.class).select()
+ .where(raw("\0 = \0", Pet_.owner, owner))
+ .getResultList();
+ assertFalse(pets.isEmpty());
}
@Test
@@ -1209,7 +1207,7 @@ public void testPetVisitCount() {
public void delete() {
// data.sql: 14 visits. After deleting visit 1, expect 13.
var repo = ORMTemplate.of(dataSource).entity(Visit.class);
- repo.delete(Visit.builder().id(1).build());
+ repo.remove(Visit.builder().id(1).build());
assertEquals(13, repo.select().getResultCount());
}
@@ -1233,7 +1231,7 @@ public void deleteByOwner() {
public void deleteAll() {
// After deleting all visits, count should be 0.
var repo = ORMTemplate.of(dataSource).entity(Visit.class);
- repo.deleteAll();
+ repo.removeAll();
assertEquals(0, repo.select().getResultCount());
}
@@ -1241,7 +1239,7 @@ public void deleteAll() {
public void deleteBatch() {
var repo = ORMTemplate.of(dataSource).entity(Visit.class);
try (var stream = repo.select().getResultStream()) {
- repo.delete(stream);
+ repo.remove(stream);
}
assertEquals(0, repo.count());
}
@@ -1250,7 +1248,7 @@ public void deleteBatch() {
public void deleteRefBatch() {
var repo = ORMTemplate.of(dataSource).entity(Visit.class);
try (var stream = repo.select().getResultStream().map(Ref::of)) {
- repo.deleteByRef(stream);
+ repo.removeByRef(stream);
}
assertEquals(0, repo.count());
}
@@ -2086,23 +2084,6 @@ public void testRefFetchNoSqlWhenCached() {
assertSame(pet, ref.fetch(), "Ref.fetch() should return the cached instance");
}
- @Transactional(propagation = REQUIRED, isolation = REPEATABLE_READ)
- @Test
- public void testSelectByIdPartialCacheHit() {
- // When some entities are cached, only uncached ones should be queried.
- var repository = ORMTemplate.of(dataSource).entity(Pet.class);
- // Pre-load entity 1 into cache.
- var pet1 = repository.getById(1);
- AtomicReference sql = new AtomicReference<>();
- // Select by multiple IDs - should query only uncached IDs.
- observe(sql::set, () -> repository.selectById(List.of(1, 2, 3).stream(), 100).toList());
- assertNotNull(sql.get(), "Should execute SQL for uncached IDs");
- // Verify cached entity is same instance.
- var result = repository.selectById(List.of(1).stream(), 100).toList();
- assertEquals(1, result.size());
- assertSame(pet1, result.get(0), "Cached entity should be the same instance");
- }
-
@Transactional(propagation = REQUIRED, isolation = REPEATABLE_READ)
@Test
public void testRefFetchFromCachedOwner() {
@@ -2446,6 +2427,20 @@ public void testEntityPageRef() {
assertInstanceOf(Ref.class, page.content().getFirst());
}
+ @Test
+ public void testEntityFindAllRef() {
+ var refs = ORMTemplate.of(dataSource).entity(City.class).findAllRef();
+ assertEquals(6, refs.size());
+ assertInstanceOf(Ref.class, refs.getFirst());
+ }
+
+ @Test
+ public void testProjectionFindAllRef() {
+ var refs = ORMTemplate.of(dataSource).projection(OwnerView.class).findAllRef();
+ assertEquals(10, refs.size());
+ assertInstanceOf(Ref.class, refs.getFirst());
+ }
+
@Test
public void testFindByKey() {
var repo = ORMTemplate.of(dataSource).entity(Owner.class);
diff --git a/storm-core/src/test/java/st/orm/core/RepositoryProxyDispatchIntegrationTest.java b/storm-core/src/test/java/st/orm/core/RepositoryProxyDispatchIntegrationTest.java
index 25d2779cf..fc667556e 100644
--- a/storm-core/src/test/java/st/orm/core/RepositoryProxyDispatchIntegrationTest.java
+++ b/storm-core/src/test/java/st/orm/core/RepositoryProxyDispatchIntegrationTest.java
@@ -88,14 +88,14 @@ public void testEntityRepositoryProxyFindById() {
}
@Test
- public void testEntityRepositoryProxyInsertAndDelete() {
+ public void testEntityRepositoryProxyInsertAndRemove() {
var orm = ORMTemplate.of(dataSource);
CityEntityRepository repository = orm.repository(CityEntityRepository.class);
long countBefore = repository.count();
Integer insertedId = repository.insertAndFetchId(City.builder().name("ProxyCity").build());
assertNotNull(insertedId);
assertEquals(countBefore + 1, repository.count());
- repository.deleteById(insertedId);
+ repository.removeById(insertedId);
assertEquals(countBefore, repository.count());
}
diff --git a/storm-foundation/pom.xml b/storm-foundation/pom.xml
index 9afa6efd4..4381f19cd 100644
--- a/storm-foundation/pom.xml
+++ b/storm-foundation/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-foundation
diff --git a/storm-foundation/src/main/java/st/orm/MappedWindow.java b/storm-foundation/src/main/java/st/orm/MappedWindow.java
deleted file mode 100644
index 4f47fad3f..000000000
--- a/storm-foundation/src/main/java/st/orm/MappedWindow.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2024 - 2026 the original author or authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package st.orm;
-
-import static java.util.List.copyOf;
-
-import jakarta.annotation.Nonnull;
-import jakarta.annotation.Nullable;
-import java.util.List;
-
-/**
- * Represents a window of query results from a scrolling operation where the result type differs from the data type.
- *
- * <p>This is used for queries where the result type {@code R} is not the same as the data type {@code T}, such as
- * ref queries ({@code R = Ref<E>}, {@code T = E}). For the common case where the result type matches the data type,
- * use {@link Window} instead.
- *
- * <pre>{@code
- * MappedWindow<Ref<User>, User> window = userRepository.selectRef().scroll(Scrollable.of(User_.id, 20));
- * if (window.hasNext()) {
- * MappedWindow<Ref<User>, User> next = userRepository.selectRef().scroll(window.nextScrollable());
- * }
- * }</pre>
- *
- * The {@code nextScrollable} and {@code previousScrollable} navigation tokens are always provided when the window
- * has content, regardless of whether {@code hasNext} or {@code hasPrevious} is {@code true}. This allows developers
- * to follow the cursor even when no more results were detected at query time, which is useful for polling scenarios
- * where new data may appear after the initial query. The {@code hasNext} and {@code hasPrevious} flags are
- * informational: they indicate whether more results existed at the time of the query, but the decision to follow
- * the cursor is left to the developer.
- *
- * @param content the list of results in this window; never contains {@code null} elements.
- * @param hasNext {@code true} if more results existed beyond this window in the scroll direction at query time.
- * @param hasPrevious {@code true} if this window was fetched with a cursor position (i.e., not the first page).
- * @param nextScrollable the scrollable to fetch the next window, or {@code null} if the window is empty.
- * @param previousScrollable the scrollable to fetch the previous window, or {@code null} if the window is empty.
- * @param <R> the result type (e.g., {@code Ref<E>} for ref queries).
- * @param <T> the data type, used to type the {@link Scrollable} navigation tokens.
- * @since 1.11
- */
-public record MappedWindow<R, T>(
- @Nonnull List<R> content,
- boolean hasNext,
- boolean hasPrevious,
- @Nullable Scrollable<T> nextScrollable,
- @Nullable Scrollable<T> previousScrollable
-) {
- public MappedWindow {
- content = copyOf(content);
- }
-
- /**
- * Returns an empty mapped window with no content and no navigation tokens.
- *
- * @param <R> the result type.
- * @param <T> the data type.
- * @return an empty mapped window.
- */
- public static <R, T> MappedWindow<R, T> empty() {
- return new MappedWindow<>(List.of(), false, false, null, null);
- }
-
- /**
- * Returns an opaque cursor string for fetching the next window, or {@code null} if there is no next window
- * according to {@link #hasNext()}.
- *
- * This method is a convenience for REST APIs that want to include a cursor only when more results were
- * detected. For polling or streaming use cases where you want to follow the cursor regardless, use
- * {@link #nextScrollable()} directly.
- *
- * @return the cursor string, or {@code null}.
- * @see Scrollable#toCursor()
- * @see Scrollable#fromCursor(Metamodel.Key, String)
- */
- @Nullable
- public String nextCursor() {
- return hasNext && nextScrollable != null ? nextScrollable.toCursor() : null;
- }
-
- /**
- * Returns an opaque cursor string for fetching the previous window, or {@code null} if this is the first window
- * according to {@link #hasPrevious()}.
- *
- * This method is a convenience for REST APIs that want to include a cursor only when previous results exist.
- * For use cases where you want to follow the cursor regardless, use {@link #previousScrollable()} directly.
- *
- * @return the cursor string, or {@code null}.
- * @see Scrollable#toCursor()
- * @see Scrollable#fromCursor(Metamodel.Key, String)
- */
- @Nullable
- public String previousCursor() {
- return hasPrevious && previousScrollable != null ? previousScrollable.toCursor() : null;
- }
-}
diff --git a/storm-foundation/src/main/java/st/orm/Page.java b/storm-foundation/src/main/java/st/orm/Page.java
index 5a0c3b1a7..4fb47142d 100644
--- a/storm-foundation/src/main/java/st/orm/Page.java
+++ b/storm-foundation/src/main/java/st/orm/Page.java
@@ -35,7 +35,7 @@
* @param <R> the type of the results.
* @since 1.10
*/
-public record Page<R>(@Nonnull List<R> content, long totalCount, @Nonnull Pageable pageable) {
+public record Page<R>(@Nonnull List<R> content, long totalCount, @Nonnull Pageable pageable) implements Slice<R> {
public Page {
content = copyOf(content);
if (totalCount < 0) {
diff --git a/storm-foundation/src/main/java/st/orm/Scrollable.java b/storm-foundation/src/main/java/st/orm/Scrollable.java
index e318b89a3..17318516a 100644
--- a/storm-foundation/src/main/java/st/orm/Scrollable.java
+++ b/storm-foundation/src/main/java/st/orm/Scrollable.java
@@ -26,7 +26,7 @@
*
* A {@code Scrollable} is the scrolling counterpart of {@link Pageable}. While a {@code Pageable} navigates by
* page number, a {@code Scrollable} navigates by cursor position. Scrollable instances are typically obtained from
- * {@link Window#nextScrollable()} or {@link Window#previousScrollable()}, but can also be created directly using
+ * {@link Window#next()} or {@link Window#previous()}, but can also be created directly using
* the factory methods.
*
* The serialized cursor is opaque and URL-safe, but it is not tamper-proof. If the cursor is exposed to
@@ -203,7 +203,7 @@ public boolean isComposite() {
*
*
{@code
* // Server: include cursor in response
- * String cursor = window.nextScrollable().toCursor();
+ * String cursor = window.nextCursor();
*
* // Client sends cursor back as query parameter
* // Server: reconstruct scrollable
diff --git a/storm-foundation/src/main/java/st/orm/Slice.java b/storm-foundation/src/main/java/st/orm/Slice.java
new file mode 100644
index 000000000..8b931dfb0
--- /dev/null
+++ b/storm-foundation/src/main/java/st/orm/Slice.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2024 - 2026 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package st.orm;
+
+import jakarta.annotation.Nonnull;
+import java.util.List;
+
+/**
+ * Represents a slice of query results — a chunk of data with informational navigation flags.
+ *
+ * A {@code Slice} is the common base for both {@link Window} (cursor-based scrolling) and {@link Page}
+ * (offset-based pagination). It provides access to the result content and flags indicating whether adjacent
+ * results exist, without prescribing a specific navigation mechanism.
+ *
+ * @param <R> the result type of the slice content.
+ * @since 1.11
+ */
+public interface Slice<R> {
+
+ /**
+ * Returns the list of results in this slice. The list is immutable and never contains {@code null} elements.
+ *
+ * @return the results.
+ */
+ @Nonnull
+ List<R> content();
+
+ /**
+ * Returns {@code true} if more results exist beyond this slice in the forward direction.
+ *
+ * @return whether more results exist.
+ */
+ boolean hasNext();
+
+ /**
+ * Returns {@code true} if results exist before this slice.
+ *
+ * @return whether previous results exist.
+ */
+ boolean hasPrevious();
+}
diff --git a/storm-foundation/src/main/java/st/orm/Window.java b/storm-foundation/src/main/java/st/orm/Window.java
index 96a32d032..a8d0bf7f3 100644
--- a/storm-foundation/src/main/java/st/orm/Window.java
+++ b/storm-foundation/src/main/java/st/orm/Window.java
@@ -22,27 +22,20 @@
import java.util.List;
/**
- * Represents a window of query results from a scrolling operation where the result type matches the data type.
+ * Represents a window of query results from a scrolling operation with {@link Scrollable} navigation tokens.
*
- * A {@code Window} is the scrolling counterpart of {@link Page}. While a {@code Page} contains total counts and
- * page numbers for offset-based navigation, a {@code Window} contains cursor-based navigation tokens that allow
- * sequential traversal through large result sets.
- *
- * This is the common case for entity and projection queries where the result type is the same as the data type.
- * For queries where the result type differs from the data type (e.g., ref queries), see {@link MappedWindow}.
- *
- * Use {@link #hasNext()} and {@link #nextScrollable()} to move forward, and {@link #hasPrevious()} and
- * {@link #previousScrollable()} to move backward. Pass the returned {@link Scrollable} to the repository's
- * {@code scroll} method to fetch the adjacent window.
+ * A {@code Window} implements {@link Slice} and provides cursor-based navigation for sequential traversal
+ * through large result sets. Use {@link #next()} and {@link #previous()} for typed programmatic navigation,
+ * or {@link #nextCursor()} and {@link #previousCursor()} for serialized cursor strings suitable for REST APIs.
*
* {@code
* Window<User> window = userRepository.scroll(Scrollable.of(User_.id, 20));
* if (window.hasNext()) {
- * Window<User> next = userRepository.scroll(window.nextScrollable());
+ * Window<User> next = userRepository.scroll(window.next());
* }
* }
*
- * The {@code nextScrollable} and {@code previousScrollable} navigation tokens are always provided when the window
+ * <p>The {@link #next()} and {@link #previous()} navigation tokens are always provided when the window
* has content, regardless of whether {@code hasNext} or {@code hasPrevious} is {@code true}. This allows developers
* to follow the cursor even when no more results were detected at query time, which is useful for polling scenarios
* where new data may appear after the initial query. The {@code hasNext} and {@code hasPrevious} flags are
@@ -54,16 +47,17 @@
* @param hasPrevious {@code true} if this window was fetched with a cursor position (i.e., not the first page).
* @param nextScrollable the scrollable to fetch the next window, or {@code null} if the window is empty.
* @param previousScrollable the scrollable to fetch the previous window, or {@code null} if the window is empty.
- * @param <T> the data type of both the results and the {@link Scrollable} navigation tokens.
+ * @param <R> the result type (e.g., {@code User} for entity queries, {@code Ref<User>} for ref queries).
* @since 1.11
*/
-public record Window<T>(
- @Nonnull List<T> content,
+public record Window<R>(
+ @Nonnull List<R> content,
boolean hasNext,
boolean hasPrevious,
- @Nullable Scrollable<T> nextScrollable,
- @Nullable Scrollable<T> previousScrollable
-) {
+ @Nullable Scrollable<?> nextScrollable,
+ @Nullable Scrollable<?> previousScrollable
+) implements Slice<R> {
+
public Window {
content = copyOf(content);
}
@@ -71,23 +65,49 @@ public record Window(
/**
* Returns an empty window with no content and no navigation tokens.
*
- * @param <T> the data type.
+ * @param <R> the result type.
* @return an empty window.
*/
- public static <T> Window<T> empty() {
+ public static <R> Window<R> empty() {
return new Window<>(List.of(), false, false, null, null);
}
/**
- * Creates a {@code Window} from a {@link MappedWindow} where the result type matches the data type.
+ * Returns a typed scrollable for fetching the next window, or {@code null} if the window is empty.
+ *
+ * The type parameter {@code T} is inferred from the call-site context, typically from the
+ * {@code scroll(Scrollable)} method parameter:
+ *
+ * {@code
+ * Window<User> next = userRepository.scroll(window.next());
+ * }
+ *
+ * @param <T> the data type, inferred from context.
+ * @return the scrollable for the next window, or {@code null}.
+ */
+ @SuppressWarnings("unchecked")
+ @Nullable
+ public <T> Scrollable<T> next() {
+ return (Scrollable<T>) nextScrollable;
+ }
+
+ /**
+ * Returns a typed scrollable for fetching the previous window, or {@code null} if the window is empty.
*
- * @param mappedWindow the mapped window to convert.
- * @param the data type.
- * @return a window with the same content and navigation tokens.
+ * The type parameter {@code T} is inferred from the call-site context, typically from the
+ * {@code scroll(Scrollable)} method parameter:
+ *
+ * {@code
+ * Window<User> prev = userRepository.scroll(window.previous());
+ * }
+ *
+ * @param <T> the data type, inferred from context.
+ * @return the scrollable for the previous window, or {@code null}.
*/
- public static Window of(@Nonnull MappedWindow mappedWindow) {
- return new Window<>(mappedWindow.content(), mappedWindow.hasNext(), mappedWindow.hasPrevious(),
- mappedWindow.nextScrollable(), mappedWindow.previousScrollable());
+ @SuppressWarnings("unchecked")
+ @Nullable
+ public <T> Scrollable<T> previous() {
+ return (Scrollable<T>) previousScrollable;
}
/**
@@ -96,7 +116,7 @@ public static Window of(@Nonnull MappedWindow mappedWi
*
* This method is a convenience for REST APIs that want to include a cursor only when more results were
* detected. For polling or streaming use cases where you want to follow the cursor regardless, use
- * {@link #nextScrollable()} directly.
+ * {@link #next()} directly.
*
* @return the cursor string, or {@code null}.
* @see Scrollable#toCursor()
@@ -112,7 +132,7 @@ public String nextCursor() {
* according to {@link #hasPrevious()}.
*
* This method is a convenience for REST APIs that want to include a cursor only when previous results exist.
- * For use cases where you want to follow the cursor regardless, use {@link #previousScrollable()} directly.
+ * For use cases where you want to follow the cursor regardless, use {@link #previous()} directly.
*
* @return the cursor string, or {@code null}.
* @see Scrollable#toCursor()
diff --git a/storm-foundation/src/test/java/st/orm/WindowTest.java b/storm-foundation/src/test/java/st/orm/WindowTest.java
index dcf75908a..3b6709096 100644
--- a/storm-foundation/src/test/java/st/orm/WindowTest.java
+++ b/storm-foundation/src/test/java/st/orm/WindowTest.java
@@ -30,7 +30,7 @@ private static Metamodel.Key stubKey(Class fieldTyp
@Test
void emptyWindowHasNoNavigation() {
- var window = new MappedWindow<>(List.of(), false, false, null, null);
+ var window = new Window<>(List.of(), false, false, null, null);
assertTrue(window.content().isEmpty());
assertFalse(window.hasNext());
assertFalse(window.hasPrevious());
@@ -41,7 +41,7 @@ void emptyWindowHasNoNavigation() {
@Test
void windowWithNextScrollableHasNext() {
var next = Scrollable.of(KEY, 42, 20);
- var window = new MappedWindow<>(List.of("a", "b"), true, false, next, null);
+ var window = new Window<>(List.of("a", "b"), true, false, next, null);
assertTrue(window.hasNext());
assertFalse(window.hasPrevious());
assertNotNull(window.nextScrollable());
@@ -51,7 +51,7 @@ void windowWithNextScrollableHasNext() {
@Test
void windowWithPreviousScrollableHasPrevious() {
var prev = Scrollable.of(KEY, 1, 20).backward();
- var window = new MappedWindow<>(List.of("a", "b"), false, true, null, prev);
+ var window = new Window<>(List.of("a", "b"), false, true, null, prev);
assertFalse(window.hasNext());
assertTrue(window.hasPrevious());
assertNull(window.nextScrollable());
@@ -62,7 +62,7 @@ void windowWithPreviousScrollableHasPrevious() {
void windowWithBothNavigations() {
var next = Scrollable.of(KEY, 42, 20);
var prev = Scrollable.of(KEY, 1, 20).backward();
- var window = new MappedWindow<>(List.of("a", "b"), true, true, next, prev);
+ var window = new Window<>(List.of("a", "b"), true, true, next, prev);
assertTrue(window.hasNext());
assertTrue(window.hasPrevious());
}
@@ -70,20 +70,47 @@ void windowWithBothNavigations() {
@Test
void contentIsImmutable() {
var list = new ArrayList<>(List.of("a", "b"));
- var window = new MappedWindow<>(list, false, false, null, null);
+ var window = new Window<>(list, false, false, null, null);
list.add("c");
assertEquals(2, window.content().size());
}
+ @Test
+ void nextReturnsTypedScrollable() {
+ var scrollable = Scrollable.of(KEY, 42, 20);
+ var window = new Window<>(List.of("a", "b"), true, false, scrollable, null);
+ Scrollable<String> typed = window.next();
+ assertNotNull(typed);
+ assertEquals(42, typed.keyCursor());
+ assertEquals(20, typed.size());
+ }
+
+ @Test
+ void previousReturnsTypedScrollable() {
+ var scrollable = Scrollable.of(KEY, 1, 20).backward();
+ var window = new Window<>(List.of("a", "b"), false, true, null, scrollable);
+ Scrollable<String> typed = window.previous();
+ assertNotNull(typed);
+ assertEquals(1, typed.keyCursor());
+ assertFalse(typed.isForward());
+ }
+
+ @Test
+ void nextReturnsNullForEmptyWindow() {
+ var window = new Window<>(List.of(), false, false, null, null);
+ assertNull(window.next());
+ assertNull(window.previous());
+ }
+
@Test
void nextCursorIsNullWhenNoNext() {
- var window = new MappedWindow<>(List.of("a"), false, false, null, null);
+ var window = new Window<>(List.of("a"), false, false, null, null);
assertNull(window.nextCursor());
}
@Test
void previousCursorIsNullWhenNoPrevious() {
- var window = new MappedWindow<>(List.of("a"), false, false, null, null);
+ var window = new Window<>(List.of("a"), false, false, null, null);
assertNull(window.previousCursor());
}
}
diff --git a/storm-h2/pom.xml b/storm-h2/pom.xml
index b01832b11..8c0062f84 100644
--- a/storm-h2/pom.xml
+++ b/storm-h2/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-h2
diff --git a/storm-h2/src/test/java/st/orm/spi/h2/H2EntityRepositoryTest.java b/storm-h2/src/test/java/st/orm/spi/h2/H2EntityRepositoryTest.java
index 1b803792b..dcc989090 100644
--- a/storm-h2/src/test/java/st/orm/spi/h2/H2EntityRepositoryTest.java
+++ b/storm-h2/src/test/java/st/orm/spi/h2/H2EntityRepositoryTest.java
@@ -1488,10 +1488,10 @@ public void testUuidUpdate() {
}
@Test
- public void testUuidDelete() {
+ public void testUuidRemove() {
var repo = PreparedStatementTemplate.ORM(dataSource).entity(ApiKey.class);
long before = repo.count();
- repo.delete(repo.getById(DEFAULT_KEY_ID));
+ repo.remove(repo.getById(DEFAULT_KEY_ID));
assertEquals(before - 1, repo.count());
}
}
diff --git a/storm-h2/src/test/java/st/orm/spi/h2/H2PolymorphicTest.java b/storm-h2/src/test/java/st/orm/spi/h2/H2PolymorphicTest.java
index 1066383e0..b5409ce16 100644
--- a/storm-h2/src/test/java/st/orm/spi/h2/H2PolymorphicTest.java
+++ b/storm-h2/src/test/java/st/orm/spi/h2/H2PolymorphicTest.java
@@ -119,14 +119,14 @@ public void testUpdateAnimal() {
}
@Test
- public void testDeleteAnimal() {
+ public void testRemoveAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(Animal.class);
animals.insert(new Cat(null, "Temp", false));
long before = animals.count();
var result = animals.select().getResultList();
var last = result.getLast();
- animals.delete(last);
+ animals.remove(last);
assertEquals(before - 1, animals.count());
}
@@ -225,7 +225,7 @@ public void testUpdateJoinedCat() {
}
@Test
- public void testDeleteJoinedAnimal() {
+ public void testRemoveJoinedAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert a new animal and then delete it.
@@ -233,18 +233,18 @@ public void testDeleteJoinedAnimal() {
long before = animals.count();
var result = animals.select().getResultList();
var last = result.getLast();
- animals.delete(last);
+ animals.remove(last);
assertEquals(before - 1, animals.count());
}
@Test
- public void testDeleteByIdJoinedAnimal() {
+ public void testRemoveByIdJoinedAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert a new animal and then delete by ID.
var id = animals.insertAndFetchId(new JoinedDog(null, "TempDog", 10));
long before = animals.count();
- animals.deleteById(id);
+ animals.removeById(id);
assertEquals(before - 1, animals.count());
}
@@ -324,7 +324,7 @@ public void testBatchUpdateJoinedAnimals() {
}
@Test
- public void testBatchDeleteJoinedAnimals() {
+ public void testBatchRemoveJoinedAnimals() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert entities to delete.
@@ -336,12 +336,12 @@ public void testBatchDeleteJoinedAnimals() {
var result = animals.select().getResultList();
var cat = result.get(result.size() - 2);
var dog = result.get(result.size() - 1);
- animals.delete(List.of(cat, dog));
+ animals.remove(List.of(cat, dog));
assertEquals(before - 2, animals.count());
}
@Test
- public void testBatchDeleteByRefJoinedAnimals() {
+ public void testBatchRemoveByRefJoinedAnimals() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(JoinedAnimal.class);
// Insert entities to delete by ref.
@@ -354,7 +354,7 @@ public void testBatchDeleteByRefJoinedAnimals() {
List<Ref<JoinedAnimal>> refs = ids.stream()
.map(id -> Ref.of(JoinedAnimal.class, id))
.toList();
- animals.deleteByRef(refs);
+ animals.removeByRef(refs);
assertEquals(before - 2, animals.count());
}
@@ -425,7 +425,7 @@ public void testUpdateNodscBird() {
}
@Test
- public void testDeleteNodscAnimal() {
+ public void testRemoveNodscAnimal() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(NodscAnimal.class);
// Insert a new animal and then delete it.
@@ -433,7 +433,7 @@ public void testDeleteNodscAnimal() {
long before = animals.count();
var result = animals.select().getResultList();
var last = result.getLast();
- animals.delete(last);
+ animals.remove(last);
assertEquals(before - 1, animals.count());
}
@@ -462,7 +462,7 @@ public void testBatchInsertNodscAnimals() {
}
@Test
- public void testBatchDeleteNodscAnimals() {
+ public void testBatchRemoveNodscAnimals() {
var orm = ORMTemplate.of(dataSource);
var animals = orm.entity(NodscAnimal.class);
// Insert entities to delete (mixed subtypes).
@@ -476,7 +476,7 @@ public void testBatchDeleteNodscAnimals() {
var cat = result.get(result.size() - 3);
var dog = result.get(result.size() - 2);
var bird = result.get(result.size() - 1);
- animals.delete(List.of(cat, dog, bird));
+ animals.remove(List.of(cat, dog, bird));
assertEquals(before - 3, animals.count());
}
diff --git a/storm-jackson2/pom.xml b/storm-jackson2/pom.xml
index a06fa7a35..78fd620cd 100644
--- a/storm-jackson2/pom.xml
+++ b/storm-jackson2/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-jackson2
diff --git a/storm-jackson3/pom.xml b/storm-jackson3/pom.xml
index 5aeacb4cb..c3883b7e8 100644
--- a/storm-jackson3/pom.xml
+++ b/storm-jackson3/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-jackson3
diff --git a/storm-java21/pom.xml b/storm-java21/pom.xml
index 79aa2f0d7..8b22c4bf0 100644
--- a/storm-java21/pom.xml
+++ b/storm-java21/pom.xml
@@ -6,7 +6,7 @@
st.orm
storm-framework
- 1.11.0
+ ${revision}
../pom.xml
storm-java21
diff --git a/storm-java21/src/main/java/st/orm/repository/EntityRepository.java b/storm-java21/src/main/java/st/orm/repository/EntityRepository.java
index ca22c1b11..6a523267c 100644
--- a/storm-java21/src/main/java/st/orm/repository/EntityRepository.java
+++ b/storm-java21/src/main/java/st/orm/repository/EntityRepository.java
@@ -146,11 +146,11 @@
*
* ]Delete
*
- * Delete user in the database. The repository also supports updates for multiple entries in batch mode by passing a
- * list entities or primary keys. Alternatively, deletion can be executed in using a stream of entities.
+ * <p>Remove user from the database. The repository also supports removals for multiple entries in batch mode by passing a
+ * list entities or primary keys. Alternatively, removal can be executed using a stream of entities.
*
{@code
* User user = ...;
- * userRepository.delete(user);
+ * userRepository.remove(user);
* }
*
* Also here, the QueryBuilder can be used to create specialized statement, for instance, to delete all users where
@@ -474,55 +474,55 @@ public interface EntityRepository, ID> extends Repository {
E upsertAndFetch(@Nonnull E entity);
/**
- * Deletes an entity from the database.
+ * Removes an entity from the database.
*
* This method removes an existing entity from the database. The entity must exist in the database; if it does
- * not, a {@link PersistenceException} is thrown. Unlike {@link #deleteById} and {@link #deleteByRef}, this method
+ * not, a {@link PersistenceException} is thrown. Unlike {@link #removeById} and {@link #removeByRef}, this method
* is strict rather than idempotent, because possessing the full entity implies the caller expects it to exist.
*
- * @param entity the entity to delete. The entity must exist in the database and should be correctly identified by
+ * @param entity the entity to remove. The entity must exist in the database and should be correctly identified by
* its primary key.
- * @throws PersistenceException if the deletion operation fails. Reasons for failure might include the entity not
+ * @throws PersistenceException if the removal operation fails. Reasons for failure might include the entity not
* being found in the database, violations of database constraints, connectivity
* issues, or if the entity parameter is null.
*/
- void delete(@Nonnull E entity);
+ void remove(@Nonnull E entity);
/**
- * Deletes an entity from the database based on its primary key.
+ * Removes an entity from the database based on its primary key.
*
* This method ensures the entity with the given primary key is removed from the database. If the entity does
* not exist, the operation completes successfully without error (idempotent behavior).
*
- * @param id the primary key of the entity to delete.
- * @throws PersistenceException if the deletion operation fails due to violations of database constraints,
+ * @param id the primary key of the entity to remove.
+ * @throws PersistenceException if the removal operation fails due to violations of database constraints,
* connectivity issues, or if the id parameter is null.
*/
- void deleteById(@Nonnull ID id);
+ void removeById(@Nonnull ID id);
/**
- * Deletes an entity from the database by its reference.
+ * Removes an entity from the database by its reference.
*
* This method ensures the entity identified by the given reference is removed from the database. If the entity
* does not exist, the operation completes successfully without error (idempotent behavior).
*
- * @param ref the reference to the entity to delete.
- * @throws PersistenceException if the deletion operation fails due to violations of database constraints,
+ * @param ref the reference to the entity to remove.
+ * @throws PersistenceException if the removal operation fails due to violations of database constraints,
* connectivity issues, or if the ref parameter is null.
*/
- void deleteByRef(@Nonnull Ref ref);
+ void removeByRef(@Nonnull Ref ref);
/**
- * Deletes all entities from the database.
+ * Removes all entities from the database.
*
- * This method performs a bulk deletion operation, removing all instances of the entities managed by this
+ * <p>This method performs a bulk removal operation, removing all instances of the entities managed by this
* repository from the database.
*
- * @throws PersistenceException if the bulk deletion operation fails. Failure can occur for several reasons,
+ * @throws PersistenceException if the bulk removal operation fails. Failure can occur for several reasons,
* including but not limited to database access issues, transaction failures, or
- * underlying database constraints that prevent the deletion of certain records.
+ * underlying database constraints that prevent the removal of certain records.
*/
- void deleteAll();
+ void removeAll();
// Singular findBy methods.
@@ -666,16 +666,40 @@ public interface EntityRepository, ID> extends Repository {
*/
Page page(@Nonnull Pageable pageable);
+ /**
+ * Returns a page of entity refs using offset-based pagination.
+ *
+ * Page numbers are zero-based: pass {@code 0} for the first page.
+ *
+ * @param pageNumber the zero-based page index.
+ * @param pageSize the maximum number of refs per page.
+ * @return a page containing the ref results and pagination metadata.
+ * @since 1.10
+ */
+ Page<Ref<E>> pageRef(int pageNumber, int pageSize);
+
+ /**
+ * Returns a page of entity refs using offset-based pagination.
+ *
+ * <p>This method executes two queries: a {@code SELECT COUNT(*)} to determine the total number of entities, and
+ * a query with OFFSET and LIMIT to fetch the refs for the requested page.
+ *
+ * @param pageable the pagination request specifying page number and page size.
+ * @return a page containing the ref results and pagination metadata.
+ * @since 1.10
+ */
+ Page<Ref<E>> pageRef(@Nonnull Pageable pageable);
+
/**
* Executes a scroll request from a {@link Scrollable} token, typically obtained from
- * {@link Window#nextScrollable()} or {@link Window#previousScrollable()}.
+ * {@link Window#next()} or {@link Window#previous()}.
*
* @param scrollable the scroll request containing cursor state, key, sort, size, and direction.
* @return a window containing the results and navigation tokens.
* @since 1.11
*/
default Window<E> scroll(@Nonnull Scrollable<E> scrollable) {
- return Window.of(select().scroll(scrollable));
+ return select().scroll(scrollable);
}
// List based methods.
@@ -902,32 +926,32 @@ default Window scroll(@Nonnull Scrollable scrollable) {
List upsertAndFetch(@Nonnull Iterable entities);
/**
- * Deletes a collection of entities from the database in batches.
+ * Removes a collection of entities from the database in batches.
*
* <p>This method processes the provided entities in batches to optimize performance when handling larger collections,
* reducing database overhead. For each entity in the collection, the method removes the corresponding record from
- * the database, if it exists. Batch processing ensures efficient handling of deletions, particularly for large data sets.
+ * the database, if it exists. Batch processing ensures efficient handling of removals, particularly for large data sets.
*
- * @param entities an iterable collection of entities to be deleted. Each entity in the collection must be non-null
- * and represent a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param entities an iterable collection of entities to be removed. Each entity in the collection must be non-null
+ * and represent a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void delete(@Nonnull Iterable entities);
+ void remove(@Nonnull Iterable entities);
/**
- * Deletes a collection of entities from the database in batches.
+ * Removes a collection of entities from the database in batches.
*
* This method processes the provided entities in batches to optimize performance when handling larger collections,
* reducing database overhead. For each entity in the collection, the method removes the corresponding record from
- * the database, if it exists. Batch processing ensures efficient handling of deletions, particularly for large data sets.
+ * the database, if it exists. Batch processing ensures efficient handling of removals, particularly for large data sets.
*
- * @param refs an iterable collection of entities to be deleted. Each entity in the collection must be non-null
- * and represent a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
+ * @param refs an iterable collection of entities to be removed. Each entity in the collection must be non-null
+ * and represent a valid database record for removal.
+ * @throws PersistenceException if the removal operation fails due to database issues, such as connectivity problems
* or constraints violations.
*/
- void deleteByRef(@Nonnull Iterable<Ref<E>> refs);
+ void removeByRef(@Nonnull Iterable<Ref<E>> refs);
// Stream based methods.
@@ -943,120 +967,6 @@ default Window scroll(@Nonnull Scrollable scrollable) {
// processed. The BatchCallback approach prevents the caller from accidentally misusing the API.
//
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
 -     * This method executes queries in batches, depending on the number of primary keys in the specified ids stream.
- * This optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities.
- * The batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param ids a stream of entity IDs to retrieve from the repository.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of ids in the input stream. If an id does not correspond to any entity
- * in the database, it will simply be skipped, and no corresponding entity will be included in the returned
- * stream. If the same entity is requested multiple times, it may be included in the stream multiple times
- * if it is part of a separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectById(@Nonnull Stream ids);
-
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
- * This method executes queries in batches, depending on the number of primary keys in the specified ids stream.
- * This optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities.
- * The batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param refs a stream of refs to retrieve from the repository.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of ids in the input stream. If an id does not correspond to any entity
- * in the database, it will simply be skipped, and no corresponding entity will be included in the returned
- * stream. If the same entity is requested multiple times, it may be included in the stream multiple times
- * if it is part of a separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectByRef(@Nonnull Stream[> refs);
-
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
 -     * This method executes queries in batches, with the batch size determined by the provided parameter. This
- * optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities. The
- * batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param ids a stream of entity IDs to retrieve from the repository.
- * @param chunkSize the number of primary keys to include in each batch. This parameter determines the size of the
- * batches used to execute the selection operation. A larger batch size can improve performance, especially when
- * dealing with large sets of primary keys.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of refs in the input stream. If an id does not correspond to any entity in the
- * database, it will simply be skipped, and no corresponding entity will be included in the returned stream. If the
- * same entity is requested multiple times, it may be included in the stream multiple times if it is part of a
- * separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectById(@Nonnull Stream ids, int chunkSize);
-
- /**
- * Retrieves a stream of entities based on their primary keys.
- *
- * This method executes queries in batches, with the batch size determined by the provided parameter. This
- * optimization aims to reduce the overhead of executing multiple queries and efficiently retrieve entities. The
- * batching strategy enhances performance, particularly when dealing with large sets of primary keys.
- *
- * The resulting stream is lazily loaded, meaning that the entities are only retrieved from the database as they
- * are consumed by the stream. This approach is efficient and minimizes the memory footprint, especially when
- * dealing with large volumes of entities.
- *
- * Note: Calling this method does trigger the execution of the underlying
- * query, so it should only be invoked when the query is intended to run. Since the stream holds resources open
- * while in use, it must be closed after usage to prevent resource leaks. As the stream is {@code AutoCloseable}, it
- * is recommended to use it within a {@code try-with-resources} block.
- *
- * @param refs a stream of refs to retrieve from the repository.
- * @param chunkSize the number of primary keys to include in each batch. This parameter determines the size of the
- * batches used to execute the selection operation. A larger batch size can improve performance, especially when
- * dealing with large sets of primary keys.
- * @return a stream of entities corresponding to the provided primary keys. The order of entities in the stream is
- * not guaranteed to match the order of refs in the input stream. If an id does not correspond to any entity in the
- * database, it will simply be skipped, and no corresponding entity will be included in the returned stream. If the
- * same entity is requested multiple times, it may be included in the stream multiple times if it is part of a
- * separate batch.
- * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
- * connectivity.
- */
- Stream selectByRef(@Nonnull Stream[> refs, int chunkSize);
-
/**
* Counts the number of entities identified by the provided stream of IDs using the default batch size.
*
@@ -1245,68 +1155,36 @@ default Window scroll(@Nonnull Scrollable scrollable) {
void upsert(@Nonnull Stream entities, int batchSize);
/**
- * Deletes a stream of entities from the database in batches.
- *
 -     * This method processes the provided stream of entities in batches to optimize performance for larger
- * data sets, reducing database overhead during deletion. For each entity in the stream, the method removes
- * the corresponding record from the database, if it exists. Batch processing allows efficient handling
- * of deletions, particularly for large collections of entities.
+ * Removes a stream of entities from the database in batches.
*
- * @param entities a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
- * or constraints violations.
+ * @param entities a stream of entities to be removed.
+ * @throws PersistenceException if the removal operation fails.
*/
- void delete(@Nonnull Stream entities);
+ void remove(@Nonnull Stream entities);
/**
- * Deletes a stream of entities from the database in configurable batch sizes.
- *
- * This method processes the provided stream of entities in batches, with the size of each batch specified
- * by the `batchSize` parameter. This allows for control over the number of entities deleted in each database
- * operation, optimizing performance and memory usage based on system requirements. For each entity in the
- * stream, the method removes the corresponding record from the database, if it exists.
+ * Removes a stream of entities from the database in configurable batch sizes.
*
- * @param entities a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
- * @param batchSize the number of entities to process in each batch. Larger batch sizes may improve performance
- * but require more memory, while smaller batch sizes may reduce memory usage but increase
- * the number of database operations.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
- * or constraints violations.
+ * @param entities a stream of entities to be removed.
+ * @param batchSize the number of entities to process in each batch.
+ * @throws PersistenceException if the removal operation fails.
*/
- void delete(@Nonnull Stream entities, int batchSize);
+ void remove(@Nonnull Stream entities, int batchSize);
/**
- * Deletes a stream of entities from the database in batches.
+ * Removes a stream of entities from the database in batches.
*
- * This method processes the provided stream of entities in batches to optimize performance for larger
- * data sets, reducing database overhead during deletion. For each entity in the stream, the method removes
- * the corresponding record from the database, if it exists. Batch processing allows efficient handling
- * of deletions, particularly for large collections of entities.
- *
- * @param refs a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * a valid database record for deletion.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
- * or constraints violations.
+ * @param refs a stream of entities to be removed.
+ * @throws PersistenceException if the removal operation fails.
*/
 -    void deleteByRef(@Nonnull Stream<Ref<E>> refs);
 +    void removeByRef(@Nonnull Stream<Ref<E>> refs);
/**
- * Deletes a stream of entities from the database in configurable batch sizes.
+ * Removes a stream of entities from the database in configurable batch sizes.
*
 -     * This method processes the provided stream of entities in batches, with the size of each batch specified
- * by the `batchSize` parameter. This allows for control over the number of entities deleted in each database
- * operation, optimizing performance and memory usage based on system requirements. For each entity in the
- * stream, the method removes the corresponding record from the database, if it exists.
- *
- * @param refs a stream of entities to be deleted. Each entity in the stream must be non-null and represent
- * valid database record for deletion.
- * @param batchSize the number of entities to process in each batch. Larger batch sizes may improve performance
- * but require more memory, while smaller batch sizes may reduce memory usage but increase
- * the number of database operations.
- * @throws PersistenceException if the deletion operation fails due to database issues, such as connectivity problems
- * or constraints violations.
+ * @param refs a stream of entities to be removed.
+ * @param batchSize the number of entities to process in each batch.
+ * @throws PersistenceException if the removal operation fails.
*/
 -    void deleteByRef(@Nonnull Stream<Ref<E>> refs, int batchSize);
 +    void removeByRef(@Nonnull Stream<Ref<E>> refs, int batchSize);
}
diff --git a/storm-java21/src/main/java/st/orm/repository/ProjectionRepository.java b/storm-java21/src/main/java/st/orm/repository/ProjectionRepository.java
index 28d36f58f..a00c62aca 100644
--- a/storm-java21/src/main/java/st/orm/repository/ProjectionRepository.java
+++ b/storm-java21/src/main/java/st/orm/repository/ProjectionRepository.java
@@ -347,14 +347,14 @@ public interface ProjectionRepository], ID> extends Repo
/**
* Executes a scroll request from a {@link Scrollable} token, typically obtained from
- * {@link Window#nextScrollable()} or {@link Window#previousScrollable()}.
+ * {@link Window#next()} or {@link Window#previous()}.
*
* @param scrollable the scroll request containing cursor state, key, sort, size, and direction.
* @return a window containing the results and navigation tokens.
* @since 1.11
*/
default Window
scroll(@Nonnull Scrollable
scrollable) {
- return Window.of(select().scroll(scrollable));
+ return select().scroll(scrollable);
}
// List based methods.
@@ -372,6 +372,24 @@ default Window
scroll(@Nonnull Scrollable
scrollable) {
*/
List
findAll();
+ /**
+ * Returns a list of refs to all projections of the type supported by this repository. Each element in the list
+ * represents a lightweight reference to a projection in the database, containing only the primary key.
+ *
 +     * This method is useful when you need to retrieve all projection identifiers without loading the full
+ * projection data. The complete projection can be fetched on demand by calling {@link Ref#fetch()} on any of
+ * the returned refs.
+ *
+ * Note: While this method is more memory-efficient than {@link #findAll()} since it only
+ * loads primary keys, loading all refs into memory at once can still be memory-intensive for very large tables.
+ *
+ * @return a list of refs to all projections of the type supported by this repository.
+ * @throws PersistenceException if the selection operation fails due to underlying database issues, such as
+ * connectivity.
+ * @since 1.3
+ */
+ List[> findAllRef();
+
/**
* Retrieves a list of projections based on their primary keys.
*
diff --git a/storm-java21/src/main/java/st/orm/repository/impl/EntityRepositoryImpl.java b/storm-java21/src/main/java/st/orm/repository/impl/EntityRepositoryImpl.java
index decbfc705..9baed2331 100644
--- a/storm-java21/src/main/java/st/orm/repository/impl/EntityRepositoryImpl.java
+++ b/storm-java21/src/main/java/st/orm/repository/impl/EntityRepositoryImpl.java
@@ -168,23 +168,23 @@ public E upsertAndFetch(@Nonnull E entity) {
}
@Override
- public void delete(@Nonnull E entity) {
- core.delete(entity);
+ public void remove(@Nonnull E entity) {
+ core.remove(entity);
}
@Override
- public void deleteById(@Nonnull ID id) {
- core.deleteById(id);
+ public void removeById(@Nonnull ID id) {
+ core.removeById(id);
}
@Override
- public void deleteByRef(@Nonnull Ref ref) {
- core.deleteByRef(ref);
+ public void removeByRef(@Nonnull Ref ref) {
+ core.removeByRef(ref);
}
@Override
- public void deleteAll() {
- core.deleteAll();
+ public void removeAll() {
+ core.removeAll();
}
@Override
@@ -239,9 +239,19 @@ public Page page(@Nonnull Pageable pageable) {
return core.page(pageable);
}
+ @Override
+ public Page][> pageRef(int pageNumber, int pageSize) {
+ return core.pageRef(pageNumber, pageSize);
+ }
+
+ @Override
+ public Page][> pageRef(@Nonnull Pageable pageable) {
+ return core.pageRef(pageable);
+ }
+
@Override
public Window scroll(@Nonnull Scrollable scrollable) {
- return Window.of(select().scroll(scrollable));
+ return select().scroll(scrollable);
}
@Override
@@ -310,33 +320,13 @@ public List upsertAndFetch(@Nonnull Iterable entities) {
}
@Override
- public void delete(@Nonnull Iterable entities) {
- core.delete(entities);
- }
-
- @Override
- public void deleteByRef(@Nonnull Iterable][> refs) {
- core.deleteByRef(refs);
- }
-
- @Override
- public Stream selectById(@Nonnull Stream ids) {
- return core.selectById(ids);
- }
-
- @Override
- public Stream selectByRef(@Nonnull Stream][> refs) {
- return core.selectByRef(refs);
- }
-
- @Override
- public Stream selectById(@Nonnull Stream ids, int chunkSize) {
- return core.selectById(ids, chunkSize);
+ public void remove(@Nonnull Iterable entities) {
+ core.remove(entities);
}
@Override
- public Stream selectByRef(@Nonnull Stream][> refs, int chunkSize) {
- return core.selectByRef(refs, chunkSize);
+ public void removeByRef(@Nonnull Iterable][> refs) {
+ core.removeByRef(refs);
}
@Override
@@ -400,23 +390,23 @@ public void upsert(@Nonnull Stream entities, int batchSize) {
}
@Override
- public void delete(@Nonnull Stream entities) {
- core.delete(entities);
+ public void remove(@Nonnull Stream entities) {
+ core.remove(entities);
}
@Override
- public void delete(@Nonnull Stream entities, int batchSize) {
- core.delete(entities, batchSize);
+ public void remove(@Nonnull Stream entities, int batchSize) {
+ core.remove(entities, batchSize);
}
@Override
- public void deleteByRef(@Nonnull Stream][> refs) {
- core.deleteByRef(refs);
+ public void removeByRef(@Nonnull Stream][> refs) {
+ core.removeByRef(refs);
}
@Override
- public void deleteByRef(@Nonnull Stream][> refs, int batchSize) {
- core.deleteByRef(refs, batchSize);
+ public void removeByRef(@Nonnull Stream][> refs, int batchSize) {
+ core.removeByRef(refs, batchSize);
}
@Override
diff --git a/storm-java21/src/main/java/st/orm/repository/impl/ProjectionRepositoryImpl.java b/storm-java21/src/main/java/st/orm/repository/impl/ProjectionRepositoryImpl.java
index 50e8eecf6..4b9086b15 100644
--- a/storm-java21/src/main/java/st/orm/repository/impl/ProjectionRepositoryImpl.java
+++ b/storm-java21/src/main/java/st/orm/repository/impl/ProjectionRepositoryImpl.java
@@ -138,7 +138,7 @@ public Page][> pageRef(@Nonnull Pageable pageable) {
@Override
public Window] scroll(@Nonnull Scrollable
scrollable) {
- return Window.of(select().scroll(scrollable));
+ return select().scroll(scrollable);
}
@Override
@@ -186,6 +186,11 @@ public List
findAll() {
return core.findAll();
}
+ @Override
+ public List[> findAllRef() {
+ return core.findAllRef();
+ }
+
@Override
public List] findAllById(@Nonnull Iterable ids) {
return core.findAllById(ids);
diff --git a/storm-java21/src/main/java/st/orm/template/QueryBuilder.java b/storm-java21/src/main/java/st/orm/template/QueryBuilder.java
index 5f2a40176..4c5f7dcf3 100644
--- a/storm-java21/src/main/java/st/orm/template/QueryBuilder.java
+++ b/storm-java21/src/main/java/st/orm/template/QueryBuilder.java
@@ -26,7 +26,6 @@
import java.util.stream.Stream;
import st.orm.Data;
import st.orm.JoinType;
-import st.orm.MappedWindow;
import st.orm.Metamodel;
import st.orm.NoResultException;
import st.orm.NonUniqueResultException;
@@ -781,25 +780,25 @@ public final Page page(@Nonnull Pageable pageable, long totalCount) {
* most {@code size} results along with a {@code hasNext} flag. The caller is responsible for managing any WHERE
* and ORDER BY clauses externally.
*
- * The returned window does not carry navigation tokens ({@code nextScrollable} and
- * {@code previousScrollable} are {@code null}).
+ * The returned window does not carry navigation tokens ({@code next()} and
+ * {@code previous()} return {@code null}).
*
* @param size the maximum number of results to include in the window (must be positive).
* @return a window containing the results and a flag indicating whether more results exist.
* @throws IllegalArgumentException if {@code size} is not positive.
* @since 1.11
*/
- public abstract MappedWindow scroll(int size);
+ public abstract Window scroll(int size);
/**
* Executes a scroll request from a {@link Scrollable} token, typically obtained from
- * {@link Window#nextScrollable()} or {@link Window#previousScrollable()}.
+ * {@link Window#next()} or {@link Window#previous()}.
*
* @param scrollable the scroll request containing cursor state, key, sort, size, and direction.
* @return a window containing the results and navigation tokens.
* @since 1.11
*/
- public abstract MappedWindow scroll(@Nonnull Scrollable scrollable);
+ public abstract Window scroll(@Nonnull Scrollable scrollable);
//
// Execution methods.
diff --git a/storm-java21/src/main/java/st/orm/template/impl/QueryBuilderImpl.java b/storm-java21/src/main/java/st/orm/template/impl/QueryBuilderImpl.java
index 224ddcbe8..4001c6ccb 100644
--- a/storm-java21/src/main/java/st/orm/template/impl/QueryBuilderImpl.java
+++ b/storm-java21/src/main/java/st/orm/template/impl/QueryBuilderImpl.java
@@ -27,12 +27,12 @@
import java.util.stream.Stream;
import st.orm.Data;
import st.orm.JoinType;
-import st.orm.MappedWindow;
import st.orm.Metamodel;
import st.orm.Operator;
import st.orm.PersistenceException;
import st.orm.Ref;
import st.orm.Scrollable;
+import st.orm.Window;
import st.orm.core.spi.ORMReflection;
import st.orm.core.spi.Providers;
import st.orm.core.template.TemplateString;
@@ -570,12 +570,12 @@ public QueryBuilder offset(int offset) {
}
@Override
- public MappedWindow scroll(int size) {
+ public Window scroll(int size) {
return core.scroll(size);
}
@Override
- public MappedWindow scroll(@Nonnull Scrollable scrollable) {
+ public Window scroll(@Nonnull Scrollable scrollable) {
return core.scroll(scrollable);
}
diff --git a/storm-java21/src/test/java/st/orm/template/ORMTemplateTest.java b/storm-java21/src/test/java/st/orm/template/ORMTemplateTest.java
index 1ace0b056..a778bcdb3 100644
--- a/storm-java21/src/test/java/st/orm/template/ORMTemplateTest.java
+++ b/storm-java21/src/test/java/st/orm/template/ORMTemplateTest.java
@@ -196,30 +196,30 @@ public void testEntityUpdateAndFetch() {
}
@Test
- public void testEntityDelete() {
+ public void testEntityRemove() {
EntityRepository cities = orm.entity(City.class);
City inserted = cities.insertAndFetch(new City(null, "ToDelete"));
long countBefore = cities.count();
- cities.delete(inserted);
+ cities.remove(inserted);
long countAfter = cities.count();
assertEquals(countBefore - 1, countAfter);
}
@Test
- public void testEntityDeleteById() {
+ public void testEntityRemoveById() {
EntityRepository cities = orm.entity(City.class);
City inserted = cities.insertAndFetch(new City(null, "DeleteById"));
- cities.deleteById(inserted.id());
+ cities.removeById(inserted.id());
assertFalse(cities.findById(inserted.id()).isPresent());
}
@Test
- public void testEntityDeleteAll() {
+ public void testEntityRemoveAll() {
// Visit has no incoming FK constraints, so we can safely deleteAll
var localOrm = ORMTemplate.of(dataSource);
EntityRepository visits = localOrm.entity(Visit.class);
assertTrue(visits.count() > 0);
- visits.deleteAll();
+ visits.removeAll();
assertEquals(0, visits.count());
}
@@ -320,38 +320,6 @@ public void testEntitySelectAllRef() {
}
}
- @Test
- public void testEntitySelectById() {
- EntityRepository cities = orm.entity(City.class);
- try (Stream stream = cities.selectById(Stream.of(1, 2, 3))) {
- assertEquals(3, stream.count());
- }
- }
-
- @Test
- public void testEntitySelectByIdWithChunkSize() {
- EntityRepository cities = orm.entity(City.class);
- try (Stream stream = cities.selectById(Stream.of(1, 2, 3, 4), 2)) {
- assertEquals(4, stream.count());
- }
- }
-
- @Test
- public void testEntitySelectByRef() {
- EntityRepository cities = orm.entity(City.class);
- try (Stream stream = cities.selectByRef(Stream.of(cities.ref(1), cities.ref(2)))) {
- assertEquals(2, stream.count());
- }
- }
-
- @Test
- public void testEntitySelectByRefWithChunkSize() {
- EntityRepository cities = orm.entity(City.class);
- try (Stream stream = cities.selectByRef(Stream.of(cities.ref(1), cities.ref(2)), 1)) {
- assertEquals(2, stream.count());
- }
- }
-
@Test
public void testEntityCountById() {
EntityRepository cities = orm.entity(City.class);
@@ -425,30 +393,30 @@ public void testEntityUpdateAndFetchIterable() {
}
@Test
- public void testEntityDeleteIterable() {
+ public void testEntityRemoveIterable() {
EntityRepository cities = orm.entity(City.class);
City city1 = cities.insertAndFetch(new City(null, "DelIter1"));
City city2 = cities.insertAndFetch(new City(null, "DelIter2"));
long countBefore = cities.count();
- cities.delete(List.of(city1, city2));
+ cities.remove(List.of(city1, city2));
assertEquals(countBefore - 2, cities.count());
}
@Test
- public void testEntityDeleteByRefIterable() {
+ public void testEntityRemoveByRefIterable() {
EntityRepository cities = orm.entity(City.class);
City city1 = cities.insertAndFetch(new City(null, "DelRef1"));
City city2 = cities.insertAndFetch(new City(null, "DelRef2"));
long countBefore = cities.count();
- cities.deleteByRef(List.of(cities.ref(city1.id()), cities.ref(city2.id())));
+ cities.removeByRef(List.of(cities.ref(city1.id()), cities.ref(city2.id())));
assertEquals(countBefore - 2, cities.count());
}
@Test
- public void testEntityDeleteByRef() {
+ public void testEntityRemoveByRef() {
EntityRepository cities = orm.entity(City.class);
City inserted = cities.insertAndFetch(new City(null, "DelByRef"));
- cities.deleteByRef(cities.ref(inserted.id()));
+ cities.removeByRef(cities.ref(inserted.id()));
assertFalse(cities.findById(inserted.id()).isPresent());
}
@@ -486,39 +454,39 @@ public void testEntityUpdateStreamWithBatchSize() {
}
@Test
- public void testEntityDeleteStream() {
+ public void testEntityRemoveStream() {
EntityRepository cities = orm.entity(City.class);
City city1 = cities.insertAndFetch(new City(null, "DelStream1"));
City city2 = cities.insertAndFetch(new City(null, "DelStream2"));
long countBefore = cities.count();
- cities.delete(Stream.of(city1, city2));
+ cities.remove(Stream.of(city1, city2));
assertEquals(countBefore - 2, cities.count());
}
@Test
- public void testEntityDeleteStreamWithBatchSize() {
+ public void testEntityRemoveStreamWithBatchSize() {
EntityRepository cities = orm.entity(City.class);
City city1 = cities.insertAndFetch(new City(null, "DelSB1"));
long countBefore = cities.count();
- cities.delete(Stream.of(city1), 1);
+ cities.remove(Stream.of(city1), 1);
assertEquals(countBefore - 1, cities.count());
}
@Test
- public void testEntityDeleteByRefStream() {
+ public void testEntityRemoveByRefStream() {
EntityRepository cities = orm.entity(City.class);
City city1 = cities.insertAndFetch(new City(null, "DelRefStream1"));
long countBefore = cities.count();
- cities.deleteByRef(Stream.of(cities.ref(city1.id())));
+ cities.removeByRef(Stream.of(cities.ref(city1.id())));
assertEquals(countBefore - 1, cities.count());
}
@Test
- public void testEntityDeleteByRefStreamWithBatchSize() {
+ public void testEntityRemoveByRefStreamWithBatchSize() {
EntityRepository cities = orm.entity(City.class);
City city1 = cities.insertAndFetch(new City(null, "DelRefSB1"));
long countBefore = cities.count();
- cities.deleteByRef(Stream.of(cities.ref(city1.id())), 1);
+ cities.removeByRef(Stream.of(cities.ref(city1.id())), 1);
assertEquals(countBefore - 1, cities.count());
}
diff --git a/storm-java21/src/test/java/st/orm/template/QueryBuilderTest.java b/storm-java21/src/test/java/st/orm/template/QueryBuilderTest.java
index 0976190a5..fff4767c9 100644
--- a/storm-java21/src/test/java/st/orm/template/QueryBuilderTest.java
+++ b/storm-java21/src/test/java/st/orm/template/QueryBuilderTest.java
@@ -31,12 +31,12 @@
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.jdbc.Sql;
import org.springframework.test.context.junit.jupiter.SpringExtension;
-import st.orm.MappedWindow;
import st.orm.NoResultException;
import st.orm.NonUniqueResultException;
import st.orm.PersistenceException;
import st.orm.Ref;
import st.orm.Scrollable;
+import st.orm.Window;
import st.orm.template.model.City;
import st.orm.template.model.City_;
import st.orm.template.model.Owner;
@@ -500,14 +500,14 @@ public void testGetSingleResultThrowsNonUnique() {
@Test
public void testScroll() {
- MappedWindow window = orm.entity(City.class).select().scroll(3);
+ Window window = orm.entity(City.class).select().scroll(3);
assertEquals(3, window.content().size());
assertTrue(window.hasNext());
}
@Test
public void testScrollNoMore() {
- MappedWindow window = orm.entity(City.class).select().scroll(100);
+ Window window = orm.entity(City.class).select().scroll(100);
assertEquals(6, window.content().size());
assertFalse(window.hasNext());
}
@@ -773,7 +773,7 @@ public void testOrderByDescendingTemplate() {
@Test
public void testScrollWithMetamodelKey() {
- MappedWindow window = orm.entity(City.class).select()
+ Window