Skip to content

Commit 8492540

Browse files
committed
MLE-27304 Timestamp key is now optional for incremental write
1 parent 7f87ad8 commit 8492540

File tree

3 files changed

+13
-9
lines changed

3 files changed

+13
-9
lines changed

marklogic-client-api/src/main/java/com/marklogic/client/datamovement/filter/IncrementalWriteFilter.java

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ public static Builder newBuilder() {
4646
public static class Builder {
4747

4848
private String hashKeyName = "incrementalWriteHash";
49-
private String timestampKeyName = "incrementalWriteTimestamp";
49+
private String timestampKeyName;
5050
private boolean canonicalizeJson = true;
5151
private Consumer<DocumentWriteOperation[]> skippedDocumentsConsumer;
5252
private String[] jsonExclusions;
@@ -67,13 +67,11 @@ public Builder hashKeyName(String keyName) {
6767
}
6868

6969
/**
70-
* @param keyName the name of the MarkLogic metadata key that will hold the timestamp value; defaults to "incrementalWriteTimestamp".
70+
* @param keyName the name of the MarkLogic metadata key that will hold the timestamp value;
71+
* defaults to null, which means no timestamp will be stored.
7172
*/
7273
public Builder timestampKeyName(String keyName) {
73-
// Don't let user shoot themselves in the foot with an empty key name.
74-
if (keyName != null && !keyName.trim().isEmpty()) {
75-
this.timestampKeyName = keyName;
76-
}
74+
this.timestampKeyName = keyName;
7775
return this;
7876
}
7977

@@ -323,7 +321,9 @@ protected static DocumentWriteOperation addHashToMetadata(DocumentWriteOperation
323321
}
324322

325323
newMetadata.getMetadataValues().put(hashKeyName, Long.toUnsignedString(hash));
326-
newMetadata.getMetadataValues().put(timestampKeyName, timestamp);
324+
if (timestampKeyName != null && !timestampKeyName.trim().isEmpty()) {
325+
newMetadata.getMetadataValues().put(timestampKeyName, timestamp);
326+
}
327327

328328
return new DocumentWriteOperationImpl(op.getUri(), newMetadata, op.getContent(), op.getTemporalDocumentURI());
329329
}

marklogic-client-api/src/test/java/com/marklogic/client/datamovement/filter/AbstractIncrementalWriteTest.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ void setup() {
3737
// Default filter implementation; should be suitable for most tests.
3838
filter = IncrementalWriteFilter.newBuilder()
3939
.onDocumentsSkipped(docs -> skippedCount.addAndGet(docs.length))
40+
.timestampKeyName("incrementalWriteTimestamp")
4041
.build();
4142
}
4243

marklogic-client-api/src/test/java/com/marklogic/client/datamovement/filter/IncrementalWriteTest.java

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,8 @@ void customTimestampKeyName() {
157157

158158
/**
159159
* The thought for this test is that if the user passes null in (which could happen via our Spark connector),
160-
* they're breaking the feature. So don't let them do that - ignore null and use the default values.
160+
* they're breaking the feature. So don't let them do that - ignore null and use the default value for the hash
161
* key name. However, null is still valid for the timestamp key name, since that is an optional value.
161162
*/
162163
@Test
163164
void nullIsIgnoredForKeyNames() {
@@ -172,7 +173,7 @@ void nullIsIgnoredForKeyNames() {
172173
new DocumentMetadataHandle());
173174

174175
assertNotNull(metadata.getMetadataValues().get("incrementalWriteHash"));
175-
assertNotNull(metadata.getMetadataValues().get("incrementalWriteTimestamp"));
176+
assertFalse(metadata.getMetadataValues().containsKey("incrementalWriteTimestamp"));
176177
}
177178

178179
@Test
@@ -219,6 +220,7 @@ void binaryDocument() {
219220
void fromView() {
220221
filter = IncrementalWriteFilter.newBuilder()
221222
.fromView("javaClient", "incrementalWriteHash")
223+
.timestampKeyName("incrementalWriteTimestamp")
222224
.onDocumentsSkipped(docs -> skippedCount.addAndGet(docs.length))
223225
.build();
224226

@@ -275,6 +277,7 @@ void emptyValuesForFromView() {
275277
// Empty/null values are ignored, as long as both schema/view are empty/null. This makes life a little
276278
// easier for a connector in that the connector does not need to check for empty/null values.
277279
.fromView("", null)
280+
.timestampKeyName("incrementalWriteTimestamp")
278281
.onDocumentsSkipped(docs -> skippedCount.addAndGet(docs.length))
279282
.build();
280283

0 commit comments

Comments
 (0)