import com.fasterxml.jackson.databind.node.ObjectNode;
import com.marklogic.client.document.*;
import com.marklogic.client.impl.DocumentWriteOperationImpl;
import com.marklogic.client.io.*;
import com.marklogic.client.test.Common;
import org.junit.jupiter.api.Test;

import java.nio.charset.StandardCharsets;
1512
@@ -156,7 +153,7 @@ void noRangeIndexForFieldWithEval() {
156153 @ Test
157154 void customTimestampKeyName () {
158155 filter = IncrementalWriteFilter .newBuilder ()
159- .hashKeyName ("incrementalWriteHash " )
156+ .hashKeyName ("myWriteHash " )
160157 .timestampKeyName ("myTimestamp" )
161158 .build ();
162159
@@ -165,8 +162,9 @@ void customTimestampKeyName() {
165162 DocumentMetadataHandle metadata = Common .client .newDocumentManager ().readMetadata ("/incremental/test/doc-1.xml" ,
166163 new DocumentMetadataHandle ());
167164
165+ assertNotNull (metadata .getMetadataValues ().get ("myWriteHash" ));
168166 assertNotNull (metadata .getMetadataValues ().get ("myTimestamp" ));
169- assertNotNull (metadata .getMetadataValues ().get ("incrementalWriteHash" ));
167+ assertFalse (metadata .getMetadataValues ().containsKey ("incrementalWriteHash" ));
170168 assertFalse (metadata .getMetadataValues ().containsKey ("incrementalWriteTimestamp" ));
171169 }
172170
@@ -190,6 +188,46 @@ void nullIsIgnoredForKeyNames() {
190188 assertNotNull (metadata .getMetadataValues ().get ("incrementalWriteTimestamp" ));
191189 }
192190
191+ @ Test
192+ void textDocument () {
193+ final DocumentWriteOperation writeOp = new DocumentWriteOperationImpl ("/incremental/test/doc.txt" , METADATA ,
194+ new StringHandle ("Hello world" ));
195+
196+ docs .add (writeOp );
197+ writeDocs (docs );
198+ assertEquals (1 , writtenCount .get ());
199+ assertEquals (0 , skippedCount .get ());
200+
201+ // Write the same text document again
202+ docs = new ArrayList <>();
203+ docs .add (writeOp );
204+ writeDocs (docs );
205+ assertEquals (1 , writtenCount .get ());
206+ assertEquals (1 , skippedCount .get (), "This is a sanity check to verify that text files work as expected. " +
207+ "Exclusions can't yet be specified for them since we only support JSON Pointer and XPath so far. It may " +
208+ "be worth supporting regex-based exclusions for text files in the future." );
209+ }
210+
211+ @ Test
212+ void binaryDocument () {
213+ byte [] binaryContent = "Binary content example" .getBytes ();
214+ final DocumentWriteOperation writeOp = new DocumentWriteOperationImpl ("/incremental/test/doc.bin" , METADATA ,
215+ new BytesHandle (binaryContent ).withFormat (Format .BINARY ));
216+
217+ docs .add (writeOp );
218+ writeDocs (docs );
219+ assertEquals (1 , writtenCount .get ());
220+ assertEquals (0 , skippedCount .get ());
221+
222+ // Write the same binary document again
223+ docs = new ArrayList <>();
224+ docs .add (writeOp );
225+ writeDocs (docs );
226+ assertEquals (1 , writtenCount .get ());
227+ assertEquals (1 , skippedCount .get (), "Another sanity check to make sure that binary documents work as " +
228+ "expected. Exclusions cannot be specified for them." );
229+ }
230+
193231 private void verifyIncrementalWriteWorks () {
194232 writeTenDocuments ();
195233 verifyDocumentsHasHashInMetadataKey ();
0 commit comments