@@ -612,6 +612,52 @@ public void testSizeStatisticsAndStatisticsControl() throws Exception {
     }
   }
 
+  @Test
+  public void testByteStreamSplitEncodingControl() throws Exception {
+    MessageType schema = Types.buildMessage()
+        .required(FLOAT)
+        .named("float_field")
+        .required(INT32)
+        .named("int32_field")
+        .named("test_schema");
+
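+    // ParquetWriter refuses to overwrite an existing file by default, so drop the
+    // placeholder file that the TemporaryFolder rule just created.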
+    File file = temp.newFile();
+    file.delete();
+
+    Path path = new Path(file.getAbsolutePath());
+    SimpleGroupFactory factory = new SimpleGroupFactory(schema);
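+    // Turn BYTE_STREAM_SPLIT on globally (which covers the floating-point column)
+    // and opt the INT32 column in through the per-column override.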
+    try (ParquetWriter<Group> writer = ExampleParquetWriter.builder(path)
+        .withType(schema)
+        .withByteStreamSplitEncoding(true)
+        .withByteStreamSplitEncoding("int32_field", true)
+        .build()) {
+      writer.write(factory.newGroup()
+          .append("float_field", 0.3f)
+          .append("int32_field", 42));
+    }
+
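+    // Both column chunks should list BYTE_STREAM_SPLIT among their encodings.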
+    try (ParquetFileReader reader = ParquetFileReader.open(HadoopInputFile.fromPath(path, new Configuration()))) {
+      for (BlockMetaData block : reader.getFooter().getBlocks()) {
+        for (ColumnChunkMetaData column : block.getColumns()) {
+          assertTrue(column.getEncodings().contains(Encoding.BYTE_STREAM_SPLIT));
+        }
+      }
+    }
+
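+    // Read the row back to make sure the values survive the encoding round trip.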
+    try (ParquetReader<Group> reader =
+        ParquetReader.builder(new GroupReadSupport(), path).build()) {
+      Group group = reader.read();
+      assertEquals(0.3f, group.getFloat("float_field", 0), 0.0);
+      assertEquals(42, group.getInteger("int32_field", 0));
+    }
+  }
+
   @Test
   public void testV2WriteAllNullValues() throws Exception {
     testV2WriteAllNullValues(null, null);