Skip to content

Commit 5adf134

Browse files
committed
Avoid Long boxing on the write path
1 parent 4c8f4d4 commit 5adf134

1 file changed

Lines changed: 9 additions & 3 deletions

File tree

parquet-hadoop/src/main/java/org/apache/parquet/hadoop/InternalParquetRecordWriter.java

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,9 @@ public long getDataSize() {
171171

172172
private void checkBlockSizeReached() throws IOException {
173173
if (recordCount >= rowGroupRecordCountThreshold) {
174-
LOG.debug("record count reaches threshold: flushing {} records to disk.", recordCount);
174+
if (LOG.isDebugEnabled()) {
175+
LOG.debug("record count reaches threshold: flushing {} records to disk.", recordCount);
176+
}
175177
flushRowGroupToStore();
176178
initStore();
177179
recordCountForNextMemCheck = min(
@@ -185,7 +187,9 @@ private void checkBlockSizeReached() throws IOException {
185187
// flush the row group if it is within ~2 records of the limit
186188
// it is much better to be slightly under size than to be over at all
187189
if (memSize > (nextRowGroupSize - 2 * recordSize)) {
188-
LOG.debug("mem size {} > {}: flushing {} records to disk.", memSize, nextRowGroupSize, recordCount);
190+
if (LOG.isDebugEnabled()) {
191+
LOG.debug("mem size {} > {}: flushing {} records to disk.", memSize, nextRowGroupSize, recordCount);
192+
}
189193
flushRowGroupToStore();
190194
initStore();
191195
recordCountForNextMemCheck = min(
@@ -201,7 +205,9 @@ private void checkBlockSizeReached() throws IOException {
201205
recordCount
202206
+ props.getMaxRowCountForPageSizeCheck() // will not look more than max records ahead
203207
);
204-
LOG.debug("Checked mem at {} will check again at: {}", recordCount, recordCountForNextMemCheck);
208+
if (LOG.isDebugEnabled()) {
209+
LOG.debug("Checked mem at {} will check again at: {}", recordCount, recordCountForNextMemCheck);
210+
}
205211
}
206212
}
207213
}

0 commit comments

Comments (0)