Skip to content

Commit d9f79a2

Browse files
committed
Fix Slf4j formatting errors in sdks/java/io/google-cloud-platform
1 parent 3197d88 commit d9f79a2

34 files changed

Lines changed: 144 additions & 180 deletions

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryHelpers.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ void logBigQueryError(@Nullable Job job) {
296296
? String.format(" due to: %s", jobErrors.get(jobErrors.size() - 1).getMessage())
297297
: "";
298298

299-
LOG.error(String.format("BigQuery Error : %s %s", finalError, causativeError));
299+
LOG.error("BigQuery Error : {} {}", finalError, causativeError);
300300
}
301301
}
302302

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryIO.java

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -781,8 +781,7 @@ public static <T> TypedRead<T> read(SerializableFunction<SchemaAndRecord, T> par
781781
(writer, reader) ->
782782
new GenericDatumTransformer<>(parseFn, jsonTableSchema, writer);
783783
} catch (IOException e) {
784-
LOG.warn(
785-
String.format("Error while converting table schema %s to JSON!", input), e);
784+
LOG.warn("Error while converting table schema {} to JSON!", input, e);
786785
return null;
787786
}
788787
})

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryServicesImpl.java

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -383,7 +383,7 @@ static void startJob(
383383
return; // SUCCEEDED
384384
} catch (IOException e) {
385385
if (errorExtractor.itemAlreadyExists(e)) {
386-
LOG.info("BigQuery job " + jobRef + " already exists, will not retry inserting it:", e);
386+
LOG.info("BigQuery job {} already exists, will not retry inserting it:", jobRef, e);
387387
return; // SUCCEEDED
388388
}
389389

@@ -395,7 +395,7 @@ static void startJob(
395395
.create()
396396
: null) {
397397
// ignore and retry
398-
LOG.info("Failed to insert job " + jobRef + ", will retry:", e);
398+
LOG.info("Failed to insert job {}, will retry:", jobRef, e);
399399
}
400400
lastException = e;
401401
}
@@ -431,12 +431,11 @@ static void startJobStream(
431431
} catch (IOException e) {
432432
if (errorExtractor.itemAlreadyExists(e)) {
433433
LOG.info(
434-
"BigQuery job " + jobReference + " already exists, will not retry inserting it:",
435-
e);
434+
"BigQuery job {} already exists, will not retry inserting it:", jobReference, e);
436435
return; // SUCCEEDED
437436
}
438437
// ignore and retry
439-
LOG.info("Failed to insert job " + jobReference + ", will retry:", e);
438+
LOG.info("Failed to insert job {}, will retry:", jobReference, e);
440439
exception = e;
441440
}
442441
} while (nextBackOff(sleeper, backOff));
@@ -1065,9 +1064,8 @@ public List<TableDataInsertAllResponse.InsertErrors> call() throws Exception {
10651064
.withFullResourceName(BigQueryHelpers.toTableFullResourceName(ref))
10661065
.create()) {
10671066
LOG.info(
1068-
String.format(
1069-
"BigQuery insertAll error, retrying: %s",
1070-
ApiErrorExtractor.INSTANCE.getErrorMessage(e)));
1067+
"BigQuery insertAll error, retrying: {}",
1068+
ApiErrorExtractor.INSTANCE.getErrorMessage(e));
10711069
}
10721070
try {
10731071
long nextBackOffMillis = backoff1.nextBackOffMillis();

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/DynamicDestinationsHelpers.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -451,7 +451,7 @@ private MatchTableDynamicDestinations(
451451
try (DatasetService datasetService = bqServices.getDatasetService(bqOptions)) {
452452
return datasetService.getTable(tableReference);
453453
} catch (InterruptedException | IOException e) {
454-
LOG.info("Failed to get BigQuery table " + tableReference);
454+
LOG.info("Failed to get BigQuery table {}", tableReference);
455455
}
456456
} catch (Exception e) {
457457
throw new RuntimeException(e);

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiFinalizeWritesDoFn.java

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ public void process(PipelineOptions pipelineOptions, @Element KV<String, String>
120120
contexts -> {
121121
RetryManager.Operation.Context<FinalizeWriteStreamResponse> firstContext =
122122
Preconditions.checkArgumentNotNull(Iterables.getFirst(contexts, null));
123-
LOG.error("Finalize of stream " + streamId + " failed with " + firstContext.getError());
123+
LOG.error("Finalize of stream {} failed", streamId, firstContext.getError());
124124
finalizeOperationsFailed.inc();
125125
BigQuerySinkMetrics.reportFailedRPCMetrics(
126126
firstContext, BigQuerySinkMetrics.RpcMethod.FINALIZE_STREAM);
@@ -132,7 +132,7 @@ public void process(PipelineOptions pipelineOptions, @Element KV<String, String>
132132
Preconditions.checkArgumentNotNull(
133133
c.getResult(),
134134
"Finalize of write stream " + streamId + " finished, but with null result");
135-
LOG.debug("Finalize of stream " + streamId + " finished with " + response);
135+
LOG.debug("Finalize of stream {} finished with {}", streamId, response);
136136
rowsFinalized.inc(response.getRowCount());
137137

138138
finalizeOperationsSucceeded.inc();
@@ -169,17 +169,15 @@ public void finishBundle(PipelineOptions pipelineOptions) throws Exception {
169169
RetryManager.Operation.Context<BatchCommitWriteStreamsResponse> firstContext =
170170
Preconditions.checkArgumentNotNull(Iterables.getFirst(contexts, null));
171171
LOG.error(
172-
"BatchCommit failed. tableId "
173-
+ tableId
174-
+ " streamNames "
175-
+ streamNames
176-
+ " error: "
177-
+ firstContext.getError());
172+
"BatchCommit failed. tableId {} streamNames {}",
173+
tableId,
174+
streamNames,
175+
firstContext.getError());
178176
batchCommitOperationsFailed.inc();
179177
return RetryType.RETRY_ALL_OPERATIONS;
180178
},
181179
c -> {
182-
LOG.info("BatchCommit succeeded for tableId " + tableId + " response " + c.getResult());
180+
LOG.info("BatchCommit succeeded for tableId {} response {}", tableId, c.getResult());
183181
batchCommitOperationsSucceeded.inc();
184182
},
185183
response -> {

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiFlushAndFinalizeDoFn.java

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -212,8 +212,7 @@ public void process(PipelineOptions pipelineOptions, @Element KV<String, Operati
212212
Context<FlushRowsResponse> failedContext =
213213
Preconditions.checkArgumentNotNull(Iterables.getFirst(contexts, null));
214214
Throwable error = failedContext.getError();
215-
LOG.warn(
216-
"Flush of stream " + streamId + " to offset " + offset + " failed with " + error);
215+
LOG.warn("Flush of stream {} to offset {} failed", streamId, offset, error);
217216
flushOperationsFailed.inc();
218217
BigQuerySinkMetrics.reportFailedRPCMetrics(
219218
failedContext, BigQuerySinkMetrics.RpcMethod.FLUSH_ROWS);
@@ -286,11 +285,9 @@ public void process(PipelineOptions pipelineOptions, @Element KV<String, Operati
286285
},
287286
contexts -> {
288287
LOG.warn(
289-
"Finalize of stream "
290-
+ streamId
291-
+ " failed with "
292-
+ Preconditions.checkArgumentNotNull(Iterables.getFirst(contexts, null))
293-
.getError());
288+
"Finalize of stream {} failed",
289+
streamId,
290+
Preconditions.checkArgumentNotNull(Iterables.getFirst(contexts, null)).getError());
294291
finalizeOperationsFailed.inc();
295292
@Nullable
296293
Context<FinalizeWriteStreamResponse> firstContext = Iterables.getFirst(contexts, null);

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWriteUnshardedRecords.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ public class StorageApiWriteUnshardedRecords<DestinationT, ElementT>
135135
.expireAfterAccess(15, TimeUnit.MINUTES)
136136
.removalListener(
137137
(RemovalNotification<String, AppendClientInfo> removal) -> {
138-
LOG.info("Expiring append client for " + removal.getKey());
138+
LOG.info("Expiring append client for {}", removal.getKey());
139139
final @Nullable AppendClientInfo appendClientInfo = removal.getValue();
140140
if (appendClientInfo != null) {
141141
appendClientInfo.close();

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/StorageApiWritesShardedRecords.java

Lines changed: 6 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -799,7 +799,7 @@ public void process(
799799
// Don't log errors for expected offset mismatch. These will be logged as warnings
800800
// below.
801801
LOG.error(
802-
"Got error " + failedContext.getError() + " closing " + failedContext.streamName);
802+
"Got error {} closing {}", failedContext.getError(), failedContext.streamName);
803803
}
804804

805805
try {
@@ -833,11 +833,9 @@ public void process(
833833
if (offsetMismatch || streamDoesNotExist) {
834834
appendOffsetFailures.inc();
835835
LOG.warn(
836-
"Append to "
837-
+ failedContext
838-
+ " failed with "
839-
+ failedContext.getError()
840-
+ " Will retry with a new stream");
836+
"Append to {} failed. Will retry with a new stream",
837+
failedContext,
838+
failedContext.getError());
841839
// Finalize the stream and clear streamName so a new stream will be created.
842840
o.get(flushTag)
843841
.output(
@@ -901,9 +899,8 @@ public void process(
901899
// the ProtoRows iterable at 2MB and the max request size is 10MB, this scenario seems
902900
// nearly impossible.
903901
LOG.error(
904-
"A request containing more than one row is over the request size limit of "
905-
+ maxRequestSize
906-
+ ". This is unexpected. All rows in the request will be sent to the failed-rows PCollection.");
902+
"A request containing more than one row is over the request size limit of {}. This is unexpected. All rows in the request will be sent to the failed-rows PCollection.",
903+
maxRequestSize);
907904
}
908905
for (int i = 0; i < splitValue.getProtoRows().getSerializedRowsCount(); ++i) {
909906
org.joda.time.Instant timestamp = splitValue.getTimestamps().get(i);

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/TableSchemaCache.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -294,7 +294,7 @@ public void refreshThread() {
294294
// Since this is a daemon thread, don't exit until it is explicitly shut down. Exiting early
295295
// can cause the
296296
// pipeline to stall.
297-
LOG.error("Caught exception in BigQuery's table schema cache refresh thread: " + e);
297+
LOG.error("Caught exception in BigQuery's table schema cache refresh thread", e);
298298
}
299299
this.refreshExecutor.submit(this::refreshThread);
300300
}
@@ -313,7 +313,7 @@ private Map<String, TableSchema> refreshAll(Map<String, Refresh> tables)
313313
if (table == null) {
314314
throw new RuntimeException("Did not get value for table " + tableReference);
315315
}
316-
LOG.info("Refreshed BigQuery schema for " + entry.getKey());
316+
LOG.info("Refreshed BigQuery schema for {}", entry.getKey());
317317
schemas.put(entry.getKey(), table.getSchema());
318318
}
319319
return schemas;

sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigtable/BigtableIO.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1545,7 +1545,7 @@ private void checkForFailures() throws IOException {
15451545
String.format(
15461546
"At least %d errors occurred writing to Bigtable. First %d errors: %s",
15471547
i + failures.size(), i, logEntry.toString());
1548-
LOG.error(message);
1548+
LOG.error("{}", message);
15491549
IOException exception = new IOException(message);
15501550
for (BigtableWriteException e : suppressed) {
15511551
exception.addSuppressed(e);
@@ -2462,7 +2462,7 @@ private void createOrUpdateMetadataTable(
24622462
// Only try to create or update metadata table if option is set to true. Otherwise, just
24632463
// check if the table exists.
24642464
if (shouldCreateOrUpdateMetadataTable && metadataTableAdminDao.createMetadataTable()) {
2465-
LOG.info("Created metadata table: " + metadataTableId);
2465+
LOG.info("Created metadata table: {}", metadataTableId);
24662466
}
24672467
}
24682468

@@ -2645,7 +2645,7 @@ public static boolean createOrUpdateReadChangeStreamMetadataTable(
26452645
// Only try to create or update metadata table if option is set to true. Otherwise, just
26462646
// check if the table exists.
26472647
if (metadataTableAdminDao.createMetadataTable()) {
2648-
LOG.info("Created metadata table: " + metadataTableAdminDao.getTableId());
2648+
LOG.info("Created metadata table: {}", metadataTableAdminDao.getTableId());
26492649
}
26502650
return metadataTableAdminDao.doesMetadataTableExist();
26512651
} finally {

0 commit comments

Comments (0)