[HUDI-8999] Improve usage of logging libraries #12172

Merged
14 commits merged on Apr 25, 2025
Changes from all commits

@@ -999,7 +999,7 @@ public boolean updateSerdeProperties(String tableName, Map<String, String> serde
boolean different = serdeProperties.entrySet().stream().anyMatch(e ->
!existingSerdeProperties.containsKey(e.getKey()) || !existingSerdeProperties.get(e.getKey()).equals(e.getValue()));
if (!different) {
LOG.debug("Table " + tableName + " serdeProperties already up to date, skip update serde properties.");
LOG.debug("Table {} serdeProperties already up to date, skip update serde properties.", tableName);
return false;
}
}
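
The change above is the core pattern of this PR: string concatenation inside a log call is replaced with SLF4J {} placeholders, so the message is only rendered when the log level is actually enabled. A minimal standalone sketch of the two styles (the class and method names below are illustrative, not taken from the Hudi codebase):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

  void logTableState(String tableName) {
    // Eager: the full message String is built even when DEBUG is disabled.
    LOG.debug("Table " + tableName + " serdeProperties already up to date, skip update serde properties.");

    // Parameterized: the {} is substituted only if DEBUG is enabled, so no
    // throwaway String is constructed on the common (disabled) path.
    LOG.debug("Table {} serdeProperties already up to date, skip update serde properties.", tableName);
  }
}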

@@ -224,7 +224,7 @@ public String listPartitions(

HoodieTimer timer = HoodieTimer.start();
List<String> partitions = metadata.getAllPartitionPaths();
LOG.debug("Metadata Partition listing took " + timer.endTimer() + " ms");
LOG.debug("Metadata Partition listing took {} ms", timer.endTimer());

final List<Comparable[]> rows = new ArrayList<>();
partitions.stream().sorted(Comparator.reverseOrder()).forEach(p -> {

@@ -257,7 +257,7 @@ public String listFiles(

HoodieTimer timer = HoodieTimer.start();
List<StoragePathInfo> pathInfoList = metaReader.getAllFilesInPartition(partitionPath);
LOG.debug("Took " + timer.endTimer() + " ms");
LOG.debug("Took {} ms", timer.endTimer());

final List<Comparable[]> rows = new ArrayList<>();
pathInfoList.stream()

@@ -293,7 +293,7 @@ public String validateFiles(

HoodieTimer timer = HoodieTimer.start();
List<String> metadataPartitions = metadataReader.getAllPartitionPaths();
LOG.debug("Metadata Listing partitions Took " + timer.endTimer() + " ms");
LOG.debug("Metadata Listing partitions Took {} ms", timer.endTimer());
List<String> fsPartitions = fsMetaReader.getAllPartitionPaths();
Collections.sort(fsPartitions);
Collections.sort(metadataPartitions);

@@ -62,6 +62,7 @@
import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION;
import static org.apache.hudi.common.testutils.HoodieTestUtils.INSTANT_GENERATOR;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;

/**

@@ -110,11 +111,11 @@ public void testCompactionsAll() throws IOException {
HoodieCLI.getTableMetaClient().reloadActiveTimeline();

Object result = shell.evaluate(() -> "compactions show all");
System.out.println(result.toString());
assertNotNull(result);

TableHeader header = new TableHeader().addTableHeaderField("Compaction Instant Time").addTableHeaderField("State")
.addTableHeaderField("Total FileIds to be Compacted");
Map<String, Integer> fileIds = new HashMap();
Map<String, Integer> fileIds = new HashMap<>();
fileIds.put("001", 3);
fileIds.put("003", 4);
fileIds.put("005", 3);

@@ -142,7 +143,7 @@ public void testCompactionShow() throws IOException {
HoodieCLI.getTableMetaClient().reloadActiveTimeline();

Object result = shell.evaluate(() -> "compaction show --instant 001");
System.out.println(result.toString());
assertNotNull(result);
}

private void generateCompactionInstances() throws IOException {
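
In the two CLI tests above, System.out.println(result.toString()) is replaced with assertNotNull(result): the print never failed the test and only cluttered build output, while the assertion at least verifies the shell command produced a result. A hedged fragment of the pattern, assuming the test class's existing shell fixture (the test name is illustrative):

import static org.junit.jupiter.api.Assertions.assertNotNull;
import org.junit.jupiter.api.Test;

@Test
public void testShowCommandReturnsResult() {
  Object result = shell.evaluate(() -> "compactions show all");
  // Fails with a clear assertion error instead of an incidental
  // NullPointerException from result.toString() inside a println.
  assertNotNull(result);
}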

@@ -49,7 +49,7 @@ protected AsyncCleanerService(BaseHoodieWriteClient writeClient) {
@Override
protected Pair<CompletableFuture, ExecutorService> startService() {
String instantTime = writeClient.createNewInstantTime();
LOG.info(String.format("Starting async clean service with instant time %s...", instantTime));
LOG.info("Starting async clean service with instant time {}.", instantTime);
return Pair.of(CompletableFuture.supplyAsync(() -> {
writeClient.clean(instantTime);
return true;

@@ -210,7 +210,7 @@ public void enqueuePendingAsyncServiceInstant(String instantTime) {
* @throws InterruptedException
*/
String fetchNextAsyncServiceInstant() throws InterruptedException {
LOG.info(String.format("Waiting for next instant up to %d seconds", POLLING_SECONDS));
LOG.info("Waiting for next instant up to {} seconds", POLLING_SECONDS);
String instantTime = pendingInstants.poll(POLLING_SECONDS, TimeUnit.SECONDS);
if (instantTime != null) {
try {

@@ -1100,9 +1100,9 @@ public boolean rollback(final String commitInstantTime, Option<HoodiePendingRoll
.filter(instant -> EQUALS.test(instant.requestedTime(), commitInstantTime))
.findFirst());
if (commitInstantOpt.isPresent() || pendingRollbackInfo.isPresent()) {
LOG.info(String.format("Scheduling Rollback at instant time : %s "
+ "(exists in active timeline: %s), with rollback plan: %s for table %s",
rollbackInstantTime, commitInstantOpt.isPresent(), pendingRollbackInfo.isPresent(), config.getBasePath()));
LOG.info("Scheduling Rollback at instant time : {} "
+ "(exists in active timeline: {}), with rollback plan: {} for table {}",
rollbackInstantTime, commitInstantOpt.isPresent(), pendingRollbackInfo.isPresent(), config.getBasePath());
Option<HoodieRollbackPlan> rollbackPlanOption = pendingRollbackInfo.map(entry -> Option.of(entry.getRollbackPlan()))
.orElseGet(() -> table.scheduleRollback(context, rollbackInstantTime, commitInstantOpt.get(), false, config.shouldRollbackUsingMarkers(),
false));

@@ -225,7 +225,7 @@ public boolean commitStats(String instantTime, List<HoodieWriteStat> stats,
if (!config.allowEmptyCommit() && stats.isEmpty()) {
return true;
}
LOG.info("Committing " + instantTime + " action " + commitActionType);
LOG.info("Committing {} action {}", instantTime, commitActionType);
// Create a Hoodie table which encapsulated the commits and files visible
HoodieTable table = createTable(config);
HoodieCommitMetadata metadata = CommitMetadataResolverFactory.get(

@@ -245,7 +245,7 @@ public boolean commitStats(String instantTime, List<HoodieWriteStat> stats,
}
commit(table, commitActionType, instantTime, metadata, stats);
postCommit(table, metadata, instantTime, extraMetadata);
LOG.info("Committed " + instantTime);
LOG.info("Committed {}", instantTime);
} catch (IOException e) {
throw new HoodieCommitException("Failed to complete commit " + config.getBasePath() + " at time " + instantTime, e);
} finally {

@@ -273,7 +273,7 @@ public boolean commitStats(String instantTime, List<HoodieWriteStat> stats,

protected void commit(HoodieTable table, String commitActionType, String instantTime, HoodieCommitMetadata metadata,
List<HoodieWriteStat> stats) throws IOException {
LOG.info("Committing " + instantTime + " action " + commitActionType);
LOG.info("Committing {} action {}", instantTime, commitActionType);
HoodieActiveTimeline activeTimeline = table.getActiveTimeline();
// Finalize write
finalizeWrite(table, instantTime, stats);

@@ -664,7 +664,7 @@ public void savepoint(String user, String comment) {
}

String latestCommit = table.getCompletedCommitsTimeline().lastInstant().get().requestedTime();
LOG.info("Savepointing latest commit " + latestCommit);
LOG.info("Savepointing latest commit {}", latestCommit);
savepoint(latestCommit, user, comment);
}


@@ -697,7 +697,7 @@ public void deleteSavepoint() {
}

String savepointTime = savePointTimeline.lastInstant().get().requestedTime();
LOG.info("Deleting latest savepoint time " + savepointTime);
LOG.info("Deleting latest savepoint time {}", savepointTime);
deleteSavepoint(savepointTime);
}


@@ -723,7 +723,7 @@ public void restoreToSavepoint() {
}

String savepointTime = savePointTimeline.lastInstant().get().requestedTime();
LOG.info("Restoring to latest savepoint time " + savepointTime);
LOG.info("Restoring to latest savepoint time {}", savepointTime);
restoreToSavepoint(savepointTime);
}


@@ -750,8 +750,8 @@ public void restoreToSavepoint(String savepointTime) {
boolean deleteMDT = false;
if (oldestMdtCompaction.isPresent()) {
if (LESSER_THAN_OR_EQUALS.test(savepointTime, oldestMdtCompaction.get().requestedTime())) {
LOG.warn(String.format("Deleting MDT during restore to %s as the savepoint is older than oldest compaction %s on MDT",
savepointTime, oldestMdtCompaction.get().requestedTime()));
LOG.warn("Deleting MDT during restore to {} as the savepoint is older than oldest compaction {} on MDT",
savepointTime, oldestMdtCompaction.get().requestedTime());
deleteMDT = true;
}
}

@@ -761,8 +761,8 @@ public void restoreToSavepoint(String savepointTime) {
if (!deleteMDT) {
HoodieInstant syncedInstant = mdtMetaClient.createNewInstant(HoodieInstant.State.COMPLETED, HoodieTimeline.DELTA_COMMIT_ACTION, savepointTime);
if (mdtMetaClient.getCommitsTimeline().isBeforeTimelineStarts(syncedInstant.requestedTime())) {
LOG.warn(String.format("Deleting MDT during restore to %s as the savepoint is older than the MDT timeline %s",
savepointTime, mdtMetaClient.getCommitsTimeline().firstInstant().get().requestedTime()));
LOG.warn("Deleting MDT during restore to {} as the savepoint is older than the MDT timeline {}",
savepointTime, mdtMetaClient.getCommitsTimeline().firstInstant().get().requestedTime());
deleteMDT = true;
}
}

@@ -807,7 +807,7 @@ public boolean rollback(final String commitInstantTime, String rollbackInstantTi
* @param savepointToRestoreTimestamp savepoint instant time to which restoration is requested
*/
public HoodieRestoreMetadata restoreToInstant(final String savepointToRestoreTimestamp, boolean initialMetadataTableIfNecessary) throws HoodieRestoreException {
LOG.info("Begin restore to instant " + savepointToRestoreTimestamp);
LOG.info("Begin restore to instant {}", savepointToRestoreTimestamp);
Timer.Context timerContext = metrics.getRollbackCtx();
try {
HoodieTable<T, I, K, O> table = initTable(WriteOperationType.UNKNOWN, Option.empty(), initialMetadataTableIfNecessary);

@@ -140,7 +140,7 @@ private void updateStatsForSuccess(Option<Map<String, String>> optionalRecordMet
stat.setMinEventTime(eventTime);
stat.setMaxEventTime(eventTime);
} catch (DateTimeException | IllegalArgumentException e) {
LOG.debug(String.format("Fail to parse event time value: %s", eventTimeVal), e);
LOG.debug("Fail to parse event time value: {}", eventTimeVal, e);
}
}
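
Note the new debug call: with SLF4J, when the final argument is a Throwable and no placeholder is left for it, the Throwable is logged with its full stack trace rather than being formatted into the message. That is why "Fail to parse event time value: {}" takes both eventTimeVal and e. A small illustrative fragment, assuming an SLF4J logger named LOG and hypothetical values:

String eventTimeVal = "not-a-timestamp";
try {
  Long.parseLong(eventTimeVal);
} catch (IllegalArgumentException e) {
  // One placeholder, two trailing arguments: eventTimeVal fills the {},
  // and the trailing Throwable is emitted with its stack trace.
  LOG.debug("Fail to parse event time value: {}", eventTimeVal, e);
}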


@@ -56,12 +56,12 @@ public static boolean deleteHeartbeatFile(HoodieStorage storage,
String heartbeatFolderPath = HoodieTableMetaClient.getHeartbeatFolderPath(basePath);
deleted = storage.deleteFile(new StoragePath(heartbeatFolderPath, instantTime));
if (!deleted) {
LOG.error("Failed to delete heartbeat for instant " + instantTime);
LOG.error("Failed to delete heartbeat for instant {}", instantTime);
} else {
LOG.info("Deleted the heartbeat for instant " + instantTime);
LOG.info("Deleted the heartbeat for instant {}", instantTime);
}
} catch (IOException io) {
LOG.error("Unable to delete heartbeat for instant " + instantTime, io);
LOG.error("Unable to delete heartbeat for instant {}", instantTime, io);
}
return deleted;
}

@@ -160,7 +160,7 @@ public void run() {
* @param instantTime The instant time for the heartbeat.
*/
public void start(String instantTime) {
LOG.info("Received request to start heartbeat for instant time " + instantTime);
LOG.info("Received request to start heartbeat for instant time {}", instantTime);
Heartbeat heartbeat = instantToHeartbeatMap.get(instantTime);
ValidationUtils.checkArgument(heartbeat == null || !heartbeat.isHeartbeatStopped(), "Cannot restart a stopped heartbeat for " + instantTime);
if (heartbeat != null && heartbeat.isHeartbeatStarted()) {

@@ -189,7 +189,7 @@ public Heartbeat stop(String instantTime) throws HoodieException {
if (isHeartbeatStarted(heartbeat)) {
stopHeartbeatTimer(heartbeat);
HeartbeatUtils.deleteHeartbeatFile(storage, basePath, instantTime);
LOG.info("Deleted heartbeat file for instant " + instantTime);
LOG.info("Deleted heartbeat file for instant {}", instantTime);
}
return heartbeat;
}

@@ -220,10 +220,10 @@ private boolean isHeartbeatStarted(Heartbeat heartbeat) {
* @param heartbeat The heartbeat to stop.
*/
private void stopHeartbeatTimer(Heartbeat heartbeat) {
LOG.info("Stopping heartbeat for instant " + heartbeat.getInstantTime());
LOG.info("Stopping heartbeat for instant {}", heartbeat.getInstantTime());
heartbeat.getTimer().cancel();
heartbeat.setHeartbeatStopped(true);
LOG.info("Stopped heartbeat for instant " + heartbeat.getInstantTime());
LOG.info("Stopped heartbeat for instant {}", heartbeat.getInstantTime());
}

public static Boolean heartbeatExists(HoodieStorage storage, String basePath, String instantTime) throws IOException {

@@ -75,7 +75,7 @@ private Stream<HoodieInstant> getCandidateInstantsForNonTableServicesCommits(Hoo
.findInstantsModifiedAfterByCompletionTime(currentInstant.requestedTime())
.getInstantsOrderedByCompletionTime()
.collect(Collectors.toList());
LOG.info(String.format("Instants that may have conflict with %s are %s", currentInstant, completedCommitsInstants));
LOG.info("Instants that may have conflict with {} are {}", currentInstant, completedCommitsInstants);
return completedCommitsInstants.stream();
}


@@ -105,7 +105,7 @@ private Stream<HoodieInstant> getCandidateInstantsForTableServicesCommits(Hoodie
List<HoodieInstant> instantsToConsider = Stream.concat(completedCommitsStream, inflightIngestionCommitsStream)
.sorted(Comparator.comparing(o -> o.getCompletionTime()))
.collect(Collectors.toList());
LOG.info(String.format("Instants that may have conflict with %s are %s", currentInstant, instantsToConsider));
LOG.info("Instants that may have conflict with {} are {}", currentInstant, instantsToConsider);
return instantsToConsider.stream();
}


@@ -96,7 +96,7 @@ protected String getLockPath() {
private void createPathIfNotExists() {
try {
String lockPath = getLockPath();
LOG.info(String.format("Creating zookeeper path %s if not exists", lockPath));
LOG.info("Creating zookeeper path {} if not exists", lockPath);
String[] parts = lockPath.split("/");
StringBuilder currentPath = new StringBuilder();
for (String part : parts) {

@@ -106,7 +106,7 @@ private void createPathIfNotExists() {
}
}
} catch (Exception e) {
LOG.error("Failed to create ZooKeeper path: " + e.getMessage());
LOG.error("Failed to create ZooKeeper path", e);
throw new HoodieLockException("Failed to initialize ZooKeeper path", e);
}
}
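
Two fixes land in this hunk: the error path no longer concatenates e.getMessage(), which kept only the message text, and instead passes the exception object so the logging backend prints the full stack trace. A compact sketch of the difference (the helper class is hypothetical, and IllegalStateException stands in for HoodieLockException to keep the sketch dependency-free):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LockPathInitSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LockPathInitSketch.class);

  void init(Runnable createPath) {
    try {
      createPath.run();
    } catch (RuntimeException e) {
      // LOG.error("Failed to create ZooKeeper path: " + e.getMessage()) would
      // discard the stack trace; passing the Throwable preserves it.
      LOG.error("Failed to create ZooKeeper path", e);
      throw new IllegalStateException("Failed to initialize ZooKeeper path", e);
    }
  }
}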

@@ -118,7 +118,7 @@ private void createNodeIfNotExists(String path) throws Exception {
// to avoid failure due to synchronous calls.
} catch (KeeperException e) {
if (e.code() == KeeperException.Code.NODEEXISTS) {
LOG.debug(String.format("Node already exist for path = %s", path));
LOG.debug("Node already exist for path = {}", path);
} else {
throw new HoodieLockException("Failed to create zookeeper node", e);
}

@@ -50,7 +50,7 @@ public class ZookeeperBasedImplicitBasePathLockProvider extends BaseZookeeperBas
public static String getLockBasePath(String hudiTableBasePath) {
// Ensure consistent format for S3 URI.
String lockBasePath = "/tmp/" + HashID.generateXXHashAsString(s3aToS3(hudiTableBasePath), HashID.Size.BITS_64);
LOG.info(String.format("The Zookeeper lock key for the base path %s is %s", hudiTableBasePath, lockBasePath));
LOG.info("The Zookeeper lock key for the base path {} is {}", hudiTableBasePath, lockBasePath);
return lockBasePath;
}


@@ -3595,9 +3595,9 @@ private void autoAdjustConfigsForConcurrencyMode(boolean isLockProviderPropertyS
// Override the configs for metadata table
writeConfig.setValue(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(),
InProcessLockProvider.class.getName());
LOG.info(String.format("Automatically set %s=%s since user has not set the "
LOG.info("Automatically set {}={} since user has not set the "
+ "lock provider for single writer with async table services",
HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(), InProcessLockProvider.class.getName()));
HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(), InProcessLockProvider.class.getName());
}

// We check if "hoodie.clean.failed.writes.policy"

@@ -3607,10 +3607,10 @@ private void autoAdjustConfigsForConcurrencyMode(boolean isLockProviderPropertyS
// In this case, we assume that the user takes care of setting the lock provider used
writeConfig.setValue(HoodieCleanConfig.FAILED_WRITES_CLEANER_POLICY.key(),
HoodieFailedWritesCleaningPolicy.LAZY.name());
LOG.info(String.format("Automatically set %s=%s since %s is used",
LOG.info("Automatically set {}={} since {} is used",
HoodieCleanConfig.FAILED_WRITES_CLEANER_POLICY.key(),
HoodieFailedWritesCleaningPolicy.LAZY.name(),
writeConcurrencyMode.name()));
writeConcurrencyMode.name());
}
}


@@ -209,11 +209,9 @@ public static List<Pair<String, Long>> filterKeysFromFile(StoragePath filePath,
HoodieTimer timer = HoodieTimer.start();
Set<Pair<String, Long>> fileRowKeys = fileReader.filterRowKeys(candidateRecordKeys.stream().collect(Collectors.toSet()));
foundRecordKeys.addAll(fileRowKeys);
LOG.info(String.format("Checked keys against file %s, in %d ms. #candidates (%d) #found (%d)", filePath,
timer.endTimer(), candidateRecordKeys.size(), foundRecordKeys.size()));
if (LOG.isDebugEnabled()) {
LOG.debug("Keys matching for file " + filePath + " => " + foundRecordKeys);
}
LOG.info("Checked keys against file {}, in {} ms. #candidates ({}) #found ({})", filePath,
timer.endTimer(), candidateRecordKeys.size(), foundRecordKeys.size());
LOG.debug("Keys matching for file {} => {}", filePath, foundRecordKeys);
}
} catch (Exception e) {
throw new HoodieIndexException("Error checking candidate keys against file.", e);

@@ -93,7 +93,7 @@ record -> new ImmutablePair<>(record.getPartitionPath(), record.getRecordKey()))
}
if (LOG.isDebugEnabled()) {
long totalTaggedRecords = keyFilenamePairs.count();
LOG.debug("Number of update records (ones tagged with a fileID): " + totalTaggedRecords);
LOG.debug("Number of update records (ones tagged with a fileID): {}", totalTaggedRecords);
}

// Step 3: Tag the incoming records, as inserts or updates, by joining with existing record keys
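
In the previous hunk the explicit LOG.isDebugEnabled() guard could be dropped, because the arguments were already computed and {} placeholders defer only the message formatting. Here the guard is kept on purpose: the argument itself (keyFilenamePairs.count()) is expensive to evaluate, and placeholders do nothing to avoid that cost. A short illustrative fragment, where expensiveCount() is a hypothetical stand-in for such a computation and LOG is an SLF4J logger:

if (LOG.isDebugEnabled()) {
  // Arguments are evaluated before LOG.debug() is called, so guard the whole
  // block when producing them is costly; {} only defers String formatting.
  long totalTaggedRecords = expensiveCount();
  LOG.debug("Number of update records (ones tagged with a fileID): {}", totalTaggedRecords);
}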

@@ -441,8 +441,8 @@ protected void processAppendResult(AppendResult result, Option<HoodieLogBlock> d

resetWriteCounts();
assert stat.getRuntimeStats() != null;
LOG.info(String.format("AppendHandle for partitionPath %s filePath %s, took %d ms.", partitionPath,
stat.getPath(), stat.getRuntimeStats().getTotalUpsertTime()));
LOG.info("AppendHandle for partitionPath {} filePath {}, took {} ms.", partitionPath,
stat.getPath(), stat.getRuntimeStats().getTotalUpsertTime());
timer.startTimer();
}
