Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
627fe69
initial commit
inv-jishnu Nov 10, 2025
32b4b70
Initial changes
inv-jishnu Nov 10, 2025
1e08ecf
scanner class updated
inv-jishnu Nov 11, 2025
e87b94c
Merge branch 'master' into feat/data-loader/changes-export
inv-jishnu Nov 11, 2025
e7c68a7
Feedback changes
inv-jishnu Nov 11, 2025
b973df8
Feedback changes javadoc and param rename
inv-jishnu Nov 11, 2025
186d3c3
Replace storage with transaction in table metadata service
inv-jishnu Nov 11, 2025
1d81db7
Initial commit
inv-jishnu Nov 11, 2025
36ff32f
transaction mode use fixed
inv-jishnu Nov 12, 2025
5b6f86a
Removed unused code and updated javadoc comments
inv-jishnu Nov 12, 2025
1f0644e
Add changes based on feedback
inv-jishnu Nov 12, 2025
326f7a7
Added changes based on feedback
inv-jishnu Nov 12, 2025
e8f9feb
Removed unused params
inv-jishnu Nov 13, 2025
98e84bb
correction
inv-jishnu Nov 13, 2025
7502fe8
Merge branch 'feat/data-loader/table-metadata-replace-storage' into f…
inv-jishnu Nov 13, 2025
0ccc67b
Merge branch 'master' into feat/data-loader/import-replace-storage
inv-jishnu Nov 13, 2025
9589094
Resolved conflicts and merged latest changes from master branch
inv-jishnu Nov 14, 2025
671f95a
Removed unused commit
inv-jishnu Nov 14, 2025
6418f5c
Fixed merge import issues
inv-jishnu Nov 14, 2025
a5284be
Merge branch 'feat/data-loader/table-metadata-replace-storage' into f…
inv-jishnu Nov 14, 2025
96ba8f0
Deprecate `include-metadata` option for data loader export (#3159)
ypeckstadt Nov 14, 2025
d2eebbf
Merge branch 'master' into feat/data-loader/import-replace-storage
inv-jishnu Nov 14, 2025
1b100b8
Fix build failure
inv-jishnu Nov 14, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ public class ExportCommand extends ExportCommandOptions implements Callable<Inte
public Integer call() throws Exception {
validateDeprecatedOptions();
applyDeprecatedOptions();
warnAboutIgnoredDeprecatedOptions();
String scalarDbPropertiesFilePath = getScalarDbPropertiesFilePath();

try {
Expand Down Expand Up @@ -141,6 +142,28 @@ private void validateDeprecatedOptions() {
MAX_THREADS_OPTION_SHORT);
}

/**
 * Emits a deprecation warning when an option that is parsed but completely ignored was
 * supplied on the command line.
 *
 * <p>The {@code --include-metadata}/{@code -m} option is kept only so that pre-existing
 * scripts do not fail with a picocli parse error; its value has no effect, so the user is
 * pointed at the replacement configuration property instead.
 */
private void warnAboutIgnoredDeprecatedOptions() {
  CommandLine.ParseResult parseResult = spec.commandLine().getParseResult();
  boolean includeMetadataSupplied =
      parseResult.hasMatchedOption(DEPRECATED_INCLUDE_METADATA_OPTION_SHORT)
          || parseResult.hasMatchedOption(DEPRECATED_INCLUDE_METADATA_OPTION);
  if (!includeMetadataSupplied) {
    return;
  }

  // Render through picocli's ANSI markup so terminals that support it show a colored warning.
  String warning =
      CommandLine.Help.Ansi.AUTO.string(
          "@|bold,yellow The "
              + DEPRECATED_INCLUDE_METADATA_OPTION
              + " option is deprecated and no longer has any effect. "
              + "Use the 'scalar.db.consensus_commit.include_metadata.enabled' configuration property "
              + "in your ScalarDB properties file to control whether transaction metadata is included in scan operations.|@");
  logger.warn(warning);
}

private String getScalarDbPropertiesFilePath() {
if (StringUtils.isBlank(configFilePath)) {
throw new IllegalArgumentException(DataLoaderError.CONFIG_FILE_PATH_BLANK.buildMessage());
Expand All @@ -160,8 +183,7 @@ private void validateOutputDirectory() throws DirectoryValidationException {

private ExportManager createExportManager(
TransactionFactory transactionFactory, ScalarDbDao scalarDbDao, FileFormat fileFormat) {
ProducerTaskFactory taskFactory =
new ProducerTaskFactory(delimiter, includeTransactionMetadata, prettyPrintJson);
ProducerTaskFactory taskFactory = new ProducerTaskFactory(delimiter, prettyPrintJson);
DistributedTransactionManager manager = transactionFactory.getTransactionManager();
switch (fileFormat) {
case JSON:
Expand All @@ -180,7 +202,6 @@ private ExportOptions buildExportOptions(Key partitionKey, ScanRange scanRange)
ExportOptions.builder(namespace, table, partitionKey, outputFormat)
.sortOrders(sortOrders)
.excludeHeaderRow(excludeHeader)
.includeTransactionMetadata(includeTransactionMetadata)
.delimiter(delimiter)
.limit(limit)
.maxThreadCount(maxThreads)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ public class ExportCommandOptions {
public static final String MAX_THREADS_OPTION = "--max-threads";
public static final String MAX_THREADS_OPTION_SHORT = "-mt";
public static final String DEPRECATED_THREADS_OPTION = "--threads";
public static final String DEPRECATED_INCLUDE_METADATA_OPTION = "--include-metadata";
public static final String DEPRECATED_INCLUDE_METADATA_OPTION_SHORT = "-m";

@CommandLine.Option(
names = {"--config", "-c"},
Expand Down Expand Up @@ -69,10 +71,19 @@ public class ExportCommandOptions {
defaultValue = "json")
protected FileFormat outputFormat;

/**
* @deprecated As of release 3.17.0 This option is no longer used and will be removed in release
* 4.0.0. The option is not fully removed as users who might already have their scripts or
* commands pre-set might pass the argument and when passed if not supported, picocli will
* throw an error. We want to avoid that and instead just show a warning.
*/
@Deprecated
@CommandLine.Option(
names = {"--include-metadata", "-m"},
description = "Include transaction metadata in the exported data (default: false)",
defaultValue = "false")
description =
"Deprecated: This option is no longer used. Please use scalar.db.consensus_commit.include_metadata.enabled to control whether transaction metadata is included in scan operations.",
defaultValue = "false",
hidden = true)
protected boolean includeTransactionMetadata;

@CommandLine.Option(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,10 @@
import com.scalar.db.api.TableMetadata;
import com.scalar.db.dataloader.core.DataLoaderError;
import com.scalar.db.dataloader.core.FileFormat;
import com.scalar.db.dataloader.core.ScalarDbMode;
import com.scalar.db.dataloader.core.dataimport.ImportManager;
import com.scalar.db.dataloader.core.dataimport.ImportOptions;
import com.scalar.db.dataloader.core.dataimport.controlfile.ControlFile;
import com.scalar.db.dataloader.core.dataimport.controlfile.ControlFileTable;
import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbStorageManager;
import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbTransactionManager;
import com.scalar.db.dataloader.core.dataimport.log.ImportLoggerConfig;
import com.scalar.db.dataloader.core.dataimport.log.LogMode;
import com.scalar.db.dataloader.core.dataimport.log.SingleFileImportLogger;
Expand All @@ -26,7 +23,6 @@
import com.scalar.db.dataloader.core.tablemetadata.TableMetadataException;
import com.scalar.db.dataloader.core.tablemetadata.TableMetadataService;
import com.scalar.db.dataloader.core.util.TableMetadataUtil;
import com.scalar.db.service.StorageFactory;
import com.scalar.db.service.TransactionFactory;
import java.io.BufferedReader;
import java.io.File;
Expand Down Expand Up @@ -150,32 +146,14 @@ private ImportManager createImportManager(
throws IOException {
File configFile = new File(configFilePath);
ImportProcessorFactory importProcessorFactory = new DefaultImportProcessorFactory();
ImportManager importManager;
if (scalarDbMode == ScalarDbMode.TRANSACTION) {
ScalarDbTransactionManager scalarDbTransactionManager =
new ScalarDbTransactionManager(TransactionFactory.create(configFile));
importManager =
new ImportManager(
tableMetadataMap,
reader,
importOptions,
importProcessorFactory,
ScalarDbMode.TRANSACTION,
null,
scalarDbTransactionManager.getDistributedTransactionManager());
} else {
ScalarDbStorageManager scalarDbStorageManager =
new ScalarDbStorageManager(StorageFactory.create(configFile));
importManager =
new ImportManager(
tableMetadataMap,
reader,
importOptions,
importProcessorFactory,
ScalarDbMode.STORAGE,
scalarDbStorageManager.getDistributedStorage(),
null);
}
ImportManager importManager =
new ImportManager(
tableMetadataMap,
reader,
importOptions,
importProcessorFactory,
scalarDbMode,
TransactionFactory.create(configFile).getTransactionManager());
if (importOptions.getLogMode().equals(LogMode.SPLIT_BY_DATA_CHUNK)) {
importManager.addListener(new SplitByDataChunkImportLogger(config, logWriterFactory));
} else {
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package com.scalar.db.dataloader.cli.command.dataexport;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

Expand Down Expand Up @@ -152,14 +153,14 @@ void call_withOnlyDeprecatedStartExclusive_shouldApplyInvertedValue() {
cmd.parseArgs(args);

// Verify the deprecated value was parsed
assertEquals(true, command.startExclusiveDeprecated);
assertTrue(command.startExclusiveDeprecated);

// Apply deprecated options (this is what the command does after validation)
command.applyDeprecatedOptions();

// Verify the value was applied with inverted logic
// start-exclusive=true should become start-inclusive=false
assertEquals(false, command.scanStartInclusive);
assertFalse(command.scanStartInclusive);
}

@Test
Expand All @@ -181,14 +182,14 @@ void call_withOnlyDeprecatedEndExclusive_shouldApplyInvertedValue() {
cmd.parseArgs(args);

// Verify the deprecated value was parsed
assertEquals(false, command.endExclusiveDeprecated);
assertFalse(command.endExclusiveDeprecated);

// Apply deprecated options (this is what the command does after validation)
command.applyDeprecatedOptions();

// Verify the value was applied with inverted logic
// end-exclusive=false should become end-inclusive=true
assertEquals(true, command.scanEndInclusive);
assertTrue(command.scanEndInclusive);
}

@Test
Expand Down Expand Up @@ -273,4 +274,83 @@ void call_withoutMaxThreads_shouldDefaultToAvailableProcessors() {
// Verify it was set to available processors
assertEquals(Runtime.getRuntime().availableProcessors(), command.maxThreads);
}

@Test
void call_withDeprecatedIncludeMetadataOption_shouldParseAndDetectOption() {
  // The deprecated option must still parse without a picocli error, and the match must be
  // detectable so the command can emit its deprecation warning. Exercise both forms.

  // Long form: --include-metadata
  String[] longFormArgs = {
    "--config",
    "scalardb.properties",
    "--namespace",
    "scalar",
    "--table",
    "asset",
    "--format",
    "JSON",
    "--include-metadata"
  };
  ExportCommand longFormCommand = new ExportCommand();
  CommandLine longFormCli = new CommandLine(longFormCommand);
  longFormCli.parseArgs(longFormArgs);
  longFormCommand.spec = longFormCli.getCommandSpec();

  // The matched option must be visible in the parse result so the warning triggers.
  assertTrue(
      longFormCli
          .getParseResult()
          .hasMatchedOption(ExportCommandOptions.DEPRECATED_INCLUDE_METADATA_OPTION));

  // Short form: -m
  String[] shortFormArgs = {
    "--config",
    "scalardb.properties",
    "--namespace",
    "scalar",
    "--table",
    "asset",
    "--format",
    "JSON",
    "-m"
  };
  ExportCommand shortFormCommand = new ExportCommand();
  CommandLine shortFormCli = new CommandLine(shortFormCommand);
  shortFormCli.parseArgs(shortFormArgs);
  shortFormCommand.spec = shortFormCli.getCommandSpec();

  // The short alias must be detected just like the long form.
  assertTrue(
      shortFormCli
          .getParseResult()
          .hasMatchedOption(ExportCommandOptions.DEPRECATED_INCLUDE_METADATA_OPTION_SHORT));
}

@Test
void call_withoutDeprecatedIncludeMetadataOption_shouldNotDetectOption() {
  // When the deprecated option is absent, the parse result must not report a match,
  // otherwise the deprecation warning would fire spuriously.
  String[] argsWithoutDeprecatedOption = {
    "--config",
    "scalardb.properties",
    "--namespace",
    "scalar",
    "--table",
    "asset",
    "--format",
    "JSON"
  };

  ExportCommand exportCommand = new ExportCommand();
  CommandLine commandLine = new CommandLine(exportCommand);
  commandLine.parseArgs(argsWithoutDeprecatedOption);
  exportCommand.spec = commandLine.getCommandSpec();

  // No match expected, so no warning should be produced for this invocation.
  assertFalse(
      commandLine
          .getParseResult()
          .hasMatchedOption(ExportCommandOptions.DEPRECATED_INCLUDE_METADATA_OPTION));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
import com.scalar.db.dataloader.core.dataexport.producer.ProducerTaskFactory;
import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
import com.scalar.db.dataloader.core.util.CsvUtil;
import com.scalar.db.transaction.consensuscommit.ConsensusCommitUtils;
import java.io.IOException;
import java.io.Writer;
import java.util.Iterator;
Expand Down Expand Up @@ -77,8 +76,7 @@ private String createCsvHeaderRow(ExportOptions exportOptions, TableMetadata tab
Iterator<String> iterator = tableMetadata.getColumnNames().iterator();
while (iterator.hasNext()) {
String columnName = iterator.next();
if (shouldIgnoreColumn(
exportOptions.isIncludeTransactionMetadata(), columnName, tableMetadata, projections)) {
if (!projections.isEmpty() && !projections.contains(columnName)) {
continue;
}
headerRow.append(columnName);
Expand All @@ -90,24 +88,4 @@ private String createCsvHeaderRow(ExportOptions exportOptions, TableMetadata tab
headerRow.append("\n");
return headerRow.toString();
}

/**
 * Decides whether a column should be ignored in the output: either it is a transaction
 * metadata column and metadata inclusion is disabled, or an explicit projection list is
 * provided and the column is not part of it.
 *
 * @param isIncludeTransactionMetadata whether transaction metadata columns should be included
 * @param columnName the name of the column to check
 * @param tableMetadata metadata of the table the column belongs to
 * @param projections selected columns for projection; an empty list means "all columns"
 * @return {@code true} if the column should be ignored, {@code false} otherwise
 */
private boolean shouldIgnoreColumn(
boolean isIncludeTransactionMetadata,
String columnName,
TableMetadata tableMetadata,
List<String> projections) {
return (!isIncludeTransactionMetadata
&& ConsensusCommitUtils.isTransactionMetaColumn(columnName, tableMetadata))
|| (!projections.isEmpty() && !projections.contains(columnName));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
import com.scalar.db.dataloader.core.dataexport.validation.ExportOptionsValidator;
import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDao;
import com.scalar.db.dataloader.core.dataimport.dao.ScalarDbDaoException;
import com.scalar.db.dataloader.core.util.TableMetadataUtil;
import com.scalar.db.exception.transaction.CrudException;
import com.scalar.db.exception.transaction.UnknownTransactionStatusException;
import com.scalar.db.io.DataType;
Expand Down Expand Up @@ -76,7 +75,6 @@ public ExportReport startExport(
try {
validateExportOptions(exportOptions, tableMetadata);
Map<String, DataType> dataTypeByColumnName = tableMetadata.getColumnDataTypes();
handleTransactionMetadata(exportOptions, tableMetadata);
processHeader(exportOptions, tableMetadata, writer);

ExecutorService executorService =
Expand Down Expand Up @@ -198,22 +196,6 @@ private void validateExportOptions(ExportOptions exportOptions, TableMetadata ta
ExportOptionsValidator.validate(exportOptions, tableMetadata);
}

/**
 * Extends the projection columns of the export options with transaction metadata columns
 * when metadata inclusion is enabled and an explicit projection list is set.
 *
 * <p>Without this, a user-supplied projection list would silently drop the metadata columns
 * even though {@code includeTransactionMetadata} is enabled.
 *
 * @param exportOptions export options (mutated in place via {@code setProjectionColumns})
 * @param tableMetadata metadata of the table being exported
 */
private void handleTransactionMetadata(ExportOptions exportOptions, TableMetadata tableMetadata) {
if (exportOptions.isIncludeTransactionMetadata()
&& !exportOptions.getProjectionColumns().isEmpty()) {
List<String> projectionMetadata =
TableMetadataUtil.populateProjectionsWithMetadata(
tableMetadata, exportOptions.getProjectionColumns());
exportOptions.setProjectionColumns(projectionMetadata);
}
}

/**
* To create a scanner object
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ public class ExportOptions {
@Builder.Default private final int maxThreadCount = Runtime.getRuntime().availableProcessors();
@Builder.Default private final String delimiter = ";";
@Builder.Default private final boolean excludeHeaderRow = false;
@Builder.Default private final boolean includeTransactionMetadata = false;
@Builder.Default private List<String> projectionColumns = Collections.emptyList();
private List<Scan.Ordering> sortOrders;

Expand Down
Loading
Loading