diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java index 9d6e46bb21..f5f4811b1e 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java @@ -27,10 +27,6 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Override - @Disabled("Temporarily disabled because it includes DML operations") - public void truncateTable_ShouldTruncateProperly() {} - @Override @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..d5ed885ddb --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage + extends 
ConsensusCommitCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); + properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, "SERIALIZABLE"); + return properties; + } + + @Test + @Override + @Disabled("Cross-partition scan with ordering is not supported in Object Storage") + public void scan_CrossPartitionScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..309216210a --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java @@ -0,0 +1,51 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ConsensusCommitIntegrationTestWithObjectStorage + extends ConsensusCommitIntegrationTestBase { + + @Override + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(ACCOUNT_ID, DataType.INT) + .addColumn(ACCOUNT_TYPE, DataType.INT) + .addColumn(BALANCE, DataType.INT) + .addColumn(SOME_COLUMN, DataType.INT) + .addColumn(BOOLEAN_COL, DataType.BOOLEAN) + .addColumn(BIGINT_COL, DataType.BIGINT) + .addColumn(FLOAT_COL, DataType.FLOAT) + .addColumn(DOUBLE_COL, DataType.DOUBLE) + .addColumn(TEXT_COL, DataType.TEXT) + .addColumn(BLOB_COL, DataType.BLOB) + .addColumn(DATE_COL, DataType.DATE) + .addColumn(TIME_COL, DataType.TIME) + .addColumn(TIMESTAMPTZ_COL, DataType.TIMESTAMPTZ) + 
.addColumn(TIMESTAMP_COL, DataType.TIMESTAMP) + .addPartitionKey(ACCOUNT_ID) + .addClusteringKey(ACCOUNT_TYPE) + .build(); + } + + @Override + protected Properties getProps(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumnWithConjunctions_ShouldReturnRecords( + ScanType scanType) {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..440e753212 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitNullMetadataIntegrationTestBase; +import java.util.Properties; + +public class ConsensusCommitNullMetadataIntegrationTestWithObjectStorage + extends ConsensusCommitNullMetadataIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java new file mode 100644 
index 0000000000..25d5c9a174 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java @@ -0,0 +1,22 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitTestUtils; +import java.util.Map; +import java.util.Properties; + +public class ConsensusCommitObjectStorageEnv { + private ConsensusCommitObjectStorageEnv() {} + + public static Properties getProperties(String testName) { + Properties properties = ObjectStorageEnv.getProperties(testName); + + // Add testName as a coordinator schema suffix + ConsensusCommitTestUtils.addSuffixToCoordinatorNamespace(properties, testName); + + return ConsensusCommitTestUtils.loadConsensusCommitProperties(properties); + } + + public static Map<String, String> getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..b946d185eb --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java @@ -0,0 +1,144 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitSpecificIntegrationTestBase; +import com.scalar.db.transaction.consensuscommit.Isolation; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ConsensusCommitSpecificIntegrationTestWithObjectStorage + extends ConsensusCommitSpecificIntegrationTestBase { + + @Override + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(ACCOUNT_ID, DataType.INT) + 
.addColumn(ACCOUNT_TYPE, DataType.INT) + .addColumn(BALANCE, DataType.INT) + .addColumn(SOME_COLUMN, DataType.TEXT) + .addPartitionKey(ACCOUNT_ID) + .addClusteringKey(ACCOUNT_TYPE) + .build(); + } + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + 
scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + 
get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..884e464008 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; +import java.util.Properties; + +public class ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage + extends 
ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java index a23fd9e52e..220cba8a70 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java @@ -51,10 +51,6 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Override - @Disabled("Temporarily disabled because it includes DML operations") - public void truncateTable_ShouldTruncateProperly() {} - @Override @Disabled("Object Storage does not have a concept of namespaces") public void diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java index 8030c9d1a0..e64dfd0793 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java @@ -49,10 +49,6 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Override - @Disabled("Temporarily disabled because it includes DML operations") - public void truncateTable_ShouldTruncateProperly() {} - @Override @Disabled("Object Storage does not have a concept of namespaces") public void diff 
--git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java new file mode 100644 index 0000000000..7dddbd230e --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java @@ -0,0 +1,61 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageCaseSensitivityIntegrationTestBase; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageCaseSensitivityIntegrationTest + extends DistributedStorageCaseSensitivityIntegrationTestBase { + + @Override + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(getColumnName1(), DataType.INT) + .addColumn(getColumnName2(), DataType.TEXT) + .addColumn(getColumnName3(), DataType.INT) + .addColumn(getColumnName4(), DataType.INT) + .addColumn(getColumnName5(), DataType.BOOLEAN) + .addColumn(getColumnName6(), DataType.BLOB) + .addPartitionKey(getColumnName1()) + .addClusteringKey(getColumnName4()) + .build(); + } + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void 
get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java new file mode 100644 index 0000000000..1514c98f76 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java @@ -0,0 +1,12 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageColumnValueIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageColumnValueIntegrationTest + extends DistributedStorageColumnValueIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java new file mode 100644 index 0000000000..3c0631a95d --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java @@ -0,0 +1,18 @@ +package com.scalar.db.storage.objectstorage; + +import 
com.scalar.db.api.DistributedStorageConditionalMutationIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageConditionalMutationIntegrationTest + extends DistributedStorageConditionalMutationIntegrationTestBase { + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java new file mode 100644 index 0000000000..dd779ecfe5 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ObjectStorageCrossPartitionScanIntegrationTest + extends DistributedStorageCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected boolean isParallelDdlSupported() { + return false; + } + + @Test + @Override + @Disabled("Cross-partition scan with ordering is not supported in Object Storage") + public void scan_WithOrderingForNonPrimaryColumns_ShouldReturnProperResult() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java new file mode 100644 index 0000000000..ace6694d7b --- /dev/null 
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java @@ -0,0 +1,60 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageIntegrationTestBase; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageIntegrationTest extends DistributedStorageIntegrationTestBase { + + @Override + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(getColumnName1(), DataType.INT) + .addColumn(getColumnName2(), DataType.TEXT) + .addColumn(getColumnName3(), DataType.INT) + .addColumn(getColumnName4(), DataType.INT) + .addColumn(getColumnName5(), DataType.BOOLEAN) + .addColumn(getColumnName6(), DataType.BLOB) + .addPartitionKey(getColumnName1()) + .addClusteringKey(getColumnName4()) + .build(); + } + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void 
scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java new file mode 100644 index 0000000000..4610d84aed --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageJapaneseIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageJapaneseIntegrationTest + extends DistributedStorageJapaneseIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java new file mode 100644 index 0000000000..5bc41617a1 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageMultipleClusteringKeyScanIntegrationTest + extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected int 
getThreadNum() { + return 3; + } + + @Override + protected boolean isParallelDdlSupported() { + return false; + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java new file mode 100644 index 0000000000..7db5a06982 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageMultiplePartitionKeyIntegrationTest + extends DistributedStorageMultiplePartitionKeyIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected boolean isParallelDdlSupported() { + return false; + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java new file mode 100644 index 0000000000..98c4ea857f --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageMutationAtomicityUnitIntegrationTestBase; +import java.util.Map; +import java.util.Properties; + +public class ObjectStorageMutationAtomicityUnitIntegrationTest + extends DistributedStorageMutationAtomicityUnitIntegrationTestBase { + + @Override + protected Properties 
getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map<String, String> getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java new file mode 100644 index 0000000000..4ca86ca7e2 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java @@ -0,0 +1,12 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageSingleClusteringKeyScanIntegrationTest + extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java new file mode 100644 index 0000000000..5ce073100b --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java @@ -0,0 +1,12 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase; +import java.util.Properties; + +public class ObjectStorageSinglePartitionKeyIntegrationTest + extends DistributedStorageSinglePartitionKeyIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java new file mode 100644 index 0000000000..3c5f33b901 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java @@ -0,0 +1,61 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageWithReservedKeywordIntegrationTestBase; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageWithReservedKeywordIntegrationTest + extends DistributedStorageWithReservedKeywordIntegrationTestBase { + + @Override + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(getColumnName1(), DataType.INT) + .addColumn(getColumnName2(), DataType.TEXT) + .addColumn(getColumnName3(), DataType.INT) + .addColumn(getColumnName4(), DataType.INT) + .addColumn(getColumnName5(), DataType.BOOLEAN) + .addColumn(getColumnName6(), DataType.BLOB) + .addPartitionKey(getColumnName1()) + .addClusteringKey(getColumnName4()) + .build(); + } + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void 
get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java index e32ab2ebac..96c168543b 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java @@ -27,8 +27,9 @@ public class ObjectStorageWrapperIntegrationTest { private static final String TEST_OBJECT1 = "test-object1"; private static final String TEST_OBJECT2 = "test-object2"; private static final String TEST_OBJECT3 = "test-object3"; + private static final int BLOB_STORAGE_LIST_MAX_KEYS = 5000; - protected ObjectStorageWrapper wrapper; + private ObjectStorageWrapper wrapper; @BeforeAll public void beforeAll() throws ObjectStorageWrapperException { @@ -156,7 +157,7 @@ public void update_NonExistingObjectKeyGiven_ShouldThrowPreconditionFailedExcept } @Test - public void update_WrongVersionGiven_ShouldThrowPreconditionFailedException() throws Exception { + public void update_WrongVersionGiven_ShouldThrowPreconditionFailedException() { // Arrange String wrongVersion = "wrong-version"; @@ -248,6 +249,32 @@ public void getKeys_WithNonExistingPrefix_ShouldReturnEmptySet() throws Exceptio 
assertThat(keys).isEmpty(); } + @Test + public void getKeys_WithPrefixForTheNumberOfObjectsExceedingTheListLimit_ShouldReturnAllKeys() + throws Exception { + String prefix = "prefix-"; + int numberOfObjects = BLOB_STORAGE_LIST_MAX_KEYS + 1; + try { + // Arrange + for (int i = 0; i < numberOfObjects; i++) { + wrapper.insert(prefix + i, "object-" + i); + } + + // Act + Set keys = wrapper.getKeys(prefix); + + // Assert + assertThat(keys.size()).isEqualTo(numberOfObjects); + for (int i = 0; i < numberOfObjects; i++) { + assertThat(keys).contains(prefix + i); + } + } finally { + for (int i = 0; i < numberOfObjects; i++) { + wrapper.delete(prefix + i); + } + } + } + @Test public void deleteByPrefix_WithExistingPrefix_ShouldDeleteObjectsSuccessfully() throws Exception { // Arrange @@ -277,6 +304,35 @@ public void deleteByPrefix_WithNonExistingPrefix_ShouldDoNothing() throws Except assertThat(keys).containsExactlyInAnyOrder(TEST_KEY1, TEST_KEY2, TEST_KEY3); } + @Test + public void + deleteByPrefix_WithPrefixForTheNumberOfObjectsExceedingTheListLimit_ShouldDeleteAllObjects() + throws Exception { + String prefix = "prefix-"; + int numberOfObjects = BLOB_STORAGE_LIST_MAX_KEYS + 1; + try { + // Arrange + for (int i = 0; i < numberOfObjects; i++) { + wrapper.insert(prefix + i, "object-" + i); + } + + // Act + wrapper.deleteByPrefix(prefix); + + // Assert + Set keys = wrapper.getKeys(prefix); + assertThat(keys).isEmpty(); + } finally { + for (int i = 0; i < numberOfObjects; i++) { + try { + wrapper.delete(prefix + i); + } catch (PreconditionFailedException e) { + // The object may have already been deleted, so do nothing + } + } + } + } + @Test public void close_ShouldNotThrowException() { // Arrange diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperLargeObjectWriteIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperLargeObjectWriteIntegrationTest.java new file mode 100644 
index 0000000000..8d63ec1890 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperLargeObjectWriteIntegrationTest.java @@ -0,0 +1,160 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; + +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; +import java.util.Arrays; +import java.util.Optional; +import java.util.Properties; +import java.util.stream.LongStream; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class ObjectStorageWrapperLargeObjectWriteIntegrationTest { + private static final Logger logger = + LoggerFactory.getLogger(ObjectStorageWrapperLargeObjectWriteIntegrationTest.class); + + private static final String TEST_NAME = "object_storage_wrapper_integration_test"; + private static final String TEST_KEY1 = "test-key1"; + private static final String TEST_KEY2 = "test-key2"; + private static final String TEST_KEY3 = "test-key3"; + + private String testObject1; + private String testObject2; + private String testObject3; + + private ObjectStorageWrapper wrapper; + + @BeforeAll + public void beforeAll() throws ObjectStorageWrapperException { + Properties properties = getProperties(TEST_NAME); + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); + long objectSizeInBytes = + LongStream.of(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES) + .max() + .getAsLong() + + 1; + + char[] charArray = new char[(int) objectSizeInBytes]; + Arrays.fill(charArray, 
'a'); + testObject1 = new String(charArray); + Arrays.fill(charArray, 'b'); + testObject2 = new String(charArray); + Arrays.fill(charArray, 'c'); + testObject3 = new String(charArray); + + createObjects(); + } + + @AfterAll + public void afterAll() { + try { + deleteObjects(); + } catch (Exception e) { + logger.warn("Failed to delete objects", e); + } + + try { + if (wrapper != null) { + wrapper.close(); + } + } catch (Exception e) { + logger.warn("Failed to close wrapper", e); + } + } + + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + private void createObjects() throws ObjectStorageWrapperException { + wrapper.insert(TEST_KEY1, testObject1); + wrapper.insert(TEST_KEY2, testObject2); + wrapper.insert(TEST_KEY3, testObject3); + } + + protected void deleteObjects() throws ObjectStorageWrapperException { + wrapper.delete(TEST_KEY1); + wrapper.delete(TEST_KEY2); + wrapper.delete(TEST_KEY3); + } + + @Test + public void insert_NewObjectKeyGiven_ShouldInsertObjectSuccessfully() throws Exception { + // Arrange + String objectKey = "new-object-key"; + String object = "new-object"; + + try { + // Act + wrapper.insert(objectKey, object); + + // Assert + Optional response = wrapper.get(objectKey); + assertThat(response.isPresent()).isTrue(); + assertThat(response.get().getPayload()).isEqualTo(object); + } finally { + wrapper.delete(objectKey); + } + } + + @Test + public void insert_ExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() { + // Arrange + + // Act Assert + assertThatCode(() -> wrapper.insert(TEST_KEY2, "another-object")) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void update_ExistingObjectKeyGiven_ShouldUpdateObjectSuccessfully() throws Exception { + // Arrange + String updatedObject = "updated-object2"; + Optional response1 = wrapper.get(TEST_KEY2); + assertThat(response1.isPresent()).isTrue(); + String version = response1.get().getVersion(); + + try { + 
// Act + wrapper.update(TEST_KEY2, updatedObject, version); + + // Assert + Optional response2 = wrapper.get(TEST_KEY2); + assertThat(response2.isPresent()).isTrue(); + assertThat(response2.get().getPayload()).isEqualTo(updatedObject); + } finally { + wrapper.delete(TEST_KEY2); + wrapper.insert(TEST_KEY2, testObject2); + } + } + + @Test + public void update_NonExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() { + // Arrange + String objectKey = "non-existing-key"; + + // Act Assert + assertThatCode(() -> wrapper.update(objectKey, "some-object", "some-version")) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void update_WrongVersionGiven_ShouldThrowPreconditionFailedException() { + // Arrange + String wrongVersion = "wrong-version"; + + // Act Assert + assertThatCode(() -> wrapper.update(TEST_KEY2, "another-object", wrongVersion)) + .isInstanceOf(PreconditionFailedException.class); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..9888104243 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java @@ -0,0 +1,37 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import com.scalar.db.transaction.singlecrudoperation.SingleCrudOperationTransactionIntegrationTestBase; +import java.util.Properties; + +public class SingleCrudOperationTransactionIntegrationTestWithObjectStorage + extends SingleCrudOperationTransactionIntegrationTestBase { + + @Override + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(ACCOUNT_ID, DataType.INT) + 
.addColumn(ACCOUNT_TYPE, DataType.INT) + .addColumn(BALANCE, DataType.INT) + .addColumn(SOME_COLUMN, DataType.INT) + .addColumn(BOOLEAN_COL, DataType.BOOLEAN) + .addColumn(BIGINT_COL, DataType.BIGINT) + .addColumn(FLOAT_COL, DataType.FLOAT) + .addColumn(DOUBLE_COL, DataType.DOUBLE) + .addColumn(TEXT_COL, DataType.TEXT) + .addColumn(BLOB_COL, DataType.BLOB) + .addColumn(DATE_COL, DataType.DATE) + .addColumn(TIME_COL, DataType.TIME) + .addColumn(TIMESTAMPTZ_COL, DataType.TIMESTAMPTZ) + .addColumn(TIMESTAMP_COL, DataType.TIMESTAMP) + .addPartitionKey(ACCOUNT_ID) + .addClusteringKey(ACCOUNT_TYPE) + .build(); + } + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..7aadea8913 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProps1(String testName) { + Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); + properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, 
"SERIALIZABLE"); + return properties; + } + + @Test + @Override + @Disabled("Cross-partition scan with ordering is not supported in Object Storage") + public void scan_ScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..4bad580149 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java @@ -0,0 +1,46 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class TwoPhaseConsensusCommitIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitIntegrationTestBase { + + @Override + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(ACCOUNT_ID, DataType.INT) + .addColumn(ACCOUNT_TYPE, DataType.INT) + .addColumn(BALANCE, DataType.INT) + .addColumn(SOME_COLUMN, DataType.INT) + .addColumn(BOOLEAN_COL, DataType.BOOLEAN) + .addColumn(BIGINT_COL, DataType.BIGINT) + .addColumn(FLOAT_COL, DataType.FLOAT) + .addColumn(DOUBLE_COL, DataType.DOUBLE) + .addColumn(TEXT_COL, DataType.TEXT) + .addColumn(BLOB_COL, DataType.BLOB) + .addColumn(DATE_COL, DataType.DATE) + .addColumn(TIME_COL, DataType.TIME) + .addColumn(TIMESTAMP_COL, DataType.TIMESTAMP) + .addColumn(TIMESTAMPTZ_COL, DataType.TIMESTAMPTZ) + .addPartitionKey(ACCOUNT_ID) + .addClusteringKey(ACCOUNT_TYPE) + .build(); + } + + @Override + protected Properties getProps1(String testName) { + return 
ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..1e4b66e32b --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitSpecificIntegrationTestBase; +import java.util.Properties; + +public class TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitSpecificIntegrationTestBase { + + @Override + protected Properties getProperties1(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..38a95fd99e --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import 
com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; +import java.util.Properties; + +public class TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/main/java/com/scalar/db/common/CoreError.java b/core/src/main/java/com/scalar/db/common/CoreError.java index fa097bad68..0816892164 100644 --- a/core/src/main/java/com/scalar/db/common/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/CoreError.java @@ -889,6 +889,18 @@ public enum CoreError implements ScalarDbError { "Object Storage does not support the feature for altering column types", "", ""), + OBJECT_STORAGE_CROSS_PARTITION_SCAN_WITH_ORDERING_NOT_SUPPORTED( + Category.USER_ERROR, + "0256", + "Cross-partition scan with ordering is not supported in Object Storage", + "", + ""), + OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER( + Category.USER_ERROR, + "0257", + "The value of the column %s in the primary key contains an illegal character", + "", + ""), // // Errors for the concurrency error category @@ -1016,6 +1028,12 @@ public enum CoreError implements ScalarDbError { "A conflict occurred when committing records. Details: %s", "", ""), + OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION( + Category.CONCURRENCY_ERROR, + "0027", + "A transaction conflict occurred in the mutation. 
Details: %s", + "", + ""), // // Errors for the internal error category @@ -1197,6 +1215,10 @@ public enum CoreError implements ScalarDbError { ""), JDBC_MYSQL_GETTING_CONNECTION_METADATA_FAILED( Category.INTERNAL_ERROR, "0063", "Getting the MySQL JDBC connection metadata failed", "", ""), + OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION( + Category.INTERNAL_ERROR, "0064", "An error occurred in the selection. Details: %s", "", ""), + OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION( + Category.INTERNAL_ERROR, "0065", "An error occurred in the mutation. Details: %s", "", ""), // // Errors for the unknown transaction status error category diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java new file mode 100644 index 0000000000..6031edc2f8 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java @@ -0,0 +1,39 @@ +package com.scalar.db.storage.objectstorage; + +import com.google.common.collect.Ordering; +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import java.util.Comparator; +import java.util.Map; + +public class ClusteringKeyComparator implements Comparator> { + private final TableMetadata metadata; + + public ClusteringKeyComparator(TableMetadata metadata) { + this.metadata = metadata; + } + + @Override + public int compare(Map clusteringKey1, Map clusteringKey2) { + for (String columnName : metadata.getClusteringKeyNames()) { + Scan.Ordering.Order order = metadata.getClusteringOrder(columnName); + + DataType dataType = metadata.getColumnDataType(columnName); + Column column1 = + ColumnValueMapper.convert(clusteringKey1.get(columnName), columnName, dataType); + Column column2 = + ColumnValueMapper.convert(clusteringKey2.get(columnName), columnName, dataType); + + int cmp = + order == Scan.Ordering.Order.ASC 
+ ? Ordering.natural().compare(column1, column2) + : Ordering.natural().compare(column2, column1); + if (cmp != 0) { + return cmp; + } + } + return 0; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java new file mode 100644 index 0000000000..34e2b2d780 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java @@ -0,0 +1,79 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.util.Base64; +import javax.annotation.Nullable; + +public class ColumnValueMapper { + public static Column convert(@Nullable Object recordValue, String name, DataType dataType) { + switch (dataType) { + case BOOLEAN: + return recordValue == null + ? BooleanColumn.ofNull(name) + : BooleanColumn.of(name, (boolean) recordValue); + case INT: + return recordValue == null + ? IntColumn.ofNull(name) + : IntColumn.of(name, ((Number) recordValue).intValue()); + case BIGINT: + return recordValue == null + ? BigIntColumn.ofNull(name) + : BigIntColumn.of(name, ((Number) recordValue).longValue()); + case FLOAT: + return recordValue == null + ? FloatColumn.ofNull(name) + : FloatColumn.of(name, ((Number) recordValue).floatValue()); + case DOUBLE: + return recordValue == null + ? 
DoubleColumn.ofNull(name) + : DoubleColumn.of(name, ((Number) recordValue).doubleValue()); + case TEXT: + return recordValue == null + ? TextColumn.ofNull(name) + : TextColumn.of(name, (String) recordValue); + case BLOB: + return recordValue == null + ? BlobColumn.ofNull(name) + : BlobColumn.of(name, Base64.getDecoder().decode((String) recordValue)); + case DATE: + return recordValue == null + ? DateColumn.ofNull(name) + : DateColumn.of( + name, TimeRelatedColumnEncodingUtils.decodeDate(((Number) recordValue).intValue())); + case TIME: + return recordValue == null + ? TimeColumn.ofNull(name) + : TimeColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTime(((Number) recordValue).longValue())); + case TIMESTAMP: + return recordValue == null + ? TimestampColumn.ofNull(name) + : TimestampColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestamp(((Number) recordValue).longValue())); + case TIMESTAMPTZ: + return recordValue == null + ? TimestampTZColumn.ofNull(name) + : TimestampTZColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestampTZ( + ((Number) recordValue).longValue())); + default: + throw new AssertionError(); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java new file mode 100644 index 0000000000..6d9e2b4167 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java @@ -0,0 +1,92 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; 
+import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.concurrent.NotThreadSafe; + +@NotThreadSafe +public class MapVisitor implements ColumnVisitor { + private final Map values = new HashMap<>(); + + @SuppressFBWarnings("EI_EXPOSE_REP") + public Map get() { + return values; + } + + @Override + public void visit(BooleanColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBooleanValue()); + } + + @Override + public void visit(IntColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getIntValue()); + } + + @Override + public void visit(BigIntColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBigIntValue()); + } + + @Override + public void visit(FloatColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getFloatValue()); + } + + @Override + public void visit(DoubleColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getDoubleValue()); + } + + @Override + public void visit(TextColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getTextValue()); + } + + @Override + public void visit(BlobColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBlobValue()); + } + + @Override + public void visit(DateColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimeColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? 
null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampTZColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java new file mode 100644 index 0000000000..aa7fa96b61 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java @@ -0,0 +1,114 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.exception.storage.RetriableExecutionException; +import java.util.Collections; +import java.util.List; + +public class MutateStatementHandler extends StatementHandler { + public MutateStatementHandler( + ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { + super(wrapper, metadataManager); + } + + public void handle(Mutation mutation) throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); + ObjectStorageMutation objectStorageMutation = + new ObjectStorageMutation(mutation, tableMetadata); + String objectKey = + ObjectStoragePartition.getObjectKey( + getNamespace(mutation), + getTable(mutation), + objectStorageMutation.getConcatenatedPartitionKey()); + mutate(objectKey, Collections.singletonList(mutation)); + } + + public void handle(List mutations) throws ExecutionException { + // mutations assumed to be for the same partition + TableMetadata tableMetadata = metadataManager.getTableMetadata(mutations.get(0)); + ObjectStorageMutation objectStorageMutation = + 
new ObjectStorageMutation(mutations.get(0), tableMetadata); + String partitionKey = objectStorageMutation.getConcatenatedPartitionKey(); + String objectKey = + ObjectStoragePartition.getObjectKey( + getNamespace(mutations.get(0)), getTable(mutations.get(0)), partitionKey); + mutate(objectKey, mutations); + } + + private void mutate(String objectKey, List mutations) + throws ExecutionException { + ObjectStoragePartitionSnapshot snapshot = getPartition(objectKey); + for (Mutation mutation : mutations) { + TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); + if (mutation instanceof Put) { + snapshot.applyPut((Put) mutation, tableMetadata); + } else { + assert mutation instanceof Delete; + snapshot.applyDelete((Delete) mutation, tableMetadata); + } + } + writePartition(snapshot); + } + + /** + * Writes a partition to the object storage. + * + * @param snapshot the partition snapshot + * @throws ExecutionException if a failure occurs during the operation + */ + private void writePartition(ObjectStoragePartitionSnapshot snapshot) throws ExecutionException { + try { + if (snapshot.getReadVersion().isPresent()) { + if (!snapshot.getPartition().isEmpty()) { + wrapper.update( + snapshot.getObjectKey(), + snapshot.getPartition().serialize(), + snapshot.getReadVersion().get()); + } else { + wrapper.delete(snapshot.getObjectKey(), snapshot.getReadVersion().get()); + } + } else { + if (!snapshot.getPartition().isEmpty()) { + wrapper.insert(snapshot.getObjectKey(), snapshot.getPartition().serialize()); + } + } + } catch (PreconditionFailedException e) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } + } + + /** + * Gets a partition and its version as a snapshot from the object storage. 
+ * + * @param objectKey the object key + * @return the partition + * @throws ExecutionException if a failure occurs during the operation + */ + private ObjectStoragePartitionSnapshot getPartition(String objectKey) throws ExecutionException { + try { + return wrapper + .get(objectKey) + .map( + response -> + new ObjectStoragePartitionSnapshot( + objectKey, response.getPayload(), response.getVersion())) + .orElseGet( + () -> + new ObjectStoragePartitionSnapshot( + objectKey, new ObjectStoragePartition(null), null)); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java new file mode 100644 index 0000000000..88170ac942 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java @@ -0,0 +1,159 @@ +package com.scalar.db.storage.objectstorage; + +import static com.scalar.db.util.ScalarDbUtils.copyAndPrepareForDynamicFiltering; + +import com.google.common.annotations.VisibleForTesting; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; +import com.scalar.db.common.AbstractDistributedStorage; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.FilterableScanner; +import com.scalar.db.common.StorageInfoProvider; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + 
+public class ObjectStorage extends AbstractDistributedStorage { + private static final Logger logger = LoggerFactory.getLogger(ObjectStorage.class); + + private final ObjectStorageWrapper wrapper; + private final SelectStatementHandler selectStatementHandler; + private final MutateStatementHandler mutateStatementHandler; + private final OperationChecker operationChecker; + + public ObjectStorage(DatabaseConfig databaseConfig) { + super(databaseConfig); + if (databaseConfig.isCrossPartitionScanOrderingEnabled()) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_CROSS_PARTITION_SCAN_WITH_ORDERING_NOT_SUPPORTED.buildMessage()); + } + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(databaseConfig); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); + ObjectStorageAdmin admin = new ObjectStorageAdmin(wrapper, objectStorageConfig); + TableMetadataManager metadataManager = + new TableMetadataManager(admin, databaseConfig.getMetadataCacheExpirationTimeSecs()); + operationChecker = + new ObjectStorageOperationChecker( + databaseConfig, metadataManager, new StorageInfoProvider(admin)); + selectStatementHandler = new SelectStatementHandler(wrapper, metadataManager); + mutateStatementHandler = new MutateStatementHandler(wrapper, metadataManager); + logger.info("ObjectStorage object is created properly"); + } + + @VisibleForTesting + ObjectStorage( + DatabaseConfig databaseConfig, + ObjectStorageWrapper wrapper, + SelectStatementHandler selectStatementHandler, + MutateStatementHandler mutateStatementHandler, + OperationChecker operationChecker) { + super(databaseConfig); + this.wrapper = wrapper; + this.selectStatementHandler = selectStatementHandler; + this.mutateStatementHandler = mutateStatementHandler; + this.operationChecker = operationChecker; + } + + @Override + public Optional get(Get get) throws ExecutionException { + get = copyAndSetTargetToIfNot(get); + operationChecker.check(get); + Scanner 
scanner = null; + try { + if (get.getConjunctions().isEmpty()) { + scanner = selectStatementHandler.handle(get); + } else { + scanner = + new FilterableScanner( + get, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(get))); + } + Optional result = scanner.one(); + if (!result.isPresent()) { + return Optional.empty(); + } + if (scanner.one().isPresent()) { + throw new IllegalArgumentException( + CoreError.GET_OPERATION_USED_FOR_NON_EXACT_MATCH_SELECTION.buildMessage(get)); + } + return result; + } finally { + if (scanner != null) { + try { + scanner.close(); + } catch (IOException e) { + logger.warn("Failed to close the scanner", e); + } + } + } + } + + @Override + public Scanner scan(Scan scan) throws ExecutionException { + scan = copyAndSetTargetToIfNot(scan); + operationChecker.check(scan); + if (scan.getConjunctions().isEmpty()) { + return selectStatementHandler.handle(scan); + } else { + return new FilterableScanner( + scan, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(scan))); + } + } + + @Override + public void put(Put put) throws ExecutionException { + put = copyAndSetTargetToIfNot(put); + operationChecker.check(put); + mutateStatementHandler.handle(put); + } + + @Override + public void put(List puts) throws ExecutionException { + mutate(puts); + } + + @Override + public void delete(Delete delete) throws ExecutionException { + delete = copyAndSetTargetToIfNot(delete); + operationChecker.check(delete); + mutateStatementHandler.handle(delete); + } + + @Override + public void delete(List deletes) throws ExecutionException { + mutate(deletes); + } + + @Override + public void mutate(List mutations) throws ExecutionException { + if (mutations.size() == 1) { + Mutation mutation = mutations.get(0); + if (mutation instanceof Put) { + put((Put) mutation); + return; + } else if (mutation instanceof Delete) { + delete((Delete) mutation); + return; + } + } + mutations = copyAndSetTargetToIfNot(mutations); + 
operationChecker.check(mutations); + mutateStatementHandler.handle(mutations); + } + + @Override + public void close() { + wrapper.close(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java new file mode 100644 index 0000000000..2114c81a76 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java @@ -0,0 +1,64 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageMutation extends ObjectStorageOperation { + ObjectStorageMutation(Mutation mutation, TableMetadata metadata) { + super(mutation, metadata); + } + + @Nonnull + public ObjectStorageRecord makeRecord() { + Mutation mutation = (Mutation) getOperation(); + + assert mutation instanceof Put; + Put put = (Put) getOperation(); + + return ObjectStorageRecord.newBuilder() + .id(getRecordId()) + .partitionKey(toMap(put.getPartitionKey().getColumns())) + .clusteringKey( + put.getClusteringKey().map(k -> toMap(k.getColumns())).orElse(Collections.emptyMap())) + .values(toMapForPut(put)) + .build(); + } + + @Nonnull + public ObjectStorageRecord makeRecord(ObjectStorageRecord existingRecord) { + Mutation mutation = (Mutation) getOperation(); + + assert mutation instanceof Put; + Put put = (Put) mutation; + + Map newValues = new HashMap<>(existingRecord.getValues()); + newValues.putAll(toMapForPut(put)); + return ObjectStorageRecord.newBuilder() + .id(existingRecord.getId()) + .partitionKey(existingRecord.getPartitionKey()) + .clusteringKey(existingRecord.getClusteringKey()) + 
.values(newValues) + .build(); + } + + private Map toMap(Collection> columns) { + MapVisitor visitor = new MapVisitor(); + columns.forEach(c -> c.accept(visitor)); + return visitor.get(); + } + + private Map toMapForPut(Put put) { + MapVisitor visitor = new MapVisitor(); + put.getColumns().values().forEach(c -> c.accept(visitor)); + return visitor.get(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java new file mode 100644 index 0000000000..ec5286fb27 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java @@ -0,0 +1,60 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Operation; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageOperation { + private final Operation operation; + private final TableMetadata metadata; + + public ObjectStorageOperation(Operation operation, TableMetadata metadata) { + this.operation = operation; + this.metadata = metadata; + } + + @Nonnull + public Operation getOperation() { + return operation; + } + + @Nonnull + public String getConcatenatedPartitionKey() { + Map> keyMap = new HashMap<>(); + operation.getPartitionKey().getColumns().forEach(c -> keyMap.put(c.getName(), c)); + + ConcatenationVisitor visitor = new ConcatenationVisitor(); + metadata.getPartitionKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); + + return visitor.build(); + } + + @Nonnull + public String getConcatenatedClusteringKey() { + Map> keyMap = new HashMap<>(); + operation + .getClusteringKey() + .ifPresent(k -> k.getColumns().forEach(c -> keyMap.put(c.getName(), c))); + + ConcatenationVisitor visitor = new ConcatenationVisitor(); + 
metadata.getClusteringKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); + + return visitor.build(); + } + + @Nonnull + public String getRecordId() { + if (operation.getClusteringKey().isPresent()) { + return String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + getConcatenatedPartitionKey(), + getConcatenatedClusteringKey()); + } + return getConcatenatedPartitionKey(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java new file mode 100644 index 0000000000..38322b88c4 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java @@ -0,0 +1,126 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.Scan; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.StorageInfoProvider; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; + +public class ObjectStorageOperationChecker extends OperationChecker { + private static final char[] ILLEGAL_CHARACTERS_IN_PRIMARY_KEY = { + ObjectStorageUtils.OBJECT_KEY_DELIMITER, 
ObjectStorageUtils.CONCATENATED_KEY_DELIMITER, + }; + + private static final ColumnVisitor PRIMARY_KEY_COLUMN_CHECKER = + new ColumnVisitor() { + @Override + public void visit(BooleanColumn column) {} + + @Override + public void visit(IntColumn column) {} + + @Override + public void visit(BigIntColumn column) {} + + @Override + public void visit(FloatColumn column) {} + + @Override + public void visit(DoubleColumn column) {} + + @Override + public void visit(TextColumn column) { + String value = column.getTextValue(); + assert value != null; + + for (char illegalCharacter : ILLEGAL_CHARACTERS_IN_PRIMARY_KEY) { + if (value.indexOf(illegalCharacter) != -1) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER.buildMessage( + column.getName(), value)); + } + } + } + + @Override + public void visit(BlobColumn column) {} + + @Override + public void visit(DateColumn column) {} + + @Override + public void visit(TimeColumn column) {} + + @Override + public void visit(TimestampColumn column) {} + + @Override + public void visit(TimestampTZColumn column) {} + }; + + public ObjectStorageOperationChecker( + DatabaseConfig databaseConfig, + TableMetadataManager metadataManager, + StorageInfoProvider storageInfoProvider) { + super(databaseConfig, metadataManager, storageInfoProvider); + } + + @Override + public void check(Get get) throws ExecutionException { + super.check(get); + checkPrimaryKey(get); + } + + @Override + public void check(Scan scan) throws ExecutionException { + super.check(scan); + checkPrimaryKey(scan); + scan.getStartClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + scan.getEndClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + } + + @Override + public void check(Put put) throws ExecutionException { + super.check(put); + checkPrimaryKey(put); + } + + @Override + public void 
check(Delete delete) throws ExecutionException { + super.check(delete); + checkPrimaryKey(delete); + } + + private void checkPrimaryKey(Operation operation) { + operation + .getPartitionKey() + .getColumns() + .forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER)); + operation + .getClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStoragePartition.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStoragePartition.java new file mode 100644 index 0000000000..3f3d6489ac --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStoragePartition.java @@ -0,0 +1,210 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Ordering; +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.Delete; +import com.scalar.db.api.DeleteIf; +import com.scalar.db.api.DeleteIfExists; +import com.scalar.db.api.Put; +import com.scalar.db.api.PutIf; +import com.scalar.db.api.PutIfExists; +import com.scalar.db.api.PutIfNotExists; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.CoreError; +import com.scalar.db.exception.storage.NoMutationException; +import com.scalar.db.io.Column; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import javax.annotation.Nullable; + +@SuppressFBWarnings("EI_EXPOSE_REP2") +public class ObjectStoragePartition { + private final Map records; + + @JsonCreator + public ObjectStoragePartition( + @JsonProperty("records") @Nullable Map records) { + 
this.records = records != null ? records : new HashMap<>(); + } + + public static ObjectStoragePartition deserialize(String serializedObject) { + return Serializer.deserialize(serializedObject, new TypeReference() {}); + } + + public static String getObjectKey(String namespaceName, String tableName, String partitionKey) { + return ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey); + } + + public String serialize() { + return Serializer.serialize(this); + } + + public Map getRecords() { + return Collections.unmodifiableMap(records); + } + + public Optional getRecord(String recordId) { + return Optional.ofNullable(records.get(recordId)); + } + + public boolean isEmpty() { + return records.isEmpty(); + } + + public void applyPut(Put put, TableMetadata tableMetadata) throws NoMutationException { + ObjectStorageMutation mutation = new ObjectStorageMutation(put, tableMetadata); + if (!put.getCondition().isPresent()) { + ObjectStorageRecord existingRecord = records.get(mutation.getRecordId()); + if (existingRecord == null) { + records.put(mutation.getRecordId(), mutation.makeRecord()); + } else { + records.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); + } + } else if (put.getCondition().get() instanceof PutIfNotExists) { + if (records.containsKey(mutation.getRecordId())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); + } + records.put(mutation.getRecordId(), mutation.makeRecord()); + } else if (put.getCondition().get() instanceof PutIfExists) { + ObjectStorageRecord existingRecord = records.get(mutation.getRecordId()); + if (existingRecord == null) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); + } + records.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); + } else { + assert put.getCondition().get() instanceof PutIf; + if (!records.containsKey(mutation.getRecordId())) { + throw 
new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); + } + ObjectStorageRecord existingRecord = records.get(mutation.getRecordId()); + if (areConditionsMet( + existingRecord, put.getCondition().get().getExpressions(), tableMetadata)) { + records.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); + return; + } + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); + } + } + + public void applyDelete(Delete delete, TableMetadata tableMetadata) throws NoMutationException { + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, tableMetadata); + if (!delete.getCondition().isPresent()) { + records.remove(mutation.getRecordId()); + } else if (delete.getCondition().get() instanceof DeleteIfExists) { + if (!records.containsKey(mutation.getRecordId())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); + } + records.remove(mutation.getRecordId()); + } else { + assert delete.getCondition().get() instanceof DeleteIf; + if (!records.containsKey(mutation.getRecordId())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); + } + ObjectStorageRecord existingRecord = records.get(mutation.getRecordId()); + if (areConditionsMet( + existingRecord, delete.getCondition().get().getExpressions(), tableMetadata)) { + records.remove(mutation.getRecordId()); + return; + } + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); + } + } + + @VisibleForTesting + protected void putRecord(String recordId, ObjectStorageRecord record) { + records.put(recordId, record); + } + + @VisibleForTesting + protected boolean areConditionsMet( + ObjectStorageRecord record, List expressions, TableMetadata metadata) { + for (ConditionalExpression expression : expressions) { + 
Column expectedColumn = expression.getColumn(); + Column actualColumn = + ColumnValueMapper.convert( + record.getValues().get(expectedColumn.getName()), + expectedColumn.getName(), + metadata.getColumnDataType(expectedColumn.getName())); + switch (expression.getOperator()) { + case EQ: + if (actualColumn.hasNullValue()) { + return false; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) != 0) { + return false; + } + break; + case NE: + if (actualColumn.hasNullValue()) { + return false; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) == 0) { + return false; + } + break; + case GT: + if (actualColumn.hasNullValue()) { + return false; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) <= 0) { + return false; + } + break; + case GTE: + if (actualColumn.hasNullValue()) { + return false; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) < 0) { + return false; + } + break; + case LT: + if (actualColumn.hasNullValue()) { + return false; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) >= 0) { + return false; + } + break; + case LTE: + if (actualColumn.hasNullValue()) { + return false; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) > 0) { + return false; + } + break; + case IS_NULL: + if (!actualColumn.hasNullValue()) { + return false; + } + break; + case IS_NOT_NULL: + if (actualColumn.hasNullValue()) { + return false; + } + break; + case LIKE: + case NOT_LIKE: + default: + throw new AssertionError("Unsupported operator"); + } + } + return true; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStoragePartitionSnapshot.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStoragePartitionSnapshot.java new file mode 100644 index 0000000000..49a338b599 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStoragePartitionSnapshot.java @@ -0,0 +1,76 @@ +package com.scalar.db.storage.objectstorage; + 
+import com.scalar.db.api.Delete; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.exception.storage.NoMutationException; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Optional; +import javax.annotation.Nullable; + +@SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}) +public class ObjectStoragePartitionSnapshot { + private final String namespaceName; + private final String tableName; + private final String partitionKey; + private final ObjectStoragePartition partition; + private final String readVersion; + + public ObjectStoragePartitionSnapshot( + String objectKey, String serializedPartition, @Nullable String readVersion) { + String[] parts = ObjectStorageUtils.parseObjectKey(objectKey); + String namespaceName = parts[0]; + String tableName = parts[1]; + String partitionKey = parts[2]; + this.namespaceName = namespaceName; + this.tableName = tableName; + this.partitionKey = partitionKey; + this.partition = ObjectStoragePartition.deserialize(serializedPartition); + this.readVersion = readVersion; + } + + public ObjectStoragePartitionSnapshot( + String objectKey, ObjectStoragePartition partition, @Nullable String readVersion) { + String[] parts = ObjectStorageUtils.parseObjectKey(objectKey); + String namespaceName = parts[0]; + String tableName = parts[1]; + String partitionKey = parts[2]; + this.namespaceName = namespaceName; + this.tableName = tableName; + this.partitionKey = partitionKey; + this.partition = partition; + this.readVersion = readVersion; + } + + public String getNamespaceName() { + return namespaceName; + } + + public String getTableName() { + return tableName; + } + + public String getPartitionKey() { + return partitionKey; + } + + public ObjectStoragePartition getPartition() { + return partition; + } + + public Optional getReadVersion() { + return Optional.ofNullable(readVersion); + } + + public String getObjectKey() { + return 
ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey); + } + + public void applyPut(Put put, TableMetadata tableMetadata) throws NoMutationException { + partition.applyPut(put, tableMetadata); + } + + public void applyDelete(Delete delete, TableMetadata tableMetadata) throws NoMutationException { + partition.applyDelete(delete, tableMetadata); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java index 1f7b94275b..0be04f632e 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java @@ -10,7 +10,7 @@ public interface ObjectStorageProvider extends DistributedStorageProvider { @Override default DistributedStorage createDistributedStorage(DatabaseConfig config) { - return null; + return new ObjectStorage(config); } @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java new file mode 100644 index 0000000000..f0693918b1 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java @@ -0,0 +1,114 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +@SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}) +@Immutable +public class ObjectStorageRecord { + private final String id; + private final Map partitionKey; + private final Map clusteringKey; + private final Map values; + + 
@JsonCreator + public ObjectStorageRecord( + @JsonProperty("id") @Nullable String id, + @JsonProperty("partitionKey") @Nullable Map partitionKey, + @JsonProperty("clusteringKey") @Nullable Map clusteringKey, + @JsonProperty("values") @Nullable Map values) { + this.id = id != null ? id : ""; + this.partitionKey = partitionKey != null ? new HashMap<>(partitionKey) : Collections.emptyMap(); + this.clusteringKey = + clusteringKey != null ? new HashMap<>(clusteringKey) : Collections.emptyMap(); + this.values = values != null ? new HashMap<>(values) : Collections.emptyMap(); + } + + public ObjectStorageRecord(ObjectStorageRecord record) { + this(record.getId(), record.getPartitionKey(), record.getClusteringKey(), record.getValues()); + } + + public String getId() { + return id; + } + + public Map getPartitionKey() { + return Collections.unmodifiableMap(partitionKey); + } + + public Map getClusteringKey() { + return Collections.unmodifiableMap(clusteringKey); + } + + public Map getValues() { + return Collections.unmodifiableMap(values); + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (!(o instanceof ObjectStorageRecord)) { + return false; + } + ObjectStorageRecord other = (ObjectStorageRecord) o; + if (!other.getId().equals(id)) { + return false; + } + if (!other.getPartitionKey().equals(partitionKey)) { + return false; + } + if (!other.getClusteringKey().equals(clusteringKey)) { + return false; + } + return other.getValues().equals(values); + } + + @Override + public int hashCode() { + return Objects.hash(id, partitionKey, clusteringKey, values); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static final class Builder { + private String id; + private Map partitionKey = new HashMap<>(); + private Map clusteringKey = new HashMap<>(); + private Map values = new HashMap<>(); + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder partitionKey(Map 
partitionKey) { + this.partitionKey = partitionKey; + return this; + } + + public Builder clusteringKey(Map clusteringKey) { + this.clusteringKey = clusteringKey; + return this; + } + + public Builder values(Map values) { + this.values = values; + return this; + } + + public ObjectStorageRecord build() { + return new ObjectStorageRecord(id, partitionKey, clusteringKey, values); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index 38cbfcfad0..88e75a755f 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -157,8 +157,8 @@ private DataType convertDataType(String columnType) { } } - public static ObjectStorageTableMetadata.Builder newBuilder() { - return new ObjectStorageTableMetadata.Builder(); + public static Builder newBuilder() { + return new Builder(); } public static final class Builder { @@ -170,27 +170,27 @@ public static final class Builder { private Builder() {} - public ObjectStorageTableMetadata.Builder partitionKeyNames(LinkedHashSet val) { + public Builder partitionKeyNames(LinkedHashSet val) { partitionKeyNames = val; return this; } - public ObjectStorageTableMetadata.Builder clusteringKeyNames(LinkedHashSet val) { + public Builder clusteringKeyNames(LinkedHashSet val) { clusteringKeyNames = val; return this; } - public ObjectStorageTableMetadata.Builder clusteringOrders(Map val) { + public Builder clusteringOrders(Map val) { clusteringOrders = val; return this; } - public ObjectStorageTableMetadata.Builder secondaryIndexNames(Set val) { + public Builder secondaryIndexNames(Set val) { secondaryIndexNames = val; return this; } - public ObjectStorageTableMetadata.Builder columns(Map val) { + public Builder columns(Map val) { columns = val; return this; } diff --git 
a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java index ac04b7df21..c6a4883cff 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java @@ -16,6 +16,10 @@ public static String getObjectKey(String namespace, String table) { return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table); } + public static String[] parseObjectKey(String objectKey) { + return objectKey.split(String.valueOf(OBJECT_KEY_DELIMITER), 3); + } + public static ObjectStorageConfig getObjectStorageConfig(DatabaseConfig databaseConfig) { if (Objects.equals(databaseConfig.getStorage(), BlobStorageConfig.STORAGE_NAME)) { return new BlobStorageConfig(databaseConfig); diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java new file mode 100644 index 0000000000..6eaa4d2066 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java @@ -0,0 +1,54 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Result; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.ResultImpl; +import com.scalar.db.io.Column; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class ResultInterpreter { + private final List projections; + private final TableMetadata metadata; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public ResultInterpreter(List projections, TableMetadata metadata) { + this.projections = Collections.unmodifiableList(Objects.requireNonNull(projections)); + this.metadata = 
Objects.requireNonNull(metadata); + } + + public Result interpret(ObjectStorageRecord record) { + Map> ret = new HashMap<>(); + + if (projections.isEmpty()) { + metadata.getColumnNames().forEach(name -> add(ret, name, record, metadata)); + } else { + projections.forEach(name -> add(ret, name, record, metadata)); + } + + return new ResultImpl(ret, metadata); + } + + private void add( + Map> columns, + String name, + ObjectStorageRecord record, + TableMetadata metadata) { + Object value; + if (record.getPartitionKey().containsKey(name)) { + value = record.getPartitionKey().get(name); + } else if (record.getClusteringKey().containsKey(name)) { + value = record.getClusteringKey().get(name); + } else { + value = record.getValues().get(name); + } + + columns.put(name, ColumnValueMapper.convert(value, name, metadata.getColumnDataType(name))); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java new file mode 100644 index 0000000000..d7d14a39f0 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java @@ -0,0 +1,59 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Result; +import com.scalar.db.common.AbstractScanner; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; + +@NotThreadSafe +public class ScannerImpl extends AbstractScanner { + private final Iterator recordIterator; + private final ResultInterpreter resultInterpreter; + private final int recordCountLimit; + + private int recordCount; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public ScannerImpl( + Iterator recordIterator, + ResultInterpreter resultInterpreter, + int recordCountLimit) { + this.recordIterator = 
recordIterator; + this.resultInterpreter = resultInterpreter; + this.recordCountLimit = recordCountLimit; + this.recordCount = 0; + } + + @Override + @Nonnull + public Optional one() { + if (!recordIterator.hasNext()) { + return Optional.empty(); + } + if (recordCountLimit != 0 && recordCount >= recordCountLimit) { + return Optional.empty(); + } + recordCount++; + return Optional.of(resultInterpreter.interpret(recordIterator.next())); + } + + @Override + @Nonnull + public List all() { + List results = new ArrayList<>(); + Optional result; + while ((result = one()).isPresent()) { + results.add(result.get()); + } + return results; + } + + @Override + public void close() throws IOException {} +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java new file mode 100644 index 0000000000..b25b218391 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java @@ -0,0 +1,251 @@ +package com.scalar.db.storage.objectstorage; + +import com.google.common.collect.Ordering; +import com.scalar.db.api.Get; +import com.scalar.db.api.Scan; +import com.scalar.db.api.ScanAll; +import com.scalar.db.api.Scanner; +import com.scalar.db.api.Selection; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.EmptyScanner; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Column; +import com.scalar.db.io.Key; +import com.scalar.db.util.ScalarDbUtils; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class SelectStatementHandler 
/**
 * Handler for selection statements ({@code Get}, {@code Scan}, {@code ScanAll}) on Object
 * Storage. A whole partition object is loaded per request; clustering-key boundaries, ordering,
 * and limits are applied in memory.
 */
public class SelectStatementHandler extends StatementHandler {
  public SelectStatementHandler(
      ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) {
    super(wrapper, metadataManager);
  }

  /**
   * Executes the given selection and returns a scanner over the matching records.
   *
   * @param selection a {@code Get}, {@code ScanAll}, or {@code Scan}
   * @return a scanner over the results
   * @throws ExecutionException if the underlying storage access fails
   * @throws UnsupportedOperationException if a secondary index is specified (not supported)
   */
  @Nonnull
  public Scanner handle(Selection selection) throws ExecutionException {
    TableMetadata tableMetadata = metadataManager.getTableMetadata(selection);
    if (ScalarDbUtils.isSecondaryIndexSpecified(selection, tableMetadata)) {
      throw new UnsupportedOperationException(
          CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage());
    }
    if (selection instanceof Get) {
      return executeGet((Get) selection, tableMetadata);
    }
    if (selection instanceof ScanAll) {
      return executeScanAll((ScanAll) selection, tableMetadata);
    }
    return executeScan((Scan) selection, tableMetadata);
  }

  /** Looks up a single record in its partition by the concatenated record id. */
  private Scanner executeGet(Get get, TableMetadata metadata) throws ExecutionException {
    ObjectStorageOperation operation = new ObjectStorageOperation(get, metadata);
    ObjectStoragePartition partition =
        getPartition(getNamespace(get), getTable(get), operation.getConcatenatedPartitionKey());
    // Fetch the record once instead of calling getRecord() twice.
    Optional<ObjectStorageRecord> record = partition.getRecord(operation.getRecordId());
    if (!record.isPresent()) {
      return new EmptyScanner();
    }
    return new ScannerImpl(
        Collections.singletonList(record.get()).iterator(),
        new ResultInterpreter(get.getProjections(), metadata),
        1);
  }

  /** Scans a single partition, applying ordering, clustering-key boundaries, and the limit. */
  private Scanner executeScan(Scan scan, TableMetadata metadata) throws ExecutionException {
    ObjectStorageOperation operation = new ObjectStorageOperation(scan, metadata);
    ObjectStoragePartition partition =
        getPartition(getNamespace(scan), getTable(scan), operation.getConcatenatedPartitionKey());
    List<ObjectStorageRecord> records = new ArrayList<>(partition.getRecords().values());

    ClusteringKeyComparator clusteringKeyComparator = new ClusteringKeyComparator(metadata);
    Comparator<ObjectStorageRecord> comparator =
        Comparator.comparing(ObjectStorageRecord::getClusteringKey, clusteringKeyComparator);
    if (isReverseOrder(scan, metadata)) {
      comparator = comparator.reversed();
    }
    records.sort(comparator);

    // If the scan is for DESC clustering order, use the end clustering key as a start key and
    // the start clustering key as an end key.
    boolean scanForDescClusteringOrder = isScanForDescClusteringOrder(scan, metadata);
    Optional<Key> startKey =
        scanForDescClusteringOrder ? scan.getEndClusteringKey() : scan.getStartClusteringKey();
    boolean startInclusive =
        scanForDescClusteringOrder ? scan.getEndInclusive() : scan.getStartInclusive();
    Optional<Key> endKey =
        scanForDescClusteringOrder ? scan.getStartClusteringKey() : scan.getEndClusteringKey();
    boolean endInclusive =
        scanForDescClusteringOrder ? scan.getStartInclusive() : scan.getEndInclusive();

    if (startKey.isPresent()) {
      records =
          filterRecordsByClusteringKeyBoundary(
              records, startKey.get(), true, startInclusive, metadata);
    }
    if (endKey.isPresent()) {
      records =
          filterRecordsByClusteringKeyBoundary(
              records, endKey.get(), false, endInclusive, metadata);
    }

    if (scan.getLimit() > 0) {
      records = records.subList(0, Math.min(scan.getLimit(), records.size()));
    }

    return new ScannerImpl(
        records.iterator(),
        new ResultInterpreter(scan.getProjections(), metadata),
        scan.getLimit());
  }

  /** Scans all partitions of the table, streaming partitions lazily to bound memory usage. */
  private Scanner executeScanAll(ScanAll scan, TableMetadata metadata) throws ExecutionException {
    try {
      List<String> partitionKeys = getPartitionKeysInTable(getNamespace(scan), getTable(scan));
      StreamingRecordIterator iterator =
          new StreamingRecordIterator(wrapper, getNamespace(scan), getTable(scan), partitionKeys);
      return new ScannerImpl(
          iterator, new ResultInterpreter(scan.getProjections(), metadata), scan.getLimit());
    } catch (Exception e) {
      throw new ExecutionException(
          CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e);
    }
  }

  /** Loads the partition object; a missing object is treated as an empty partition. */
  private ObjectStoragePartition getPartition(
      String namespaceName, String tableName, String partitionKey) throws ExecutionException {
    try {
      Optional<ObjectStorageWrapperResponse> response =
          wrapper.get(ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey));
      if (!response.isPresent()) {
        return new ObjectStoragePartition(Collections.emptyMap());
      }
      return ObjectStoragePartition.deserialize(response.get().getPayload());
    } catch (ObjectStorageWrapperException e) {
      throw new ExecutionException(
          CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e);
    }
  }

  /** Lists the partition keys stored under the table prefix. */
  private List<String> getPartitionKeysInTable(String namespaceName, String tableName)
      throws ExecutionException {
    try {
      return wrapper.getKeys(ObjectStorageUtils.getObjectKey(namespaceName, tableName, "")).stream()
          .map(key -> key.substring(key.lastIndexOf(ObjectStorageUtils.OBJECT_KEY_DELIMITER) + 1))
          .filter(partition -> !partition.isEmpty())
          .collect(Collectors.toList());
    } catch (Exception e) {
      throw new ExecutionException(
          CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e);
    }
  }

  /**
   * Returns true when the scan orderings reverse the table's clustering orders. The orderings
   * must name the clustering-key columns in their defined order, and every ordering must agree
   * (all reversed or all as-defined); otherwise an {@code IllegalArgumentException} is thrown.
   */
  private boolean isReverseOrder(Scan scan, TableMetadata metadata) {
    Boolean reverse = null;
    Iterator<String> iterator = metadata.getClusteringKeyNames().iterator();
    for (Scan.Ordering ordering : scan.getOrderings()) {
      String clusteringKeyName = iterator.next();
      if (!ordering.getColumnName().equals(clusteringKeyName)) {
        throw new IllegalArgumentException(
            CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan));
      }
      // Reversed when the requested order differs from the defined clustering order.
      boolean reversedForThisColumn =
          ordering.getOrder() != metadata.getClusteringOrder(ordering.getColumnName());
      if (reverse == null) {
        reverse = reversedForThisColumn;
      } else if (reverse != reversedForThisColumn) {
        throw new IllegalArgumentException(
            CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan));
      }
    }
    return reverse != null && reverse;
  }

  /**
   * Returns true when the boundary keys indicate a scan in DESC clustering order, judged by the
   * clustering order of the last column of the start clustering key (or of the end clustering
   * key when no start key is given).
   */
  private boolean isScanForDescClusteringOrder(Scan scan, TableMetadata tableMetadata) {
    Optional<Key> boundary =
        scan.getStartClusteringKey().isPresent()
            ? scan.getStartClusteringKey()
            : scan.getEndClusteringKey();
    if (!boundary.isPresent()) {
      return false;
    }
    Key boundaryKey = boundary.get();
    String lastColumnName = boundaryKey.getColumns().get(boundaryKey.size() - 1).getName();
    return tableMetadata.getClusteringOrder(lastColumnName) == Scan.Ordering.Order.DESC;
  }

  /**
   * Filters records by a clustering-key boundary. Every column except the last must match the
   * boundary value exactly; the last column is compared as a range boundary, honoring the
   * clustering order and the inclusiveness flag.
   */
  private List<ObjectStorageRecord> filterRecordsByClusteringKeyBoundary(
      List<ObjectStorageRecord> records,
      Key clusteringKey,
      boolean isStart,
      boolean isInclusive,
      TableMetadata metadata) {
    List<Column<?>> columns = clusteringKey.getColumns();
    for (int i = 0; i < columns.size(); i++) {
      Column<?> column = columns.get(i);
      Scan.Ordering.Order order = metadata.getClusteringOrder(column.getName());
      // Detect the last column by index instead of the O(n^2) indexOf() lookup.
      if (i == columns.size() - 1) {
        return records.stream()
            .filter(
                record -> {
                  Column<?> recordColumn =
                      ColumnValueMapper.convert(
                          record.getClusteringKey().get(column.getName()),
                          column.getName(),
                          column.getDataType());
                  // Compare in storage order: operands are flipped for DESC columns.
                  int cmp =
                      order == Scan.Ordering.Order.ASC
                          ? Ordering.natural().compare(recordColumn, column)
                          : Ordering.natural().compare(column, recordColumn);
                  if (isStart) {
                    return isInclusive ? cmp >= 0 : cmp > 0;
                  }
                  return isInclusive ? cmp <= 0 : cmp < 0;
                })
            .collect(Collectors.toList());
      }
      // Non-final boundary columns must match exactly.
      List<ObjectStorageRecord> matched = new ArrayList<>();
      for (ObjectStorageRecord record : records) {
        Column<?> recordColumn =
            ColumnValueMapper.convert(
                record.getClusteringKey().get(column.getName()),
                column.getName(),
                column.getDataType());
        if (Ordering.natural().compare(recordColumn, column) == 0) {
          matched.add(record);
        }
      }
      if (matched.isEmpty()) {
        return Collections.emptyList();
      }
      records = matched;
    }
    return records;
  }
}

// ==== file: core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java ====

package com.scalar.db.storage.objectstorage;

import static com.google.common.base.Preconditions.checkNotNull;

import com.scalar.db.api.Operation;
import com.scalar.db.common.TableMetadataManager;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import javax.annotation.Nonnull;

/**
 * Base class for Object Storage statement handlers, holding the storage wrapper and the table
 * metadata manager shared by all handlers.
 */
public class StatementHandler {
  protected final ObjectStorageWrapper wrapper;
  protected final TableMetadataManager metadataManager;

  @SuppressFBWarnings("EI_EXPOSE_REP2")
  public StatementHandler(ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) {
    this.wrapper = checkNotNull(wrapper);
    this.metadataManager = checkNotNull(metadataManager);
  }

  /** Returns the namespace of the operation; it must have been resolved by the caller. */
  @Nonnull
  protected String getNamespace(Operation operation) {
    assert operation.forNamespace().isPresent();
    return operation.forNamespace().get();
  }

  /** Returns the table of the operation; it must have been resolved by the caller. */
  @Nonnull
  protected String getTable(Operation operation) {
    assert operation.forTable().isPresent();
    return operation.forTable().get();
  }
}
// ==== file: core/src/main/java/com/scalar/db/storage/objectstorage/StreamingRecordIterator.java ====

package com.scalar.db.storage.objectstorage;

import com.scalar.db.common.CoreError;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;

/**
 * Iterator that streams records from partitions in a lazy manner, loading one partition
 * on-demand instead of materializing all records in memory at once.
 */
@SuppressFBWarnings("EI_EXPOSE_REP2")
public class StreamingRecordIterator implements Iterator<ObjectStorageRecord> {
  private final ObjectStorageWrapper wrapper;
  private final String namespaceName;
  private final String tableName;
  private final Iterator<String> partitionKeyIterator;
  private Iterator<ObjectStorageRecord> partitionRecordIterator;

  public StreamingRecordIterator(
      ObjectStorageWrapper wrapper,
      String namespaceName,
      String tableName,
      List<String> partitionKeys) {
    this.wrapper = wrapper;
    this.namespaceName = namespaceName;
    this.tableName = tableName;
    this.partitionKeyIterator = partitionKeys.iterator();
    this.partitionRecordIterator = Collections.emptyIterator();
  }

  @Override
  public boolean hasNext() {
    // Skip empty partitions until a record is found or all partitions are exhausted.
    while (!partitionRecordIterator.hasNext() && partitionKeyIterator.hasNext()) {
      loadNextPartition();
    }
    return partitionRecordIterator.hasNext();
  }

  @Override
  public ObjectStorageRecord next() {
    if (!hasNext()) {
      throw new NoSuchElementException();
    }
    return partitionRecordIterator.next();
  }

  /** Loads the next partition and points the record iterator at its records. */
  private void loadNextPartition() {
    String partitionKey = partitionKeyIterator.next();
    try {
      partitionRecordIterator = getPartition(partitionKey).getRecords().values().iterator();
    } catch (ObjectStorageWrapperException e) {
      // Iterator methods cannot throw checked exceptions; wrap the original cause directly
      // instead of double-wrapping it in an intermediate ExecutionException (which also
      // duplicated the buildMessage() text in the message chain).
      throw new RuntimeException(
          CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e);
    }
  }

  /** Fetches the partition object; a missing object is treated as an empty partition. */
  private ObjectStoragePartition getPartition(String partitionKey)
      throws ObjectStorageWrapperException {
    Optional<ObjectStorageWrapperResponse> response =
        wrapper.get(ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey));
    if (!response.isPresent()) {
      return new ObjectStoragePartition(Collections.emptyMap());
    }
    return ObjectStoragePartition.deserialize(response.get().getPayload());
  }
}
// ==== file: core/src/test/java/com/scalar/db/storage/objectstorage/MapVisitorTest.java ====

package com.scalar.db.storage.objectstorage;

import static org.assertj.core.api.Assertions.assertThat;

import com.scalar.db.io.BigIntColumn;
import com.scalar.db.io.BlobColumn;
import com.scalar.db.io.BooleanColumn;
import com.scalar.db.io.DateColumn;
import com.scalar.db.io.DoubleColumn;
import com.scalar.db.io.FloatColumn;
import com.scalar.db.io.IntColumn;
import com.scalar.db.io.TextColumn;
import com.scalar.db.io.TimeColumn;
import com.scalar.db.io.TimestampColumn;
import com.scalar.db.io.TimestampTZColumn;
import com.scalar.db.util.TimeRelatedColumnEncodingUtils;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

/** Unit tests for {@code MapVisitor}: each column type must be stored in the map as expected. */
public class MapVisitorTest {
  private static final boolean ANY_BOOLEAN = false;
  private static final BooleanColumn ANY_BOOLEAN_COLUMN =
      BooleanColumn.of("any_boolean", ANY_BOOLEAN);
  private static final int ANY_INT = Integer.MIN_VALUE;
  private static final IntColumn ANY_INT_COLUMN = IntColumn.of("any_int", ANY_INT);
  private static final long ANY_BIGINT = BigIntColumn.MAX_VALUE;
  private static final BigIntColumn ANY_BIGINT_COLUMN = BigIntColumn.of("any_bigint", ANY_BIGINT);
  private static final float ANY_FLOAT = Float.MIN_NORMAL;
  private static final FloatColumn ANY_FLOAT_COLUMN = FloatColumn.of("any_float", ANY_FLOAT);
  private static final double ANY_DOUBLE = Double.MIN_NORMAL;
  private static final DoubleColumn ANY_DOUBLE_COLUMN = DoubleColumn.of("any_double", ANY_DOUBLE);
  private static final String ANY_TEXT = "test";
  private static final TextColumn ANY_TEXT_COLUMN = TextColumn.of("any_text", ANY_TEXT);
  private static final byte[] ANY_BLOB = ANY_TEXT.getBytes(StandardCharsets.UTF_8);
  private static final BlobColumn ANY_BLOB_COLUMN = BlobColumn.of("any_blob", ANY_BLOB);
  private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE;
  private static final DateColumn ANY_DATE_COLUMN = DateColumn.of("any_date", ANY_DATE);
  private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE;
  private static final TimeColumn ANY_TIME_COLUMN = TimeColumn.of("any_time", ANY_TIME);
  private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE;
  private static final TimestampColumn ANY_TIMESTAMP_COLUMN =
      TimestampColumn.of("any_timestamp", ANY_TIMESTAMP);
  private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE;
  private static final TimestampTZColumn ANY_TIMESTAMPTZ_COLUMN =
      TimestampTZColumn.of("any_timestamptz", ANY_TIMESTAMPTZ);

  private MapVisitor visitor;

  @BeforeEach
  public void setUp() {
    visitor = new MapVisitor();
  }

  @Test
  public void visit_BooleanColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_BOOLEAN_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_BOOLEAN_COLUMN.getName())).isEqualTo(ANY_BOOLEAN);
  }

  @Test
  public void visit_BooleanColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    BooleanColumn.ofNull("any_boolean").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_boolean")).isTrue();
    assertThat(visitor.get().get("any_boolean")).isNull();
  }

  @Test
  public void visit_IntColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_INT_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_INT_COLUMN.getName())).isEqualTo(ANY_INT);
  }

  @Test
  public void visit_IntColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    IntColumn.ofNull("any_int").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_int")).isTrue();
    assertThat(visitor.get().get("any_int")).isNull();
  }

  @Test
  public void visit_BigIntColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_BIGINT_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_BIGINT_COLUMN.getName())).isEqualTo(ANY_BIGINT);
  }

  @Test
  public void visit_BigIntColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    BigIntColumn.ofNull("any_bigint").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_bigint")).isTrue();
    assertThat(visitor.get().get("any_bigint")).isNull();
  }

  @Test
  public void visit_FloatColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_FLOAT_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_FLOAT_COLUMN.getName())).isEqualTo(ANY_FLOAT);
  }

  @Test
  public void visit_FloatColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    FloatColumn.ofNull("any_float").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_float")).isTrue();
    assertThat(visitor.get().get("any_float")).isNull();
  }

  @Test
  public void visit_DoubleColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_DOUBLE_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_DOUBLE_COLUMN.getName())).isEqualTo(ANY_DOUBLE);
  }

  @Test
  public void visit_DoubleColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    // Fixed copy-paste bug: this test previously visited FloatColumn.ofNull("any_double"),
    // so the DoubleColumn null path of the visitor was never exercised.
    DoubleColumn.ofNull("any_double").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_double")).isTrue();
    assertThat(visitor.get().get("any_double")).isNull();
  }

  @Test
  public void visit_TextColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_TEXT_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_TEXT_COLUMN.getName())).isEqualTo(ANY_TEXT);
  }

  @Test
  public void visit_TextColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    TextColumn.ofNull("any_text").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_text")).isTrue();
    assertThat(visitor.get().get("any_text")).isNull();
  }

  @Test
  public void visit_BlobColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_BLOB_COLUMN.accept(visitor);

    // Assert
    ByteBuffer expected =
        (ByteBuffer)
            ByteBuffer.allocate(ANY_TEXT.length())
                .put(ANY_TEXT.getBytes(StandardCharsets.UTF_8))
                .flip();
    assertThat(visitor.get().get(ANY_BLOB_COLUMN.getName())).isEqualTo(expected);
  }

  @Test
  public void visit_BlobColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    BlobColumn.ofNull("any_blob").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_blob")).isTrue();
    assertThat(visitor.get().get("any_blob")).isNull();
  }

  @Test
  public void visit_DateColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_DATE_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_DATE_COLUMN.getName()))
        .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN));
  }

  @Test
  public void visit_DateColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    DateColumn.ofNull("any_date").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_date")).isTrue();
    assertThat(visitor.get().get("any_date")).isNull();
  }

  @Test
  public void visit_TimeColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_TIME_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_TIME_COLUMN.getName()))
        .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN));
  }

  @Test
  public void visit_TimeColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    TimeColumn.ofNull("any_time").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_time")).isTrue();
    assertThat(visitor.get().get("any_time")).isNull();
  }

  @Test
  public void visit_TimestampColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_TIMESTAMP_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_TIMESTAMP_COLUMN.getName()))
        .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN));
  }

  @Test
  public void visit_TimestampColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    TimestampColumn.ofNull("any_timestamp").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_timestamp")).isTrue();
    assertThat(visitor.get().get("any_timestamp")).isNull();
  }

  @Test
  public void visit_TimestampTZColumnAcceptCalled_ShouldGetMap() {
    // Act
    ANY_TIMESTAMPTZ_COLUMN.accept(visitor);

    // Assert
    assertThat(visitor.get().get(ANY_TIMESTAMPTZ_COLUMN.getName()))
        .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN));
  }

  @Test
  public void visit_TimestampTZColumnWithNullValueAcceptCalled_ShouldGetMap() {
    // Act
    TimestampTZColumn.ofNull("any_timestamptz").accept(visitor);

    // Assert
    assertThat(visitor.get().containsKey("any_timestamptz")).isTrue();
    assertThat(visitor.get().get("any_timestamptz")).isNull();
  }
}
--git a/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java new file mode 100644 index 0000000000..1e8e4ae424 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java @@ -0,0 +1,847 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.scalar.db.api.ConditionBuilder; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.exception.storage.NoMutationException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class MutateStatementHandlerTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_NAME_4 = "name4"; + private static final String ANY_TEXT_1 = 
"text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + private static final int ANY_INT_2 = 2; + private static final String VERSION = "version1"; + + private MutateStatementHandler handler; + @Mock private ObjectStorageWrapper wrapper; + @Mock private TableMetadataManager metadataManager; + @Mock private TableMetadata metadata; + + @Captor private ArgumentCaptor objectKeyCaptor; + @Captor private ArgumentCaptor payloadCaptor; + @Captor private ArgumentCaptor versionCaptor; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + handler = new MutateStatementHandler(wrapper, metadataManager); + + when(metadataManager.getTableMetadata(any(Operation.class))).thenReturn(metadata); + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + when(metadata.getColumnDataType(ANY_NAME_3)).thenReturn(DataType.INT); + when(metadata.getColumnDataType(ANY_NAME_4)).thenReturn(DataType.INT); + } + + private Put preparePut() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Put preparePutWithoutClusteringKey() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Delete prepareDelete() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, 
ANY_TEXT_2); + return Delete.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + private Delete prepareDeleteWithoutClusteringKey() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Delete.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + } + + private ObjectStorageRecord prepareExistingRecord() { + Map values = new HashMap<>(); + values.put(ANY_NAME_3, ANY_INT_1); + values.put(ANY_NAME_4, ANY_INT_2); + return ObjectStorageRecord.newBuilder().id("concat_key").values(values).build(); + } + + private void setupNonExistentPartition() throws ObjectStorageWrapperException { + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + } + + private void setupPartitionWithRecord(String recordId) throws ObjectStorageWrapperException { + Map records = new HashMap<>(); + records.put(recordId, prepareExistingRecord()); + ObjectStoragePartition partition = new ObjectStoragePartition(records); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + } + + private void setupPartitionWithRecords(String recordId, String... 
additionalRecordIds) + throws ObjectStorageWrapperException { + Map records = new HashMap<>(); + records.put(recordId, prepareExistingRecord()); + for (String additionalRecordId : additionalRecordIds) { + records.put(additionalRecordId, prepareExistingRecord()); + } + ObjectStoragePartition partition = new ObjectStoragePartition(records); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + } + + @Test + public void handle_PutWithoutConditionsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutConditionsGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutWithoutClusteringKeyGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = preparePutWithoutClusteringKey(); + ObjectStorageMutation mutation = new 
ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutClusteringKeyGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = preparePutWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); + when(wrapper.get(anyString())).thenThrow(exception); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)) + .isInstanceOf(ExecutionException.class) + .hasCause(exception); + } + + @Test + public void handle_PutIfNotExistsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, 
mutation.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfNotExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord("another_record_key"); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfNotExistsGiven_WhenPartitionAndRecordExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_PutIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = 
Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_PutIfExistsGiven_WhenPartitionAndRecordExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionAndRecordExist_ShouldCallWrapperUpdate() + throws 
Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + private void assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + String expectedObjectKey, String expectedConcatenatedKey) + throws ObjectStorageWrapperException { + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + ObjectStoragePartition insertedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference() {}); + Optional record = insertedPartition.getRecord(expectedConcatenatedKey); + assertThat(record).isPresent(); + assertThat(record.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + + private void 
assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + String expectedObjectKey, String expectedConcatenatedKey) + throws ObjectStorageWrapperException { + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + ObjectStoragePartition updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference() {}); + Optional record = updatedPartition.getRecord(expectedConcatenatedKey); + assertThat(record).isPresent(); + assertThat(record.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void + handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getConcatenatedPartitionKey(), expectedExistingRecordKey); + } + + @Test + public void handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + 
setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void + handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = prepareDeleteWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void + handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = prepareDeleteWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void handle_DeleteWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); + 
when(wrapper.get(anyString())).thenThrow(exception); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)) + .isInstanceOf(ExecutionException.class) + .hasCause(exception); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + 
Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + 
.build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_DeleteIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act & Assert + 
assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + private void assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + String expectedObjectKey, String expectedConcatenatedKey, String expectedExistingRecordKey) + throws ObjectStorageWrapperException { + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + ObjectStoragePartition updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference() {}); + assertThat(updatedPartition.getRecord(expectedConcatenatedKey)).isEmpty(); + assertThat(updatedPartition.getRecord(expectedExistingRecordKey)).isPresent(); + } + + private void assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete( + String expectedObjectKey) throws ObjectStorageWrapperException { + verify(wrapper).delete(objectKeyCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void handle_MultipleMutationsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); + Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); + ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, 
ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + ObjectStoragePartition insertedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference() {}); + Optional record1 = insertedPartition.getRecord(mutation1.getRecordId()); + assertThat(record1).isPresent(); + assertThat(record1.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + Optional record2 = insertedPartition.getRecord(mutation2.getRecordId()); + assertThat(record2).isPresent(); + assertThat(record2.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + Optional record3 = insertedPartition.getRecord(mutation3.getRecordId()); + assertThat(record3).isPresent(); + assertThat(record3.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + Optional record4 = insertedPartition.getRecord(mutation4.getRecordId()); + assertThat(record4).isPresent(); + assertThat(record4.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + + @Test + public void handle_MultipleMutationsGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); + Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); + ObjectStorageMutation mutation1 = new 
ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + setupPartitionWithRecords( + mutation1.getRecordId(), + mutation2.getRecordId(), + mutation3.getRecordId(), + mutation4.getRecordId()); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + ObjectStoragePartition updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference() {}); + Optional record1 = updatedPartition.getRecord(mutation1.getRecordId()); + assertThat(record1).isPresent(); + assertThat(record1.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + Optional record2 = updatedPartition.getRecord(mutation2.getRecordId()); + assertThat(record2).isPresent(); + assertThat(record2.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + Optional record3 = updatedPartition.getRecord(mutation3.getRecordId()); + assertThat(record3).isPresent(); + assertThat(record3.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + Optional record4 = updatedPartition.getRecord(mutation4.getRecordId()); + assertThat(record4).isPresent(); + assertThat(record4.get().getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } +} diff --git 
a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java new file mode 100644 index 0000000000..3177c64cd1 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java @@ -0,0 +1,89 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Key; +import java.util.Collections; +import java.util.LinkedHashSet; +import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageMutationTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_NAME_4 = "name4"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + private static final int ANY_INT_2 = 2; + + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + } + + private Put preparePut() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + 
.intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + @Test + public void makeRecord_PutGiven_ShouldReturnWithValues() { + // Arrange + Put put = preparePut(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); + String concatenatedKey = objectStorageMutation.getRecordId(); + + // Act + ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(concatenatedKey); + Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isEqualTo(ANY_INT_1); + Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); + } + + @Test + public void makeRecord_PutWithNullValueGiven_ShouldReturnWithValues() { + // Arrange + Put put = preparePut(); + put = Put.newBuilder(put).intValue(ANY_NAME_3, null).build(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); + String concatenatedKey = objectStorageMutation.getRecordId(); + + // Act + ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(concatenatedKey); + Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + Assertions.assertThat(actual.getValues().containsKey(ANY_NAME_3)).isTrue(); + Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isNull(); + Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java new file mode 100644 index 
0000000000..b68f4c56be --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java @@ -0,0 +1,830 @@ +package com.scalar.db.storage.objectstorage; + +import static com.scalar.db.api.ConditionBuilder.column; +import static com.scalar.db.api.ConditionBuilder.deleteIf; +import static com.scalar.db.api.ConditionBuilder.deleteIfExists; +import static com.scalar.db.api.ConditionBuilder.putIf; +import static com.scalar.db.api.ConditionBuilder.putIfExists; +import static com.scalar.db.api.ConditionBuilder.putIfNotExists; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.openMocks; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.MutationCondition; +import com.scalar.db.api.Put; +import com.scalar.db.api.Scan; +import com.scalar.db.api.StorageInfo; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.StorageInfoImpl; +import com.scalar.db.common.StorageInfoProvider; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; + +public class ObjectStorageOperationCheckerTest { + private static final String NAMESPACE_NAME = "n1"; + private static final String TABLE_NAME = "t1"; + private static final String PKEY1 = "p1"; + private static final String CKEY1 = "c1"; + private static final String COL1 = "v1"; + private static final String COL2 = "v2"; + private static final String COL3 = "v3"; + private static final String COL4 = "v4"; + private static final 
StorageInfo STORAGE_INFO = + new StorageInfoImpl("ObjectStorage", StorageInfo.MutationAtomicityUnit.STORAGE, 100); + + private static final TableMetadata TABLE_METADATA1 = + TableMetadata.newBuilder() + .addColumn(PKEY1, DataType.INT) + .addColumn(CKEY1, DataType.INT) + .addColumn(COL1, DataType.INT) + .addColumn(COL2, DataType.BOOLEAN) + .addColumn(COL3, DataType.TEXT) + .addColumn(COL4, DataType.BLOB) + .addPartitionKey(PKEY1) + .addClusteringKey(CKEY1) + .build(); + + private static final TableMetadata TABLE_METADATA2 = + TableMetadata.newBuilder() + .addColumn(PKEY1, DataType.TEXT) + .addColumn(CKEY1, DataType.TEXT) + .addPartitionKey(PKEY1) + .addClusteringKey(CKEY1) + .build(); + + @Mock private DatabaseConfig databaseConfig; + @Mock private TableMetadataManager metadataManager; + @Mock private StorageInfoProvider storageInfoProvider; + private ObjectStorageOperationChecker operationChecker; + + @BeforeEach + public void setUp() throws Exception { + openMocks(this).close(); + when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); + operationChecker = + new ObjectStorageOperationChecker(databaseConfig, metadataManager, storageInfoProvider); + } + + @Test + public void check_ForMutationsWithPut_ShouldDoNothing() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Put putWithoutSettingIndex = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 0)) + .build(); + Put put = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(putWithoutSettingIndex, put))) + .doesNotThrowAnyException(); + } + + @Test + public void check_ForMutationsWithDelete_ShouldDoNothing() throws ExecutionException { + // 
Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Delete deleteWithoutSettingIndex = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 0)) + .build(); + Delete delete = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(deleteWithoutSettingIndex, delete))) + .doesNotThrowAnyException(); + } + + @Test + public void + check_GetGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Get get1 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get2 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get3 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .build(); + Get get4 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get5 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> 
operationChecker.check(get1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(get2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get3)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get4)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get5)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_ScanGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Scan scan1 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan2 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan3 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan4 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(scan1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(scan2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(scan3)) + .isInstanceOf(IllegalArgumentException.class); + 
assertThatThrownBy(() -> operationChecker.check(scan4)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_PutGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Put put1 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put2 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put3 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(put1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(put2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(put3)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_DeleteGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Delete delete1 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete2 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete3 = + Delete.newBuilder() + 
.namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(delete1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(delete2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(delete3)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_MutationsGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); + + Put put1 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put2 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete1 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete2 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(put1, delete1))) + .doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put2, delete1))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put1, delete2))) + 
.isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForPutWithCondition_ShouldBehaveProperly() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + + // Act Assert + assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfExists()))) + .doesNotThrowAnyException(); + assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfNotExists()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL1).isNullInt()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNotEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isGreaterThanBoolean(false)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + 
buildPutWithCondition( + putIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL4).isNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL4).isNotNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + } + + @Test + public void check_ForDeleteWithCondition_ShouldBehaveProperly() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + + // Act Assert + assertThatCode(() -> operationChecker.check(buildDeleteWithCondition(deleteIfExists()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () 
-> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isNotEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isGreaterThanBoolean(false)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL4).isNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL4).isNotNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) + .build()))) + .doesNotThrowAnyException(); + } + + @Test + public void check_ForMutationsWithPutWithCondition_ShouldBehaveProperly() + throws 
ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Put put = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .build(); + + // Act Assert + assertThatCode( + () -> operationChecker.check(Arrays.asList(buildPutWithCondition(putIfExists()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check(Arrays.asList(buildPutWithCondition(putIfNotExists()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL1).isNullInt()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL2).isNotEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + 
putIf(column(COL2).isGreaterThanBoolean(false)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL4).isNullBlob()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL4).isNotNullBlob()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) + .build()), + put))) + .doesNotThrowAnyException(); + } + + @Test + public void check_ForMutationsWithDeleteWithCondition_ShouldBehaveProperly() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Delete delete = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .build(); + + // Act Assert + assertThatCode( + () -> + operationChecker.check( + Arrays.asList(buildDeleteWithCondition(deleteIfExists()), 
delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isNotEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isGreaterThanBoolean(false)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()), + delete))) + 
.doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL4).isNullBlob()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL4).isNotNullBlob()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) + .build()), + delete))) + .doesNotThrowAnyException(); + } + + private Put buildPutWithCondition(MutationCondition condition) { + return Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .condition(condition) + .build(); + } + + private Delete buildDeleteWithCondition(MutationCondition condition) { + return Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .condition(condition) + .build(); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java new file mode 100644 index 0000000000..4515fe90c4 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java @@ -0,0 +1,94 @@ +package 
com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Get; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashSet; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageOperationTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + } + + @Test + public void getConcatenatedPartitionKey_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { + // Arrange + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_2, ANY_NAME_3))); + + Key partitionKey = + Key.of(ANY_NAME_1, ANY_TEXT_1, ANY_NAME_2, ANY_TEXT_2, ANY_NAME_3, ANY_INT_1); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); + + // Act + String actual = objectStorageOperation.getConcatenatedPartitionKey(); + + // Assert + assertThat(actual) + .isEqualTo( + String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + ANY_TEXT_1, + ANY_TEXT_2, + String.valueOf(ANY_INT_1))); + } + + @Test + public void 
getId_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { + // Arrange + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_3))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + + Key partitionKey = Key.of(ANY_NAME_1, ANY_TEXT_1, ANY_NAME_3, ANY_INT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); + + // Act + String actual = objectStorageOperation.getRecordId(); + + // Assert + assertThat(actual) + .isEqualTo( + String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + ANY_TEXT_1, + String.valueOf(ANY_INT_1), + ANY_TEXT_2)); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStoragePartitionTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStoragePartitionTest.java new file mode 100644 index 0000000000..2695632c82 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStoragePartitionTest.java @@ -0,0 +1,760 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.ConditionBuilder; +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.exception.storage.NoMutationException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Collections; +import java.util.HashMap; +import 
java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStoragePartitionTest { + private static final String NAMESPACE = "test_namespace"; + private static final String TABLE = "test_table"; + private static final String PARTITION_KEY_NAME = "pk"; + private static final String CLUSTERING_KEY_NAME = "ck"; + private static final String COLUMN_NAME_1 = "col1"; + private static final String COLUMN_NAME_2 = "col2"; + private static final String PARTITION_KEY_VALUE = "pk1"; + private static final String CLUSTERING_KEY_VALUE = "ck1"; + private static final String RECORD_ID_1 = "record1"; + private static final String RECORD_ID_2 = "record2"; + private static final int INT_VALUE_1 = 10; + private static final int INT_VALUE_2 = 20; + + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(PARTITION_KEY_NAME))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(CLUSTERING_KEY_NAME))); + when(metadata.getColumnDataType(anyString())).thenReturn(DataType.INT); + } + + @Test + public void getRecord_WhenRecordExists_ShouldReturnRecord() { + // Arrange + Map records = new HashMap<>(); + ObjectStorageRecord record = createRecord(RECORD_ID_1, INT_VALUE_1); + records.put(RECORD_ID_1, record); + ObjectStoragePartition partition = createObjectStoragePartition(records); + + // Act + Optional result = partition.getRecord(RECORD_ID_1); + + // Assert + assertThat(result).isPresent(); + assertThat(result).hasValue(record); + } + + @Test + public void getRecord_WhenRecordDoesNotExist_ShouldReturnEmpty() { + // Arrange + ObjectStoragePartition partition = 
createObjectStoragePartition(new HashMap<>()); + + // Act Assert + assertThat(partition.getRecord(RECORD_ID_1)).isEmpty(); + } + + @Test + public void getRecords_WhenPartitionHasRecords_ShouldReturnAllRecords() { + // Arrange + Map records = new HashMap<>(); + ObjectStorageRecord record1 = createRecord(RECORD_ID_1, INT_VALUE_1); + ObjectStorageRecord record2 = createRecord(RECORD_ID_2, INT_VALUE_2); + records.put(RECORD_ID_1, record1); + records.put(RECORD_ID_2, record2); + ObjectStoragePartition partition = createObjectStoragePartition(records); + + // Act + Map result = partition.getRecords(); + + // Assert + assertThat(result).hasSize(2); + assertThat(result.values()).containsExactlyInAnyOrder(record1, record2); + assertThat(result).containsKey(RECORD_ID_1); + assertThat(result).containsKey(RECORD_ID_2); + } + + @Test + public void isEmpty_WhenPartitionHasNoRecords_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + + // Act & Assert + assertThat(partition.isEmpty()).isTrue(); + } + + @Test + public void isEmpty_WhenPartitionHasRecords_ShouldReturnFalse() { + // Arrange + Map records = new HashMap<>(); + records.put(RECORD_ID_1, createRecord(RECORD_ID_1, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + + // Act & Assert + assertThat(partition.isEmpty()).isFalse(); + } + + @Test + public void applyPut_WithoutConditionAndRecordDoesNotExist_ShouldInsertRecord() throws Exception { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + Put put = createPut(INT_VALUE_1); + + // Act + partition.applyPut(put, metadata); + + // Assert + Map records = partition.getRecords(); + assertThat(records).hasSize(1); + ObjectStorageRecord record = records.values().iterator().next(); + assertThat(record.getValues()).containsEntry(COLUMN_NAME_1, INT_VALUE_1); + assertThat(record.getValues()).containsEntry(COLUMN_NAME_2, 
INT_VALUE_2); + } + + @Test + public void applyPut_WithoutConditionAndRecordExists_ShouldUpdateRecord() throws Exception { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Put put = createPut(INT_VALUE_2); + + // Act + partition.applyPut(put, metadata); + + // Assert + Map resultRecords = partition.getRecords(); + assertThat(resultRecords).hasSize(1); + ObjectStorageRecord record = resultRecords.values().iterator().next(); + assertThat(record.getValues()).containsEntry(COLUMN_NAME_1, INT_VALUE_2); + assertThat(record.getValues()).containsEntry(COLUMN_NAME_2, INT_VALUE_2); + } + + @Test + public void applyPut_WithPutIfNotExistsAndRecordDoesNotExist_ShouldInsertRecord() + throws Exception { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + Put put = + Put.newBuilder(createPut(INT_VALUE_1)).condition(ConditionBuilder.putIfNotExists()).build(); + + // Act + partition.applyPut(put, metadata); + + // Assert + assertThat(partition.getRecords()).hasSize(1); + } + + @Test + public void applyPut_WithPutIfNotExistsAndRecordExists_ShouldThrowNoMutationException() { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Put put = + Put.newBuilder(createPut(INT_VALUE_1)).condition(ConditionBuilder.putIfNotExists()).build(); + + // Act & Assert + assertThatThrownBy(() -> partition.applyPut(put, metadata)) + .isInstanceOf(NoMutationException.class); + } + + @Test + public void applyPut_WithPutIfExistsAndRecordExists_ShouldUpdateRecord() throws 
Exception { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Put put = + Put.newBuilder(createPut(INT_VALUE_2)).condition(ConditionBuilder.putIfExists()).build(); + + // Act + partition.applyPut(put, metadata); + + // Assert + Map resultRecords = partition.getRecords(); + assertThat(resultRecords).hasSize(1); + assertThat(resultRecords.values().iterator().next().getValues()) + .containsEntry(COLUMN_NAME_1, INT_VALUE_2); + } + + @Test + public void applyPut_WithPutIfExistsAndRecordDoesNotExist_ShouldThrowNoMutationException() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + Put put = + Put.newBuilder(createPut(INT_VALUE_1)).condition(ConditionBuilder.putIfExists()).build(); + + // Act & Assert + assertThatThrownBy(() -> partition.applyPut(put, metadata)) + .isInstanceOf(NoMutationException.class); + } + + @Test + public void applyPut_WithPutIfAndConditionMatches_ShouldUpdateRecord() throws Exception { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Put put = + Put.newBuilder(createPut(INT_VALUE_2)) + .condition( + ConditionBuilder.putIf( + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_1)) + .build()) + .build(); + + // Act + partition.applyPut(put, metadata); + + // Assert + Map resultRecords = partition.getRecords(); + assertThat(resultRecords).hasSize(1); + assertThat(resultRecords.values().iterator().next().getValues()) + .containsEntry(COLUMN_NAME_1, INT_VALUE_2); + } + + @Test + public void 
applyPut_WithPutIfAndConditionDoesNotMatch_ShouldThrowNoMutationException() { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Put put = + Put.newBuilder(createPut(INT_VALUE_2)) + .condition( + ConditionBuilder.putIf( + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_2)) + .build()) + .build(); + + // Act & Assert + assertThatThrownBy(() -> partition.applyPut(put, metadata)) + .isInstanceOf(NoMutationException.class); + } + + @Test + public void applyPut_WithPutIfAndRecordDoesNotExist_ShouldThrowNoMutationException() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + Put put = + Put.newBuilder(createPut(INT_VALUE_1)) + .condition( + ConditionBuilder.putIf( + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_1)) + .build()) + .build(); + + // Act & Assert + assertThatThrownBy(() -> partition.applyPut(put, metadata)) + .isInstanceOf(NoMutationException.class); + } + + @Test + public void applyDelete_WithoutConditionAndRecordExists_ShouldRemoveRecord() throws Exception { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Delete delete = createDelete(); + + // Act + partition.applyDelete(delete, metadata); + + // Assert + assertThat(partition.isEmpty()).isTrue(); + } + + @Test + public void applyDelete_WithoutConditionAndRecordDoesNotExist_ShouldDoNothing() throws Exception { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + Delete delete = createDelete(); 
+ + // Act + partition.applyDelete(delete, metadata); + + // Assert + assertThat(partition.isEmpty()).isTrue(); + } + + @Test + public void applyDelete_WithDeleteIfExistsAndRecordExists_ShouldRemoveRecord() throws Exception { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Delete delete = + Delete.newBuilder(createDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + + // Act + partition.applyDelete(delete, metadata); + + // Assert + assertThat(partition.isEmpty()).isTrue(); + } + + @Test + public void applyDelete_WithDeleteIfExistsAndRecordDoesNotExist_ShouldThrowNoMutationException() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + Delete delete = + Delete.newBuilder(createDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + + // Act & Assert + assertThatThrownBy(() -> partition.applyDelete(delete, metadata)) + .isInstanceOf(NoMutationException.class); + } + + @Test + public void applyDelete_WithDeleteIfAndConditionMatches_ShouldRemoveRecord() throws Exception { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Delete delete = + Delete.newBuilder(createDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_1)) + .build()) + .build(); + + // Act + partition.applyDelete(delete, metadata); + + // Assert + assertThat(partition.isEmpty()).isTrue(); + } + + @Test + public void 
applyDelete_WithDeleteIfAndConditionDoesNotMatch_ShouldThrowNoMutationException() { + // Arrange + Map records = new HashMap<>(); + String recordId = + PARTITION_KEY_VALUE + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + CLUSTERING_KEY_VALUE; + records.put(recordId, createRecord(recordId, INT_VALUE_1)); + ObjectStoragePartition partition = createObjectStoragePartition(records); + Delete delete = + Delete.newBuilder(createDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_2)) + .build()) + .build(); + + // Act & Assert + assertThatThrownBy(() -> partition.applyDelete(delete, metadata)) + .isInstanceOf(NoMutationException.class); + } + + @Test + public void applyDelete_WithDeleteIfAndRecordDoesNotExist_ShouldThrowNoMutationException() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + Delete delete = + Delete.newBuilder(createDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_1)) + .build()) + .build(); + + // Act & Assert + assertThatThrownBy(() -> partition.applyDelete(delete, metadata)) + .isInstanceOf(NoMutationException.class); + } + + private ObjectStoragePartition createObjectStoragePartition( + Map records) { + return new ObjectStoragePartition(records); + } + + private ObjectStorageRecord createRecord(String recordId, int value) { + Map values = new HashMap<>(); + values.put(COLUMN_NAME_1, value); + values.put(COLUMN_NAME_2, INT_VALUE_2); + return ObjectStorageRecord.newBuilder() + .id(recordId) + .partitionKey(Collections.singletonMap(PARTITION_KEY_NAME, PARTITION_KEY_VALUE)) + .clusteringKey(Collections.singletonMap(CLUSTERING_KEY_NAME, CLUSTERING_KEY_VALUE)) + .values(values) + .build(); + } + + private Put createPut(int value1) { + return Put.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofText(PARTITION_KEY_NAME, PARTITION_KEY_VALUE)) + 
.clusteringKey( + Key.ofText(CLUSTERING_KEY_NAME, ObjectStoragePartitionTest.CLUSTERING_KEY_VALUE)) + .intValue(COLUMN_NAME_1, value1) + .intValue(COLUMN_NAME_2, ObjectStoragePartitionTest.INT_VALUE_2) + .build(); + } + + private Delete createDelete() { + return Delete.newBuilder() + .namespace(NAMESPACE) + .table(TABLE) + .partitionKey(Key.ofText(PARTITION_KEY_NAME, PARTITION_KEY_VALUE)) + .clusteringKey( + Key.ofText(CLUSTERING_KEY_NAME, ObjectStoragePartitionTest.CLUSTERING_KEY_VALUE)) + .build(); + } + + @Test + public void areConditionsMet_WithEqConditionAndSameValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithEqConditionAndDifferentValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_2); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void areConditionsMet_WithEqConditionAndNullValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTestWithNull(); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(INT_VALUE_1); + + // Act + boolean result = + 
partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void areConditionsMet_WithNeConditionAndDifferentValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isNotEqualToInt(INT_VALUE_2); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithNeConditionAndSameValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isNotEqualToInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void areConditionsMet_WithGtConditionAndGreaterValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_2); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithGtConditionAndSameValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = 
createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void areConditionsMet_WithGteConditionAndGreaterValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_2); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanOrEqualToInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithGteConditionAndSameValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanOrEqualToInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithGteConditionAndSmallerValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanOrEqualToInt(INT_VALUE_2); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void 
areConditionsMet_WithLtConditionAndSmallerValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanInt(INT_VALUE_2); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithLtConditionAndSameValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void areConditionsMet_WithLteConditionAndSmallerValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanOrEqualToInt(INT_VALUE_2); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithLteConditionAndSameValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanOrEqualToInt(INT_VALUE_1); + + // Act + boolean result = + 
partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithLteConditionAndGreaterValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_2); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanOrEqualToInt(INT_VALUE_1); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void areConditionsMet_WithIsNullConditionAndNullValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTestWithNull(); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNullInt(); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithIsNullConditionAndNonNullValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNullInt(); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + @Test + public void areConditionsMet_WithIsNotNullConditionAndNonNullValue_ShouldReturnTrue() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTest(INT_VALUE_1); + 
ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNotNullInt(); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isTrue(); + } + + @Test + public void areConditionsMet_WithIsNotNullConditionAndNullValue_ShouldReturnFalse() { + // Arrange + ObjectStoragePartition partition = createObjectStoragePartition(new HashMap<>()); + ObjectStorageRecord record = createRecordForConditionTestWithNull(); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNotNullInt(); + + // Act + boolean result = + partition.areConditionsMet(record, Collections.singletonList(condition), metadata); + + // Assert + assertThat(result).isFalse(); + } + + private ObjectStorageRecord createRecordForConditionTest(int value) { + return ObjectStorageRecord.newBuilder() + .id(RECORD_ID_1) + .partitionKey(new HashMap<>()) + .clusteringKey(new HashMap<>()) + .values(Collections.singletonMap(COLUMN_NAME_1, value)) + .build(); + } + + private ObjectStorageRecord createRecordForConditionTestWithNull() { + return ObjectStorageRecord.newBuilder() + .id(RECORD_ID_1) + .partitionKey(new HashMap<>()) + .clusteringKey(new HashMap<>()) + .values(Collections.singletonMap(COLUMN_NAME_1, null)) + .build(); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageTest.java new file mode 100644 index 0000000000..7d0ed486fb --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageTest.java @@ -0,0 +1,319 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.ConditionBuilder; +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; +import com.scalar.db.common.FilterableScanner; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Optional; +import java.util.Properties; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageTest { + private static final int ANY_LIMIT = 100; + private ObjectStorage objectStorage; + @Mock private ObjectStorageWrapper wrapper; + @Mock private SelectStatementHandler selectStatementHandler; + @Mock private MutateStatementHandler mutateStatementHandler; + @Mock private OperationChecker operationChecker; + @Mock private ScannerImpl scanner; + @Mock private Key partitionKey; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + Properties objectStorageConfigProperties = new Properties(); + objectStorage = + new ObjectStorage( + new DatabaseConfig(objectStorageConfigProperties), + wrapper, + selectStatementHandler, + mutateStatementHandler, + operationChecker); + } + + @Test + public void get_WithoutConjunction_ShouldHandledWithOriginalGet() throws ExecutionException { + // Arrange + Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .projection("col1") + .build(); + when(selectStatementHandler.handle(any(Get.class))).thenReturn(scanner); + + // Act + Optional actual = 
objectStorage.get(get); + + // Assert + assertThat(actual.isPresent()).isFalse(); + ArgumentCaptor captor = ArgumentCaptor.forClass(Get.class); + verify(selectStatementHandler).handle(captor.capture()); + Get actualGet = captor.getValue(); + assertThat(actualGet).isEqualTo(get); + } + + @Test + public void get_WithConjunctionWithoutProjections_ShouldHandledWithoutProjections() + throws ExecutionException { + // Arrange + Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(any(Get.class))).thenReturn(scanner); + + // Act + Optional actual = objectStorage.get(get); + + // Assert + assertThat(actual.isPresent()).isFalse(); + ArgumentCaptor captor = ArgumentCaptor.forClass(Get.class); + verify(selectStatementHandler).handle(captor.capture()); + Get actualGet = captor.getValue(); + assertThat(actualGet.getProjections()).isEmpty(); + } + + @Test + public void get_WithConjunctionAndProjections_ShouldHandledWithExtendedProjections() + throws ExecutionException { + // Arrange + Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .projections("col1") + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(any(Get.class))).thenReturn(scanner); + + // Act + Optional actual = objectStorage.get(get); + + // Assert + assertThat(actual.isPresent()).isFalse(); + ArgumentCaptor captor = ArgumentCaptor.forClass(Get.class); + verify(selectStatementHandler).handle(captor.capture()); + Get actualGet = captor.getValue(); + assertThat(actualGet.getProjections()).containsExactlyInAnyOrder("col1", "col2"); + } + + @Test + public void scan_WithLimitWithoutConjunction_ShouldHandledWithLimit() throws ExecutionException { + // Arrange + Scan scan = Scan.newBuilder().namespace("ns").table("tbl").all().limit(ANY_LIMIT).build(); + 
when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + assertThat(actual).isInstanceOf(ScannerImpl.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getLimit()).isEqualTo(ANY_LIMIT); + } + + @Test + public void scan_WithLimitAndConjunction_ShouldHandledWithoutLimit() throws ExecutionException { + // Arrange + Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .where(mock(ConditionalExpression.class)) + .limit(ANY_LIMIT) + .build(); + when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + assertThat(actual).isInstanceOf(FilterableScanner.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getLimit()).isEqualTo(0); + } + + @Test + public void scan_WithConjunctionWithoutProjections_ShouldHandledWithoutProjections() + throws ExecutionException { + // Arrange + Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + assertThat(actual).isInstanceOf(FilterableScanner.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getProjections()).isEmpty(); + } + + @Test + public void scan_WithConjunctionAndProjections_ShouldHandledWithExtendedProjections() + throws ExecutionException { + // Arrange + Scan scan = + Scan.newBuilder() + .namespace("ns") + 
.table("tbl") + .all() + .projections("col1") + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + assertThat(actual).isInstanceOf(FilterableScanner.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getProjections()).containsExactlyInAnyOrder("col1", "col2"); + } + + @Test + public void + get_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Get get = Get.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(get); + + // Act Assert + assertThatThrownBy(() -> objectStorage.get(get)).isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + scan_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Scan scan = Scan.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(scan); + + // Act Assert + assertThatThrownBy(() -> objectStorage.scan(scan)).isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + put_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Put put = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(put); + + // Act Assert + assertThatThrownBy(() -> objectStorage.put(put)).isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + 
put_MultiplePutsGiven_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Put put1 = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + Put put2 = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + + doThrow(IllegalArgumentException.class).when(operationChecker).check(Arrays.asList(put1, put2)); + + // Act Assert + assertThatThrownBy(() -> objectStorage.put(Arrays.asList(put1, put2))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + delete_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Delete delete = + Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(delete); + + // Act Assert + assertThatThrownBy(() -> objectStorage.delete(delete)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + delete_MultipleDeletesGiven_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Delete delete1 = + Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + Delete delete2 = + Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + + doThrow(IllegalArgumentException.class) + .when(operationChecker) + .check(Arrays.asList(delete1, delete2)); + + // Act Assert + assertThatThrownBy(() -> objectStorage.delete(Arrays.asList(delete1, delete2))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + mutate_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Put put = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + Delete delete = 
+ Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + + doThrow(IllegalArgumentException.class) + .when(operationChecker) + .check(Arrays.asList(put, delete)); + + // Act Assert + assertThatThrownBy(() -> objectStorage.mutate(Arrays.asList(put, delete))) + .isInstanceOf(IllegalArgumentException.class); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ResultInterpreterTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ResultInterpreterTest.java new file mode 100644 index 0000000000..ce72eb9b4c --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ResultInterpreterTest.java @@ -0,0 +1,312 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.Result; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.Test; + +public class ResultInterpreterTest { + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final String ANY_COLUMN_NAME_1 = "col1"; + private static final String ANY_COLUMN_NAME_2 = "col2"; + private static final 
String ANY_COLUMN_NAME_3 = "col3"; + private static final String ANY_COLUMN_NAME_4 = "col4"; + private static final String ANY_COLUMN_NAME_5 = "col5"; + private static final String ANY_COLUMN_NAME_6 = "col6"; + private static final String ANY_COLUMN_NAME_7 = "col7"; + private static final String ANY_COLUMN_NAME_8 = "col8"; + private static final String ANY_COLUMN_NAME_9 = "col9"; + private static final String ANY_COLUMN_NAME_10 = "col10"; + private static final String ANY_COLUMN_NAME_11 = "col11"; + private static final String ANY_ID_1 = "id"; + + private static final TableMetadata TABLE_METADATA = + TableMetadata.newBuilder() + .addColumn(ANY_NAME_1, DataType.TEXT) + .addColumn(ANY_NAME_2, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_1, DataType.BOOLEAN) + .addColumn(ANY_COLUMN_NAME_2, DataType.INT) + .addColumn(ANY_COLUMN_NAME_3, DataType.BIGINT) + .addColumn(ANY_COLUMN_NAME_4, DataType.FLOAT) + .addColumn(ANY_COLUMN_NAME_5, DataType.DOUBLE) + .addColumn(ANY_COLUMN_NAME_6, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_7, DataType.BLOB) + .addColumn(ANY_COLUMN_NAME_8, DataType.DATE) + .addColumn(ANY_COLUMN_NAME_9, DataType.TIME) + .addColumn(ANY_COLUMN_NAME_10, DataType.TIMESTAMP) + .addColumn(ANY_COLUMN_NAME_11, DataType.TIMESTAMPTZ) + .addPartitionKey(ANY_NAME_1) + .addClusteringKey(ANY_NAME_2) + .build(); + + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + + @Test + public void interpret_ShouldReturnWhatsSet() { + // Arrange + Map values = + ImmutableMap.builder() + .put(ANY_COLUMN_NAME_1, true) + .put(ANY_COLUMN_NAME_2, Integer.MAX_VALUE) + .put(ANY_COLUMN_NAME_3, BigIntColumn.MAX_VALUE) + .put(ANY_COLUMN_NAME_4, Float.MAX_VALUE) + .put(ANY_COLUMN_NAME_5, Double.MAX_VALUE) + .put(ANY_COLUMN_NAME_6, "string") + .put( + 
ANY_COLUMN_NAME_7, + Base64.getEncoder().encodeToString("bytes".getBytes(StandardCharsets.UTF_8))) + .put( + ANY_COLUMN_NAME_8, + TimeRelatedColumnEncodingUtils.encode(DateColumn.of(ANY_COLUMN_NAME_8, ANY_DATE))) + .put( + ANY_COLUMN_NAME_9, + TimeRelatedColumnEncodingUtils.encode(TimeColumn.of(ANY_COLUMN_NAME_9, ANY_TIME))) + .put( + ANY_COLUMN_NAME_10, + TimeRelatedColumnEncodingUtils.encode( + TimestampColumn.of(ANY_COLUMN_NAME_10, ANY_TIMESTAMP))) + .put( + ANY_COLUMN_NAME_11, + TimeRelatedColumnEncodingUtils.encode( + TimestampTZColumn.of(ANY_COLUMN_NAME_11, ANY_TIMESTAMPTZ))) + .build(); + ObjectStorageRecord record = + new ObjectStorageRecord( + ANY_ID_1, + ImmutableMap.of(ANY_NAME_1, ANY_TEXT_1), + ImmutableMap.of(ANY_NAME_2, ANY_TEXT_2), + values); + List projections = Collections.emptyList(); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); + + // Act + Result result = interpreter.interpret(record); + + // Assert + assertThat(result.contains(ANY_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_NAME_1)).isFalse(); + assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_NAME_2)).isFalse(); + assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_2)).isFalse(); + assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(Integer.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_3)).isFalse(); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_4)).isFalse(); 
+ assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(Float.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_5)).isFalse(); + assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(Double.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_6)).isFalse(); + assertThat(result.getText(ANY_COLUMN_NAME_6)).isEqualTo("string"); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_7)).isFalse(); + assertThat(result.getBlob(ANY_COLUMN_NAME_7)) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isFalse(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isFalse(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isFalse(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMP); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isFalse(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isEqualTo(ANY_TIMESTAMPTZ); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + 
assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isTrue(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(Integer.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(Float.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(Double.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isEqualTo("string"); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isFalse(); + 
assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); + } + + @Test + public void interpret_ShouldReturnWhatsSetWithNullValues() { + // Arrange + Map values = new HashMap<>(); + values.put(ANY_COLUMN_NAME_1, null); + values.put(ANY_COLUMN_NAME_2, null); + values.put(ANY_COLUMN_NAME_3, null); + values.put(ANY_COLUMN_NAME_4, null); + values.put(ANY_COLUMN_NAME_5, null); + values.put(ANY_COLUMN_NAME_6, null); + values.put(ANY_COLUMN_NAME_7, null); + values.put(ANY_COLUMN_NAME_8, null); + values.put(ANY_COLUMN_NAME_9, null); + values.put(ANY_COLUMN_NAME_10, null); + values.put(ANY_COLUMN_NAME_11, null); + ObjectStorageRecord record = + new ObjectStorageRecord( + ANY_ID_1, + ImmutableMap.of(ANY_NAME_1, ANY_TEXT_1), + ImmutableMap.of(ANY_NAME_2, ANY_TEXT_2), + values); + + List projections = Collections.emptyList(); + + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); + + // Act + Result result = interpreter.interpret(record); + + // Assert + assertThat(result.contains(ANY_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_NAME_1)).isFalse(); + assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_NAME_2)).isFalse(); + 
assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(0); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(0L); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(0.0F); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0D); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(result.getText(ANY_COLUMN_NAME_6)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(result.getBlob(ANY_COLUMN_NAME_7)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isTrue(); + 
assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isNull(); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isFalse(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(0); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(0L); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(0.0F); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(0D); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()).isNull(); + 
assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isNull(); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ScannerImplTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ScannerImplTest.java new file mode 100644 index 0000000000..53b8b42769 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ScannerImplTest.java @@ -0,0 +1,210 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Result; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class ScannerImplTest { + + @Mock ResultInterpreter resultInterpreter; + @Mock ObjectStorageRecord record1; + @Mock ObjectStorageRecord record2; + @Mock ObjectStorageRecord record3; + @Mock ObjectStorageRecord 
record4; + @Mock Result result1; + @Mock Result result2; + @Mock Result result3; + @Mock Result result4; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + when(resultInterpreter.interpret(record1)).thenReturn(result1); + when(resultInterpreter.interpret(record2)).thenReturn(result2); + when(resultInterpreter.interpret(record3)).thenReturn(result3); + when(resultInterpreter.interpret(record4)).thenReturn(result4); + } + + @Test + public void one_WithSingleRecord_ShouldContainOnlyOneResult() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.singletonList(record1)); + + // Act + Optional actualResult1 = scanner.one(); + Optional emptyResult = scanner.one(); + + // Assert + assertThat(actualResult1).contains(result1); + assertThat(emptyResult).isEmpty(); + } + + @Test + public void all_WithSingleRecord_ShouldContainOnlyOneResult() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.singletonList(record1)); + + // Act + List actualResults = scanner.all(); + List emptyResults = scanner.all(); + + // Assert + assertThat(actualResults).containsExactly(result1); + assertThat(emptyResults).isEmpty(); + } + + @Test + public void all_WithMultipleRecords_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScanner(Arrays.asList(record1, record2, record3, record4)); + + // Act + List actualResults = scanner.all(); + + // Assert + assertThat(actualResults).containsExactly(result1, result2, result3, result4); + } + + @Test + public void one_WithMultipleRecords_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScanner(Arrays.asList(record1, record2, record3, record4)); + + // Act + Optional actualResult1 = scanner.one(); + Optional actualResult2 = scanner.one(); + Optional actualResult3 = scanner.one(); + Optional actualResult4 = scanner.one(); + Optional actualResult5 = scanner.one(); + + // Assert + assertThat(actualResult1).contains(result1); + 
assertThat(actualResult2).contains(result2); + assertThat(actualResult3).contains(result3); + assertThat(actualResult4).contains(result4); + assertThat(actualResult5).isEmpty(); + } + + @Test + public void oneAndAll_WithMultipleRecords_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScanner(Arrays.asList(record1, record2, record3, record4)); + + // Act + Optional oneResult = scanner.one(); + List remainingResults = scanner.all(); + Optional emptyResultForOne = scanner.one(); + List emptyResultForAll = scanner.all(); + + // Assert + assertThat(oneResult).contains(result1); + assertThat(remainingResults).containsExactly(result2, result3, result4); + assertThat(emptyResultForOne).isEmpty(); + assertThat(emptyResultForAll).isEmpty(); + } + + @Test + public void one_WithNoRecord_ShouldReturnEmpty() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.emptyList()); + + // Act + Optional oneResult = scanner.one(); + + // Assert + assertThat(oneResult).isEmpty(); + } + + @Test + public void all_WithNoRecord_ShouldReturnEmpty() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.emptyList()); + + // Act + List allResults = scanner.all(); + + // Assert + assertThat(allResults).isEmpty(); + } + + @Test + public void one_WithRecordCountLimit_ShouldReturnLimitedResults() { + // Arrange + ScannerImpl scanner = + buildScannerWithLimit(Arrays.asList(record1, record2, record3, record4), 2); + + // Act + Optional actualResult1 = scanner.one(); + Optional actualResult2 = scanner.one(); + Optional actualResult3 = scanner.one(); + + // Assert + assertThat(actualResult1).contains(result1); + assertThat(actualResult2).contains(result2); + assertThat(actualResult3).isEmpty(); + } + + @Test + public void all_WithRecordCountLimit_ShouldReturnLimitedResults() { + // Arrange + ScannerImpl scanner = + buildScannerWithLimit(Arrays.asList(record1, record2, record3, record4), 2); + + // Act + List actualResults = scanner.all(); + + // Assert + 
assertThat(actualResults).containsExactly(result1, result2); + } + + @Test + public void oneAndAll_WithRecordCountLimit_ShouldReturnLimitedResults() { + // Arrange + ScannerImpl scanner = + buildScannerWithLimit(Arrays.asList(record1, record2, record3, record4), 3); + + // Act + Optional oneResult = scanner.one(); + List remainingResults = scanner.all(); + + // Assert + assertThat(oneResult).contains(result1); + assertThat(remainingResults).containsExactly(result2, result3); + } + + @Test + public void all_WithZeroRecordCountLimit_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScannerWithLimit(Arrays.asList(record1, record2, record3), 0); + + // Act + List actualResults = scanner.all(); + + // Assert + assertThat(actualResults).containsExactly(result1, result2, result3); + } + + private ScannerImpl buildScanner(List records) { + return buildScannerWithLimit(records, 0); + } + + private ScannerImpl buildScannerWithLimit(List records, int limit) { + List recordList = new ArrayList<>(records); + Iterator iterator = recordList.iterator(); + return new ScannerImpl(iterator, resultInterpreter, limit); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/SelectStatementHandlerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/SelectStatementHandlerTest.java new file mode 100644 index 0000000000..309d3bf803 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/SelectStatementHandlerTest.java @@ -0,0 +1,443 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Get; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Scan; +import 
com.scalar.db.api.Scanner; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class SelectStatementHandlerTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final String ANY_TEXT_3 = "text3"; + + private SelectStatementHandler handler; + @Mock private ObjectStorageWrapper wrapper; + @Mock private TableMetadataManager metadataManager; + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + handler = new SelectStatementHandler(wrapper, metadataManager); + + when(metadataManager.getTableMetadata(any(Operation.class))).thenReturn(metadata); + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + when(metadata.getSecondaryIndexNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_3))); + when(metadata.getClusteringOrder(ANY_NAME_2)).thenReturn(Scan.Ordering.Order.ASC); + 
when(metadata.getColumnDataType(anyString())).thenReturn(DataType.TEXT); + } + + private Get prepareGet() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + private Scan prepareScan() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + } + + private Scan prepareScanAll() { + return Scan.newBuilder().namespace(ANY_NAMESPACE_NAME).table(ANY_TABLE_NAME).all().build(); + } + + private ObjectStoragePartition createPartitionWithRecord() { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2); + Map values = Collections.singletonMap(ANY_NAME_3, ANY_TEXT_3); + ObjectStoragePartition partition = new ObjectStoragePartition(new HashMap<>()); + addRecordToPartition(partition, partitionKey, clusteringKey, values); + return partition; + } + + private ObjectStorageRecord createRecord( + Map partitionKey, + Map clusteringKey, + Map values) { + String recordId = buildRecordId(partitionKey, clusteringKey); + return ObjectStorageRecord.newBuilder() + .id(recordId) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .values(values) + .build(); + } + + private void addRecordToPartition( + ObjectStoragePartition partition, + Map partitionKey, + Map clusteringKey, + Map values) { + ObjectStorageRecord record = createRecord(partitionKey, clusteringKey, values); + String recordId = buildRecordId(partitionKey, clusteringKey); + partition.putRecord(recordId, record); + } + + private String buildRecordId( + Map partitionKey, Map clusteringKey) { + String partitionKeyValue = (String) partitionKey.get(ANY_NAME_1); + String 
clusteringKeyValue = (String) clusteringKey.get(ANY_NAME_2); + return partitionKeyValue + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + clusteringKeyValue; + } + + @Test + public void handle_GetOperationGiven_ShouldReturnScanner() throws Exception { + // Arrange + Get get = prepareGet(); + ObjectStoragePartition partition = createPartitionWithRecord(); + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(get); + + // Assert + assertThat(scanner).isNotNull(); + verify(wrapper) + .get(ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1)); + } + + @Test + public void handle_GetOperationWhenRecordNotFound_ShouldReturnEmptyScanner() throws Exception { + // Arrange + Get get = prepareGet(); + ObjectStoragePartition partition = new ObjectStoragePartition(new HashMap<>()); + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(get); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).isEmpty(); + } + + @Test + public void handle_GetOperationWhenPartitionNotFound_ShouldReturnEmptyScanner() throws Exception { + // Arrange + Get get = prepareGet(); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act + Scanner scanner = handler.handle(get); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).isEmpty(); + } + + @Test + public void handle_GetOperationWithSecondaryIndex_ShouldThrowUnsupportedOperationException() { + // Arrange + Key indexKey = Key.ofText(ANY_NAME_3, ANY_TEXT_3); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + 
.table(ANY_TABLE_NAME) + .partitionKey(indexKey) + .build(); + + // Act Assert + assertThatThrownBy(() -> handler.handle(get)).isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void handle_GetOperationWhenExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Get get = prepareGet(); + when(wrapper.get(anyString())).thenThrow(new ObjectStorageWrapperException("error")); + + // Act Assert + assertThatThrownBy(() -> handler.handle(get)).isInstanceOf(ExecutionException.class); + } + + @Test + public void handle_ScanOperationGiven_ShouldReturnScanner() throws Exception { + // Arrange + Scan scan = prepareScan(); + ObjectStoragePartition partition = createPartitionWithRecord(); + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + verify(wrapper) + .get(ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1)); + } + + @Test + public void handle_ScanOperationWithSecondaryIndex_ShouldThrowUnsupportedOperationException() { + // Arrange + Key indexKey = Key.ofText(ANY_NAME_3, ANY_TEXT_3); + Scan scan = + Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(indexKey) + .build(); + + // Act Assert + assertThatThrownBy(() -> handler.handle(scan)) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void handle_ScanOperationWhenExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Scan scan = prepareScan(); + when(wrapper.get(anyString())).thenThrow(new ObjectStorageWrapperException("error")); + + // Act Assert + assertThatThrownBy(() -> handler.handle(scan)).isInstanceOf(ExecutionException.class); + } + + @Test + public void 
handle_ScanOperationWithLimit_ShouldReturnLimitedResults() throws Exception { + // Arrange + Scan scan = Scan.newBuilder(prepareScan()).limit(1).build(); + ObjectStoragePartition partition = new ObjectStoragePartition(new HashMap<>()); + + // Create multiple records + for (int i = 0; i < 5; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + addRecordToPartition(partition, partitionKey, clusteringKey, new HashMap<>()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(1); + } + + @Test + public void handle_ScanAllOperationGiven_ShouldReturnScanner() throws Exception { + // Arrange + Scan scanAll = prepareScanAll(); + when(wrapper.getKeys(anyString())) + .thenReturn( + new HashSet<>( + Arrays.asList( + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1), + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_2)))); + + ObjectStoragePartition partition1 = createPartitionWithRecord(); + String serialized1 = Serializer.serialize(partition1); + ObjectStorageWrapperResponse response1 = + new ObjectStorageWrapperResponse(serialized1, "version1"); + + ObjectStoragePartition partition2 = new ObjectStoragePartition(new HashMap<>()); + Map partitionKey2 = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_2); + Map clusteringKey2 = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_3); + addRecordToPartition(partition2, partitionKey2, clusteringKey2, new HashMap<>()); + String serialized2 = Serializer.serialize(partition2); + ObjectStorageWrapperResponse response2 = + new ObjectStorageWrapperResponse(serialized2, 
"version2"); + + when(wrapper.get( + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1))) + .thenReturn(Optional.of(response1)); + when(wrapper.get( + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_2))) + .thenReturn(Optional.of(response2)); + + // Act + Scanner scanner = handler.handle(scanAll); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(2); + } + + @Test + public void handle_ScanAllOperationWithLimit_ShouldReturnLimitedResults() throws Exception { + // Arrange + Scan scanAll = Scan.newBuilder(prepareScanAll()).limit(1).build(); + String objectKey1 = + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1); + String objectKey2 = + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_2); + when(wrapper.getKeys(anyString())) + .thenReturn(new HashSet<>(Arrays.asList(objectKey1, objectKey2))); + + ObjectStoragePartition partition1 = createPartitionWithRecord(); + String serialized1 = Serializer.serialize(partition1); + ObjectStorageWrapperResponse response1 = + new ObjectStorageWrapperResponse(serialized1, "version1"); + + ObjectStoragePartition partition2 = new ObjectStoragePartition(new HashMap<>()); + Map partitionKey2 = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_2); + Map clusteringKey2 = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_3); + addRecordToPartition(partition2, partitionKey2, clusteringKey2, new HashMap<>()); + String serialized2 = Serializer.serialize(partition2); + ObjectStorageWrapperResponse response2 = + new ObjectStorageWrapperResponse(serialized2, "version2"); + + when(wrapper.get(objectKey1)).thenReturn(Optional.of(response1)); + when(wrapper.get(objectKey2)).thenReturn(Optional.of(response2)); + + // Act + Scanner scanner = handler.handle(scanAll); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(1); + } + + @Test + public void 
handle_ScanAllOperationWhenExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Scan scanAll = prepareScanAll(); + when(wrapper.getKeys(anyString())).thenThrow(new ObjectStorageWrapperException("error")); + + // Act Assert + assertThatThrownBy(() -> handler.handle(scanAll)).isInstanceOf(ExecutionException.class); + } + + @Test + public void handle_ScanOperationWithStartClusteringKey_ShouldFilterResults() throws Exception { + // Arrange + Scan scan = + Scan.newBuilder(prepareScan()).start(Key.ofText(ANY_NAME_2, ANY_TEXT_2 + "2")).build(); + ObjectStoragePartition partition = new ObjectStoragePartition(new HashMap<>()); + + // Create multiple records with different clustering keys + for (int i = 0; i < 5; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + addRecordToPartition(partition, partitionKey, clusteringKey, new HashMap<>()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + // Should filter records with clustering key >= "text22" + assertThat(scanner.all()).hasSizeGreaterThanOrEqualTo(1); + } + + @Test + public void handle_ScanOperationWithEndClusteringKey_ShouldFilterResults() throws Exception { + // Arrange + Scan scan = + Scan.newBuilder(prepareScan()).end(Key.ofText(ANY_NAME_2, ANY_TEXT_2 + "2")).build(); + ObjectStoragePartition partition = new ObjectStoragePartition(new HashMap<>()); + + // Create multiple records with different clustering keys + for (int i = 0; i < 5; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + 
addRecordToPartition(partition, partitionKey, clusteringKey, new HashMap<>()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + // Should filter records with clustering key <= "text22" + assertThat(scanner.all()).hasSizeGreaterThanOrEqualTo(1); + } + + @Test + public void handle_ScanOperationWithDescOrdering_ShouldReverseResults() throws Exception { + // Arrange + when(metadata.getClusteringOrder(ANY_NAME_2)).thenReturn(Scan.Ordering.Order.ASC); + Scan scan = Scan.newBuilder(prepareScan()).ordering(Scan.Ordering.desc(ANY_NAME_2)).build(); + ObjectStoragePartition partition = new ObjectStoragePartition(new HashMap<>()); + + // Create multiple records + for (int i = 0; i < 3; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + addRecordToPartition(partition, partitionKey, clusteringKey, new HashMap<>()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(3); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/StreamingRecordIteratorTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/StreamingRecordIteratorTest.java new file mode 100644 index 0000000000..2324033b0f --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/StreamingRecordIteratorTest.java @@ -0,0 +1,287 @@ +package com.scalar.db.storage.objectstorage; + 
+import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class StreamingRecordIteratorTest { + private static final String NAMESPACE = "test_namespace"; + private static final String TABLE = "test_table"; + private static final String PARTITION_KEY_1 = "partition1"; + private static final String PARTITION_KEY_2 = "partition2"; + private static final String PARTITION_KEY_3 = "partition3"; + private static final String VERSION = "version1"; + + @Mock private ObjectStorageWrapper wrapper; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + } + + @Test + public void hasNext_WhenNoPartitionKeys_ShouldReturnFalse() { + // Arrange + List partitionKeys = Collections.emptyList(); + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + boolean hasNext = iterator.hasNext(); + + // Assert + assertThat(hasNext).isFalse(); + } + + @Test + public void next_WhenNoPartitionKeys_ShouldThrowNoSuchElementException() { + // Arrange + List partitionKeys = Collections.emptyList(); + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act & Assert + assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class); + } + + @Test + public void 
hasNext_WhenSinglePartitionWithRecords_ShouldReturnTrue() throws Exception { + // Arrange + List partitionKeys = Collections.singletonList(PARTITION_KEY_1); + Map records = createRecords(2); + setupPartitionWithRecords(PARTITION_KEY_1, records); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + boolean hasNext = iterator.hasNext(); + + // Assert + assertThat(hasNext).isTrue(); + } + + @Test + public void next_WhenSinglePartitionWithRecords_ShouldReturnAllRecords() throws Exception { + // Arrange + List partitionKeys = Collections.singletonList(PARTITION_KEY_1); + Map records = createRecords(2); + setupPartitionWithRecords(PARTITION_KEY_1, records); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + List result = new ArrayList<>(); + while (iterator.hasNext()) { + result.add(iterator.next()); + } + + // Assert + assertThat(result).hasSize(2); + assertThat(result).containsAll(records.values()); + } + + @Test + public void next_WhenMultiplePartitionsWithRecords_ShouldReturnAllRecordsInOrder() + throws Exception { + // Arrange + List partitionKeys = Arrays.asList(PARTITION_KEY_1, PARTITION_KEY_2, PARTITION_KEY_3); + Map records1 = createRecords(2, "record1_"); + Map records2 = createRecords(3, "record2_"); + Map records3 = createRecords(1, "record3_"); + + setupPartitionWithRecords(PARTITION_KEY_1, records1); + setupPartitionWithRecords(PARTITION_KEY_2, records2); + setupPartitionWithRecords(PARTITION_KEY_3, records3); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + List result = new ArrayList<>(); + while (iterator.hasNext()) { + result.add(iterator.next()); + } + + // Assert + assertThat(result).hasSize(6); + assertThat(result.subList(0, 2)).containsAll(records1.values()); + assertThat(result.subList(2, 5)).containsAll(records2.values()); + 
assertThat(result.subList(5, 6)).containsAll(records3.values()); + } + + @Test + public void next_WhenPartitionDoesNotExist_ShouldSkipPartition() throws Exception { + // Arrange + List partitionKeys = Arrays.asList(PARTITION_KEY_1, PARTITION_KEY_2); + Map records = createRecords(2); + + setupNonExistentPartition(); + setupPartitionWithRecords(PARTITION_KEY_2, records); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + List result = new ArrayList<>(); + while (iterator.hasNext()) { + result.add(iterator.next()); + } + + // Assert + assertThat(result).hasSize(2); + assertThat(result).containsAll(records.values()); + } + + @Test + public void next_WhenPartitionHasNoRecords_ShouldSkipPartition() throws Exception { + // Arrange + List partitionKeys = Arrays.asList(PARTITION_KEY_1, PARTITION_KEY_2); + Map records = createRecords(2); + + setupPartitionWithRecords(PARTITION_KEY_1, Collections.emptyMap()); + setupPartitionWithRecords(PARTITION_KEY_2, records); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + List result = new ArrayList<>(); + while (iterator.hasNext()) { + result.add(iterator.next()); + } + + // Assert + assertThat(result).hasSize(2); + assertThat(result).containsAll(records.values()); + } + + @Test + public void hasNext_WhenCalledMultipleTimes_ShouldNotLoadPartitionMultipleTimes() + throws Exception { + // Arrange + List partitionKeys = Collections.singletonList(PARTITION_KEY_1); + Map records = createRecords(1); + setupPartitionWithRecords(PARTITION_KEY_1, records); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + boolean hasNext1 = iterator.hasNext(); + boolean hasNext2 = iterator.hasNext(); + boolean hasNext3 = iterator.hasNext(); + + // Assert + assertThat(hasNext1).isTrue(); + assertThat(hasNext2).isTrue(); + 
assertThat(hasNext3).isTrue(); + // wrapper.get should be called only once + verify(wrapper, times(1)).get(anyString()); + } + + @Test + public void next_WhenWrapperThrowsException_ShouldThrowRuntimeException() throws Exception { + // Arrange + List partitionKeys = Collections.singletonList(PARTITION_KEY_1); + ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); + when(wrapper.get(anyString())).thenThrow(exception); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act & Assert + assertThatThrownBy(iterator::hasNext).isInstanceOf(RuntimeException.class); + } + + @Test + public void hasNext_AfterConsumingAllRecords_ShouldReturnFalse() throws Exception { + // Arrange + List partitionKeys = Collections.singletonList(PARTITION_KEY_1); + Map records = createRecords(2); + setupPartitionWithRecords(PARTITION_KEY_1, records); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + iterator.next(); + iterator.next(); + boolean hasNext = iterator.hasNext(); + + // Assert + assertThat(hasNext).isFalse(); + } + + @Test + public void next_AfterConsumingAllRecords_ShouldThrowNoSuchElementException() throws Exception { + // Arrange + List partitionKeys = Collections.singletonList(PARTITION_KEY_1); + Map records = createRecords(1); + setupPartitionWithRecords(PARTITION_KEY_1, records); + + StreamingRecordIterator iterator = + new StreamingRecordIterator(wrapper, NAMESPACE, TABLE, partitionKeys); + + // Act + iterator.next(); + + // Assert + assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class); + } + + private Map createRecords(int count) { + return createRecords(count, "record_"); + } + + private Map createRecords(int count, String prefix) { + Map records = new HashMap<>(); + for (int i = 0; i < count; i++) { + String recordId = prefix + i; + ObjectStorageRecord record = + 
ObjectStorageRecord.newBuilder() + .id(recordId) + .partitionKey(Collections.singletonMap("pk", "value")) + .clusteringKey(Collections.singletonMap("ck", "value")) + .values(Collections.singletonMap("col1", i)) + .build(); + records.put(recordId, record); + } + return records; + } + + private void setupPartitionWithRecords( + String partitionKey, Map records) + throws ObjectStorageWrapperException { + ObjectStoragePartition partition = new ObjectStoragePartition(records); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + String objectKey = ObjectStorageUtils.getObjectKey(NAMESPACE, TABLE, partitionKey); + when(wrapper.get(objectKey)).thenReturn(Optional.of(response)); + } + + private void setupNonExistentPartition() throws ObjectStorageWrapperException { + String objectKey = + ObjectStorageUtils.getObjectKey( + NAMESPACE, TABLE, StreamingRecordIteratorTest.PARTITION_KEY_1); + when(wrapper.get(objectKey)).thenReturn(Optional.empty()); + } +} diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageIntegrationTestBase.java index 02033c47df..0e6b21e47f 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedStorageIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedStorageIntegrationTestBase.java @@ -108,6 +108,20 @@ protected String getColumnName6() { return COL_NAME6; } + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(getColumnName1(), DataType.INT) + .addColumn(getColumnName2(), DataType.TEXT) + .addColumn(getColumnName3(), DataType.INT) + .addColumn(getColumnName4(), DataType.INT) + .addColumn(getColumnName5(), DataType.BOOLEAN) + .addColumn(getColumnName6(), DataType.BLOB) + .addPartitionKey(getColumnName1()) + 
.addClusteringKey(getColumnName4()) + .addSecondaryIndex(getColumnName3()) + .build(); + } + protected DistributedStorage getStorage() { return storage; } @@ -119,22 +133,7 @@ protected DistributedStorageAdmin getAdmin() { private void createTable() throws ExecutionException { Map options = getCreationOptions(); admin.createNamespace(namespace, true, options); - admin.createTable( - namespace, - getTableName(), - TableMetadata.newBuilder() - .addColumn(getColumnName1(), DataType.INT) - .addColumn(getColumnName2(), DataType.TEXT) - .addColumn(getColumnName3(), DataType.INT) - .addColumn(getColumnName4(), DataType.INT) - .addColumn(getColumnName5(), DataType.BOOLEAN) - .addColumn(getColumnName6(), DataType.BLOB) - .addPartitionKey(getColumnName1()) - .addClusteringKey(getColumnName4()) - .addSecondaryIndex(getColumnName3()) - .build(), - true, - options); + admin.createTable(namespace, getTableName(), getTableMetadata(), true, options); } protected int getLargeDataSizeInBytes() { diff --git a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java index 4b497b9944..da917eddd9 100644 --- a/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/DistributedTransactionIntegrationTestBase.java @@ -110,7 +110,7 @@ protected String getNamespaceBaseName() { return NAMESPACE_BASE_NAME; } - private void createTables() throws ExecutionException { + protected TableMetadata getTableMetadata() { TableMetadata.Builder tableMetadata = TableMetadata.newBuilder() .addColumn(ACCOUNT_ID, DataType.INT) @@ -132,11 +132,14 @@ private void createTables() throws ExecutionException { if (isTimestampTypeSupported()) { tableMetadata.addColumn(TIMESTAMP_COL, DataType.TIMESTAMP); } + return tableMetadata.build(); + } + private void createTables() throws 
ExecutionException { Map options = getCreationOptions(); admin.createCoordinatorTables(true, options); admin.createNamespace(namespace, true, options); - admin.createTable(namespace, TABLE, tableMetadata.build(), true, options); + admin.createTable(namespace, TABLE, getTableMetadata(), true, options); } protected Map getCreationOptions() { diff --git a/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java index d19d4596b9..7a0a45bf54 100644 --- a/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/api/TwoPhaseCommitTransactionIntegrationTestBase.java @@ -122,7 +122,7 @@ protected String getNamespaceBaseName() { return NAMESPACE_BASE_NAME; } - private void createTables() throws ExecutionException { + protected TableMetadata getTableMetadata() { TableMetadata.Builder tableMetadata = TableMetadata.newBuilder() .addColumn(ACCOUNT_ID, DataType.INT) @@ -144,13 +144,16 @@ private void createTables() throws ExecutionException { if (isTimestampTypeSupported()) { tableMetadata.addColumn(TIMESTAMP_COL, DataType.TIMESTAMP); } + return tableMetadata.build(); + } + private void createTables() throws ExecutionException { Map options = getCreationOptions(); admin1.createCoordinatorTables(true, options); admin1.createNamespace(namespace1, true, options); - admin1.createTable(namespace1, TABLE_1, tableMetadata.build(), true, options); + admin1.createTable(namespace1, TABLE_1, getTableMetadata(), true, options); admin2.createNamespace(namespace2, true, options); - admin2.createTable(namespace2, TABLE_2, tableMetadata.build(), true, options); + admin2.createTable(namespace2, TABLE_2, getTableMetadata(), true, options); } protected Map getCreationOptions() { diff --git 
a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java index a442c78b47..b46bb3a6ee 100644 --- a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java @@ -89,10 +89,10 @@ public abstract class ConsensusCommitSpecificIntegrationTestBase { private static final String NAMESPACE_2 = "int_test_" + TEST_NAME + "2"; private static final String TABLE_1 = "test_table1"; private static final String TABLE_2 = "test_table2"; - private static final String ACCOUNT_ID = "account_id"; - private static final String ACCOUNT_TYPE = "account_type"; - private static final String BALANCE = "balance"; - private static final String SOME_COLUMN = "some_column"; + protected static final String ACCOUNT_ID = "account_id"; + protected static final String ACCOUNT_TYPE = "account_type"; + protected static final String BALANCE = "balance"; + protected static final String SOME_COLUMN = "some_column"; private static final int INITIAL_BALANCE = 1000; private static final int NEW_BALANCE = 2000; private static final int NUM_ACCOUNTS = 4; @@ -148,23 +148,25 @@ protected String getNamespace2() { return NAMESPACE_2; } + protected TableMetadata getTableMetadata() { + return TableMetadata.newBuilder() + .addColumn(ACCOUNT_ID, DataType.INT) + .addColumn(ACCOUNT_TYPE, DataType.INT) + .addColumn(BALANCE, DataType.INT) + .addColumn(SOME_COLUMN, DataType.TEXT) + .addPartitionKey(ACCOUNT_ID) + .addClusteringKey(ACCOUNT_TYPE) + .addSecondaryIndex(BALANCE) + .build(); + } + private void createTables() throws ExecutionException { Map options = getCreationOptions(); consensusCommitAdmin.createCoordinatorTables(true, options); - 
TableMetadata tableMetadata = - TableMetadata.newBuilder() - .addColumn(ACCOUNT_ID, DataType.INT) - .addColumn(ACCOUNT_TYPE, DataType.INT) - .addColumn(BALANCE, DataType.INT) - .addColumn(SOME_COLUMN, DataType.TEXT) - .addPartitionKey(ACCOUNT_ID) - .addClusteringKey(ACCOUNT_TYPE) - .addSecondaryIndex(BALANCE) - .build(); consensusCommitAdmin.createNamespace(namespace1, true, options); - consensusCommitAdmin.createTable(namespace1, TABLE_1, tableMetadata, true, options); + consensusCommitAdmin.createTable(namespace1, TABLE_1, getTableMetadata(), true, options); consensusCommitAdmin.createNamespace(namespace2, true, options); - consensusCommitAdmin.createTable(namespace2, TABLE_2, tableMetadata, true, options); + consensusCommitAdmin.createTable(namespace2, TABLE_2, getTableMetadata(), true, options); } protected Map getCreationOptions() {