diff --git a/.github/workflows/build_linux_arm64_wheels-gh.yml b/.github/workflows/build_linux_arm64_wheels-gh.yml index 5fd5c76fd83..952ef01a151 100644 --- a/.github/workflows/build_linux_arm64_wheels-gh.yml +++ b/.github/workflows/build_linux_arm64_wheels-gh.yml @@ -8,16 +8,16 @@ on: required: true release: types: [created] - push: - branches: - - main - paths-ignore: - - '**/*.md' - pull_request: - branches: - - main - paths-ignore: - - '**/*.md' + # push: + # branches: + # - main + # paths-ignore: + # - '**/*.md' + # pull_request: + # branches: + # - main + # paths-ignore: + # - '**/*.md' jobs: @@ -137,7 +137,7 @@ jobs: which clang++-19 clang++-19 --version sudo apt-get install -y make cmake ccache ninja-build yasm gawk wget - # Install WebAssembly linker (wasm-ld) + # Install WebAssembly linker (wasm-ld) sudo apt-get install -y lld-19 # Create symlink for wasm-ld if ! command -v wasm-ld &> /dev/null; then diff --git a/.github/workflows/build_macos_arm64_wheels.yml b/.github/workflows/build_macos_arm64_wheels.yml index 4c7b24f1ac2..f09c06edfb5 100644 --- a/.github/workflows/build_macos_arm64_wheels.yml +++ b/.github/workflows/build_macos_arm64_wheels.yml @@ -8,16 +8,16 @@ on: required: true release: types: [created] - push: - branches: - - main - paths-ignore: - - '**/*.md' - pull_request: - branches: - - main - paths-ignore: - - '**/*.md' + # push: + # branches: + # - main + # paths-ignore: + # - '**/*.md' + # pull_request: + # branches: + # - main + # paths-ignore: + # - '**/*.md' jobs: build_universal_wheel: diff --git a/.github/workflows/build_macos_x86_wheels.yml b/.github/workflows/build_macos_x86_wheels.yml index 22d597a0f6e..47ae8e490c0 100644 --- a/.github/workflows/build_macos_x86_wheels.yml +++ b/.github/workflows/build_macos_x86_wheels.yml @@ -8,16 +8,16 @@ on: required: true release: types: [created] - push: - branches: - - main - paths-ignore: - - '**/*.md' - pull_request: - branches: - - main - paths-ignore: - - '**/*.md' + # push: + # branches: + # - main + # paths-ignore: + # - '**/*.md' + # pull_request: + # branches: + # - main + # paths-ignore: + # - '**/*.md' jobs: build_universal_wheel: diff --git a/.github/workflows/build_musllinux_arm64_wheels.yml b/.github/workflows/build_musllinux_arm64_wheels.yml index d7c2819ac0a..0cfd5d2a3a1 100644 --- a/.github/workflows/build_musllinux_arm64_wheels.yml +++ b/.github/workflows/build_musllinux_arm64_wheels.yml @@ -8,16 +8,16 @@ on: required: true release: types: [created] - push: - branches: - - main - paths-ignore: - - '**/*.md' - pull_request: - branches: - - main - paths-ignore: - - '**/*.md' + # push: + # branches: + # - main + # paths-ignore: + # - '**/*.md' + # pull_request: + # branches: + # - main + # paths-ignore: + # - '**/*.md' jobs: build_musllinux_wheels: diff --git a/.github/workflows/build_musllinux_x86_wheels.yml b/.github/workflows/build_musllinux_x86_wheels.yml index 715cc816bf6..bf077181b6b 100644 --- a/.github/workflows/build_musllinux_x86_wheels.yml +++ b/.github/workflows/build_musllinux_x86_wheels.yml @@ -8,16 +8,16 @@ on: required: true release: types: [created] - push: - branches: - - main - paths-ignore: - - '**/*.md' - pull_request: - branches: - - main - paths-ignore: - - '**/*.md' + # push: + # branches: + # - main + # paths-ignore: + # - '**/*.md' + # pull_request: + # branches: + # - main + # paths-ignore: + # - '**/*.md' jobs: diff --git a/chdb/__init__.py b/chdb/__init__.py index 0094323643e..6d4d516a3e7 100644 --- a/chdb/__init__.py +++ b/chdb/__init__.py @@ -38,9 +38,8 @@ class 
ChdbError(Exception): """ -_arrow_format = set({"dataframe", "arrowtable"}) +_arrow_format = set({"arrowtable"}) _process_result_format_funs = { - "dataframe": lambda x: to_df(x), "arrowtable": lambda x: to_arrowTable(x), } @@ -108,33 +107,6 @@ def to_arrowTable(res): return pa.RecordBatchFileReader(memview.view()).read_all() -# return pandas dataframe -def to_df(r): - """Convert query result to pandas DataFrame. - - Converts a chDB query result to a pandas DataFrame by first converting to - PyArrow Table and then to pandas using multi-threading for better performance. - - Args: - r: chDB query result object containing binary Arrow data - - Returns: - pd.DataFrame: pandas DataFrame containing the query results - - Raises: - ImportError: If pyarrow or pandas are not installed - - Example: - >>> result = chdb.query("SELECT 1 as id, 'hello' as msg", "Arrow") - >>> df = chdb.to_df(result) - >>> print(df) - id msg - 0 1 hello - """ - t = to_arrowTable(r) - return t.to_pandas(use_threads=True) - - # global connection lock, for multi-threading use of legacy chdb.query() g_conn_lock = threading.Lock() @@ -222,6 +194,11 @@ def query(sql, output_format="CSV", path="", udf_path=""): with g_conn_lock: conn = _chdb.connect(conn_str) res = conn.query(sql, output_format) + + if lower_output_format == "dataframe": + conn.close() + return res + if res.has_error(): conn.close() raise ChdbError(res.error_message()) diff --git a/chdb/state/sqlitelike.py b/chdb/state/sqlitelike.py index 3b138e0adbb..afaf2c5cbdd 100644 --- a/chdb/state/sqlitelike.py +++ b/chdb/state/sqlitelike.py @@ -10,9 +10,8 @@ raise ImportError("Failed to import pyarrow") from None -_arrow_format = set({"dataframe", "arrowtable"}) +_arrow_format = set({"arrowtable"}) _process_result_format_funs = { - "dataframe": lambda x: to_df(x), "arrowtable": lambda x: to_arrowTable(x), } @@ -67,47 +66,6 @@ def to_arrowTable(res): return pa.RecordBatchFileReader(memview.view()).read_all() -# return pandas dataframe -def to_df(r): - """Convert query result to Pandas DataFrame. - - This function converts chdb query results to a Pandas DataFrame format - by first converting to PyArrow Table and then to DataFrame. This provides - convenient data analysis capabilities with Pandas API. - - Args: - r: Query result object from chdb containing Arrow format data - - Returns: - pandas.DataFrame: DataFrame containing the query results with - appropriate column names and data types - - Raises: - ImportError: If pyarrow or pandas packages are not installed - - .. note:: - This function uses multi-threading for the Arrow to Pandas conversion - to improve performance on large datasets. - - .. 
seealso::
-        :func:`to_arrowTable` - For PyArrow Table format conversion
-
-    Examples:
-        >>> import chdb
-        >>> result = chdb.query("SELECT 1 as num, 'hello' as text", "Arrow")
-        >>> df = to_df(result)
-        >>> print(df)
-           num   text
-        0    1  hello
-        >>> print(df.dtypes)
-        num      int64
-        text    object
-        dtype: object
-    """
-    t = to_arrowTable(r)
-    return t.to_pandas(use_threads=True)
-
-
 class StreamingResult:
     def __init__(self, c_result, conn, result_func, supports_record_batch):
         self._result = c_result
diff --git a/programs/local/CMakeLists.txt b/programs/local/CMakeLists.txt
index f84770e6392..2cade0b59be 100644
--- a/programs/local/CMakeLists.txt
+++ b/programs/local/CMakeLists.txt
@@ -25,13 +25,19 @@ endif()
 if (USE_PYTHON)
     set (CHDB_SOURCES
         chdb.cpp
+        ChunkCollectorOutputFormat.cpp
+        FieldToPython.cpp
         FormatHelper.cpp
         ListScan.cpp
         LocalChdb.cpp
         LocalServer.cpp
+        NumpyArray.cpp
+        NumpyNestedTypes.cpp
         NumpyType.cpp
+        ObjectToPython.cpp
         PandasAnalyzer.cpp
         PandasDataFrame.cpp
+        PandasDataFrameBuilder.cpp
         PandasScan.cpp
         PyArrowStreamFactory.cpp
         PyArrowTable.cpp
diff --git a/programs/local/ChunkCollectorOutputFormat.cpp b/programs/local/ChunkCollectorOutputFormat.cpp
new file mode 100644
index 00000000000..8faa54a7ef4
--- /dev/null
+++ b/programs/local/ChunkCollectorOutputFormat.cpp
@@ -0,0 +1,91 @@
+#include "ChunkCollectorOutputFormat.h"
+#include "PandasDataFrameBuilder.h"
+
+#include
+#include
+#include
+#include
+
+using namespace DB;
+
+namespace CHDB
+{
+
+NullWriteBuffer ChunkCollectorOutputFormat::out;
+
+ChunkCollectorOutputFormat::ChunkCollectorOutputFormat(
+    SharedHeader shared_header,
+    PandasDataFrameBuilder & builder)
+    : IOutputFormat(shared_header, out)
+    , dataframe_builder(builder)
+{}
+
+void ChunkCollectorOutputFormat::consume(Chunk chunk)
+{
+    chunks.emplace_back(std::move(chunk));
+}
+
+void ChunkCollectorOutputFormat::consumeTotals(Chunk totals)
+{
+    chunks.emplace_back(std::move(totals));
+}
+
+void ChunkCollectorOutputFormat::consumeExtremes(Chunk extremes)
+{
+    chunks.emplace_back(std::move(extremes));
+}
+
+void ChunkCollectorOutputFormat::finalizeImpl()
+{
+    // Add all collected chunks to the builder
+    for (const auto & chunk : chunks)
+    {
+        dataframe_builder.addChunk(chunk);
+    }
+
+    // Finalize the DataFrame generation
+    dataframe_builder.finalize();
+
+    chunks.clear();
+}
+
+/// Global dataframe builder
+static std::shared_ptr<PandasDataFrameBuilder> g_dataframe_builder = nullptr;
+
+PandasDataFrameBuilder & getGlobalDataFrameBuilder()
+{
+    return *g_dataframe_builder;
+}
+
+void setGlobalDataFrameBuilder(std::shared_ptr<PandasDataFrameBuilder> builder)
+{
+    g_dataframe_builder = builder;
+}
+
+void resetGlobalDataFrameBuilder()
+{
+    if (g_dataframe_builder)
+    {
+        py::gil_scoped_acquire acquire;
+        g_dataframe_builder.reset();
+    }
+}
+
+/// create ChunkCollectorOutputFormat for use with function pointer
+std::shared_ptr<IOutputFormat> createDataFrameOutputFormat(SharedHeader header)
+{
+    /// Create a PandasDataFrameBuilder and set it globally
+    auto dataframe_builder = std::make_shared<PandasDataFrameBuilder>(*header);
+    setGlobalDataFrameBuilder(dataframe_builder);
+
+    /// Create and return the format with the builder
+    return std::make_shared<ChunkCollectorOutputFormat>(header, getGlobalDataFrameBuilder());
+}
+
+/// Registration function to be called during initialization
+void registerDataFrameOutputFormat()
+{
+    ClientBase::setDataFrameFormatCreator(&createDataFrameOutputFormat);
+}
+
+}
diff --git a/programs/local/ChunkCollectorOutputFormat.h b/programs/local/ChunkCollectorOutputFormat.h
new file mode 100644
index 00000000000..7dc2fe26127
--- /dev/null
+++ 
b/programs/local/ChunkCollectorOutputFormat.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+class NullWriteBuffer;
+}
+
+namespace CHDB
+{
+
+class PandasDataFrameBuilder;
+
+/// OutputFormat that collects all chunks into memory for further processing
+/// Does not write to WriteBuffer, instead accumulates data for conversion to pandas DataFrame objects
+class ChunkCollectorOutputFormat : public DB::IOutputFormat
+{
+public:
+    ChunkCollectorOutputFormat(DB::SharedHeader shared_header, PandasDataFrameBuilder & builder);
+
+    String getName() const override { return "ChunkCollectorOutputFormat"; }
+
+    void onCancel() noexcept override
+    {
+        chunks.clear();
+    }
+
+protected:
+    void consume(DB::Chunk chunk) override;
+
+    void consumeTotals(DB::Chunk totals) override;
+
+    void consumeExtremes(DB::Chunk extremes) override;
+
+    void finalizeImpl() override;
+
+private:
+    std::vector<DB::Chunk> chunks;
+
+    PandasDataFrameBuilder & dataframe_builder;
+
+    static DB::NullWriteBuffer out;
+};
+
+/// Registration function to be called during initialization
+void registerDataFrameOutputFormat();
+
+/// Get the global dataframe builder
+PandasDataFrameBuilder & getGlobalDataFrameBuilder();
+
+/// Set the global dataframe builder
+void setGlobalDataFrameBuilder(std::shared_ptr<PandasDataFrameBuilder> builder);
+
+/// Reset the global dataframe builder
+void resetGlobalDataFrameBuilder();
+
+}
diff --git a/programs/local/FieldToPython.cpp b/programs/local/FieldToPython.cpp
new file mode 100644
index 00000000000..c04ad4f9ac3
--- /dev/null
+++ b/programs/local/FieldToPython.cpp
@@ -0,0 +1,762 @@
+#include "FieldToPython.h"
+#include "PythonImporter.h"
+#include "ObjectToPython.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+extern const int NOT_IMPLEMENTED;
+extern const int LOGICAL_ERROR;
+}
+
+}
+
+namespace CHDB
+{
+
+using namespace DB;
+
+py::object convertTimeFieldToPython(const Field & field)
+{
+    auto & import_cache = PythonImporter::ImportCache();
+    auto time_seconds = field.safeGet<Int64>();
+
+    if (time_seconds < 0)
+    {
+        return py::str(toString(field));
+    }
+
+    /// Handle time overflow (should be within 24 hours)
+    /// ClickHouse Time range is [-999:59:59, 999:59:59]
+    time_seconds = time_seconds % 86400;
+
+    int hour = static_cast<int>(time_seconds / 3600);
+    int minute = static_cast<int>((time_seconds % 3600) / 60);
+    int second = static_cast<int>(time_seconds % 60);
+    int microsecond = 0;
+
+    try
+    {
+        return import_cache.datetime.time()(hour, minute, second, microsecond);
+    }
+    catch (py::error_already_set &)
+    {
+        return py::str(toString(field));
+    }
+}
+
+py::object convertTime64FieldToPython(const Field & field)
+{
+    auto & import_cache = PythonImporter::ImportCache();
+    auto time64_field = field.safeGet<DecimalField<Time64>>();
+    auto time64_value = time64_field.getValue();
+    Int64 time64_ticks = time64_value.value;
+
+    if (time64_ticks < 0)
+    {
+        return py::str(toString(field));
+    }
+
+    UInt32 scale = time64_field.getScale();
+    Int64 scale_multiplier = DecimalUtils::scaleMultiplier<Int64>(scale);
+
+    /// Convert to seconds and fractional part within a day
+    Int64 total_seconds = time64_ticks / scale_multiplier;
+    Int64 fractional = time64_ticks % scale_multiplier;
+
+    /// Handle time overflow (should be within 24 hours)
+    /// ClickHouse Time range is [-999:59:59, 999:59:59]
+    total_seconds = total_seconds % 86400;
+
+    int hour = static_cast<int>(total_seconds / 3600);
+    int minute = static_cast<int>((total_seconds % 3600) / 60);
+    int second = static_cast<int>(total_seconds % 60);
+    int microsecond = static_cast<int>((fractional * 1000000) / scale_multiplier);
+
+    try
+    {
+        return import_cache.datetime.time()(hour, minute, second, microsecond);
+    }
+    catch (py::error_already_set &)
+    {
+        return py::str(toString(field));
+    }
+}
+
+static bool canTypeBeUsedAsDictKey(const DataTypePtr & type)
+{
+    DataTypePtr actual_type = removeLowCardinalityAndNullable(type);
+
+    switch (actual_type->getTypeId())
+    {
+        case TypeIndex::Nothing:
+        case TypeIndex::Int8:
+        case TypeIndex::UInt8:
+        case TypeIndex::Int16:
+        case TypeIndex::UInt16:
+        case TypeIndex::Int32:
+        case TypeIndex::UInt32:
+        case TypeIndex::Int64:
+        case TypeIndex::UInt64:
+        case TypeIndex::Float32:
+        case TypeIndex::Float64:
+        case TypeIndex::Int128:
+        case TypeIndex::Int256:
+        case TypeIndex::UInt128:
+        case TypeIndex::UInt256:
+        case TypeIndex::BFloat16:
+        case TypeIndex::Date:
+        case TypeIndex::Date32:
+        case TypeIndex::DateTime:
+        case TypeIndex::DateTime64:
+        case TypeIndex::Time:
+        case TypeIndex::Time64:
+        case TypeIndex::String:
+        case TypeIndex::FixedString:
+        case TypeIndex::Enum8:
+        case TypeIndex::Enum16:
+        case TypeIndex::Decimal32:
+        case TypeIndex::Decimal64:
+        case TypeIndex::Decimal128:
+        case TypeIndex::Decimal256:
+        case TypeIndex::UUID:
+        case TypeIndex::Interval:
+        case TypeIndex::IPv4:
+        case TypeIndex::IPv6:
+            return true;
+
+        case TypeIndex::Array:
+        case TypeIndex::Tuple:
+        case TypeIndex::Map:
+        case TypeIndex::Object:
+        case TypeIndex::Dynamic:
+            return false;
+
+        case TypeIndex::Variant:
+        {
+            const auto * variant_type = typeid_cast<const DataTypeVariant *>(type.get());
+            chassert(variant_type);
+
+            const auto & variants = variant_type->getVariants();
+            for (const auto & variant : variants)
+            {
+                if (!canTypeBeUsedAsDictKey(variant))
+                    return false;
+            }
+            return true;
+        }
+
+        case TypeIndex::Set:
+        case TypeIndex::JSONPaths:
+        case TypeIndex::ObjectDeprecated:
+        case TypeIndex::Function:
+        case TypeIndex::AggregateFunction:
+        case TypeIndex::LowCardinality:
+        case TypeIndex::Nullable:
+        default:
+            return false;
+    }
+}
+
+static py::object convertLocalDateToPython(const LocalDate & local_date, auto & import_cache, const Field & field)
+{
+    auto year = local_date.year();
+    auto month = local_date.month();
+    auto day = local_date.day();
+
+    try
+    {
+        return import_cache.datetime.date()(year, month, day);
+    }
+    catch (py::error_already_set &)
+    {
+        return py::str(toString(field));
+    }
+}
+
+py::object convertFieldToPython(
+    const IColumn & column,
+    const DataTypePtr & type,
+    size_t index)
+{
+    if (column.isNullAt(index))
+    {
+        return py::none();
+    }
+
+    DataTypePtr actual_type = removeLowCardinalityAndNullable(type);
+
+    auto & import_cache = PythonImporter::ImportCache();
+
+    switch (actual_type->getTypeId())
+    {
+        case TypeIndex::Nothing:
+            return py::none();
+
+        case TypeIndex::Int8:
+        {
+            auto field = column[index];
+            return py::cast(field.safeGet<Int8>());
+        }
+
+        case TypeIndex::UInt8:
+        {
+            auto field = column[index];
+            auto field_type = field.getType();
+            if (field_type == Field::Types::Bool)
+                return py::cast(field.safeGet<bool>());
+
+            return py::cast(field.safeGet<UInt8>());
+        }
+
+        case TypeIndex::Int16:
+        {
+            auto field = column[index];
+            return py::cast(field.safeGet<Int16>());
+        }
+
+        case TypeIndex::UInt16:
+        {
+            auto field = 
column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::Int32: + { + auto field = column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::UInt32: + { + auto field = column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::Int64: + { + auto field = column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::UInt64: + { + auto field = column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::Float32: + { + auto field = column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::Float64: + { + auto field = column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::Int128: + { + auto field = column[index]; + return py::cast((double)field.safeGet()); + } + + case TypeIndex::Int256: + { + auto field = column[index]; + return py::cast((double)field.safeGet()); + } + + case TypeIndex::UInt128: + { + auto field = column[index]; + return py::cast((double)field.safeGet()); + } + + case TypeIndex::UInt256: + { + auto field = column[index]; + return py::cast((double)field.safeGet()); + } + + case TypeIndex::BFloat16: + { + auto field = column[index]; + return py::cast((double)field.safeGet()); + } + + case TypeIndex::Date: + { + auto field = column[index]; + auto days = field.safeGet(); + LocalDate local_date(static_cast(days)); + return convertLocalDateToPython(local_date, import_cache, field); + } + + case TypeIndex::Date32: + { + auto field = column[index]; + auto days = field.safeGet(); + LocalDate local_date(static_cast(days)); + return convertLocalDateToPython(local_date, import_cache, field); + } + + case TypeIndex::DateTime: + { + auto field = column[index]; + auto seconds = field.safeGet(); + + const auto * datetime_type = typeid_cast(actual_type.get()); + const auto & time_zone = datetime_type ? datetime_type->getTimeZone() : DateLUT::instance("UTC"); + + time_t timestamp = static_cast(seconds); + LocalDateTime local_dt(timestamp, time_zone); + + int year = local_dt.year(); + int month = local_dt.month(); + int day = local_dt.day(); + int hour = local_dt.hour(); + int minute = local_dt.minute(); + int second = local_dt.second(); + int microsecond = 0; + + try + { + py::object timestamp_object = import_cache.datetime.datetime()( + year, month, day, hour, minute, second, microsecond + ); + + const String & tz_name = time_zone.getTimeZone(); + auto tz_obj = import_cache.pytz.timezone()(tz_name); + return tz_obj.attr("localize")(timestamp_object); + } + catch (py::error_already_set &) + { + return py::str(toString(field)); + } + } + + case TypeIndex::DateTime64: + { + auto field = column[index]; + auto datetime64_field = field.safeGet>(); + auto datetime64_value = datetime64_field.getValue(); + Int64 datetime64_ticks = datetime64_value.value; + + const auto * datetime64_type = typeid_cast(actual_type.get()); + const auto & time_zone = datetime64_type ? 
datetime64_type->getTimeZone() : DateLUT::instance("UTC"); + + UInt32 scale = datetime64_field.getScale(); + Int64 scale_multiplier = DecimalUtils::scaleMultiplier(scale); + + auto seconds = static_cast(datetime64_ticks / scale_multiplier); + auto fractional = datetime64_ticks % scale_multiplier; + + LocalDateTime local_dt(seconds, time_zone); + + int year = local_dt.year(); + int month = local_dt.month(); + int day = local_dt.day(); + int hour = local_dt.hour(); + int minute = local_dt.minute(); + int second = local_dt.second(); + int microsecond = static_cast((fractional * 1000000) / scale_multiplier); + + try + { + py::object timestamp_object = import_cache.datetime.datetime()( + year, month, day, hour, minute, second, microsecond + ); + + const String & tz_name = time_zone.getTimeZone(); + auto tz_obj = import_cache.pytz.timezone()(tz_name); + return tz_obj.attr("localize")(timestamp_object); + } + catch (py::error_already_set &) + { + return py::str(toString(field)); + } + } + + case TypeIndex::Time: + { + auto field = column[index]; + return convertTimeFieldToPython(field); + } + + case TypeIndex::Time64: + { + auto field = column[index]; + return convertTime64FieldToPython(field); + } + + case TypeIndex::String: + case TypeIndex::FixedString: + { + auto field = column[index]; + return py::cast(field.safeGet()); + } + + case TypeIndex::Enum8: + { + auto field = column[index]; + try + { + const auto & enum_type = typeid_cast(*type); + auto it = enum_type.findByValue(static_cast(field.safeGet())); + String enum_name(it->second.data, it->second.size); + return py::cast(enum_name); + } + catch (...) + { + return py::cast(toString(field)); + } + } + + case TypeIndex::Enum16: + { + auto field = column[index]; + try + { + const auto & enum_type = typeid_cast(*type); + auto it = enum_type.findByValue(static_cast(field.safeGet())); + String enum_name(it->second.data, it->second.size); + return py::cast(enum_name); + } + catch (...) 
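/* A rough Python equivalent of the DateTime/DateTime64 localization above
   (illustrative sketch only; the module handles come from the import cache):
       naive = datetime.datetime(year, month, day, hour, minute, second, microsecond)
       aware = pytz.timezone(tz_name).localize(naive)
   On any Python-side failure the value falls back to its string form, so the
   conversion never raises mid-row. */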
+ { + return py::cast(toString(field)); + } + } + + case TypeIndex::Decimal32: + { + auto field = column[index]; + auto decimal_field = field.safeGet>(); + auto decimal_value = decimal_field.getValue(); + UInt32 scale = decimal_field.getScale(); + double result = DecimalUtils::convertTo(decimal_value, scale); + return py::cast(result); + } + + case TypeIndex::Decimal64: + { + auto field = column[index]; + auto decimal_field = field.safeGet>(); + auto decimal_value = decimal_field.getValue(); + UInt32 scale = decimal_field.getScale(); + double result = DecimalUtils::convertTo(decimal_value, scale); + return py::cast(result); + } + + case TypeIndex::Decimal128: + { + auto field = column[index]; + auto decimal_field = field.safeGet>(); + auto decimal_value = decimal_field.getValue(); + UInt32 scale = decimal_field.getScale(); + double result = DecimalUtils::convertTo(decimal_value, scale); + return py::cast(result); + } + + case TypeIndex::Decimal256: + { + auto field = column[index]; + auto decimal_field = field.safeGet>(); + auto decimal_value = decimal_field.getValue(); + UInt32 scale = decimal_field.getScale(); + double result = DecimalUtils::convertTo(decimal_value, scale); + return py::cast(result); + } + + case TypeIndex::UUID: + { + auto field = column[index]; + auto uuid_value = field.safeGet(); + const auto formatted_uuid = formatUUID(uuid_value); + return import_cache.uuid.UUID()(String(formatted_uuid.data(), formatted_uuid.size())); + } + + case TypeIndex::Array: + { + const auto & array_column = typeid_cast(column); + + const auto * array_type = typeid_cast(actual_type.get()); + chassert(array_type); + + const auto & element_type = array_type->getNestedType(); + const auto & offsets = array_column.getOffsets(); + const auto & nested_column = array_column.getDataPtr(); + + size_t start_offset = (index == 0) ? 
0 : offsets[index - 1]; + size_t end_offset = offsets[index]; + + py::list python_list; + for (size_t i = start_offset; i < end_offset; ++i) + { + auto python_element = convertFieldToPython(*nested_column, element_type, i); + python_list.append(python_element); + } + + return python_list; + } + + case TypeIndex::Tuple: + { + const auto & tuple_column = typeid_cast(column); + + const auto * tuple_type = typeid_cast(actual_type.get()); + chassert(tuple_type); + + const auto & element_types = tuple_type->getElements(); + const auto & tuple_columns = tuple_column.getColumns(); + + py::tuple python_tuple(tuple_columns.size()); + for (size_t i = 0; i < tuple_columns.size(); ++i) + { + auto python_element = convertFieldToPython(*(tuple_columns[i]), element_types[i], index); + python_tuple[i] = python_element; + } + + return python_tuple; + } + + case TypeIndex::Interval: + { + auto field = column[index]; + auto interval_value = field.safeGet(); + const auto * interval_type = typeid_cast(actual_type.get()); + chassert(interval_type); + IntervalKind::Kind interval_kind = interval_type->getKind(); + + switch (interval_kind) + { + case IntervalKind::Kind::Nanosecond: + return import_cache.datetime.timedelta()(py::arg("microseconds") = interval_value / 1000); + case IntervalKind::Kind::Microsecond: + return import_cache.datetime.timedelta()(py::arg("microseconds") = interval_value); + case IntervalKind::Kind::Millisecond: + return import_cache.datetime.timedelta()(py::arg("milliseconds") = interval_value); + case IntervalKind::Kind::Second: + return import_cache.datetime.timedelta()(py::arg("seconds") = interval_value); + case IntervalKind::Kind::Minute: + return import_cache.datetime.timedelta()(py::arg("minutes") = interval_value); + case IntervalKind::Kind::Hour: + return import_cache.datetime.timedelta()(py::arg("hours") = interval_value); + case IntervalKind::Kind::Day: + return import_cache.datetime.timedelta()(py::arg("days") = interval_value); + case IntervalKind::Kind::Week: + return import_cache.datetime.timedelta()(py::arg("weeks") = interval_value); + case IntervalKind::Kind::Month: + /// Approximate: 1 month = 30 days + return import_cache.datetime.timedelta()(py::arg("days") = interval_value * 30); + case IntervalKind::Kind::Quarter: + /// 1 quarter = 3 months = 90 days + return import_cache.datetime.timedelta()(py::arg("days") = interval_value * 90); + case IntervalKind::Kind::Year: + /// 1 year = 365 days + return import_cache.datetime.timedelta()(py::arg("days") = interval_value * 365); + default: + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unsupported interval kind"); + } + } + + case TypeIndex::Map: + { + const auto & map_column = typeid_cast(column); + + const auto * map_type = typeid_cast(actual_type.get()); + chassert(map_type); + + const auto & key_type = map_type->getKeyType(); + const auto & value_type = map_type->getValueType(); + + /// Get the nested array column containing tuples + const auto & nested_array = map_column.getNestedColumn(); + const auto & array_column = typeid_cast(nested_array); + + const auto & offsets = array_column.getOffsets(); + const auto & tuple_column_ptr = array_column.getDataPtr(); + const auto & tuple_column = typeid_cast(*tuple_column_ptr); + + size_t start_offset = (index == 0) ? 
0 : offsets[index - 1]; + size_t end_offset = offsets[index]; + + const auto & key_column = tuple_column.getColumn(0); + const auto & value_column = tuple_column.getColumn(1); + + bool use_dict = canTypeBeUsedAsDictKey(key_type); + + if (use_dict) + { + py::dict python_dict; + for (size_t i = start_offset; i < end_offset; ++i) + { + auto python_key = convertFieldToPython(key_column, key_type, i); + auto python_value = convertFieldToPython(value_column, value_type, i); + + python_dict[std::move(python_key)] = std::move(python_value); + } + + return python_dict; + } + else + { + py::list keys_list; + py::list values_list; + for (size_t i = start_offset; i < end_offset; ++i) + { + auto python_key = convertFieldToPython(key_column, key_type, i); + auto python_value = convertFieldToPython(value_column, value_type, i); + + keys_list.append(std::move(python_key)); + values_list.append(std::move(python_value)); + } + + py::dict python_dict; + python_dict["keys"] = std::move(keys_list); + python_dict["values"] = std::move(values_list); + + return python_dict; + } + } + + case TypeIndex::Variant: + { + const auto & variant_column = typeid_cast(column); + auto discriminator = variant_column.globalDiscriminatorAt(index); + if (discriminator == ColumnVariant::NULL_DISCRIMINATOR) + { + return py::none(); + } + + const auto & variant_type = typeid_cast(*actual_type); + const auto & variants = variant_type.getVariants(); + const auto & variant_data_type = variants[discriminator]; + + auto offset = variant_column.offsetAt(index); + const auto & variant_inner_column = variant_column.getVariantByGlobalDiscriminator(discriminator); + + return convertFieldToPython(variant_inner_column, variant_data_type, offset); + } + + + case TypeIndex::Dynamic: + { + const auto & dynamic_column = typeid_cast(column); + const auto & variant_column = dynamic_column.getVariantColumn(); + + /// Check if this row has value in shared variant + if (variant_column.globalDiscriminatorAt(index) == dynamic_column.getSharedVariantDiscriminator()) + { + /// Get data from shared variant and deserialize it + auto value = dynamic_column.getSharedVariant().getDataAt(variant_column.offsetAt(index)); + ReadBufferFromMemory buf(value.data, value.size); + auto variant_type = decodeDataType(buf); + auto tmp_variant_column = variant_type->createColumn(); + auto variant_serialization = variant_type->getDefaultSerialization(); + variant_serialization->deserializeBinary(*tmp_variant_column, buf, FormatSettings{}); + + /// Convert the deserialized value + return convertFieldToPython(*tmp_variant_column, variant_type, 0); + } + else + { + /// Use variant conversion logic directly + return convertFieldToPython(variant_column, dynamic_column.getVariantInfo().variant_type, index); + } + } + + case TypeIndex::Object: + { + return convertObjectToPython(column, actual_type, index); + } + + case TypeIndex::IPv4: + { + auto field = column[index]; + auto ipv4_value = field.safeGet(); + + char ipv4_str[IPV4_MAX_TEXT_LENGTH]; + char * ptr = ipv4_str; + formatIPv4(reinterpret_cast(&ipv4_value), ptr); + const size_t ipv4_str_len = ptr - ipv4_str; + + return import_cache.ipaddress.ipv4_address()(String(ipv4_str, ipv4_str_len)); + } + + case TypeIndex::IPv6: + { + auto field = column[index]; + auto ipv6_value = field.safeGet(); + + char ipv6_str[IPV6_MAX_TEXT_LENGTH]; + char * ptr = ipv6_str; + formatIPv6(reinterpret_cast(&ipv6_value), ptr); + const size_t ipv6_str_len = ptr - ipv6_str; + + return import_cache.ipaddress.ipv6_address()(String(ipv6_str, 
ipv6_str_len)); + } + + /// Set types are used only in WHERE clauses for IN operations, not in actual data storage + case TypeIndex::Set: + /// JSONPaths is an internal type used only for JSON schema inference, + case TypeIndex::JSONPaths: + /// Deprecated type, should not appear in normal data processing + case TypeIndex::ObjectDeprecated: + /// Function types are not actual data types, should not appear here + case TypeIndex::Function: + /// Aggregate function types are not actual data types, should not appear here + case TypeIndex::AggregateFunction: + /// LowCardinality should be unwrapped before reaching this point + case TypeIndex::LowCardinality: + /// Nullable cannot contain another Nullable type, so this should not appear in nested conversion + case TypeIndex::Nullable: + /// QBit type is supported in newer versions of ClickHouse + /// case TypeIndex::QBit: + default: + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type {}", type->getName()); + } +} + +} // namespace CHDB diff --git a/programs/local/FieldToPython.h b/programs/local/FieldToPython.h new file mode 100644 index 00000000000..f175ceb0866 --- /dev/null +++ b/programs/local/FieldToPython.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include +#include + +namespace CHDB +{ + +pybind11::object convertTimeFieldToPython(const DB::Field & field); + +pybind11::object convertTime64FieldToPython(const DB::Field & field); + +pybind11::object convertFieldToPython( + const DB::IColumn & column, + const DB::DataTypePtr & type, + size_t index); + +} // namespace CHDB diff --git a/programs/local/IPAddressCacheItem.h b/programs/local/IPAddressCacheItem.h new file mode 100644 index 00000000000..2d51a1a3e43 --- /dev/null +++ b/programs/local/IPAddressCacheItem.h @@ -0,0 +1,25 @@ +#pragma once + +#include "PythonImportCacheItem.h" + +namespace CHDB { + +struct IPAddressCacheItem : public PythonImportCacheItem +{ +public: + static constexpr const char * Name = "ipaddress"; + + IPAddressCacheItem() + : PythonImportCacheItem("ipaddress") + , ipv4_address("IPv4Address", this) + , ipv6_address("IPv6Address", this) + { + } + + ~IPAddressCacheItem() override = default; + + PythonImportCacheItem ipv4_address; + PythonImportCacheItem ipv6_address; +}; + +} // namespace CHDB diff --git a/programs/local/LocalChdb.cpp b/programs/local/LocalChdb.cpp index a35f6669c56..61c931270e3 100644 --- a/programs/local/LocalChdb.cpp +++ b/programs/local/LocalChdb.cpp @@ -1,14 +1,14 @@ #include "LocalChdb.h" -#include +#include "chdb-internal.h" +#include "PandasDataFrameBuilder.h" +#include "ChunkCollectorOutputFormat.h" #include "PythonImporter.h" #include "PythonTableCache.h" #include "StoragePython.h" -#include "chdb-internal.h" -#include "chdb.h" #include #include - +#include #include #if USE_JEMALLOC # include @@ -79,13 +79,26 @@ chdb_result * queryToBuffer( // Pybind11 will take over the ownership of the `query_result` object // using smart ptr will cause early free of the object -query_result * query( +py::object query( const std::string & queryStr, const std::string & output_format = "CSV", const std::string & path = {}, const std::string & udfPath = {}) { - return new query_result(queryToBuffer(queryStr, output_format, path, udfPath)); + auto * result = queryToBuffer(queryStr, output_format, path, udfPath); + + if (Poco::toLower(output_format) == "dataframe") + { + chdb_destroy_query_result(result); + + auto & builder = CHDB::getGlobalDataFrameBuilder(); + auto ret = builder.getDataFrame(); + CHDB::resetGlobalDataFrameBuilder(); + 
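/* The "dataframe" fast path below skips the serialized result buffer entirely:
   rows were already collected by ChunkCollectorOutputFormat into the global
   PandasDataFrameBuilder while the query executed. Illustrative Python usage
   ("DataFrame" is matched case-insensitively):
       import chdb
       df = chdb.query("SELECT 1 AS id, 'hello' AS msg", "DataFrame")
       # df is a pandas.DataFrame built natively, with no Arrow round-trip
*/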
return ret;
+    }
+
+    // Default behavior - return query_result
+    return py::cast(new query_result(result));
 }
 
 // The `query_result` and `memoryview_wrapper` will hold `local_result_wrapper` with shared_ptr
@@ -263,25 +276,39 @@ void connection_wrapper::commit()
     // do nothing
 }
 
-query_result * connection_wrapper::query(const std::string & query_str, const std::string & format)
+py::object connection_wrapper::query(const std::string & query_str, const std::string & format)
 {
     CHDB::PythonTableCache::findQueryableObjFromQuery(query_str);
 
-    py::gil_scoped_release release;
-    auto * result = chdb_query_n(*conn, query_str.data(), query_str.size(), format.data(), format.size());
-    if (chdb_result_length(result))
+    chdb_result * result = nullptr;
     {
-        LOG_DEBUG(getLogger("CHDB"), "Empty result returned for query: {}", query_str);
-    }
+        py::gil_scoped_release release;
+        result = chdb_query_n(*conn, query_str.data(), query_str.size(), format.data(), format.size());
+        auto error_msg = CHDB::chdb_result_error_string(result);
+        if (!error_msg.empty())
+        {
+            std::string msg_copy(error_msg);
+            chdb_destroy_query_result(result);
+            CHDB::resetGlobalDataFrameBuilder();
+            throw std::runtime_error(msg_copy);
+        }
 
-    auto error_msg = CHDB::chdb_result_error_string(result);
-    if (!error_msg.empty())
-    {
-        std::string msg_copy(error_msg);
-        chdb_destroy_query_result(result);
-        throw std::runtime_error(msg_copy);
+        if (Poco::toLower(format) == "dataframe")
+        {
+            chdb_destroy_query_result(result);
+            auto & builder = CHDB::getGlobalDataFrameBuilder();
+            auto ret = builder.getDataFrame();
+            CHDB::resetGlobalDataFrameBuilder();
+            return ret;
+        }
+
+        if (!chdb_result_length(result))
+        {
+            LOG_DEBUG(getLogger("CHDB"), "Empty result returned for query: {}", query_str);
+        }
     }
-    return new query_result(result, false);
+
+    return py::cast(new query_result(result, false));
 }
 
 streaming_query_result * connection_wrapper::send_query(const std::string & query_str, const std::string & format)
@@ -483,7 +510,7 @@ PYBIND11_MODULE(_chdb, m)
         &connection_wrapper::query,
         py::arg("query_str"),
         py::arg("format") = "CSV",
-        "Execute a query and return a query_result object")
+        "Execute a query and return a query_result object or DataFrame")
         .def(
             "send_query",
             &connection_wrapper::send_query,
@@ -509,7 +536,7 @@ PYBIND11_MODULE(_chdb, m)
         py::kw_only(),
         py::arg("path") = "",
         py::arg("udf_path") = "",
-        "Query chDB and return a query_result object");
+        "Query chDB and return a query_result object or DataFrame");
 
     auto destroy_import_cache = []()
     {
diff --git a/programs/local/LocalChdb.h b/programs/local/LocalChdb.h
index 5cf30058135..076103cca19 100644
--- a/programs/local/LocalChdb.h
+++ b/programs/local/LocalChdb.h
@@ -30,7 +30,7 @@ class connection_wrapper
     cursor_wrapper * cursor();
     void commit();
     void close();
-    query_result * query(const std::string & query_str, const std::string & format = "CSV");
+    py::object query(const std::string & query_str, const std::string & format = "CSV");
     streaming_query_result * send_query(const std::string & query_str, const std::string & format = "CSV");
     query_result * streaming_fetch_result(streaming_query_result * streaming_result);
     void streaming_cancel_query(streaming_query_result * streaming_result);
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index af5590c8186..886fe374be0 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -1,7 +1,7 @@
 #include "LocalServer.h"
-
 #include "chdb-internal.h"
 #if USE_PYTHON
+#include 
"ChunkCollectorOutputFormat.h" #include "StoragePython.h" #include "TableFunctionPython.h" #else @@ -10,7 +10,6 @@ #endif #include #include - #include #include #include @@ -671,6 +670,7 @@ try auto & storage_factory = StorageFactory::instance(); #if USE_PYTHON registerStoragePython(storage_factory); + CHDB::registerDataFrameOutputFormat(); #else registerStorageArrowStream(storage_factory); #endif diff --git a/programs/local/NumpyArray.cpp b/programs/local/NumpyArray.cpp new file mode 100644 index 00000000000..b1500ad775b --- /dev/null +++ b/programs/local/NumpyArray.cpp @@ -0,0 +1,906 @@ +#include "NumpyArray.h" +#include "NumpyType.h" +#include "NumpyNestedTypes.h" +#include "PythonImporter.h" +#include "FieldToPython.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NOT_IMPLEMENTED; + extern const int LOGICAL_ERROR; +} + +} + +using namespace DB; + +namespace CHDB +{ + +struct RegularConvert +{ + template + static NUMPYTYPE convertValue(CHTYPE val, NumpyAppendData & append_data) + { + (void)append_data; + return (NUMPYTYPE)val; + } + + template + static NUMPYTYPE nullValue(bool & set_mask) + { + set_mask = true; + return 0; + } +}; + +struct TimeConvert +{ + template + static NUMPYTYPE convertValue(CHTYPE val, NumpyAppendData & append_data) + { + chassert(append_data.type); + + Field field(static_cast(val)); + auto time_object = convertTimeFieldToPython(field); + return time_object.release().ptr(); + } + + template + static NUMPYTYPE nullValue(bool & set_mask) + { + set_mask = true; + return nullptr; + } +}; + +struct Time64Convert +{ + template + static NUMPYTYPE convertValue(CHTYPE val, NumpyAppendData & append_data) + { + chassert(append_data.type); + + const auto & time64_type = typeid_cast(*append_data.type); + UInt32 scale = time64_type.getScale(); + DecimalField decimal_field(static_cast(val), scale); + Field field(decimal_field); + + auto time64_object = convertTime64FieldToPython(field); + return time64_object.release().ptr(); + } + + template + static NUMPYTYPE nullValue(bool & set_mask) + { + set_mask = true; + return nullptr; + } +}; + +struct Enum8Convert +{ + template + static NUMPYTYPE convertValue(CHTYPE val, NumpyAppendData & append_data) + { + const auto & enum_type = typeid_cast(*append_data.type); + + try + { + auto it = enum_type.findByValue(static_cast(val)); + String enum_name(it->second.data, it->second.size); + return py::str(enum_name).release().ptr(); + } + catch (...) + { + return py::str(toString(static_cast(val))).release().ptr(); + } + } + + template + static NUMPYTYPE nullValue(bool & set_mask) + { + set_mask = true; + return nullptr; + } +}; + +struct Enum16Convert +{ + template + static NUMPYTYPE convertValue(CHTYPE val, NumpyAppendData & append_data) + { + const auto & enum_type = typeid_cast(*append_data.type); + try + { + auto it = enum_type.findByValue(static_cast(val)); + String enum_name(it->second.data, it->second.size); + return py::str(enum_name).release().ptr(); + } + catch (...) 
+ { + return py::str(toString(static_cast(val))).release().ptr(); + } + } + + template + static NUMPYTYPE nullValue(bool & set_mask) + { + set_mask = true; + return nullptr; + } +}; + +template +static bool TransformColumn(NumpyAppendData & append_data) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + /// Check if column is nullable + if (const auto * nullable = typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * tmp_ptr = static_cast(data_column)->getRawDataBegin(); + const auto * src_ptr = reinterpret_cast(tmp_ptr); + auto * dest_ptr = reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + dest_ptr[dest_index] = CONVERT::template nullValue(mask_ptr[dest_index]); + has_null = has_null || mask_ptr[dest_index]; + } + else + { + dest_ptr[dest_index] = CONVERT::template convertValue(src_ptr[src_index], append_data); + mask_ptr[dest_index] = false; + } + } + + return has_null; +} + +template +static bool CHColumnToNumpyArray(NumpyAppendData & append_data) +{ + return TransformColumn(append_data); +} + +template +static bool CHColumnDecimalToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + /// Check if column is nullable + if (const auto * nullable = typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * decimal_column = typeid_cast *>(data_column); + if (!decimal_column) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ColumnDecimal"); + + /// Get scale from data type to convert integer to actual decimal value + const auto * decimal_type = typeid_cast *>(data_type.get()); + if (!decimal_type) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected DataTypeDecimal"); + + UInt32 scale = decimal_type->getScale(); + + auto * dest_ptr = reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + /// Set to 0.0 for null values + dest_ptr[dest_index] = 0.0; + mask_ptr[dest_index] = true; + has_null = true; + } + else + { + auto decimal_value = decimal_column->getElement(src_index); + dest_ptr[dest_index] = DecimalUtils::convertTo(decimal_value, scale); + mask_ptr[dest_index] = false; + } + } + + return has_null; +} + +static bool CHColumnDateTime64ToNumpyArray(NumpyAppendData & append_data) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + if (const auto * nullable = typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * decimal_column = typeid_cast *>(data_column); + if (!decimal_column) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ColumnDecimal"); + + auto * dest_ptr = 
reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + dest_ptr[dest_index] = 0; + mask_ptr[dest_index] = true; + has_null = true; + } + else + { + /// Get the DateTime64 value and convert to nanoseconds + Int64 raw_value = decimal_column->getInt(src_index); + auto scale = decimal_column->getScale(); + + Int64 ns_value; + chassert(scale <= 9); + Int64 multiplier = common::exp10_i32(9 - scale); + ns_value = raw_value * multiplier; + + dest_ptr[dest_index] = ns_value; + mask_ptr[dest_index] = false; + } + } + + return has_null; +} + +static bool CHColumnIntervalToNumpyArray(NumpyAppendData & append_data) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + /// Check if column is nullable + if (const auto * nullable = typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * int64_column = typeid_cast *>(data_column); + if (!int64_column) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ColumnVector for Interval"); + + auto * dest_ptr = reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + dest_ptr[dest_index] = 0; + mask_ptr[dest_index] = true; + has_null = true; + } + else + { + Int64 interval_value = int64_column->getElement(src_index); + + /// Convert quarter to month by multiplying by 3 + /// This function is only called for Quarter intervals + interval_value *= 3; + + dest_ptr[dest_index] = interval_value; + mask_ptr[dest_index] = false; + } + } + + return has_null; +} + +static bool CHColumnUUIDToNumpyArray(NumpyAppendData & append_data) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + /// Check if column is nullable + if (const auto * nullable = typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * uuid_column = typeid_cast *>(data_column); + if (!uuid_column) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ColumnVector"); + + auto * dest_ptr = reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + dest_ptr[dest_index] = nullptr; + has_null = true; + mask_ptr[dest_index] = true; + } + else + { + /// Convert UUID to Python uuid.UUID object + UUID uuid_value = uuid_column->getElement(src_index); + const auto formatted_uuid = formatUUID(uuid_value); + const char * uuid_str = formatted_uuid.data(); + const size_t uuid_str_len = formatted_uuid.size(); + + /// Create Python uuid.UUID object + auto & import_cache = PythonImporter::ImportCache(); + py::handle uuid_handle = import_cache.uuid.UUID()(String(uuid_str, uuid_str_len)).release(); + dest_ptr[dest_index] = uuid_handle.ptr(); + 
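/* Ownership note, as the code reads: .release() above detaches pybind11's
   automatic refcounting, so the reference to the freshly built uuid.UUID is
   handed to the object-dtype numpy slot instead of being dropped when this
   scope ends. The IPv4/IPv6 converters below follow the same pattern. */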
mask_ptr[dest_index] = false; + } + } + + return has_null; +} + +static bool CHColumnIPv4ToNumpyArray(NumpyAppendData & append_data) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + /// Check if column is nullable + if (const auto * nullable = typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * ipv4_column = typeid_cast *>(data_column); + if (!ipv4_column) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ColumnVector"); + + auto * dest_ptr = reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + dest_ptr[dest_index] = nullptr; + has_null = true; + mask_ptr[dest_index] = true; + } + else + { + /// Convert IPv4 to Python ipaddress.IPv4Address object + IPv4 ipv4_value = ipv4_column->getElement(src_index); + + char ipv4_str[IPV4_MAX_TEXT_LENGTH]; + char * ptr = ipv4_str; + formatIPv4(reinterpret_cast(&ipv4_value), ptr); + const size_t ipv4_str_len = ptr - ipv4_str; + + /// Create Python ipaddress.IPv4Address object + auto & import_cache = PythonImporter::ImportCache(); + py::handle ipv4_handle = import_cache.ipaddress.ipv4_address()(String(ipv4_str, ipv4_str_len)).release(); + dest_ptr[dest_index] = ipv4_handle.ptr(); + mask_ptr[dest_index] = false; + } + } + + return has_null; +} + +static bool CHColumnIPv6ToNumpyArray(NumpyAppendData & append_data) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + /// Check if column is nullable + if (const auto * nullable = typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * ipv6_column = typeid_cast *>(data_column); + if (!ipv6_column) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected ColumnVector"); + + auto * dest_ptr = reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + dest_ptr[dest_index] = nullptr; + has_null = true; + mask_ptr[dest_index] = true; + } + else + { + /// Convert IPv6 to Python ipaddress.IPv6Address object + IPv6 ipv6_value = ipv6_column->getElement(src_index); + + /// Use ClickHouse's built-in IPv6 formatting function + char ipv6_str[IPV6_MAX_TEXT_LENGTH]; + char * ptr = ipv6_str; + formatIPv6(reinterpret_cast(&ipv6_value), ptr); + const size_t ipv6_str_len = ptr - ipv6_str; + + /// Create Python ipaddress.IPv6Address object + auto & import_cache = PythonImporter::ImportCache(); + py::handle ipv6_handle = import_cache.ipaddress.ipv6_address()(String(ipv6_str, ipv6_str_len)).release(); + dest_ptr[dest_index] = ipv6_handle.ptr(); + mask_ptr[dest_index] = false; + } + } + + return has_null; +} + +template +static bool CHColumnStringToNumpyArray(NumpyAppendData & append_data) +{ + bool has_null = false; + const IColumn * data_column = &append_data.column; + const ColumnNullable * nullable_column = nullptr; + + /// Check if column is nullable + if (const auto * nullable = 
typeid_cast(&append_data.column)) + { + nullable_column = nullable; + data_column = &nullable->getNestedColumn(); + } + + const auto * string_column = typeid_cast(data_column); + if (!string_column) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected String ColumnType"); + + auto * dest_ptr = reinterpret_cast(append_data.target_data); + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t src_index = append_data.src_offset + i; + size_t dest_index = append_data.dest_offset + i; + if (nullable_column && nullable_column->isNullAt(src_index)) + { + Py_INCREF(Py_None); + dest_ptr[dest_index] = Py_None; + } + else + { + StringRef str_ref = string_column->getDataAt(src_index); + auto * str_ptr = const_cast(str_ref.data); + auto str_size = str_ref.size; + dest_ptr[dest_index] = PyUnicode_FromStringAndSize(str_ptr, str_size); + } + } + + return has_null; +} + +NumpyAppendData::NumpyAppendData( + const DB::IColumn & column_, + const DB::DataTypePtr & type_) + : column(column_) + , type(type_) + , src_offset(0) + , src_count(0) + , dest_offset(0) + , target_data(nullptr) + , target_mask(nullptr) +{ +} + +InternalNumpyArray::InternalNumpyArray(const DataTypePtr & type_) + : data(nullptr) + , type(type_) + , count(0) +{ +} + +void InternalNumpyArray::init(size_t capacity) +{ + String type_str = DataTypeToNumpyTypeStr(type); + + array = py::array(py::dtype(type_str), capacity); + data = reinterpret_cast(array.mutable_data()); +} + +void InternalNumpyArray::resize(size_t capacity) +{ + std::vector new_shape {py::ssize_t(capacity)}; + + array.resize(new_shape, false); + data = reinterpret_cast(array.mutable_data()); +} + +NumpyArray::NumpyArray(const DataTypePtr & type_) + : hava_null(false) +{ + data_array = std::make_unique(type_); +} + +void NumpyArray::init(size_t capacity, bool may_have_null) +{ + data_array->init(capacity); + + if (may_have_null) + { + if (!mask_array) + mask_array = std::make_unique(DataTypeFactory::instance().get("Bool")); + + mask_array->init(capacity); + } +} + +void NumpyArray::resize(size_t capacity, bool may_have_null) +{ + data_array->resize(capacity); + + if (may_have_null) + { + if (!mask_array) + mask_array = std::make_unique(DataTypeFactory::instance().get("Bool")); + + mask_array->resize(capacity); + } +} + +static bool CHColumnNothingToNumpyArray(NumpyAppendData & append_data) +{ + /// Nothing type represents columns with no actual values, so we fill all positions with None + bool has_null = true; + auto * dest_ptr = reinterpret_cast(append_data.target_data); + auto * mask_ptr = append_data.target_mask; + + for (size_t i = 0; i < append_data.src_count; i++) + { + size_t dest_index = append_data.dest_offset + i; + + Py_INCREF(Py_None); + dest_ptr[dest_index] = Py_None; + mask_ptr[dest_index] = true; + } + + return has_null; +} + +void NumpyArray::append(const ColumnPtr & column) +{ + append(column, 0, column->size()); +} + +void NumpyArray::append( + const ColumnPtr & column, + size_t offset, + size_t count) +{ + auto actual_column = column->convertToFullColumnIfLowCardinality(); + DataTypePtr actual_type = removeLowCardinalityAndNullable(data_array->type); + + chassert(data_array); + chassert(mask_array); + + auto * data_ptr = data_array->data; + auto * mask_ptr = reinterpret_cast(mask_array->data); + chassert(data_ptr); + chassert(mask_ptr); + chassert(actual_column->isNullable() || actual_column->getDataType() == actual_type->getColumnType()); + + data_array->count += count; + mask_array->count += count; + bool may_have_null = false; + + 
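/* Append flow in brief: wrap the source column and destination buffers in a
   NumpyAppendData, dispatch on the unwrapped TypeIndex, and record whether a
   null was written. A caller-side sketch (variable names hypothetical; the
   NumpyArray API is as declared in NumpyArray.h):
       NumpyArray arr(column_type);
       arr.init(column->size());
       arr.append(column);
       py::object out = arr.toArray();  // ndarray, or numpy.ma masked_array when nulls exist
*/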
NumpyAppendData append_data(*actual_column, actual_type); + append_data.src_offset = offset; + append_data.src_count = count; + append_data.target_data = data_ptr; + append_data.target_mask = mask_ptr; + append_data.dest_offset = data_array->count - count; + + switch (actual_type->getTypeId()) + { + case TypeIndex::Nothing: + may_have_null = CHColumnNothingToNumpyArray(append_data); + break; + + case TypeIndex::Int8: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::UInt8: + { + auto is_bool = isBool(actual_type); + if (is_bool) + may_have_null = CHColumnToNumpyArray(append_data); + else + may_have_null = CHColumnToNumpyArray(append_data); + } + break; + + case TypeIndex::Int16: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::UInt16: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::Int32: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::UInt32: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::Int64: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::UInt64: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::Float32: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::Float64: + may_have_null = CHColumnToNumpyArray(append_data); + break; + + case TypeIndex::Int128: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::Int256: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::UInt128: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::UInt256: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::BFloat16: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::Date: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::Date32: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::DateTime: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::DateTime64: + may_have_null = CHColumnDateTime64ToNumpyArray(append_data); + break; + + case TypeIndex::Time: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::Time64: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::String: + may_have_null = CHColumnStringToNumpyArray(append_data); + break; + + case TypeIndex::FixedString: + may_have_null = CHColumnStringToNumpyArray(append_data); + break; + + case TypeIndex::Enum8: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::Enum16: + may_have_null = TransformColumn(append_data); + break; + + case TypeIndex::Decimal32: + may_have_null = CHColumnDecimalToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::Decimal64: + may_have_null = CHColumnDecimalToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::Decimal128: + may_have_null = CHColumnDecimalToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::Decimal256: + may_have_null = CHColumnDecimalToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::UUID: + may_have_null = CHColumnUUIDToNumpyArray(append_data); + break; + + case TypeIndex::Array: + may_have_null = CHColumnArrayToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::Tuple: + may_have_null = CHColumnTupleToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::Interval: + { + const auto * 
interval_type = typeid_cast(actual_type.get()); + if (interval_type && interval_type->getKind() == IntervalKind::Kind::Quarter) + { + may_have_null = CHColumnIntervalToNumpyArray(append_data); + } + else + { + may_have_null = CHColumnToNumpyArray(append_data); + } + } + break; + + case TypeIndex::Map: + may_have_null = CHColumnMapToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::Object: + may_have_null = CHColumnObjectToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::IPv4: + may_have_null = CHColumnIPv4ToNumpyArray(append_data); + break; + + case TypeIndex::IPv6: + may_have_null = CHColumnIPv6ToNumpyArray(append_data); + break; + + case TypeIndex::Variant: + may_have_null = CHColumnVariantToNumpyArray(append_data, actual_type); + break; + + case TypeIndex::Dynamic: + may_have_null = CHColumnDynamicToNumpyArray(append_data, actual_type); + break; + + /// Set types are used only in WHERE clauses for IN operations, not in actual data storage + case TypeIndex::Set: + /// JSONPaths is an internal type used only for JSON schema inference, + case TypeIndex::JSONPaths: + /// Deprecated type, should not appear in normal data processing + case TypeIndex::ObjectDeprecated: + /// Function types are not actual data types, should not appear here + case TypeIndex::Function: + /// Aggregate function types are not actual data types, should not appear here + case TypeIndex::AggregateFunction: + /// LowCardinality should be unwrapped before reaching this point + case TypeIndex::LowCardinality: + /// Nullable cannot contain another Nullable type, so this should not appear in nested conversion + case TypeIndex::Nullable: + /// QBit type is supported in newer versions of ClickHouse + /// case TypeIndex::QBit: + default: + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type {}", data_array->type->getName()); + } + + if (may_have_null) + { + hava_null = true; + } +} + +void NumpyArray::append( + const DB::IColumn & column, + const DB::DataTypePtr & type, + size_t index) +{ + chassert(data_array); + chassert(!mask_array); + + auto * data_ptr = data_array->data; + chassert(data_ptr); + + auto * dest_ptr = reinterpret_cast(data_ptr) + data_array->count; + + *dest_ptr = convertFieldToPython(column, type, index); + + data_array->count += 1; +} + +py::object NumpyArray::toArray() const +{ + chassert(data_array); + + data_array->resize(data_array->count); + if (!hava_null) + { + return std::move(data_array->array); + } + + chassert(mask_array); + + mask_array->resize(mask_array->count); + auto data_values = std::move(data_array->array); + auto null_values = std::move(mask_array->array); + + auto masked_array = py::module::import("numpy.ma").attr("masked_array")(data_values, null_values); + return masked_array; +} + +} // namespace CHDB diff --git a/programs/local/NumpyArray.h b/programs/local/NumpyArray.h new file mode 100644 index 00000000000..ca2af0ae6bd --- /dev/null +++ b/programs/local/NumpyArray.h @@ -0,0 +1,74 @@ +#pragma once + +#include "PybindWrapper.h" + +#include +#include +#include +#include + +namespace CHDB +{ + +/// Data structure for appending column data to numpy arrays +class NumpyAppendData +{ +public: + explicit NumpyAppendData( + const DB::IColumn & column_, + const DB::DataTypePtr & type_); + + const DB::IColumn & column; + const DB::DataTypePtr & type; + + size_t src_offset; + size_t src_count; + size_t dest_offset; + UInt8 * target_data; + bool * target_mask; +}; + +class InternalNumpyArray +{ +public: + explicit InternalNumpyArray(const 
DB::DataTypePtr & type); + + void init(size_t capacity); + + void resize(size_t capacity); + + py::array array; + UInt8 * data; + DB::DataTypePtr type; + size_t count; +}; + +class NumpyArray { +public: + explicit NumpyArray(const DB::DataTypePtr & type_); + + void init(size_t capacity, bool may_have_null = true); + + void resize(size_t capacity, bool may_have_null = true); + + void append(const DB::ColumnPtr & column); + + void append( + const DB::ColumnPtr & column, + size_t offset, + size_t count); + + void append( + const DB::IColumn & column, + const DB::DataTypePtr & type, + size_t index); + + py::object toArray() const; + +private: + bool hava_null; + std::unique_ptr data_array; + std::unique_ptr mask_array; +}; + +} // namespace CHDB diff --git a/programs/local/NumpyCacheItem.h b/programs/local/NumpyCacheItem.h new file mode 100644 index 00000000000..5d75cc5ed0a --- /dev/null +++ b/programs/local/NumpyCacheItem.h @@ -0,0 +1,66 @@ +#pragma once + +#include "PythonImportCacheItem.h" + +namespace CHDB { + +struct NumpyMaCacheItem : public PythonImportCacheItem +{ +public: + NumpyMaCacheItem(PythonImportCacheItem * parent) + : PythonImportCacheItem("ma", parent), masked("masked", this), masked_array("masked_array", this) { + } + ~NumpyMaCacheItem() override = default; + + PythonImportCacheItem masked; + PythonImportCacheItem masked_array; +}; + +struct NumpyCacheItem : public PythonImportCacheItem +{ +public: + static constexpr const char * Name = "numpy"; + + NumpyCacheItem() + : PythonImportCacheItem("numpy"), ma(this), ndarray("ndarray", this), datetime64("datetime64", this), + generic("generic", this), int64("int64", this), bool_("bool_", this), byte("byte", this), + ubyte("ubyte", this), short_("short", this), ushort_("ushort", this), intc("intc", this), + uintc("uintc", this), int_("int_", this), uint("uint", this), longlong("longlong", this), + ulonglong("ulonglong", this), half("half", this), float16("float16", this), single("single", this), + longdouble("longdouble", this), csingle("csingle", this), cdouble("cdouble", this), + clongdouble("clongdouble", this) { + } + ~NumpyCacheItem() override = default; + + NumpyMaCacheItem ma; + PythonImportCacheItem ndarray; + PythonImportCacheItem datetime64; + PythonImportCacheItem generic; + PythonImportCacheItem int64; + PythonImportCacheItem bool_; + PythonImportCacheItem byte; + PythonImportCacheItem ubyte; + PythonImportCacheItem short_; + PythonImportCacheItem ushort_; + PythonImportCacheItem intc; + PythonImportCacheItem uintc; + PythonImportCacheItem int_; + PythonImportCacheItem uint; + PythonImportCacheItem longlong; + PythonImportCacheItem ulonglong; + PythonImportCacheItem half; + PythonImportCacheItem float16; + PythonImportCacheItem single; + PythonImportCacheItem longdouble; + PythonImportCacheItem csingle; + PythonImportCacheItem cdouble; + PythonImportCacheItem clongdouble; + +protected: + bool IsRequired() const override final + { + return false; + } +}; + +} // namespace CHDB diff --git a/programs/local/NumpyNestedTypes.cpp b/programs/local/NumpyNestedTypes.cpp new file mode 100644 index 00000000000..633f0d89c8d --- /dev/null +++ b/programs/local/NumpyNestedTypes.cpp @@ -0,0 +1,205 @@ +#include "NumpyNestedTypes.h" +#include "NumpyArray.h" +#include "FieldToPython.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ +extern const int LOGICAL_ERROR; 
+extern const int NOT_IMPLEMENTED;
+}
+
+}
+
+namespace CHDB
+{
+
+using namespace DB;
+
+template <typename ColumnType>
+struct ColumnTraits;
+
+template <>
+struct ColumnTraits<ColumnArray>
+{
+    using DataType = DataTypeArray;
+
+    static py::object convertElement(const ColumnArray * column, const DataTypePtr & data_type, size_t index)
+    {
+        const auto & offsets = column->getOffsets();
+        const auto & nested_column = column->getDataPtr();
+
+        size_t start_offset = (index == 0) ? 0 : offsets[index - 1];
+        size_t end_offset = offsets[index];
+        size_t array_size = end_offset - start_offset;
+
+        /// Extract the nested element type from DataTypeArray
+        const auto & array_data_type = typeid_cast<const DataTypeArray &>(*data_type);
+        const DataTypePtr & nested_data_type = array_data_type.getNestedType();
+
+        NumpyArray numpy_array(nested_data_type);
+        numpy_array.init(array_size);
+        numpy_array.append(nested_column, start_offset, array_size);
+
+        return numpy_array.toArray();
+    }
+};
+
+template <>
+struct ColumnTraits<ColumnTuple>
+{
+    using DataType = DataTypeTuple;
+
+    static py::object convertElement(const ColumnTuple * column, const DataTypePtr & data_type, size_t index)
+    {
+        const auto & tuple_data_type = typeid_cast<const DataTypeTuple &>(*data_type);
+
+        const auto & element_types = tuple_data_type.getElements();
+        size_t tuple_size = column->tupleSize();
+
+        NumpyArray numpy_array({});
+        numpy_array.init(tuple_size, false);
+
+        for (size_t i = 0; i < tuple_size; ++i)
+        {
+            numpy_array.append(column->getColumn(i), element_types[i], index);
+        }
+
+        return numpy_array.toArray();
+    }
+};
+
+template <>
+struct ColumnTraits<ColumnMap>
+{
+    using DataType = DataTypeMap;
+
+    static py::object convertElement(const ColumnMap * column, const DataTypePtr & data_type, size_t index)
+    {
+        return convertFieldToPython(*column, data_type, index);
+    }
+};
+
+template <>
+struct ColumnTraits<ColumnObject>
+{
+    using DataType = DataTypeObject;
+
+    static py::object convertElement(const ColumnObject * column, const DataTypePtr & data_type, size_t index)
+    {
+        return convertFieldToPython(*column, data_type, index);
+    }
+};
+
+template <>
+struct ColumnTraits<ColumnVariant>
+{
+    using DataType = DataTypeVariant;
+
+    static py::object convertElement(const ColumnVariant * column, const DataTypePtr & data_type, size_t index)
+    {
+        return convertFieldToPython(*column, data_type, index);
+    }
+};
+
+template <>
+struct ColumnTraits<ColumnDynamic>
+{
+    using DataType = DataTypeDynamic;
+
+    static py::object convertElement(const ColumnDynamic * column, const DataTypePtr & data_type, size_t index)
+    {
+        return convertFieldToPython(*column, data_type, index);
+    }
+};
+
+template <typename ColumnType>
+bool CHNestedColumnToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type)
+{
+    bool has_null = false;
+    const IColumn * data_column = &append_data.column;
+    const ColumnNullable * nullable_column = nullptr;
+
+    if (const auto * nullable = typeid_cast<const ColumnNullable *>(&append_data.column))
+    {
+        nullable_column = nullable;
+        data_column = &nullable->getNestedColumn();
+    }
+
+    const auto * typed_column = typeid_cast<const ColumnType *>(data_column);
+    if (!typed_column)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected specific column type");
+
+    auto * dest_ptr = reinterpret_cast<py::object *>(append_data.target_data);
+    auto * mask_ptr = append_data.target_mask;
+
+    for (size_t i = append_data.src_offset; i < append_data.src_offset + append_data.src_count; i++)
+    {
+        /// Destination slots are relative to dest_offset, so strip src_offset
+        /// from the loop index; otherwise a non-zero source offset would write
+        /// past the range reserved for this chunk.
+        size_t offset = append_data.dest_offset + (i - append_data.src_offset);
+        if (nullable_column && nullable_column->isNullAt(i))
+        {
+            dest_ptr[offset] = py::none();
+            mask_ptr[offset] = true;
+            has_null = true;
+        }
+        else
+        {
+            dest_ptr[offset] = ColumnTraits<ColumnType>::convertElement(typed_column, data_type, i);
+            mask_ptr[offset] = false;
+        }
+    }
+
+    return has_null;
+}
+
+bool CHColumnArrayToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type)
+{
+    return CHNestedColumnToNumpyArray<ColumnArray>(append_data, data_type);
+}
+
+bool CHColumnTupleToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type)
+{
+    return CHNestedColumnToNumpyArray<ColumnTuple>(append_data, data_type);
+}
+
+bool CHColumnMapToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type)
+{
+    return CHNestedColumnToNumpyArray<ColumnMap>(append_data, data_type);
+}
+
+bool CHColumnObjectToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type)
+{
+    return CHNestedColumnToNumpyArray<ColumnObject>(append_data, data_type);
+}
+
+bool CHColumnVariantToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type)
+{
+    return CHNestedColumnToNumpyArray<ColumnVariant>(append_data, data_type);
+}
+
+bool CHColumnDynamicToNumpyArray(NumpyAppendData & append_data, const DataTypePtr & data_type)
+{
+    return CHNestedColumnToNumpyArray<ColumnDynamic>(append_data, data_type);
+}
+
+} // namespace CHDB
diff --git a/programs/local/NumpyNestedTypes.h b/programs/local/NumpyNestedTypes.h
new file mode 100644
index 00000000000..b3e0a68520e
--- /dev/null
+++ b/programs/local/NumpyNestedTypes.h
@@ -0,0 +1,20 @@
+#pragma once
+
+#include "NumpyArray.h"
+
+namespace CHDB
+{
+
+bool CHColumnArrayToNumpyArray(NumpyAppendData & append_data, const DB::DataTypePtr & data_type);
+
+bool CHColumnTupleToNumpyArray(NumpyAppendData & append_data, const DB::DataTypePtr & data_type);
+
+bool CHColumnMapToNumpyArray(NumpyAppendData & append_data, const DB::DataTypePtr & data_type);
+
+bool CHColumnObjectToNumpyArray(NumpyAppendData & append_data, const DB::DataTypePtr & data_type);
+
+bool CHColumnVariantToNumpyArray(NumpyAppendData & append_data, const DB::DataTypePtr & data_type);
+
+bool CHColumnDynamicToNumpyArray(NumpyAppendData & append_data, const DB::DataTypePtr & data_type);
+
+} // namespace CHDB
diff --git a/programs/local/NumpyType.cpp b/programs/local/NumpyType.cpp
index cf92c8dece9..acdf84a9f56 100644
--- a/programs/local/NumpyType.cpp
+++ b/programs/local/NumpyType.cpp
@@ -1,11 +1,14 @@
 #include "NumpyType.h"
+#include "PythonImporter.h"
-#include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
+#include
 
 using namespace DB;
@@ -231,4 +234,194 @@ std::shared_ptr NumpyToDataType(const NumpyType & col_type)
     }
 }
 
+String DataTypeToNumpyTypeStr(const std::shared_ptr<const IDataType> & data_type)
+{
+    if (!data_type)
+        return "object";
+
+    auto actual_data_type = removeLowCardinalityAndNullable(data_type);
+
+    TypeIndex type_id = actual_data_type->getTypeId();
+    switch (type_id)
+    {
+        case TypeIndex::Nothing:
+            return "object";
+
+        case TypeIndex::Int8:
+            return "int8";
+
+        case TypeIndex::UInt8:
+        /// Special case: UInt8 could be Bool type, need to check getName()
+        {
+            auto is_bool = isBool(actual_data_type);
+            return is_bool ? "bool" : "uint8";
+        }
+
+        case TypeIndex::Int16:
+            return "int16";
+
+        case TypeIndex::UInt16:
+            return "uint16";
+
+        case TypeIndex::Int32:
+            return "int32";
+
+        case TypeIndex::UInt32:
+            return "uint32";
+
+        case TypeIndex::Int64:
+            return "int64";
+
+        case TypeIndex::UInt64:
+            return "uint64";
+
+        case TypeIndex::BFloat16:
+        case TypeIndex::Float32:
+            return "float32";
+
+        case TypeIndex::Int256:
+        case TypeIndex::UInt256:
+        case TypeIndex::Int128:
+        case TypeIndex::UInt128:
+        case TypeIndex::Float64:
+            return "float64";
+
+        case TypeIndex::String:
+        case TypeIndex::FixedString:
+            return "object";
+
+        case TypeIndex::DateTime:
+            return "datetime64[s]";
+
+        case TypeIndex::DateTime64:
+        {
+            if (const auto * dt64 = typeid_cast<const DataTypeDateTime64 *>(actual_data_type.get()))
+            {
+                UInt32 scale = dt64->getScale();
+                if (scale <= 9)
+                    return "datetime64[ns]";
+
+                throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type {}, scale {}", data_type->getName(), scale);
+            }
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected datetime64 type {}", data_type->getName());
+        }
+
+        case TypeIndex::Date:
+        case TypeIndex::Date32:
+            return "datetime64[D]"; // pandas converts datetime64[D] to datetime64[s] internally
+
+        case TypeIndex::Time:
+        case TypeIndex::Time64:
+            return "object";
+
+        case TypeIndex::Interval:
+        {
+            if (const auto * interval = typeid_cast<const DataTypeInterval *>(actual_data_type.get()))
+            {
+                IntervalKind kind = interval->getKind();
+                switch (kind.kind)
+                {
+                    case IntervalKind::Kind::Nanosecond:
+                        return "timedelta64[ns]";
+                    case IntervalKind::Kind::Microsecond:
+                        return "timedelta64[us]";
+                    case IntervalKind::Kind::Millisecond:
+                        return "timedelta64[ms]";
+                    case IntervalKind::Kind::Second:
+                        return "timedelta64[s]";
+                    case IntervalKind::Kind::Minute:
+                        return "timedelta64[m]";
+                    case IntervalKind::Kind::Hour:
+                        return "timedelta64[h]";
+                    case IntervalKind::Kind::Day:
+                        return "timedelta64[D]";
+                    case IntervalKind::Kind::Week:
+                        return "timedelta64[W]";
+                    case IntervalKind::Kind::Month:
+                        return "timedelta64[M]";
+                    case IntervalKind::Kind::Quarter:
+                        /// numpy doesn't have quarter type
+                        return "timedelta64[M]";
+                    case IntervalKind::Kind::Year:
+                        return "timedelta64[Y]";
+                    default:
+                        throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected interval kind {}", kind.kind);
+                }
+            }
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected interval type {}", data_type->getName());
+        }
+
+        case TypeIndex::UUID:
+        case TypeIndex::IPv4:
+        case TypeIndex::IPv6:
+            return "object";
+
+        case TypeIndex::Decimal32:
+        case TypeIndex::Decimal64:
+        case TypeIndex::Decimal128:
+        case TypeIndex::Decimal256:
+            return "float64";
+
+        case TypeIndex::Array:
+        case TypeIndex::Tuple:
+        case TypeIndex::Map:
+        case TypeIndex::Set:
+        case TypeIndex::Dynamic:
+        case TypeIndex::Variant:
+        case TypeIndex::Object:
+            return "object";
+
+        case TypeIndex::Enum8:
+        case TypeIndex::Enum16:
+            return "object";
+
+        case TypeIndex::Nullable:
+        default:
+            throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported type {}", data_type->getName());
+    }
+}
+
+py::object ConvertNumpyDtype(const py::handle & numpy_array)
+{
+    chassert(py::gil_check());
+
+    auto & import_cache = PythonImporter::ImportCache();
+
+    auto dtype = numpy_array.attr("dtype");
+    if (!py::isinstance(numpy_array, import_cache.numpy.ma.masked_array()))
+    {
+        return dtype;
+    }
+
+    /// Promote the dtype of a masked array to the matching pandas nullable
+    /// extension dtype so masked (NULL) slots can be represented as pd.NA.
+    auto numpy_type = ConvertNumpyType(dtype);
+    switch (numpy_type.type)
+    {
+        case NumpyNullableType::BOOL:
+            return import_cache.pandas.BooleanDtype()();
+        case NumpyNullableType::UINT_8:
+            return import_cache.pandas.UInt8Dtype()();
case NumpyNullableType::UINT_16: + return import_cache.pandas.UInt16Dtype()(); + case NumpyNullableType::UINT_32: + return import_cache.pandas.UInt32Dtype()(); + case NumpyNullableType::UINT_64: + return import_cache.pandas.UInt64Dtype()(); + case NumpyNullableType::INT_8: + return import_cache.pandas.Int8Dtype()(); + case NumpyNullableType::INT_16: + return import_cache.pandas.Int16Dtype()(); + case NumpyNullableType::INT_32: + return import_cache.pandas.Int32Dtype()(); + case NumpyNullableType::INT_64: + return import_cache.pandas.Int64Dtype()(); + case NumpyNullableType::FLOAT_32: + return import_cache.pandas.Float32Dtype()(); + case NumpyNullableType::FLOAT_64: + return import_cache.pandas.Float64Dtype()(); + case NumpyNullableType::FLOAT_16: + default: + return dtype; + } +} + } // namespace CHDB diff --git a/programs/local/NumpyType.h b/programs/local/NumpyType.h index c58fee13768..da8ccd5eafe 100644 --- a/programs/local/NumpyType.h +++ b/programs/local/NumpyType.h @@ -48,6 +48,11 @@ enum class NumpyObjectType : uint8_t { }; NumpyType ConvertNumpyType(const py::handle & col_type); + std::shared_ptr NumpyToDataType(const NumpyType & col_type); +String DataTypeToNumpyTypeStr(const std::shared_ptr & data_type); + +py::object ConvertNumpyDtype(const py::handle & numpy_array); + } // namespace CHDB diff --git a/programs/local/ObjectToPython.cpp b/programs/local/ObjectToPython.cpp new file mode 100644 index 00000000000..c3caf91ef58 --- /dev/null +++ b/programs/local/ObjectToPython.cpp @@ -0,0 +1,152 @@ +#include "ObjectToPython.h" +#include "FieldToPython.h" + +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +} + +namespace CHDB +{ + +using namespace DB; +namespace py = pybind11; + +struct PathElements +{ + explicit PathElements(const String & path) + { + const char * start = path.data(); + const char * end = start + path.size(); + const char * pos = start; + const char * last_dot_pos = pos - 1; + for (pos = start; pos != end; ++pos) + { + if (*pos == '.') + { + elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); + last_dot_pos = pos; + } + } + + elements.emplace_back(last_dot_pos + 1, size_t(pos - last_dot_pos - 1)); + } + + size_t size() const { return elements.size(); } + + std::vector elements; +}; + +py::object convertObjectToPython( + const IColumn & column, + const DataTypePtr & type, + size_t index) +{ + const IColumn * data_column = &column; + if (const auto * nullable = typeid_cast(&column)) + { + data_column = &nullable->getNestedColumn(); + } + + const auto & column_object = typeid_cast(*data_column); + const auto & typed_paths = column_object.getTypedPaths(); + const auto & dynamic_paths = column_object.getDynamicPaths(); + const auto & shared_data_offsets = column_object.getSharedDataOffsets(); + const auto [shared_data_paths, shared_data_values] = column_object.getSharedDataPathsAndValues(); + + size_t shared_data_offset = shared_data_offsets[static_cast(index) - 1]; + size_t shared_data_end = shared_data_offsets[static_cast(index)]; + + const auto & object_type = typeid_cast(*type); + const auto & specific_typed_paths = object_type.getTypedPaths(); + const auto & dynamic_data_type = object_type.getDynamicType(); + + std::vector> path_values; + path_values.reserve(typed_paths.size() + dynamic_paths.size() + (shared_data_end - shared_data_offset)); + + for (const auto & [path, column_ptr] : typed_paths) + { + auto iter = specific_typed_paths.find(path); + if 
(iter == specific_typed_paths.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Path {} not found in typed paths", path); + + const auto & specific_data_type = iter->second; + auto python_value = convertFieldToPython(*column_ptr, specific_data_type, index); + path_values.emplace_back(path, python_value); + } + + for (const auto & [path, dynamic_column] : dynamic_paths) + { + if (!dynamic_column->isNullAt(index)) + { + auto python_value = convertFieldToPython(*dynamic_column, dynamic_data_type, index); + path_values.emplace_back(path, python_value); + } + } + + size_t index_in_shared_data_values = shared_data_offset; + for (size_t i = shared_data_offset; i != shared_data_end; ++i) + { + auto path = shared_data_paths->getDataAt(i).toString(); + + auto tmp_dynamic_column = ColumnDynamic::create(); + tmp_dynamic_column->reserve(1); + ColumnObject::deserializeValueFromSharedData(shared_data_values, index_in_shared_data_values++, *tmp_dynamic_column); + + auto python_value = convertFieldToPython(*tmp_dynamic_column, dynamic_data_type, 0); + path_values.emplace_back(path, python_value); + } + + py::dict result; + + for (const auto & [path, value] : path_values) + { + PathElements path_elements(path); + + if (path_elements.size() == 1) + { + String key(path_elements.elements[0]); + result[key.c_str()] = value; + } + else + { + py::dict current_dict = result; + + for (size_t i = 0; i < path_elements.size() - 1; ++i) + { + String key(path_elements.elements[i]); + + if (current_dict.contains(key.c_str())) + { + py::object nested = (*current_dict)[key.c_str()]; + current_dict = nested.cast(); + } + else + { + py::dict new_dict; + current_dict[key.c_str()] = new_dict; + current_dict = new_dict; + } + } + + String final_key(path_elements.elements[path_elements.size() - 1]); + current_dict[final_key.c_str()] = value; + } + } + + return result; +} + +} // namespace CHDB diff --git a/programs/local/ObjectToPython.h b/programs/local/ObjectToPython.h new file mode 100644 index 00000000000..64d79e218fd --- /dev/null +++ b/programs/local/ObjectToPython.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include +#include + +namespace CHDB +{ + +pybind11::object convertObjectToPython( + const DB::IColumn & column, + const DB::DataTypePtr & type, + size_t index); + +} // namespace CHDB diff --git a/programs/local/PandasAnalyzer.cpp b/programs/local/PandasAnalyzer.cpp index f1c97c96772..57d6140c692 100644 --- a/programs/local/PandasAnalyzer.cpp +++ b/programs/local/PandasAnalyzer.cpp @@ -38,7 +38,7 @@ PandasAnalyzer::PandasAnalyzer(const DB::Settings & settings) bool PandasAnalyzer::Analyze(py::object column) { #if USE_JEMALLOC - ::Memory::MemoryCheckScope memory_check_scope; + ::Memory::MemoryCheckScope memory_check_scope; #endif if (sample_size == 0) return false; diff --git a/programs/local/PandasDataFrameBuilder.cpp b/programs/local/PandasDataFrameBuilder.cpp new file mode 100644 index 00000000000..7b570cebfcb --- /dev/null +++ b/programs/local/PandasDataFrameBuilder.cpp @@ -0,0 +1,176 @@ +#include "PandasDataFrameBuilder.h" +#include "PythonImporter.h" +#include "NumpyType.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ +extern const int LOGICAL_ERROR; +} + +} + +using namespace DB; + +namespace CHDB +{ + +PandasDataFrameBuilder::PandasDataFrameBuilder(const Block & sample) +{ + column_names.reserve(sample.columns()); + 
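+    /// The sample header fixes the column order and schema up front; row
+    /// data arrives later through addChunk() and is converted in finalize().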
column_types.reserve(sample.columns()); + + for (const auto & column : sample) + { + column_names.push_back(column.name); + column_types.push_back(column.type); + + /// Record timezone for timezone-aware types + if (const auto * dt = typeid_cast(column.type.get())) + column_timezones[column.name] = dt->getTimeZone().getTimeZone(); + else if (const auto * dt64 = typeid_cast(column.type.get())) + column_timezones[column.name] = dt64->getTimeZone().getTimeZone(); + } +} + +void PandasDataFrameBuilder::addChunk(const Chunk & chunk) +{ + if (chunk.hasRows()) + { + chunks.push_back(chunk.clone()); + total_rows += chunk.getNumRows(); + } +} + +py::object PandasDataFrameBuilder::genDataFrame(const py::handle & dict) +{ + auto & import_cache = PythonImporter::ImportCache(); + auto pandas = import_cache.pandas(); + if (!pandas) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Pandas is not installed"); + } + + py::object items = dict.attr("items")(); + for (const py::handle & item : items) { + auto key_value = py::cast(item); + py::handle key = key_value[0]; + py::handle value = key_value[1]; + + if (py::isinstance(value, import_cache.numpy.ma.masked_array())) + { + auto dtype = ConvertNumpyDtype(value); + auto series = pandas.attr("Series")(value.attr("data"), py::arg("dtype") = dtype); + series.attr("__setitem__")(value.attr("mask"), import_cache.pandas.NA()); + dict.attr("__setitem__")(key, series); + } + } + + auto df = pandas.attr("DataFrame").attr("from_dict")(dict); + + /// Apply timezone conversion for timezone-aware columns + changeToTZType(df); + + return df; +} + +void PandasDataFrameBuilder::changeToTZType(py::object & df) +{ + if (column_timezones.empty()) + return; + + for (const auto & [column_name, timezone_str] : column_timezones) + { + /// Check if column exists in DataFrame + if (!df.attr("__contains__")(column_name).cast()) + continue; + + /// Get the column + auto column = df[column_name.c_str()]; + + /// First localize to UTC (assuming the timestamps are in UTC) + auto utc_localized = column.attr("dt").attr("tz_localize")("UTC"); + + /// Then convert to the target timezone + auto tz_converted = utc_localized.attr("dt").attr("tz_convert")(timezone_str); + + /// Update the column in DataFrame + df.attr("__setitem__")(column_name.c_str(), tz_converted); + } +} + +void PandasDataFrameBuilder::finalize() +{ + if (is_finalized) + return; + + columns_data.reserve(column_types.size()); + + py::gil_scoped_acquire acquire; + + for (const auto & type : column_types) + { + columns_data.emplace_back(type); + } + + for (auto & column_data : columns_data) + { + column_data.init(total_rows); + } + + /// Process all chunks and append column data + for (const auto & chunk : chunks) + { + const auto & columns = chunk.getColumns(); + for (size_t col_idx = 0; col_idx < columns.size(); ++col_idx) + { + auto column = columns[col_idx]; + + columns_data[col_idx].append(column); + } + } + + chunks.clear(); + + /// Create pandas DataFrame + py::dict res; + for (size_t col_idx = 0; col_idx < column_names.size(); ++col_idx) { + auto & name = column_names[col_idx]; + auto & column_data = columns_data[col_idx]; + res[name.c_str()] = column_data.toArray(); + } + final_dataframe = genDataFrame(res); + + is_finalized = true; +} + +py::object PandasDataFrameBuilder::getDataFrame() +{ + chassert(is_finalized); + + py::gil_scoped_acquire acquire; + + columns_data.clear(); + return std::move(final_dataframe); +} +} diff --git a/programs/local/PandasDataFrameBuilder.h b/programs/local/PandasDataFrameBuilder.h new 
file mode 100644
index 00000000000..4c6d395e0a5
--- /dev/null
+++ b/programs/local/PandasDataFrameBuilder.h
@@ -0,0 +1,53 @@
+#pragma once
+
+#include "PybindWrapper.h"
+#include "NumpyArray.h"
+
+#include
+#include
+#include
+#include
+#include
+
+namespace CHDB
+{
+
+/// Builder class to convert ClickHouse Chunks to Pandas DataFrame
+/// Accumulates chunks and provides conversion to Python pandas DataFrame object
+class PandasDataFrameBuilder
+{
+public:
+    explicit PandasDataFrameBuilder(const DB::Block & sample);
+
+    ~PandasDataFrameBuilder() = default;
+
+    /// Add data chunk
+    void addChunk(const DB::Chunk & chunk);
+
+    /// Finalize and build pandas DataFrame from all collected chunks
+    void finalize();
+
+    /// Get the finalized pandas DataFrame
+    pybind11::object getDataFrame();
+
+private:
+    pybind11::object genDataFrame(const pybind11::handle & dict);
+    void changeToTZType(pybind11::object & df);
+
+    std::vector<String> column_names;
+    std::vector<DB::DataTypePtr> column_types;
+
+    /// Map column name to timezone string for timezone-aware types
+    std::unordered_map<String, String> column_timezones;
+
+    std::vector<DB::Chunk> chunks;
+    std::vector<NumpyArray> columns_data;
+
+    size_t total_rows = 0;
+    bool is_finalized = false;
+    pybind11::object final_dataframe;
+
+    Poco::Logger * log = &Poco::Logger::get("PandasDataFrameBuilder");
+};
+
+}
diff --git a/programs/local/PythonImportCache.cpp b/programs/local/PythonImportCache.cpp
index 6e24b35e934..85f30a9a732 100644
--- a/programs/local/PythonImportCache.cpp
+++ b/programs/local/PythonImportCache.cpp
@@ -53,7 +53,7 @@ py::handle PythonImportCacheItem::AddCache(PythonImportCache & cache, py::object
 void PythonImportCacheItem::LoadModule(PythonImportCache & cache)
 {
 #if USE_JEMALLOC
-    ::Memory::MemoryCheckScope memory_check_scope;
+    ::Memory::MemoryCheckScope memory_check_scope;
 #endif
     try
     {
diff --git a/programs/local/PythonImportCache.h b/programs/local/PythonImportCache.h
index 6bdf5cf7c8f..598069a60e2 100644
--- a/programs/local/PythonImportCache.h
+++ b/programs/local/PythonImportCache.h
@@ -2,9 +2,13 @@
 
 #include "DatetimeCacheItem.h"
 #include "DecimalCacheItem.h"
+#include "NumpyCacheItem.h"
 #include "PandasCacheItem.h"
 #include "PyArrowCacheItem.h"
 #include "PythonImportCacheItem.h"
+#include "UUIDCacheItem.h"
+#include "IPAddressCacheItem.h"
+#include "PytzCacheItem.h"
 
 #include
 
@@ -13,7 +17,8 @@ namespace CHDB {
 
 struct PythonImportCache;
 using PythonImportCachePtr = std::shared_ptr<PythonImportCache>;
 
-struct PythonImportCache {
+struct PythonImportCache
+{
 public:
     explicit PythonImportCache() = default;
 
@@ -23,6 +28,10 @@ struct PythonImportCache {
     PyarrowCacheItem pyarrow;
     DatetimeCacheItem datetime;
     DecimalCacheItem decimal;
+    NumpyCacheItem numpy;
+    UUIDCacheItem uuid;
+    IPAddressCacheItem ipaddress;
+    PytzCacheItem pytz;
 
     py::handle AddCache(py::object item);
diff --git a/programs/local/PytzCacheItem.h b/programs/local/PytzCacheItem.h
new file mode 100644
index 00000000000..3c6fccbe858
--- /dev/null
+++ b/programs/local/PytzCacheItem.h
@@ -0,0 +1,19 @@
+#pragma once
+
+#include "PythonImportCacheItem.h"
+
+namespace CHDB {
+
+struct PytzCacheItem : public PythonImportCacheItem
+{
+public:
+    static constexpr const char * Name = "pytz";
+
+    PytzCacheItem() : PythonImportCacheItem("pytz"), timezone("timezone", this) {}
+
+    ~PytzCacheItem() override = default;
+
+    PythonImportCacheItem timezone;
+};
+
+} // namespace CHDB
diff --git a/programs/local/QueryResult.h b/programs/local/QueryResult.h
index ebd79ec042e..bbd924e3931 100644
--- a/programs/local/QueryResult.h
+++ b/programs/local/QueryResult.h
@@ -64,6
+64,9 @@ class MaterializedQueryResult : public QueryResult { String string() { + if (!result_buffer) + return {}; + return String(result_buffer->begin(), result_buffer->end()); } diff --git a/programs/local/UUIDCacheItem.h b/programs/local/UUIDCacheItem.h new file mode 100644 index 00000000000..ee21b48ca22 --- /dev/null +++ b/programs/local/UUIDCacheItem.h @@ -0,0 +1,21 @@ +#pragma once + +#include "PythonImportCacheItem.h" + +namespace CHDB { + +struct UUIDCacheItem : public PythonImportCacheItem +{ +public: + static constexpr const char * Name = "uuid"; + + UUIDCacheItem() : PythonImportCacheItem("uuid"), UUID("UUID", this) + { + } + + ~UUIDCacheItem() override = default; + + PythonImportCacheItem UUID; +}; + +} // namespace CHDB diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 4980f8d610f..aad4ad78e78 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -151,6 +151,11 @@ namespace ErrorCodes extern const int CANNOT_WRITE_TO_FILE; } +#if USE_PYTHON +/// Custom DataFrame format creator function pointer +static CustomOutputFormatCreator g_dataframe_format_creator = nullptr; +#endif + } namespace ProfileEvents @@ -644,6 +649,22 @@ try { if (!output_format) { +#if USE_PYTHON + if (Poco::toLower(default_output_format) == "dataframe") + { + auto creator = getDataFrameFormatCreator(); + if (creator) + { + output_format = creator(std::make_shared(block)); + return; + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "DataFrame output format creator not set"); + } + } +#endif + /// Ignore all results when fuzzing as they can be huge. if (query_fuzzer_runs) { @@ -4035,4 +4056,16 @@ void ClientBase::showClientVersion() output_stream << VERSION_NAME << " " + getName() + " version " << VERSION_STRING << VERSION_OFFICIAL << "." << std::endl; } +#if USE_PYTHON +void ClientBase::setDataFrameFormatCreator(CustomOutputFormatCreator creator) +{ + g_dataframe_format_creator = std::move(creator); +} + +CustomOutputFormatCreator ClientBase::getDataFrameFormatCreator() +{ + return g_dataframe_format_creator; +} +#endif + } diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 8e08e60e541..86fdb78d798 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -94,6 +94,11 @@ struct StreamingQueryContext StreamingQueryContext() = default; }; +#if USE_PYTHON +/// Function pointer type for creating custom output formats (e.g. DataFrame) +using CustomOutputFormatCreator = std::function(SharedHeader)>; +#endif + /** * The base class which encapsulates the core functionality of a client. * Can be used in a standalone application (clickhouse-client or clickhouse-local), @@ -353,6 +358,14 @@ class ClientBase String appendSmileyIfNeeded(const String & prompt); +#if USE_PYTHON + /// Set custom DataFrame format creator + static void setDataFrameFormatCreator(CustomOutputFormatCreator creator); + + /// Get custom DataFrame format creator + static CustomOutputFormatCreator getDataFrameFormatCreator(); +#endif + /// Should be one of the first, to be destroyed the last, /// since other members can use them. /// This holder may not be initialized in case if we run the client in the embedded mode (SSH). 
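A note for reviewers (not part of the diff): the sketch below mirrors, in plain
Python, what PandasDataFrameBuilder::genDataFrame() and changeToTZType() do for
a nullable Int64 column and a timezone-aware DateTime column. Only numpy and
pandas are assumed; the column name "x" and the sample values are illustrative.

    import numpy as np
    import numpy.ma as ma
    import pandas as pd

    # NumpyArray::toArray(): chDB hands over a values buffer plus a boolean
    # mask; rows that were NULL in ClickHouse are masked.
    col = ma.masked_array(np.array([1, 0, 3], dtype="int64"),
                          mask=[False, True, False])

    # genDataFrame(): a masked array is wrapped in a pandas Series using the
    # matching nullable dtype (ConvertNumpyDtype maps int64 -> "Int64"), and
    # the masked slots are set to pd.NA.
    s = pd.Series(col.data, dtype="Int64")
    s[col.mask] = pd.NA
    df = pd.DataFrame({"x": s})
    assert df["x"].isna().tolist() == [False, True, False]

    # changeToTZType(): timestamps arrive as naive UTC values; they are
    # localized to UTC and then converted to the column's recorded timezone.
    ts = pd.Series(pd.to_datetime(["2023-12-25 06:30:45"]))
    ts = ts.dt.tz_localize("UTC").dt.tz_convert("Asia/Shanghai")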
diff --git a/tests/test_complex_pyobj.py b/tests/test_complex_pyobj.py index 241fffc0105..e393f042e70 100644 --- a/tests/test_complex_pyobj.py +++ b/tests/test_complex_pyobj.py @@ -42,14 +42,43 @@ def test_df_with_na(self): self.assertEqual(ret.dtypes["E"], "object") self.assertEqual(ret.dtypes["F"], "object") self.assertEqual(ret.dtypes["G"], "object") - self.assertEqual( - str(ret), - """ A B C D E F G -0 1 4.0 True a [1, 2] {"a": 1, "b": 2} -1 2 5.0 False b [3, 4] {"c": 3, "d": 4} -2 3 6.0 True c [5, 6] {"e": 5, "f": 6} -3 """, - ) + self.assertEqual(ret.shape, (4, 7)) + + # Row 0 + self.assertEqual(ret.iloc[0]["A"], '1') + self.assertEqual(ret.iloc[0]["B"], '4.0') + self.assertEqual(ret.iloc[0]["C"], 'True') + self.assertEqual(ret.iloc[0]["D"], 'a') + self.assertEqual(ret.iloc[0]["E"], '') + self.assertEqual(ret.iloc[0]["F"], '[1, 2]') + self.assertEqual(ret.iloc[0]["G"], '{"a": 1, "b": 2}') + + # Row 1 + self.assertEqual(ret.iloc[1]["A"], '2') + self.assertEqual(ret.iloc[1]["B"], '5.0') + self.assertEqual(ret.iloc[1]["C"], 'False') + self.assertEqual(ret.iloc[1]["D"], 'b') + self.assertEqual(ret.iloc[1]["E"], '') + self.assertEqual(ret.iloc[1]["F"], '[3, 4]') + self.assertEqual(ret.iloc[1]["G"], '{"c": 3, "d": 4}') + + # Row 2 + self.assertEqual(ret.iloc[2]["A"], '3') + self.assertEqual(ret.iloc[2]["B"], '6.0') + self.assertEqual(ret.iloc[2]["C"], 'True') + self.assertEqual(ret.iloc[2]["D"], 'c') + self.assertEqual(ret.iloc[2]["E"], '') + self.assertEqual(ret.iloc[2]["F"], '[5, 6]') + self.assertEqual(ret.iloc[2]["G"], '{"e": 5, "f": 6}') + + # Row 3 + self.assertEqual(ret.iloc[3]["A"], '') + self.assertEqual(ret.iloc[3]["B"], '') + self.assertEqual(ret.iloc[3]["C"], '') + self.assertEqual(ret.iloc[3]["D"], '') + self.assertEqual(ret.iloc[3]["E"], '') + self.assertEqual(ret.iloc[3]["F"], '') + self.assertEqual(ret.iloc[3]["G"], '') def test_df_without_na(self): ret = chdb.query( @@ -65,14 +94,44 @@ def test_df_without_na(self): self.assertEqual(ret.dtypes["E"], "object") self.assertEqual(ret.dtypes["F"], "object") self.assertEqual(ret.dtypes["G"], "object") - self.assertEqual( - str(ret), - """ A B C D E F G -0 1 4.0 1 a a [1, 2] {"a": 1, "b": 2} -1 2 5.0 0 b b [3, 4] {"c": 3, "d": 4} -2 3 6.0 1 c c [5, 6] {"e": 5, "f": 6} -3 4 7.0 0 d d [7, 8] {"g": 7, "h": 8}""", - ) + + self.assertEqual(ret.shape, (4, 7)) + + # Row 0 + self.assertEqual(ret.iloc[0]["A"], 1) + self.assertEqual(ret.iloc[0]["B"], 4.0) + self.assertEqual(ret.iloc[0]["C"], 1) + self.assertEqual(ret.iloc[0]["D"], "a") + self.assertEqual(ret.iloc[0]["E"], "a") + self.assertEqual(ret.iloc[0]["F"], '[1, 2]') + self.assertEqual(ret.iloc[0]["G"], {"a": 1, "b": 2}) + + # Row 1 + self.assertEqual(ret.iloc[1]["A"], 2) + self.assertEqual(ret.iloc[1]["B"], 5.0) + self.assertEqual(ret.iloc[1]["C"], 0) + self.assertEqual(ret.iloc[1]["D"], "b") + self.assertEqual(ret.iloc[1]["E"], "b") + self.assertEqual(ret.iloc[1]["F"], '[3, 4]') + self.assertEqual(ret.iloc[1]["G"], {"c": 3, "d": 4}) + + # Row 2 + self.assertEqual(ret.iloc[2]["A"], 3) + self.assertEqual(ret.iloc[2]["B"], 6.0) + self.assertEqual(ret.iloc[2]["C"], 1) + self.assertEqual(ret.iloc[2]["D"], "c") + self.assertEqual(ret.iloc[2]["E"], "c") + self.assertEqual(ret.iloc[2]["F"], '[5, 6]') + self.assertEqual(ret.iloc[2]["G"], {"e": 5, "f": 6}) + + # Row 3 + self.assertEqual(ret.iloc[3]["A"], 4) + self.assertEqual(ret.iloc[3]["B"], 7.0) + self.assertEqual(ret.iloc[3]["C"], 0) + self.assertEqual(ret.iloc[3]["D"], "d") + self.assertEqual(ret.iloc[3]["E"], "d") + 
self.assertEqual(ret.iloc[3]["F"], '[7, 8]') + self.assertEqual(ret.iloc[3]["G"], {"g": 7, "h": 8}) if __name__ == "__main__": diff --git a/tests/test_dataframe_column_types.py b/tests/test_dataframe_column_types.py new file mode 100644 index 00000000000..3390743d37c --- /dev/null +++ b/tests/test_dataframe_column_types.py @@ -0,0 +1,1547 @@ +#!/usr/bin/env python3 + +import unittest +import pandas as pd +import chdb +from datetime import datetime, date +import numpy as np +import math +import uuid +import ipaddress + + +class TestDataFrameColumnTypes(unittest.TestCase): + + def setUp(self): + self.session = chdb.session.Session("./tmp") + + def tearDown(self): + self.session.close() + + @unittest.skip("") + def test_integer_types(self): + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toInt8(-128) as int8_val, + toInt16(-32768) as int16_val, + toInt32(-2147483648) as int32_val, + toInt64(-9223372036854775808) as int64_val, + toInt128('-170141183460469231731687303715884105728') as int128_val, + toInt256('-57896044618658097711785492504343953926634992332820282019728792003956564819968') as int256_val, + toUInt8(255) as UInt8_val, + toUInt16(65535) as uint16_val, + toUInt32(4294967295) as uint32_val, + toUInt64(18446744073709551615) as uint64_val, + toUInt128('340282366920938463463374607431768211455') as uint128_val, + toUInt256('115792089237316195423570985008687907853269984665640564039457584007913129639935') as uint256_val + UNION ALL + SELECT + 2 as row_id, + toInt8(127) as int8_val, + toInt16(32767) as int16_val, + toInt32(2147483647) as int32_val, + toInt64(9223372036854775807) as int64_val, + toInt128('170141183460469231731687303715884105727') as int128_val, + toInt256('57896044618658097711785492504343953926634992332820282019728792003956564819967') as int256_val, + toUInt8(254) as UInt8_val, + toUInt16(65534) as uint16_val, + toUInt32(4294967294) as uint32_val, + toUInt64(18446744073709551614) as uint64_val, + toUInt128('340282366920938463463374607431768211454') as uint128_val, + toUInt256('115792089237316195423570985008687907853269984665640564039457584007913129639934') as uint256_val + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row (minimum/maximum values) + self.assertEqual(ret.iloc[0]["int8_val"], -128) + self.assertEqual(ret.iloc[0]["int16_val"], -32768) + self.assertEqual(ret.iloc[0]["int32_val"], -2147483648) + self.assertEqual(ret.iloc[0]["int64_val"], -9223372036854775808) + self.assertEqual(ret.iloc[0]["int128_val"], float(-170141183460469231731687303715884105728)) + self.assertEqual(ret.iloc[0]["int256_val"], float(-57896044618658097711785492504343953926634992332820282019728792003956564819968)) + self.assertEqual(ret.iloc[0]["UInt8_val"], 255) + self.assertEqual(ret.iloc[0]["uint16_val"], 65535) + self.assertEqual(ret.iloc[0]["uint32_val"], 4294967295) + self.assertEqual(ret.iloc[0]["uint64_val"], 18446744073709551615) + self.assertEqual(ret.iloc[0]["uint128_val"], float(340282366920938463463374607431768211455)) + self.assertEqual(ret.iloc[0]["uint256_val"], float(115792089237316195423570985008687907853269984665640564039457584007913129639935)) + + # Test second row (maximum/near-maximum values) + self.assertEqual(ret.iloc[1]["int8_val"], 127) + self.assertEqual(ret.iloc[1]["int16_val"], 32767) + self.assertEqual(ret.iloc[1]["int32_val"], 2147483647) + self.assertEqual(ret.iloc[1]["int64_val"], 
9223372036854775807) + self.assertEqual(ret.iloc[1]["int128_val"], float(170141183460469231731687303715884105727)) + self.assertEqual(ret.iloc[1]["int256_val"], float(57896044618658097711785492504343953926634992332820282019728792003956564819967)) + self.assertEqual(ret.iloc[1]["UInt8_val"], 254) + self.assertEqual(ret.iloc[1]["uint16_val"], 65534) + self.assertEqual(ret.iloc[1]["uint32_val"], 4294967294) + self.assertEqual(ret.iloc[1]["uint64_val"], 18446744073709551614) + self.assertEqual(ret.iloc[1]["uint128_val"], float(340282366920938463463374607431768211454)) + self.assertEqual(ret.iloc[1]["uint256_val"], float(115792089237316195423570985008687907853269984665640564039457584007913129639934)) + + # Precise data type validation + expected_types = { + "int8_val": "int8", + "int16_val": "int16", + "int32_val": "int32", + "int64_val": "int64", + "int128_val": "float64", # Int128 mapped to float64 in ClickHouse->pandas conversion + "int256_val": "float64", # Int256 mapped to float64 in ClickHouse->pandas conversion + "UInt8_val": "uint8", + "uint16_val": "uint16", + "uint32_val": "uint32", + "uint64_val": "uint64", + "uint128_val": "float64", # UInt128 mapped to float64 in ClickHouse->pandas conversion + "uint256_val": "float64" # UInt256 mapped to float64 in ClickHouse->pandas conversion + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_float_types(self): + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toFloat32(3.14159265) as float32_val, + toFloat32(-3.40282347e+38) as float32_min, + toFloat32(3.40282347e+38) as float32_max, + toFloat64(2.718281828459045) as float64_val, + toFloat64(-1.7976931348623157e+308) as float64_min, + toFloat64(1.7976931348623157e+308) as float64_max, + toBFloat16(1.5) as bfloat16_val, + toBFloat16(-3.389531389e+38) as bfloat16_min, + toBFloat16(3.389531389e+38) as bfloat16_max + UNION ALL + SELECT + 2 as row_id, + toFloat32(0.0) as float32_val, + toFloat32(1.175494351e-38) as float32_min, + toFloat32(-1.175494351e-38) as float32_max, + toFloat64(0.0) as float64_val, + toFloat64(2.2250738585072014e-308) as float64_min, + toFloat64(-2.2250738585072014e-308) as float64_max, + toBFloat16(0.0) as bfloat16_val, + toBFloat16(1.175494351e-38) as bfloat16_min, + toBFloat16(-1.175494351e-38) as bfloat16_max + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[1][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row - regular and extreme values + self.assertAlmostEqual(ret.iloc[0]["float32_val"], 3.14159265, places=6) + self.assertAlmostEqual(ret.iloc[0]["float32_min"], -3.40282347e+38, delta=1e30) + self.assertAlmostEqual(ret.iloc[0]["float32_max"], 3.40282347e+38, delta=1e30) + self.assertAlmostEqual(ret.iloc[0]["float64_val"], 2.718281828459045, places=15) + self.assertAlmostEqual(ret.iloc[0]["float64_min"], -1.7976931348623157e+308, delta=1e300) + self.assertAlmostEqual(ret.iloc[0]["float64_max"], 1.7976931348623157e+308, delta=1e300) + self.assertAlmostEqual(ret.iloc[0]["bfloat16_val"], 1.5, places=2) + self.assertAlmostEqual(ret.iloc[0]["bfloat16_min"], -3.389531389e+38, delta=1e30) + self.assertAlmostEqual(ret.iloc[0]["bfloat16_max"], 3.389531389e+38, delta=1e30) + + # Test second row - zero and small values + self.assertEqual(ret.iloc[1]["float32_val"], 0.0) + self.assertAlmostEqual(ret.iloc[1]["float32_min"], 
1.175494351e-38, delta=1e-40) + self.assertAlmostEqual(ret.iloc[1]["float32_max"], -1.175494351e-38, delta=1e-40) + self.assertEqual(ret.iloc[1]["float64_val"], 0.0) + self.assertAlmostEqual(ret.iloc[1]["float64_min"], 2.2250738585072014e-308, delta=1e-310) + self.assertAlmostEqual(ret.iloc[1]["float64_max"], -2.2250738585072014e-308, delta=1e-310) + self.assertEqual(ret.iloc[1]["bfloat16_val"], 0.0) + self.assertAlmostEqual(ret.iloc[1]["bfloat16_min"], 1.175494351e-38, delta=1e-40) + self.assertAlmostEqual(ret.iloc[1]["bfloat16_max"], -1.175494351e-38, delta=1e-40) + + # Precise data type validation + expected_types = { + "float32_val": "float32", + "float32_min": "float32", + "float32_max": "float32", + "float64_val": "float64", + "float64_min": "float64", + "float64_max": "float64", + "bfloat16_val": "float32", # BFloat16 typically mapped to float32 in pandas + "bfloat16_min": "float32", + "bfloat16_max": "float32" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_float_special_values(self): + """Test Infinity and NaN values for all float types""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toFloat32(1.0/0.0) as float32_pos_inf, + toFloat32(-1.0/0.0) as float32_neg_inf, + toFloat32(0.0/0.0) as float32_nan, + toFloat64(1.0/0.0) as float64_pos_inf, + toFloat64(-1.0/0.0) as float64_neg_inf, + toFloat64(0.0/0.0) as float64_nan, + toBFloat16(1.0/0.0) as bfloat16_pos_inf, + toBFloat16(-1.0/0.0) as bfloat16_neg_inf, + toBFloat16(0.0/0.0) as bfloat16_nan + UNION ALL + SELECT + 2 as row_id, + toFloat32(1.0/0.0) as float32_pos_inf, + toFloat32(-1.0/0.0) as float32_neg_inf, + toFloat32(0.0/0.0) as float32_nan, + toFloat64(1.0/0.0) as float64_pos_inf, + toFloat64(-1.0/0.0) as float64_neg_inf, + toFloat64(0.0/0.0) as float64_nan, + toBFloat16(1.0/0.0) as bfloat16_pos_inf, + toBFloat16(-1.0/0.0) as bfloat16_neg_inf, + toBFloat16(0.0/0.0) as bfloat16_nan + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test Float32 special values + self.assertTrue(math.isinf(ret.iloc[0]["float32_pos_inf"])) + self.assertTrue(ret.iloc[0]["float32_pos_inf"] > 0) # positive infinity + self.assertTrue(math.isinf(ret.iloc[0]["float32_neg_inf"])) + self.assertTrue(ret.iloc[0]["float32_neg_inf"] < 0) # negative infinity + self.assertTrue(math.isnan(ret.iloc[0]["float32_nan"])) + + # Test Float64 special values + self.assertTrue(math.isinf(ret.iloc[0]["float64_pos_inf"])) + self.assertTrue(ret.iloc[0]["float64_pos_inf"] > 0) # positive infinity + self.assertTrue(math.isinf(ret.iloc[0]["float64_neg_inf"])) + self.assertTrue(ret.iloc[0]["float64_neg_inf"] < 0) # negative infinity + self.assertTrue(math.isnan(ret.iloc[0]["float64_nan"])) + + # Test BFloat16 special values + self.assertTrue(math.isinf(ret.iloc[0]["bfloat16_pos_inf"])) + self.assertTrue(ret.iloc[0]["bfloat16_pos_inf"] > 0) # positive infinity + self.assertTrue(math.isinf(ret.iloc[0]["bfloat16_neg_inf"])) + self.assertTrue(ret.iloc[0]["bfloat16_neg_inf"] < 0) # negative infinity + self.assertTrue(math.isnan(ret.iloc[0]["bfloat16_nan"])) + + # Test second row (same values, consistency check) + self.assertTrue(math.isinf(ret.iloc[1]["float32_pos_inf"])) + self.assertTrue(ret.iloc[1]["float32_pos_inf"] > 0) + self.assertTrue(math.isinf(ret.iloc[1]["float32_neg_inf"])) + 
self.assertTrue(ret.iloc[1]["float32_neg_inf"] < 0) + self.assertTrue(math.isnan(ret.iloc[1]["float32_nan"])) + + self.assertTrue(math.isinf(ret.iloc[1]["float64_pos_inf"])) + self.assertTrue(ret.iloc[1]["float64_pos_inf"] > 0) + self.assertTrue(math.isinf(ret.iloc[1]["float64_neg_inf"])) + self.assertTrue(ret.iloc[1]["float64_neg_inf"] < 0) + self.assertTrue(math.isnan(ret.iloc[1]["float64_nan"])) + + self.assertTrue(math.isinf(ret.iloc[1]["bfloat16_pos_inf"])) + self.assertTrue(ret.iloc[1]["bfloat16_pos_inf"] > 0) + self.assertTrue(math.isinf(ret.iloc[1]["bfloat16_neg_inf"])) + self.assertTrue(ret.iloc[1]["bfloat16_neg_inf"] < 0) + self.assertTrue(math.isnan(ret.iloc[1]["bfloat16_nan"])) + + # Precise data type validation + expected_types = { + "float32_pos_inf": "float32", + "float32_neg_inf": "float32", + "float32_nan": "float32", + "float64_pos_inf": "float64", + "float64_neg_inf": "float64", + "float64_nan": "float64", + "bfloat16_pos_inf": "float32", # BFloat16 typically mapped to float32 in pandas + "bfloat16_neg_inf": "float32", + "bfloat16_nan": "float32" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_decimal_types(self): + """Test Decimal32, Decimal64, Decimal128, Decimal256 types""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toDecimal32('123.456', 3) as decimal32_val, + toDecimal32('-999999.999', 3) as decimal32_min, + toDecimal32('999999.999', 3) as decimal32_max, + toDecimal64('123456.789012', 6) as decimal64_val, + toDecimal64('-999999999999.999999', 6) as decimal64_min, + toDecimal64('999999999999.999999', 6) as decimal64_max, + toDecimal128('12345678901234567890123456789.123456789', 9) as decimal128_val, + toDecimal128('-12345678901234567890123456789.123456789', 9) as decimal128_min, + toDecimal128('12345678901234567890123456789.123456789', 9) as decimal128_max, + toDecimal256('1234567890123456789012345678901234567890123456789012345678.123456789012345678', 18) as decimal256_val, + toDecimal256('-1234567890123456789012345678901234567890123456789012345678.123456789012345678', 18) as decimal256_min, + toDecimal256('1234567890123456789012345678901234567890123456789012345678.123456789012345678', 18) as decimal256_max + UNION ALL + SELECT + 2 as row_id, + toDecimal32('0.001', 3) as decimal32_val, + toDecimal32('0.000', 3) as decimal32_min, + toDecimal32('1.000', 3) as decimal32_max, + toDecimal64('0.000001', 6) as decimal64_val, + toDecimal64('0.000000', 6) as decimal64_min, + toDecimal64('1.000000', 6) as decimal64_max, + toDecimal128('0.000000001', 9) as decimal128_val, + toDecimal128('0.000000000', 9) as decimal128_min, + toDecimal128('1.000000000', 9) as decimal128_max, + toDecimal256('0.000000000000000001', 18) as decimal256_val, + toDecimal256('0.000000000000000000', 18) as decimal256_min, + toDecimal256('1.000000000000000000', 18) as decimal256_max + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row - regular and extreme decimal values (converted to float64) + self.assertAlmostEqual(ret.iloc[0]["decimal32_val"], 123.456, places=3) + self.assertAlmostEqual(ret.iloc[0]["decimal32_min"], -999999.999, places=3) + self.assertAlmostEqual(ret.iloc[0]["decimal32_max"], 999999.999, places=3) + + self.assertAlmostEqual(ret.iloc[0]["decimal64_val"], 123456.789012, places=6) + 
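+        # Decimal128/256 values exceed float64's 53-bit mantissa, hence the
+        # wide delta tolerances in the comparisons below.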
self.assertAlmostEqual(ret.iloc[0]["decimal64_min"], -999999999999.999999, places=6) + self.assertAlmostEqual(ret.iloc[0]["decimal64_max"], 999999999999.999999, places=6) + + self.assertAlmostEqual(ret.iloc[0]["decimal128_val"], 12345678901234567890123456789.123456789, delta=1e20) + self.assertAlmostEqual(ret.iloc[0]["decimal128_min"], -12345678901234567890123456789.123456789, delta=1e20) + self.assertAlmostEqual(ret.iloc[0]["decimal128_max"], 12345678901234567890123456789.123456789, delta=1e20) + + self.assertAlmostEqual(ret.iloc[0]["decimal256_val"], 1234567890123456789012345678901234567890123456789012345678.123456789012345678, delta=1e50) + self.assertAlmostEqual(ret.iloc[0]["decimal256_min"], -1234567890123456789012345678901234567890123456789012345678.123456789012345678, delta=1e50) + self.assertAlmostEqual(ret.iloc[0]["decimal256_max"], 1234567890123456789012345678901234567890123456789012345678.123456789012345678, delta=1e50) + + # Test second row - small decimal values (converted to float64) + self.assertAlmostEqual(ret.iloc[1]["decimal32_val"], 0.001, places=3) + self.assertEqual(ret.iloc[1]["decimal32_min"], 0.000) + self.assertAlmostEqual(ret.iloc[1]["decimal32_max"], 1.000, places=3) + + self.assertAlmostEqual(ret.iloc[1]["decimal64_val"], 0.000001, places=6) + self.assertEqual(ret.iloc[1]["decimal64_min"], 0.000000) + self.assertAlmostEqual(ret.iloc[1]["decimal64_max"], 1.000000, places=6) + + self.assertAlmostEqual(ret.iloc[1]["decimal128_val"], 0.000000001, places=9) + self.assertEqual(ret.iloc[1]["decimal128_min"], 0.000000000) + self.assertAlmostEqual(ret.iloc[1]["decimal128_max"], 1.000000000, places=9) + + self.assertAlmostEqual(ret.iloc[1]["decimal256_val"], 0.000000000000000001, places=18) + self.assertEqual(ret.iloc[1]["decimal256_min"], 0.000000000000000000) + self.assertAlmostEqual(ret.iloc[1]["decimal256_max"], 1.000000000000000000, places=18) + + # Precise data type validation + expected_types = { + "decimal32_val": "float64", # Decimal types mapped to float64 in ClickHouse->pandas conversion + "decimal32_min": "float64", + "decimal32_max": "float64", + "decimal64_val": "float64", + "decimal64_min": "float64", + "decimal64_max": "float64", + "decimal128_val": "float64", + "decimal128_min": "float64", + "decimal128_max": "float64", + "decimal256_val": "float64", + "decimal256_min": "float64", + "decimal256_max": "float64" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_string_types(self): + """Test String, FixedString, and LowCardinality string types""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toString('Hello World') as string_val, + toFixedString('Fixed', 10) as fixed_string_val, + toLowCardinality('Category A') as low_cardinality_val, + toString('') as empty_string, + toString('Unicode: 🌍 éñáíóú') as unicode_string, + toString('Special chars: \\t\\n\\r\\"\\\'') as special_chars, + toString('Very long string with many characters to test maximum length handling and memory allocation behavior') as long_string, + toFixedString('ABC', 5) as fixed_string_short, + toLowCardinality('') as low_cardinality_empty + UNION ALL + SELECT + 2 as row_id, + toString('Another string') as string_val, + toFixedString('Test123', 10) as fixed_string_val, + toLowCardinality('Category B') as low_cardinality_val, + toString('Non-empty') as empty_string, + toString('More Unicode: 🚀 ñáéíóú àèìòù') as unicode_string, + toString('Line 
breaks:\\nTab:\\tQuote:\\"') as special_chars, + toString('Short') as long_string, + toFixedString('XYZZZ', 5) as fixed_string_short, + toLowCardinality('Option 2') as low_cardinality_empty + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row - various string types + self.assertEqual(ret.iloc[0]["string_val"], "Hello World") + self.assertEqual(ret.iloc[0]["fixed_string_val"], "Fixed\x00\x00\x00\x00\x00") # FixedString pads with null bytes + self.assertEqual(ret.iloc[0]["low_cardinality_val"], "Category A") + self.assertEqual(ret.iloc[0]["empty_string"], "") + self.assertEqual(ret.iloc[0]["unicode_string"], "Unicode: 🌍 éñáíóú") + self.assertEqual(ret.iloc[0]["special_chars"], "Special chars: \t\n\r\"'") # ClickHouse interprets escape sequences + self.assertEqual(ret.iloc[0]["long_string"], "Very long string with many characters to test maximum length handling and memory allocation behavior") + self.assertEqual(ret.iloc[0]["fixed_string_short"], "ABC\x00\x00") # Padded to 5 chars + self.assertEqual(ret.iloc[0]["low_cardinality_empty"], "") + + # Test second row - different string values + self.assertEqual(ret.iloc[1]["string_val"], "Another string") + self.assertEqual(ret.iloc[1]["fixed_string_val"], "Test123\x00\x00\x00") # Padded to 10 chars + self.assertEqual(ret.iloc[1]["low_cardinality_val"], "Category B") + self.assertEqual(ret.iloc[1]["empty_string"], "Non-empty") + self.assertEqual(ret.iloc[1]["unicode_string"], "More Unicode: 🚀 ñáéíóú àèìòù") + self.assertEqual(ret.iloc[1]["special_chars"], "Line breaks:\nTab:\tQuote:\"") # ClickHouse interprets escape sequences + self.assertEqual(ret.iloc[1]["long_string"], "Short") + self.assertEqual(ret.iloc[1]["fixed_string_short"], "XYZZZ") # Exactly 5 chars, no padding + self.assertEqual(ret.iloc[1]["low_cardinality_empty"], "Option 2") + + # Precise data type validation + expected_types = { + "string_val": "object", # String types mapped to object in pandas + "fixed_string_val": "object", + "low_cardinality_val": "object", + "empty_string": "object", + "unicode_string": "object", + "special_chars": "object", + "long_string": "object", + "fixed_string_short": "object", + "low_cardinality_empty": "object" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_date_types(self): + """Test Date and Date32 types""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toDate('2023-12-25') as date_val, + toDate('1970-01-01') as date_min, + toDate('2149-06-06') as date_max, + toDate32('2023-12-25') as date32_val, + toDate32('1900-01-01') as date32_min, + toDate32('2299-12-31') as date32_max, + toDate('2000-02-29') as date_leap_year, + toDate32('2000-02-29') as date32_leap_year, + toDate32('1950-06-15') as date32_negative_1, + toDate32('1960-12-31') as date32_negative_2, + toDate32('1969-12-31') as date32_before_epoch + UNION ALL + SELECT + 2 as row_id, + toDate('1970-01-01') as date_val, + toDate('2023-01-01') as date_min, + toDate('2023-12-31') as date_max, + toDate32('1970-01-01') as date32_val, + toDate32('2023-01-01') as date32_min, + toDate32('2023-12-31') as date32_max, + toDate('2024-02-29') as date_leap_year, + toDate32('2024-02-29') as date32_leap_year, + toDate32('1945-05-08') as date32_negative_1, + toDate32('1955-03-20') as date32_negative_2, + 
toDate32('1968-07-20') as date32_before_epoch + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row - specific dates (Date types include time component 00:00:00) + self.assertIn("2023-12-25", str(ret.iloc[0]["date_val"])) + self.assertIn("1970-01-01", str(ret.iloc[0]["date_min"])) + self.assertIn("2149-06-06", str(ret.iloc[0]["date_max"])) + self.assertIn("2023-12-25", str(ret.iloc[0]["date32_val"])) + self.assertIn("1900-01-01", str(ret.iloc[0]["date32_min"])) + self.assertIn("2299-12-31", str(ret.iloc[0]["date32_max"])) + self.assertIn("2000-02-29", str(ret.iloc[0]["date_leap_year"])) + self.assertIn("2000-02-29", str(ret.iloc[0]["date32_leap_year"])) + # Test Date32 negative values (before 1970 epoch) + self.assertIn("1950-06-15", str(ret.iloc[0]["date32_negative_1"])) + self.assertIn("1960-12-31", str(ret.iloc[0]["date32_negative_2"])) + self.assertIn("1969-12-31", str(ret.iloc[0]["date32_before_epoch"])) + + # Test second row - different dates + self.assertIn("1970-01-01", str(ret.iloc[1]["date_val"])) + self.assertIn("2023-01-01", str(ret.iloc[1]["date_min"])) + self.assertIn("2023-12-31", str(ret.iloc[1]["date_max"])) + self.assertIn("1970-01-01", str(ret.iloc[1]["date32_val"])) + self.assertIn("2023-01-01", str(ret.iloc[1]["date32_min"])) + self.assertIn("2023-12-31", str(ret.iloc[1]["date32_max"])) + self.assertIn("2024-02-29", str(ret.iloc[1]["date_leap_year"])) + self.assertIn("2024-02-29", str(ret.iloc[1]["date32_leap_year"])) + # Test Date32 negative values (before 1970 epoch) - second row + self.assertIn("1945-05-08", str(ret.iloc[1]["date32_negative_1"])) + self.assertIn("1955-03-20", str(ret.iloc[1]["date32_negative_2"])) + self.assertIn("1968-07-20", str(ret.iloc[1]["date32_before_epoch"])) + + # Precise data type validation + expected_types = { + "date_val": "datetime64[s]", # Date types mapped to datetime64[s] in pandas + "date_min": "datetime64[s]", + "date_max": "datetime64[s]", + "date32_val": "datetime64[s]", + "date32_min": "datetime64[s]", + "date32_max": "datetime64[s]", + "date_leap_year": "datetime64[s]", + "date32_leap_year": "datetime64[s]", + "date32_negative_1": "datetime64[s]", # Date32 negative values (before 1970) + "date32_negative_2": "datetime64[s]", + "date32_before_epoch": "datetime64[s]" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_time_types(self): + """Test Time and Time64 types""" + # Enable Time and Time64 types + self.session.query("SET enable_time_time64_type = 1") + + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + CAST('14:30:45' AS Time) as time_val, + CAST('00:00:00' AS Time) as time_min, + CAST('23:59:59' AS Time) as time_max, + CAST('14:30:45.123456' AS Time64(6)) as time64_val, + CAST('00:00:00.000000' AS Time64(6)) as time64_min, + CAST('23:59:59.999999' AS Time64(6)) as time64_max, + CAST('12:00:00.123' AS Time64(3)) as time64_ms, + CAST('18:45:30.987654321' AS Time64(9)) as time64_ns + UNION ALL + SELECT + 2 as row_id, + CAST('09:15:30' AS Time) as time_val, + CAST('12:00:00' AS Time) as time_min, + CAST('18:45:15' AS Time) as time_max, + CAST('09:15:30.654321' AS Time64(6)) as time64_val, + CAST('12:30:45.500000' AS Time64(6)) as time64_min, + CAST('20:15:30.111111' AS Time64(6)) as time64_max, + CAST('08:30:15.500' AS Time64(3)) as 
time64_ms, + CAST('16:20:10.123456789' AS Time64(9)) as time64_ns + UNION ALL + SELECT + 3 as row_id, + CAST(-3600 AS Time) as time_val, -- -1 hour as negative seconds + CAST(-7200 AS Time) as time_min, -- -2 hours as negative seconds + CAST(-1800 AS Time) as time_max, -- -30 minutes as negative seconds + CAST(-3661.123456 AS Time64(6)) as time64_val, -- -1h 1m 1.123456s + CAST(-7322.500000 AS Time64(6)) as time64_min, -- -2h 2m 2.5s + CAST(-1801.999999 AS Time64(6)) as time64_max, -- -30m 1.999999s + CAST(-3723.500 AS Time64(3)) as time64_ms, -- -1h 2m 3.5s + CAST(-5434.123456789 AS Time64(9)) as time64_ns -- -1h 30m 34.123456789s + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row - time values + self.assertIn("14:30:45", str(ret.iloc[0]["time_val"])) + self.assertIn("00:00:00", str(ret.iloc[0]["time_min"])) + self.assertIn("23:59:59", str(ret.iloc[0]["time_max"])) + self.assertIn("14:30:45", str(ret.iloc[0]["time64_val"])) + self.assertIn("00:00:00", str(ret.iloc[0]["time64_min"])) + self.assertIn("23:59:59", str(ret.iloc[0]["time64_max"])) + self.assertIn("12:00:00", str(ret.iloc[0]["time64_ms"])) + self.assertIn("18:45:30", str(ret.iloc[0]["time64_ns"])) + + # Test second row - different time values + self.assertIn("09:15:30", str(ret.iloc[1]["time_val"])) + self.assertIn("12:00:00", str(ret.iloc[1]["time_min"])) + self.assertIn("18:45:15", str(ret.iloc[1]["time_max"])) + self.assertIn("09:15:30", str(ret.iloc[1]["time64_val"])) + self.assertIn("12:30:45", str(ret.iloc[1]["time64_min"])) + self.assertIn("20:15:30", str(ret.iloc[1]["time64_max"])) + self.assertIn("08:30:15", str(ret.iloc[1]["time64_ms"])) + self.assertIn("16:20:10", str(ret.iloc[1]["time64_ns"])) + + # Test third row - negative time values (should be returned as string numbers) + # Since Python time types don't support negative values, they are returned as numeric strings + self.assertEqual(ret.iloc[2]["time_val"], "-3600") # -1 hour + self.assertEqual(ret.iloc[2]["time_min"], "-7200") # -2 hours + self.assertEqual(ret.iloc[2]["time_max"], "-1800") # -30 minutes + self.assertEqual(ret.iloc[2]["time64_val"], "-3661.123456") # -1h 1m 1.123456s + self.assertEqual(ret.iloc[2]["time64_min"], "-7322.5") # -2h 2m 2.5s + self.assertEqual(ret.iloc[2]["time64_max"], "-1801.999999") # -30m 1.999999s + self.assertEqual(ret.iloc[2]["time64_ms"], "-3723.5") # -1h 2m 3.5s + self.assertEqual(ret.iloc[2]["time64_ns"], "-5434.123456789") # -1h 30m 34.123456789s + + # Verify negative values are returned as strings (object dtype) + for col in ["time_val", "time_min", "time_max", "time64_val", "time64_min", "time64_max", "time64_ms", "time64_ns"]: + self.assertIsInstance(ret.iloc[2][col], str, f"{col} should be string for negative values") + + # Precise data type validation + expected_types = { + "time_val": "object", # Time types mapped to object in pandas + "time_min": "object", + "time_max": "object", + "time64_val": "object", + "time64_min": "object", + "time64_max": "object", + "time64_ms": "object", + "time64_ns": "object" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_datetime_types(self): + """Test DateTime and DateTime64 types""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toDateTime('2023-12-25 14:30:45', 'Asia/Shanghai') as 
datetime_val, + toDateTime('1970-01-02 00:00:00', 'Asia/Shanghai') as datetime_min, + toDateTime('2106-02-07 06:28:15', 'Asia/Shanghai') as datetime_max, + toDateTime64('2023-12-25 14:30:45.123456', 6, 'Asia/Shanghai') as datetime64_val, + toDateTime64('1902-01-01 00:00:00.000000', 6, 'Asia/Shanghai') as datetime64_min, + toDateTime64('2099-12-31 10:59:59.999999', 6, 'Asia/Shanghai') as datetime64_max, + toDateTime64('2023-12-25 14:30:45.123456789', 9, 'Asia/Shanghai') as datetime64_ns, + toDateTime('2023-06-15 12:00:00', 'UTC') as datetime_utc, + toDateTime('2023-06-15 15:30:00', 'Europe/London') as datetime_london, + toDateTime64('2023-06-15 12:00:00.123', 3, 'Asia/Shanghai') as datetime64_tz_sh, + toDateTime64('2023-06-15 12:00:00.456', 3, 'America/New_York') as datetime64_tz_ny + UNION ALL + SELECT + 2 as row_id, + toDateTime('2000-02-29 09:15:30', 'Asia/Shanghai') as datetime_val, + toDateTime('2023-01-01 12:30:45', 'Asia/Shanghai') as datetime_min, + toDateTime('2023-12-31 18:45:15', 'Asia/Shanghai') as datetime_max, + toDateTime64('2000-02-29 09:15:30.654321', 6, 'Asia/Shanghai') as datetime64_val, + toDateTime64('2023-01-01 08:00:00.111111', 6, 'Asia/Shanghai') as datetime64_min, + toDateTime64('2023-12-31 20:30:45.888888', 6, 'Asia/Shanghai') as datetime64_max, + toDateTime64('2000-02-29 09:15:30.987654321', 9, 'Asia/Shanghai') as datetime64_ns, + toDateTime('2024-01-15 08:30:00', 'UTC') as datetime_utc, + toDateTime('2024-01-15 20:00:00', 'Europe/London') as datetime_london, + toDateTime64('2024-01-15 16:45:30.789', 3, 'Asia/Shanghai') as datetime64_tz_sh, + toDateTime64('2024-01-15 09:15:45.987', 3, 'America/New_York') as datetime64_tz_ny + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Note: Historical timezone offsets vary for the same location across different periods. + # For example, in 1900, Shanghai had a UTC offset of +8:05:43 (8 hours 5 minutes 43 seconds). 
+ # So executing session.query("select toDateTime64('1900-01-01 00:00:00.000000', 6, 'Asia/Shanghai')", "DataFrame") + # would output 1900-01-01 00:00:17+08:06 in pandas instead of the standard +08:00 + + # Test first row - exact datetime values + # DateTime (second precision) - every fixture above pins its timezone explicitly, + # so compare against the explicit Asia/Shanghai zone rather than the server default + actual_tz = "Asia/Shanghai" + + self.assertEqual(ret.iloc[0]["datetime_val"], pd.Timestamp('2023-12-25 14:30:45', tz=actual_tz)) + self.assertEqual(ret.iloc[0]["datetime_min"], pd.Timestamp('1970-01-02 00:00:00', tz=actual_tz)) + self.assertEqual(ret.iloc[0]["datetime_max"], pd.Timestamp('2106-02-07 06:28:15', tz=actual_tz)) + + # DateTime64 (microsecond precision) - the explicit Asia/Shanghai timezone is preserved + self.assertEqual(ret.iloc[0]["datetime64_val"], pd.Timestamp('2023-12-25 14:30:45.123456', tz=actual_tz)) + self.assertEqual(ret.iloc[0]["datetime64_min"], pd.Timestamp('1902-01-01 00:00:00.000000', tz=actual_tz)) + self.assertEqual(ret.iloc[0]["datetime64_max"], pd.Timestamp('2099-12-31 10:59:59.999999', tz=actual_tz)) + + # DateTime64 (nanosecond precision) - the explicit Asia/Shanghai timezone is preserved + self.assertEqual(ret.iloc[0]["datetime64_ns"], pd.Timestamp('2023-12-25 14:30:45.123456789', tz=actual_tz)) + + # UTC timezone datetime + expected_utc = pd.Timestamp('2023-06-15 12:00:00', tz='UTC') + actual_utc = ret.iloc[0]["datetime_utc"] + self.assertEqual(actual_utc, expected_utc) + + # Europe/London timezone datetime + expected_london = pd.Timestamp('2023-06-15 15:30:00', tz='Europe/London') + actual_london = ret.iloc[0]["datetime_london"] + self.assertEqual(actual_london, expected_london) + + # Timezone-aware datetime64 - Asia/Shanghai + expected_sh = pd.Timestamp('2023-06-15 12:00:00.123', tz='Asia/Shanghai') + actual_sh = ret.iloc[0]["datetime64_tz_sh"] + self.assertEqual(actual_sh, expected_sh) + + # Timezone-aware datetime64 - America/New_York + expected_ny = pd.Timestamp('2023-06-15 12:00:00.456', tz='America/New_York') + actual_ny = ret.iloc[0]["datetime64_tz_ny"] + self.assertEqual(actual_ny, expected_ny) + + # Test second row - exact datetime values with the explicit Asia/Shanghai timezone + self.assertEqual(ret.iloc[1]["datetime_val"], pd.Timestamp('2000-02-29 09:15:30', tz=actual_tz)) + self.assertEqual(ret.iloc[1]["datetime_min"], pd.Timestamp('2023-01-01 12:30:45', tz=actual_tz)) + self.assertEqual(ret.iloc[1]["datetime_max"], pd.Timestamp('2023-12-31 18:45:15', tz=actual_tz)) + self.assertEqual(ret.iloc[1]["datetime64_val"], pd.Timestamp('2000-02-29 09:15:30.654321', tz=actual_tz)) + self.assertEqual(ret.iloc[1]["datetime64_min"], pd.Timestamp('2023-01-01 08:00:00.111111', tz=actual_tz)) + self.assertEqual(ret.iloc[1]["datetime64_max"], pd.Timestamp('2023-12-31 20:30:45.888888', tz=actual_tz)) + self.assertEqual(ret.iloc[1]["datetime64_ns"], pd.Timestamp('2000-02-29 09:15:30.987654321', tz=actual_tz)) + + # Second row timezone datetime tests + expected_utc_2 = pd.Timestamp('2024-01-15 08:30:00', tz='UTC') + actual_utc_2 = ret.iloc[1]["datetime_utc"] + self.assertEqual(actual_utc_2, expected_utc_2) + + expected_london_2 = pd.Timestamp('2024-01-15 20:00:00', tz='Europe/London') + actual_london_2 = ret.iloc[1]["datetime_london"] + self.assertEqual(actual_london_2, expected_london_2) + + # Second row timezone tests (already converted by C++ code) + expected_sh_2 = pd.Timestamp('2024-01-15 16:45:30.789', tz='Asia/Shanghai') + actual_sh_2 = ret.iloc[1]["datetime64_tz_sh"] + self.assertEqual(actual_sh_2, expected_sh_2) + + 
expected_ny_2 = pd.Timestamp('2024-01-15 09:15:45.987', tz='America/New_York') + actual_ny_2 = ret.iloc[1]["datetime64_tz_ny"] + self.assertEqual(actual_ny_2, expected_ny_2) + + # Precise data type validation + expected_types = { + "row_id": "uint8", + "datetime_val": "datetime64[s, Asia/Shanghai]", # DateTime types mapped to datetime64[s] (second precision) + "datetime_min": "datetime64[s, Asia/Shanghai]", + "datetime_max": "datetime64[s, Asia/Shanghai]", + "datetime64_val": "datetime64[ns, Asia/Shanghai]", # DateTime64 types mapped to datetime64[ns] (nanosecond precision) + "datetime64_min": "datetime64[ns, Asia/Shanghai]", + "datetime64_max": "datetime64[ns, Asia/Shanghai]", + "datetime64_ns": "datetime64[ns, Asia/Shanghai]", # DateTime64 with 9-digit precision (nanoseconds) + "datetime_utc": "datetime64[s, UTC]", # DateTime with timezone -> datetime64[s] + "datetime_london": "datetime64[s, Europe/London]", # DateTime with Europe/London timezone + "datetime64_tz_sh": "datetime64[ns, Asia/Shanghai]", # DateTime64 with Asia/Shanghai timezone + "datetime64_tz_ny": "datetime64[ns, America/New_York]" # DateTime64 with America/New_York timezone + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + @unittest.skip("") + def test_enum_types(self): + """Test Enum8 and Enum16 types""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + CAST('hello' AS Enum8('hello' = 1, 'world' = 2)) as enum8_val, + CAST('small' AS Enum8('small' = -128, 'medium' = 0, 'large' = 127)) as enum8_range, + CAST('active' AS Enum16('active' = 1, 'inactive' = 2, 'pending' = 3, 'deleted' = -1)) as enum16_val, + CAST('north' AS Enum16('north' = 1, 'south' = 2, 'east' = 3, 'west' = 4, 'center' = 0)) as enum16_direction + UNION ALL + SELECT + 2 as row_id, + CAST('world' AS Enum8('hello' = 1, 'world' = 2)) as enum8_val, + CAST('large' AS Enum8('small' = -128, 'medium' = 0, 'large' = 127)) as enum8_range, + CAST('deleted' AS Enum16('active' = 1, 'inactive' = 2, 'pending' = 3, 'deleted' = -1)) as enum16_val, + CAST('south' AS Enum16('north' = 1, 'south' = 2, 'east' = 3, 'west' = 4, 'center' = 0)) as enum16_direction + UNION ALL + SELECT + 3 as row_id, + CAST('hello' AS Enum8('hello' = 1, 'world' = 2)) as enum8_val, + CAST('medium' AS Enum8('small' = -128, 'medium' = 0, 'large' = 127)) as enum8_range, + CAST('pending' AS Enum16('active' = 1, 'inactive' = 2, 'pending' = 3, 'deleted' = -1)) as enum16_val, + CAST('center' AS Enum16('north' = 1, 'south' = 2, 'east' = 3, 'west' = 4, 'center' = 0)) as enum16_direction + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row values + self.assertEqual(ret.iloc[0]["enum8_val"], "hello") + self.assertEqual(ret.iloc[0]["enum8_range"], "small") + self.assertEqual(ret.iloc[0]["enum16_val"], "active") + self.assertEqual(ret.iloc[0]["enum16_direction"], "north") + + # Test second row values + self.assertEqual(ret.iloc[1]["enum8_val"], "world") + self.assertEqual(ret.iloc[1]["enum8_range"], "large") + self.assertEqual(ret.iloc[1]["enum16_val"], "deleted") + self.assertEqual(ret.iloc[1]["enum16_direction"], "south") + + # Test third row values + self.assertEqual(ret.iloc[2]["enum8_val"], "hello") + self.assertEqual(ret.iloc[2]["enum8_range"], "medium") + self.assertEqual(ret.iloc[2]["enum16_val"], "pending") + self.assertEqual(ret.iloc[2]["enum16_direction"], "center") + + # Verify data types - Enum types should be mapped to object 
(string) dtype in pandas + expected_types = { + "row_id": "uint8", + "enum8_val": "object", # Enum8 mapped to object (string) dtype + "enum8_range": "object", # Enum8 with negative/positive range + "enum16_val": "object", # Enum16 mapped to object (string) dtype + "enum16_direction": "object" # Enum16 with multiple values + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + # Verify all enum values are strings + for col in ["enum8_val", "enum8_range", "enum16_val", "enum16_direction"]: + for i in range(len(ret)): + self.assertIsInstance(ret.iloc[i][col], str, f"Row {i}, column {col} should be string") + + @unittest.skip("") + def test_uuid_types(self): + """Test UUID data type""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toUUID('550e8400-e29b-41d4-a716-446655440000') as uuid_fixed1, + toUUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') as uuid_fixed2, + generateUUIDv4() as uuid_random1, + generateUUIDv4() as uuid_random2 + UNION ALL + SELECT + 2 as row_id, + toUUID('123e4567-e89b-12d3-a456-426614174000') as uuid_fixed1, + toUUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') as uuid_fixed2, + generateUUIDv4() as uuid_random1, + generateUUIDv4() as uuid_random2 + UNION ALL + SELECT + 3 as row_id, + toUUID('00000000-0000-0000-0000-000000000000') as uuid_fixed1, + toUUID('ffffffff-ffff-ffff-ffff-ffffffffffff') as uuid_fixed2, + generateUUIDv4() as uuid_random1, + generateUUIDv4() as uuid_random2 + ) + ORDER BY row_id + """, "DataFrame") + + # Verify we have 3 rows and 5 columns + self.assertEqual(len(ret), 3) + self.assertEqual(len(ret.columns), 5) + + # Test first row fixed UUID values + self.assertEqual(ret.iloc[0]["uuid_fixed1"], uuid.UUID("550e8400-e29b-41d4-a716-446655440000")) + self.assertEqual(ret.iloc[0]["uuid_fixed2"], uuid.UUID("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + + # Test second row fixed UUID values + self.assertEqual(ret.iloc[1]["uuid_fixed1"], uuid.UUID("123e4567-e89b-12d3-a456-426614174000")) + self.assertEqual(ret.iloc[1]["uuid_fixed2"], uuid.UUID("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + + # Test third row special UUID values (all zeros and all F's) + self.assertEqual(ret.iloc[2]["uuid_fixed1"], uuid.UUID("00000000-0000-0000-0000-000000000000")) + self.assertEqual(ret.iloc[2]["uuid_fixed2"], uuid.UUID("ffffffff-ffff-ffff-ffff-ffffffffffff")) + + # Verify data types - UUID types should be mapped to object dtype in pandas + expected_types = { + "row_id": "uint8", + "uuid_fixed1": "object", # UUID mapped to object dtype (contains UUID objects) + "uuid_fixed2": "object", # UUID mapped to object dtype (contains UUID objects) + "uuid_random1": "object", # Generated UUID mapped to object dtype (contains UUID objects) + "uuid_random2": "object" # Generated UUID mapped to object dtype (contains UUID objects) + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + # Verify all UUID values are UUID objects and have valid format + for col in ["uuid_fixed1", "uuid_fixed2", "uuid_random1", "uuid_random2"]: + for i in range(len(ret)): + uuid_value = ret.iloc[i][col] + self.assertIsInstance(uuid_value, uuid.UUID, f"Row {i}, column {col} should be UUID object") + # Verify UUID string representation has correct format + uuid_str = str(uuid_value) + self.assertEqual(len(uuid_str), 36, f"Row {i}, column {col} UUID string should be 36 characters") + self.assertEqual(uuid_str.count('-'), 
4, f"Row {i}, column {col} UUID should have 4 hyphens") + + @unittest.skip("") + def test_ipv4_types(self): + """Test IPv4 data type""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toIPv4('192.168.1.1') as ipv4_private, + toIPv4('8.8.8.8') as ipv4_public, + toIPv4('127.0.0.1') as ipv4_localhost, + toIPv4('0.0.0.0') as ipv4_zero, + toIPv4('255.255.255.255') as ipv4_broadcast + UNION ALL + SELECT + 2 as row_id, + toIPv4('10.0.0.1') as ipv4_private, + toIPv4('1.1.1.1') as ipv4_public, + toIPv4('127.0.0.2') as ipv4_localhost, + toIPv4('172.16.0.1') as ipv4_zero, + toIPv4('203.0.113.1') as ipv4_broadcast + UNION ALL + SELECT + 3 as row_id, + toIPv4('192.0.2.1') as ipv4_private, + toIPv4('208.67.222.222') as ipv4_public, + toIPv4('169.254.1.1') as ipv4_localhost, + toIPv4('224.0.0.1') as ipv4_zero, + toIPv4('239.255.255.255') as ipv4_broadcast + ) + ORDER BY row_id + """, "DataFrame") + + # Verify we have 3 rows and 6 columns + self.assertEqual(len(ret), 3) + self.assertEqual(len(ret.columns), 6) + + # Test first row IPv4 values + self.assertEqual(ret.iloc[0]["ipv4_private"], ipaddress.IPv4Address("192.168.1.1")) + self.assertEqual(ret.iloc[0]["ipv4_public"], ipaddress.IPv4Address("8.8.8.8")) + self.assertEqual(ret.iloc[0]["ipv4_localhost"], ipaddress.IPv4Address("127.0.0.1")) + self.assertEqual(ret.iloc[0]["ipv4_zero"], ipaddress.IPv4Address("0.0.0.0")) + self.assertEqual(ret.iloc[0]["ipv4_broadcast"], ipaddress.IPv4Address("255.255.255.255")) + + # Test second row IPv4 values + self.assertEqual(ret.iloc[1]["ipv4_private"], ipaddress.IPv4Address("10.0.0.1")) + self.assertEqual(ret.iloc[1]["ipv4_public"], ipaddress.IPv4Address("1.1.1.1")) + self.assertEqual(ret.iloc[1]["ipv4_localhost"], ipaddress.IPv4Address("127.0.0.2")) + self.assertEqual(ret.iloc[1]["ipv4_zero"], ipaddress.IPv4Address("172.16.0.1")) + self.assertEqual(ret.iloc[1]["ipv4_broadcast"], ipaddress.IPv4Address("203.0.113.1")) + + # Test third row IPv4 values + self.assertEqual(ret.iloc[2]["ipv4_private"], ipaddress.IPv4Address("192.0.2.1")) + self.assertEqual(ret.iloc[2]["ipv4_public"], ipaddress.IPv4Address("208.67.222.222")) + self.assertEqual(ret.iloc[2]["ipv4_localhost"], ipaddress.IPv4Address("169.254.1.1")) + self.assertEqual(ret.iloc[2]["ipv4_zero"], ipaddress.IPv4Address("224.0.0.1")) + self.assertEqual(ret.iloc[2]["ipv4_broadcast"], ipaddress.IPv4Address("239.255.255.255")) + + # Verify data types - IPv4 types should be mapped to object dtype in pandas + expected_types = { + "row_id": "uint8", + "ipv4_private": "object", # IPv4Address mapped to object dtype + "ipv4_public": "object", + "ipv4_localhost": "object", + "ipv4_zero": "object", + "ipv4_broadcast": "object" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + # Verify all IPv4 values are IPv4Address objects + for col in ["ipv4_private", "ipv4_public", "ipv4_localhost", "ipv4_zero", "ipv4_broadcast"]: + for i in range(len(ret)): + ipv4_value = ret.iloc[i][col] + self.assertIsInstance(ipv4_value, ipaddress.IPv4Address, f"Row {i}, column {col} should be IPv4Address object") + # Verify IPv4 string representation is valid + ipv4_str = str(ipv4_value) + self.assertEqual(len(ipv4_str.split('.')), 4, f"Row {i}, column {col} IPv4 should have 4 octets") + + @unittest.skip("") + def test_ipv6_types(self): + """Test IPv6 data type""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + toIPv6('2001:db8::1') as ipv6_standard, + 
toIPv6('::1') as ipv6_localhost, + toIPv6('::') as ipv6_zero, + toIPv6('2001:db8:85a3::8a2e:370:7334') as ipv6_full, + toIPv6('fe80::1') as ipv6_link_local + UNION ALL + SELECT + 2 as row_id, + toIPv6('2001:db8::2') as ipv6_standard, + toIPv6('::2') as ipv6_localhost, + toIPv6('2001:db8::') as ipv6_zero, + toIPv6('2001:db8:85a3:0:0:8a2e:370:7335') as ipv6_full, + toIPv6('fe80::2') as ipv6_link_local + UNION ALL + SELECT + 3 as row_id, + toIPv6('2001:0db8:0000:0000:0000:ff00:0042:8329') as ipv6_standard, + toIPv6('::ffff:192.0.2.1') as ipv6_localhost, + toIPv6('2001:db8:85a3::8a2e:370:7336') as ipv6_zero, + toIPv6('ff02::1') as ipv6_full, + toIPv6('2001:db8:85a3:8d3:1319:8a2e:370:7348') as ipv6_link_local + ) + ORDER BY row_id + """, "DataFrame") + + # Verify we have 3 rows and 6 columns + self.assertEqual(len(ret), 3) + self.assertEqual(len(ret.columns), 6) + + # Test first row IPv6 values + self.assertEqual(ret.iloc[0]["ipv6_standard"], ipaddress.IPv6Address("2001:db8::1")) + self.assertEqual(ret.iloc[0]["ipv6_localhost"], ipaddress.IPv6Address("::1")) + self.assertEqual(ret.iloc[0]["ipv6_zero"], ipaddress.IPv6Address("::")) + self.assertEqual(ret.iloc[0]["ipv6_full"], ipaddress.IPv6Address("2001:db8:85a3::8a2e:370:7334")) + self.assertEqual(ret.iloc[0]["ipv6_link_local"], ipaddress.IPv6Address("fe80::1")) + + # Test second row IPv6 values + self.assertEqual(ret.iloc[1]["ipv6_standard"], ipaddress.IPv6Address("2001:db8::2")) + self.assertEqual(ret.iloc[1]["ipv6_localhost"], ipaddress.IPv6Address("::2")) + self.assertEqual(ret.iloc[1]["ipv6_zero"], ipaddress.IPv6Address("2001:db8::")) + self.assertEqual(ret.iloc[1]["ipv6_full"], ipaddress.IPv6Address("2001:db8:85a3::8a2e:370:7335")) + self.assertEqual(ret.iloc[1]["ipv6_link_local"], ipaddress.IPv6Address("fe80::2")) + + # Test third row IPv6 values + self.assertEqual(ret.iloc[2]["ipv6_standard"], ipaddress.IPv6Address("2001:db8::ff00:42:8329")) + self.assertEqual(ret.iloc[2]["ipv6_localhost"], ipaddress.IPv6Address("::ffff:192.0.2.1")) + self.assertEqual(ret.iloc[2]["ipv6_zero"], ipaddress.IPv6Address("2001:db8:85a3::8a2e:370:7336")) + self.assertEqual(ret.iloc[2]["ipv6_full"], ipaddress.IPv6Address("ff02::1")) + self.assertEqual(ret.iloc[2]["ipv6_link_local"], ipaddress.IPv6Address("2001:db8:85a3:8d3:1319:8a2e:370:7348")) + + # Verify data types - IPv6 types should be mapped to object dtype in pandas + expected_types = { + "row_id": "uint8", + "ipv6_standard": "object", # IPv6Address mapped to object dtype + "ipv6_localhost": "object", + "ipv6_zero": "object", + "ipv6_full": "object", + "ipv6_link_local": "object" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + # Verify all IPv6 values are IPv6Address objects + for col in ["ipv6_standard", "ipv6_localhost", "ipv6_zero", "ipv6_full", "ipv6_link_local"]: + for i in range(len(ret)): + ipv6_value = ret.iloc[i][col] + self.assertIsInstance(ipv6_value, ipaddress.IPv6Address, f"Row {i}, column {col} should be IPv6Address object") + # Verify IPv6 address is valid by checking it can be converted back to string + ipv6_str = str(ipv6_value) + self.assertIn(":", ipv6_str, f"Row {i}, column {col} IPv6 should contain colons") + + @unittest.skip("") + def test_bool_types(self): + """Test Bool and Nullable(Bool) types with various values""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + true as bool_true, + false as bool_false, + true::Bool as explicit_bool_true, + false::Bool as 
explicit_bool_false, + NULL::Nullable(Bool) as nullable_bool_null, + true::Nullable(Bool) as nullable_bool_true, + false::Nullable(Bool) as nullable_bool_false + UNION ALL + SELECT + 2 as row_id, + false as bool_true, + true as bool_false, + false::Bool as explicit_bool_true, + true::Bool as explicit_bool_false, + true::Nullable(Bool) as nullable_bool_null, + NULL::Nullable(Bool) as nullable_bool_true, + true::Nullable(Bool) as nullable_bool_false + UNION ALL + SELECT + 3 as row_id, + 1 = 1 as bool_true, -- expression result + 1 = 0 as bool_false, -- expression result + (1 > 0)::Bool as explicit_bool_true, + (1 < 0)::Bool as explicit_bool_false, + false::Nullable(Bool) as nullable_bool_null, + false::Nullable(Bool) as nullable_bool_true, + NULL::Nullable(Bool) as nullable_bool_false + ) + ORDER BY row_id + """, "DataFrame") + + # Verify we have 3 rows and 8 columns + self.assertEqual(len(ret), 3) + self.assertEqual(len(ret.columns), 8) + + # Test first row - basic Boolean values + self.assertTrue(ret.iloc[0]["bool_true"]) + self.assertFalse(ret.iloc[0]["bool_false"]) + self.assertTrue(ret.iloc[0]["explicit_bool_true"]) + self.assertFalse(ret.iloc[0]["explicit_bool_false"]) + self.assertTrue(pd.isna(ret.iloc[0]["nullable_bool_null"])) + self.assertTrue(ret.iloc[0]["nullable_bool_true"]) + self.assertFalse(ret.iloc[0]["nullable_bool_false"]) + + # Test second row - inverted Boolean values + self.assertFalse(ret.iloc[1]["bool_true"]) + self.assertTrue(ret.iloc[1]["bool_false"]) + self.assertFalse(ret.iloc[1]["explicit_bool_true"]) + self.assertTrue(ret.iloc[1]["explicit_bool_false"]) + self.assertTrue(ret.iloc[1]["nullable_bool_null"]) + self.assertTrue(pd.isna(ret.iloc[1]["nullable_bool_true"])) + self.assertTrue(ret.iloc[1]["nullable_bool_false"]) + + # Test third row - expression results + self.assertTrue(ret.iloc[2]["bool_true"]) # 1 = 1 is true + self.assertFalse(ret.iloc[2]["bool_false"]) # 1 = 0 is false + self.assertTrue(ret.iloc[2]["explicit_bool_true"]) # 1 > 0 is true + self.assertFalse(ret.iloc[2]["explicit_bool_false"]) # 1 < 0 is false + self.assertFalse(ret.iloc[2]["nullable_bool_null"]) + self.assertFalse(ret.iloc[2]["nullable_bool_true"]) + self.assertTrue(pd.isna(ret.iloc[2]["nullable_bool_false"])) + + # Test Python types - Bool values should be boolean types (Python bool or numpy bool_) + for i in range(len(ret)): + for col in ["bool_true", "bool_false", "explicit_bool_true", "explicit_bool_false"]: + value = ret.iloc[i][col] + # Accept both Python bool and numpy bool_ types + self.assertTrue(isinstance(value, (bool, np.bool_)), f"Row {i}, column {col} should be boolean type, got {type(value)}") + + # Test nullable Bool columns - should be bool/numpy.bool_ or null + for col in ["nullable_bool_null", "nullable_bool_true", "nullable_bool_false"]: + if (pd.isna(ret.iloc[i][col])): + continue + + value = ret.iloc[i][col] + self.assertTrue(isinstance(value, (bool, np.bool_)), + f"Row {i}, column {col} should be boolean type, got {type(value)}") + + # Verify data types - Bool types should be mapped to bool dtype in pandas + expected_types = { + "row_id": "uint8", + "bool_true": "bool", + "bool_false": "bool", + "explicit_bool_true": "bool", + "explicit_bool_false": "bool", + "nullable_bool_null": "boolean", + "nullable_bool_true": "boolean", + "nullable_bool_false": "boolean" + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type, f"Column {col} type mismatch") + + @unittest.skip("") + def 
test_tuple_types(self): + """Test Tuple types with various element combinations""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + (1, 'hello') as tuple_int_str, + (true, false, true) as tuple_bool, + (1, 2.5, 'test') as tuple_mixed, + tuple(42, 'world', false) as tuple_explicit, + (1, (2, 3)) as tuple_nested, + ('a', 'b', 'c') as tuple_string, + (NULL, 1)::Tuple(Nullable(Int32), Int32) as tuple_nullable, + tuple() as tuple_empty + UNION ALL + SELECT + 2 as row_id, + (100, 'goodbye') as tuple_int_str, + (false, true, false) as tuple_bool, + (10, -3.14, 'data') as tuple_mixed, + tuple(-5, 'universe', true) as tuple_explicit, + (5, (6, 7)) as tuple_nested, + ('x', 'y', 'z') as tuple_string, + (42, NULL)::Tuple(Int32, Nullable(Int32)) as tuple_nullable, + tuple() as tuple_empty + UNION ALL + SELECT + 3 as row_id, + (-1, '') as tuple_int_str, + (true, false, false) as tuple_bool, + (0, 0.0, '') as tuple_mixed, + tuple(2147483647, 'edge_case', false) as tuple_explicit, + (99, (100, 101)) as tuple_nested, + ('🌍', 'Unicode', 'Test') as tuple_string, + (NULL, NULL)::Tuple(Nullable(Int32), Nullable(Int32)) as tuple_nullable, + tuple() as tuple_empty + ) + ORDER BY row_id + """, "DataFrame") + + # Verify we have 3 rows and 9 columns + self.assertEqual(len(ret), 3) + self.assertEqual(len(ret.columns), 9) + + # Test first row - basic tuple values + tuple_int_str = ret.iloc[0]["tuple_int_str"] + self.assertIsInstance(tuple_int_str, np.ndarray) + self.assertEqual(len(tuple_int_str), 2) + self.assertEqual(tuple_int_str[0], 1) + self.assertEqual(tuple_int_str[1], 'hello') + + tuple_bool = ret.iloc[0]["tuple_bool"] + self.assertIsInstance(tuple_bool, np.ndarray) + self.assertEqual(len(tuple_bool), 3) + self.assertTrue(tuple_bool[0]) + self.assertFalse(tuple_bool[1]) + self.assertTrue(tuple_bool[2]) + + tuple_mixed = ret.iloc[0]["tuple_mixed"] + self.assertIsInstance(tuple_mixed, np.ndarray) + self.assertEqual(len(tuple_mixed), 3) + self.assertEqual(tuple_mixed[0], 1) + self.assertEqual(tuple_mixed[1], 2.5) + self.assertEqual(tuple_mixed[2], 'test') + + # Test nested tuples + tuple_nested = ret.iloc[0]["tuple_nested"] + self.assertIsInstance(tuple_nested, np.ndarray) + self.assertEqual(len(tuple_nested), 2) + self.assertEqual(tuple_nested[0], 1) + self.assertIsInstance(tuple_nested[1], tuple) + self.assertEqual(tuple_nested[1][0], 2) + self.assertEqual(tuple_nested[1][1], 3) + + # Test nullable tuples + tuple_nullable = ret.iloc[0]["tuple_nullable"] + self.assertIsInstance(tuple_nullable, np.ndarray) + self.assertEqual(len(tuple_nullable), 2) + self.assertTrue(pd.isna(tuple_nullable[0])) # NULL value + self.assertEqual(tuple_nullable[1], 1) + + # Test empty tuple + tuple_empty = ret.iloc[0]["tuple_empty"] + self.assertIsInstance(tuple_empty, np.ndarray) + self.assertEqual(len(tuple_empty), 0) + + # Test second row - different values + tuple_int_str_2 = ret.iloc[1]["tuple_int_str"] + self.assertEqual(tuple_int_str_2[0], 100) + self.assertEqual(tuple_int_str_2[1], 'goodbye') + + tuple_nullable_2 = ret.iloc[1]["tuple_nullable"] + self.assertEqual(tuple_nullable_2[0], 42) + self.assertTrue(pd.isna(tuple_nullable_2[1])) # NULL value + + # Test third row - edge cases + tuple_bool_3 = ret.iloc[2]["tuple_bool"] + self.assertIsInstance(tuple_bool_3, np.ndarray) + self.assertEqual(len(tuple_bool_3), 3) + self.assertTrue(tuple_bool_3[0]) # true + self.assertFalse(tuple_bool_3[1]) # false + self.assertFalse(tuple_bool_3[2]) # false + + tuple_nullable_3 = ret.iloc[2]["tuple_nullable"] + 
self.assertTrue(pd.isna(tuple_nullable_3[0])) # Both NULL + self.assertTrue(pd.isna(tuple_nullable_3[1])) + + # Test string tuple with Unicode + tuple_string_3 = ret.iloc[2]["tuple_string"] + self.assertEqual(tuple_string_3[0], '🌍') + self.assertEqual(tuple_string_3[1], 'Unicode') + self.assertEqual(tuple_string_3[2], 'Test') + + # Test tuple element types + for i in range(len(ret)): + tuple_val = ret.iloc[i]["tuple_int_str"] + self.assertIsInstance(tuple_val, np.ndarray, f"Row {i} tuple_int_str should be tuple") + if len(tuple_val) >= 2: + self.assertIsInstance(tuple_val[0], (int, np.integer), f"Row {i} first element should be integer") + self.assertIsInstance(tuple_val[1], str, f"Row {i} second element should be string") + + # Verify data types - Tuple types should be mapped to object dtype in pandas + expected_types = { + "row_id": "uint8", + "tuple_int_str": "object", # Tuple mapped to object dtype + "tuple_bool": "object", # Tuple mapped to object dtype + "tuple_mixed": "object", # Tuple mapped to object dtype + "tuple_explicit": "object", # Tuple mapped to object dtype + "tuple_nested": "object", # Nested Tuple mapped to object dtype + "tuple_string": "object", # Tuple mapped to object dtype + "tuple_nullable": "object", # Tuple with nullable elements mapped to object dtype + "tuple_empty": "object" # Empty Tuple mapped to object dtype + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type, f"Column {col} type mismatch") + + # Test named tuples + named_tuple_ret = self.session.query(""" + SELECT + tuple(1, 'John', 25) as person_tuple, + (42, 3.14159, 'pi') as unnamed_tuple + """, "DataFrame") + + person_tuple = named_tuple_ret.iloc[0]["person_tuple"] + self.assertIsInstance(person_tuple, np.ndarray) + self.assertEqual(len(person_tuple), 3) + self.assertEqual(person_tuple[0], 1) + self.assertEqual(person_tuple[1], 'John') + self.assertEqual(person_tuple[2], 25) + + unnamed_tuple = named_tuple_ret.iloc[0]["unnamed_tuple"] + self.assertIsInstance(unnamed_tuple, np.ndarray) + self.assertEqual(len(unnamed_tuple), 3) + self.assertEqual(unnamed_tuple[0], 42) + self.assertAlmostEqual(unnamed_tuple[1], 3.14159, places=5) + self.assertEqual(unnamed_tuple[2], 'pi') + + @unittest.skip("") + def test_array_types(self): + """Test Array types with various element types""" + ret = self.session.query(""" + SELECT * FROM ( + SELECT + 1 as row_id, + [1, 2, 3, 4, 5] as array_int32, + [1, 2, 3, 4, 5]::Array(UInt64) as array_uint64, + [1.1, 2.2, 3.3, 4.4, 5.5] as array_float64, + ['hello', 'world', 'clickhouse', 'array'] as array_string, + [true, false, true, false] as array_bool, + [toDate('2023-01-01'), toDate('2023-02-01'), toDate('2023-03-01')] as array_date, + [toDateTime('2023-01-01 10:00:00', 'Asia/Shanghai'), toDateTime('2023-01-01 11:00:00', 'Asia/Shanghai')] as array_datetime, + [[1, 2], [3, 4], [5, 6]] as array_nested_int, + [[100, 200], [300, 400], [500, 600]]::Array(Array(UInt32)) as array_nested_uint32, + [['a', 'b'], ['c', 'd']] as array_nested_string, + [] as array_empty_int, + ['']::Array(String) as array_empty_string_element, + [NULL, 1, NULL, 3]::Array(Nullable(Int32)) as array_nullable_int, + [NULL, 'test', NULL]::Array(Nullable(String)) as array_nullable_string + UNION ALL + SELECT + 2 as row_id, + [10, 20, 30] as array_int32, + [100, 200, 300]::Array(UInt64) as array_uint64, + [10.5, 20.5] as array_float64, + ['test', 'array', 'data'] as array_string, + [false, false, true] as array_bool, + 
[toDate('2024-01-01'), toDate('2024-12-31')] as array_date, + [toDateTime('2024-06-15 14:30:00', 'Asia/Shanghai')] as array_datetime, + [[7, 8, 9], [10]] as array_nested_int, + [[700, 800], [900]]::Array(Array(UInt32)) as array_nested_uint32, + [['x'], ['y', 'z', 'w']] as array_nested_string, + [42] as array_empty_int, + ['single'] as array_empty_string_element, + [1, 2, 3]::Array(Nullable(Int32)) as array_nullable_int, + ['a', 'b']::Array(Nullable(String)) as array_nullable_string + UNION ALL + SELECT + 3 as row_id, + [-1, 0, 1, 2147483647, -2147483648] as array_int32, + [0, 18446744073709551615]::Array(UInt64) as array_uint64, + [0.0, -1.5, 1.0/0.0, -1.0/0.0, 0.0/0.0] as array_float64, + ['Unicode: 🌍', 'Special: \t\n"''', ''] as array_string, + [true] as array_bool, + [toDate('1970-01-01'), toDate('2149-06-06')] as array_date, + [toDateTime('1970-01-02 00:00:00', 'Asia/Shanghai'), toDateTime('2106-02-07 06:28:15', 'Asia/Shanghai')] as array_datetime, + [[], [1], [2, 3, 4, 5]] as array_nested_int, + [[], [1000], [2000, 3000, 4000]]::Array(Array(UInt32)) as array_nested_uint32, + [[], ['single'], ['a', 'b', 'c']] as array_nested_string, + []::Array(Int32) as array_empty_int, + []::Array(String) as array_empty_string_element, + [NULL]::Array(Nullable(Int32)) as array_nullable_int, + [NULL, NULL]::Array(Nullable(String)) as array_nullable_string + ) + ORDER BY row_id + """, "DataFrame") + + for col in ret.columns: + print(f"{col}: {ret.dtypes[col]} (actual value: {ret.iloc[0][col]}, Python type: {type(ret.iloc[0][col])})") + + # Test first row - basic arrays (converted to numpy arrays) + np.testing.assert_array_equal(ret.iloc[0]["array_int32"], [1, 2, 3, 4, 5]) + np.testing.assert_array_equal(ret.iloc[0]["array_uint64"], [1, 2, 3, 4, 5]) + np.testing.assert_array_equal(ret.iloc[0]["array_float64"], [1.1, 2.2, 3.3, 4.4, 5.5]) + np.testing.assert_array_equal(ret.iloc[0]["array_string"], ['hello', 'world', 'clickhouse', 'array']) + np.testing.assert_array_equal(ret.iloc[0]["array_bool"], [True, False, True, False]) + + # Test date arrays (converted to numpy array of pandas timestamps) + date_array = ret.iloc[0]["array_date"] + self.assertIsInstance(date_array, np.ndarray) + self.assertEqual(len(date_array), 3) + self.assertEqual(date_array[0], pd.Timestamp('2023-01-01')) + self.assertEqual(date_array[1], pd.Timestamp('2023-02-01')) + self.assertEqual(date_array[2], pd.Timestamp('2023-03-01')) + + # Test datetime arrays (converted to numpy array of numpy.datetime64 in UTC) + datetime_array = ret.iloc[0]["array_datetime"] + self.assertIsInstance(datetime_array, np.ndarray) + self.assertEqual(len(datetime_array), 2) + # ClickHouse converts Asia/Shanghai time to UTC: 10:00:00 +0800 -> 02:00:00 UTC + self.assertEqual(datetime_array[0], np.datetime64('2023-01-01T02:00:00')) + self.assertEqual(datetime_array[1], np.datetime64('2023-01-01T03:00:00')) + + # Test nested arrays (numpy arrays containing numpy arrays) + nested_int = ret.iloc[0]["array_nested_int"] + self.assertIsInstance(nested_int, np.ndarray) + self.assertEqual(len(nested_int), 3) + np.testing.assert_array_equal(nested_int[0], [1, 2]) + np.testing.assert_array_equal(nested_int[1], [3, 4]) + np.testing.assert_array_equal(nested_int[2], [5, 6]) + + nested_uint32 = ret.iloc[0]["array_nested_uint32"] + self.assertIsInstance(nested_uint32, np.ndarray) + self.assertEqual(len(nested_uint32), 3) + np.testing.assert_array_equal(nested_uint32[0], [100, 200]) + np.testing.assert_array_equal(nested_uint32[1], [300, 400]) + 
np.testing.assert_array_equal(nested_uint32[2], [500, 600]) + + nested_string = ret.iloc[0]["array_nested_string"] + self.assertIsInstance(nested_string, np.ndarray) + self.assertEqual(len(nested_string), 2) + np.testing.assert_array_equal(nested_string[0], ['a', 'b']) + np.testing.assert_array_equal(nested_string[1], ['c', 'd']) + + # Test empty arrays and arrays with empty string elements + empty_int_array = ret.iloc[0]["array_empty_int"] + self.assertIsInstance(empty_int_array, np.ndarray) + self.assertEqual(len(empty_int_array), 0) + + string_element_array = ret.iloc[0]["array_empty_string_element"] + self.assertIsInstance(string_element_array, np.ndarray) + np.testing.assert_array_equal(string_element_array, ['']) + + # Test nullable arrays: Array(Nullable(Int32)) comes back as a numpy masked array + nullable_int = ret.iloc[0]["array_nullable_int"] + self.assertIsInstance(nullable_int, np.ndarray) + self.assertEqual(len(nullable_int), 4) + self.assertTrue(nullable_int.mask[0]) + self.assertEqual(nullable_int[1], 1) + self.assertTrue(nullable_int.mask[2]) + self.assertEqual(nullable_int[3], 3) + + # Array(Nullable(String)) comes back as an object array: NULLs surface as None, with no mask + nullable_string = ret.iloc[0]["array_nullable_string"] + self.assertIsInstance(nullable_string, np.ndarray) + self.assertEqual(len(nullable_string), 3) + self.assertIsNone(nullable_string[0]) + self.assertEqual(nullable_string[1], 'test') + self.assertIsNone(nullable_string[2]) + + # Test second row - different arrays (numpy arrays) + np.testing.assert_array_equal(ret.iloc[1]["array_int32"], [10, 20, 30]) + np.testing.assert_array_equal(ret.iloc[1]["array_uint64"], [100, 200, 300]) + np.testing.assert_array_equal(ret.iloc[1]["array_float64"], [10.5, 20.5]) + np.testing.assert_array_equal(ret.iloc[1]["array_string"], ['test', 'array', 'data']) + np.testing.assert_array_equal(ret.iloc[1]["array_bool"], [False, False, True]) + + # Test second row datetime array: 14:30:00 +0800 -> 06:30:00 UTC + datetime_array_2 = ret.iloc[1]["array_datetime"] + self.assertEqual(len(datetime_array_2), 1) + self.assertEqual(datetime_array_2[0], np.datetime64('2024-06-15T06:30:00')) + + # Test third row - edge cases (numpy arrays) + np.testing.assert_array_equal(ret.iloc[2]["array_int32"], [-1, 0, 1, 2147483647, -2147483648]) + np.testing.assert_array_equal(ret.iloc[2]["array_uint64"], [0, 18446744073709551615]) + + # Test third row datetime array: Asia/Shanghai times converted to UTC + datetime_array_3 = ret.iloc[2]["array_datetime"] + self.assertEqual(len(datetime_array_3), 2) + # 1970-01-02 00:00:00 +0800 -> 1970-01-01 16:00:00 UTC + self.assertEqual(datetime_array_3[0], np.datetime64('1970-01-01T16:00:00')) + # 2106-02-07 06:28:15 +0800 -> 2106-02-06 22:28:15 UTC + self.assertEqual(datetime_array_3[1], np.datetime64('2106-02-06T22:28:15')) + + # Test float special values in array + float_array = ret.iloc[2]["array_float64"] + self.assertEqual(float_array[0], 0.0) + self.assertEqual(float_array[1], -1.5) + self.assertTrue(math.isinf(float_array[2])) # positive infinity + self.assertTrue(math.isinf(float_array[3])) # negative infinity + self.assertTrue(math.isnan(float_array[4])) # NaN + + # Test string array with special characters (numpy array) + string_array = ret.iloc[2]["array_string"] + self.assertIsInstance(string_array, np.ndarray) + self.assertEqual(string_array[0], 'Unicode: 🌍') + self.assertEqual(string_array[1], "Special: \t\n\"'") # \t and \n were expanded by Python inside the triple-quoted SQL + self.assertEqual(string_array[2], '') + + # Test nested arrays 
with empty elements (numpy arrays) + nested_int_3 = ret.iloc[2]["array_nested_int"] + self.assertIsInstance(nested_int_3, np.ndarray) + self.assertEqual(len(nested_int_3[0]), 0) # empty array + np.testing.assert_array_equal(nested_int_3[1], [1]) # single element + np.testing.assert_array_equal(nested_int_3[2], [2, 3, 4, 5]) # multiple elements + + nested_uint32_3 = ret.iloc[2]["array_nested_uint32"] + self.assertIsInstance(nested_uint32_3, np.ndarray) + self.assertEqual(len(nested_uint32_3[0]), 0) # empty array + np.testing.assert_array_equal(nested_uint32_3[1], [1000]) # single element + np.testing.assert_array_equal(nested_uint32_3[2], [2000, 3000, 4000]) # multiple elements + + # Test empty typed arrays + self.assertEqual(len(ret.iloc[2]["array_empty_int"]), 0) + self.assertEqual(len(ret.iloc[2]["array_empty_string_element"]), 0) + + # Test nullable arrays with only NULL values + self.assertEqual(len(ret.iloc[2]["array_nullable_int"]), 1) + self.assertTrue(ret.iloc[2]["array_nullable_int"].mask[0]) + + self.assertEqual(len(ret.iloc[2]["array_nullable_string"]), 2) + # String NULLs surface as None in an object array rather than through a mask (see above) + self.assertIsNone(ret.iloc[2]["array_nullable_string"][0]) + self.assertIsNone(ret.iloc[2]["array_nullable_string"][1]) + + # Precise data type validation - Arrays should be mapped to object dtype in pandas + expected_types = { + "row_id": "uint8", + "array_int32": "object", # Array(Int32) mapped to object dtype + "array_uint64": "object", # Array(UInt64) mapped to object dtype + "array_float64": "object", # Array(Float64) mapped to object dtype + "array_string": "object", # Array(String) mapped to object dtype + "array_bool": "object", # Array(Bool) mapped to object dtype + "array_date": "object", # Array(Date) mapped to object dtype + "array_datetime": "object", # Array(DateTime) mapped to object dtype + "array_nested_int": "object", # Array(Array(Int32)) mapped to object dtype + "array_nested_uint32": "object", # Array(Array(UInt32)) mapped to object dtype + "array_nested_string": "object", # Array(Array(String)) mapped to object dtype + "array_empty_int": "object", # Empty Array(Int32) mapped to object dtype + "array_empty_string_element": "object", # Array(String) with empty string mapped to object dtype + "array_nullable_int": "object", # Array(Nullable(Int32)) mapped to object dtype + "array_nullable_string": "object" # Array(Nullable(String)) mapped to object dtype + } + + for col, expected_type in expected_types.items(): + actual_type = str(ret.dtypes[col]) + self.assertEqual(actual_type, expected_type) + + # Verify all array columns contain numpy arrays + array_columns = [col for col in ret.columns if col.startswith('array_')] + for col in array_columns: + for i in range(len(ret)): + array_value = ret.iloc[i][col] + # Check if it's a numpy array + self.assertIsInstance(array_value, np.ndarray, f"Row {i}, column {col} should be numpy array") + # Verify numpy array properties + self.assertTrue(hasattr(array_value, '__len__'), f"Row {i}, column {col} should have length") + self.assertTrue(hasattr(array_value, '__getitem__'), f"Row {i}, column {col} should be indexable") + + +if __name__ == "__main__": + unittest.main()
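
For reference, a minimal sketch (not part of the patch) of how the "DataFrame" output format exercised by these tests is consumed, assuming the chdb.session.Session API the tests already use; the query, column names, and values below are illustrative only:

    import pandas as pd
    import chdb.session as chs

    sess = chs.Session()
    df = sess.query(
        "SELECT toDate('2023-12-25') AS d, toDecimal64(1.5, 6) AS x",
        "DataFrame",
    )
    print(df.dtypes)  # per the assertions above: d -> datetime64[s], x -> float64

    # Negative Time values arrive as numeric strings (Python's datetime.time
    # cannot represent them); one way to consume them is as a timedelta:
    secs = float("-3661.123456")            # e.g. the third row's time64_val above
    print(pd.to_timedelta(secs, unit="s"))  # -1 days +22:58:58.876544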