diff --git a/include/albatross/linalg/Block b/include/albatross/linalg/Block
index 19cb1d5e..b0eb9849 100644
--- a/include/albatross/linalg/Block
+++ b/include/albatross/linalg/Block
@@ -13,9 +13,12 @@
 #ifndef ALBATROSS_LINALG_BLOCK_H
 #define ALBATROSS_LINALG_BLOCK_H
 
-#include "../Common"
+#include "../Indexing"
+#include "../src/linalg/operators.hpp"
+#include "../src/linalg/structured.hpp"
 #include "../src/linalg/block_utils.hpp"
 #include "../src/linalg/block_diagonal.hpp"
 #include "../src/linalg/block_symmetric.hpp"
+#include "../src/linalg/infer_structure.hpp"
 
 #endif
diff --git a/include/albatross/src/cereal/block_utils.hpp b/include/albatross/src/cereal/block_utils.hpp
index 87b7e61a..3c50cee2 100644
--- a/include/albatross/src/cereal/block_utils.hpp
+++ b/include/albatross/src/cereal/block_utils.hpp
@@ -24,6 +24,13 @@ inline void serialize(Archive &archive,
           cereal::make_nvp("S", block_sym.S));
 }
 
+template <typename Archive>
+inline void serialize(Archive &archive,
+                      albatross::BlockDiagonalLDLT &block_ldlt,
+                      const std::uint32_t) {
+  archive(cereal::make_nvp("blocks", block_ldlt.blocks));
+}
+
 } // namespace cereal
 
 #endif /* ALBATROSS_SRC_CEREAL_BLOCK_UTILS_HPP_ */
diff --git a/include/albatross/src/linalg/block_diagonal.hpp b/include/albatross/src/linalg/block_diagonal.hpp
index 3986efbb..a03dbea1 100644
--- a/include/albatross/src/linalg/block_diagonal.hpp
+++ b/include/albatross/src/linalg/block_diagonal.hpp
@@ -37,10 +37,11 @@ struct BlockDiagonalLDLT {
   solve(const Eigen::Matrix<_Scalar, _Rows, _Cols> &rhs,
         ThreadPool *pool) const;
 
-  template <class _Scalar, int _Rows, int _Cols>
-  Eigen::Matrix<_Scalar, _Rows, _Cols>
-  sqrt_solve(const Eigen::Matrix<_Scalar, _Rows, _Cols> &rhs,
-             ThreadPool *pool) const;
+  template <typename Derived>
+  Eigen::MatrixXd sqrt_solve(const Eigen::DenseBase<Derived> &rhs,
+                             ThreadPool *pool) const;
+
+  BlockDiagonal sqrt_transpose() const;
 
   std::map<size_t, Eigen::Index> block_to_row_map() const;
 
@@ -49,6 +50,8 @@ struct BlockDiagonalLDLT {
   Eigen::Index rows() const;
 
   Eigen::Index cols() const;
+
+  bool operator==(const BlockDiagonalLDLT &other) const;
 };
 
 struct BlockDiagonal {
@@ -68,7 +71,7 @@ struct BlockDiagonal {
 
   Eigen::Index cols() const;
 
-  Eigen::MatrixXd toDense() const;
+  Eigen::MatrixXd to_dense() const;
 };
 
 /*
@@ -102,6 +105,13 @@ inline Eigen::Matrix<_Scalar, _Rows, _Cols> BlockDiagonalLDLT::sqrt_solve(
   return output;
 }
 
+inline BlockDiagonal BlockDiagonalLDLT::sqrt_transpose() const {
+  auto sqrt_transpose_block = [](const auto &b) { return b.sqrt_transpose(); };
+  BlockDiagonal output;
+  output.blocks = apply(this->blocks, sqrt_transpose_block);
+  return output;
+}
+
 inline std::map<size_t, Eigen::Index>
 BlockDiagonalLDLT::block_to_row_map() const {
   Eigen::Index row = 0;
@@ -132,15 +142,16 @@ BlockDiagonalLDLT::solve(const Eigen::Matrix<_Scalar, _Rows, _Cols> &rhs,
   return output;
 }
 
-template <class _Scalar, int _Rows, int _Cols>
-inline Eigen::Matrix<_Scalar, _Rows, _Cols>
-BlockDiagonalLDLT::sqrt_solve(const Eigen::Matrix<_Scalar, _Rows, _Cols> &rhs,
+template <typename Derived>
+inline Eigen::MatrixXd
+BlockDiagonalLDLT::sqrt_solve(const Eigen::DenseBase<Derived> &rhs,
                               ThreadPool *pool) const {
   ALBATROSS_ASSERT(cols() == rhs.rows());
-  Eigen::Matrix<_Scalar, _Rows, _Cols> output(rows(), rhs.cols());
+  Eigen::MatrixXd output(rows(), rhs.cols());
 
   auto solve_and_fill_one_block = [&](const size_t i, const Eigen::Index row) {
-    const auto rhs_chunk = rhs.block(row, 0, blocks[i].rows(), rhs.cols());
+    const auto rhs_chunk =
+        rhs.derived().block(row, 0, blocks[i].rows(), rhs.cols());
     output.block(row, 0, blocks[i].rows(), rhs.cols()) =
         blocks[i].sqrt_solve(rhs_chunk);
   };
@@ -173,6 +184,10 @@ inline Eigen::Index BlockDiagonalLDLT::cols() const {
   return n;
 }
 
+inline bool
+BlockDiagonalLDLT::operator==(const BlockDiagonalLDLT &other) const {
+  return blocks == other.blocks;
+}
 /*
  * Block Diagonal
  */
@@ -204,7 +219,7 @@ inline BlockDiagonal BlockDiagonal::operator-(const BlockDiagonal &rhs) const {
   return output;
 }
 
-inline Eigen::MatrixXd BlockDiagonal::toDense() const {
+inline Eigen::MatrixXd BlockDiagonal::to_dense() const {
   Eigen::MatrixXd output = Eigen::MatrixXd::Zero(rows(), cols());
 
   Eigen::Index i = 0;
diff --git a/include/albatross/src/linalg/infer_structure.hpp b/include/albatross/src/linalg/infer_structure.hpp
new file mode 100644
index 00000000..4c916bf9
--- /dev/null
+++ b/include/albatross/src/linalg/infer_structure.hpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2019 Swift Navigation Inc.
+ * Contact: Swift Navigation
+ *
+ * This source is subject to the license found in the file 'LICENSE' which must
+ * be distributed together with this source. All other rights reserved.
+ *
+ * THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
+ * EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef ALBATROSS_SRC_LINALG_INFER_STRUCTURE_HPP
+#define ALBATROSS_SRC_LINALG_INFER_STRUCTURE_HPP
+
+namespace albatross {
+
+namespace linalg {
+
+namespace detail {
+
+using VectorXb = Eigen::Matrix<bool, Eigen::Dynamic, 1>;
+
+void inline connected_indices(const Eigen::MatrixXd &x, const Eigen::Index &i,
+                              VectorXb *visited,
+                              std::set<Eigen::Index> *active_set) {
+  assert(visited != nullptr);
+  assert(active_set != nullptr);
+
+  for (Eigen::Index j = 0; j < x.cols(); ++j) {
+    if (x(i, j) != 0. && !(*visited)[j]) {
+      active_set->insert(j);
+      (*visited)[j] = true;
+      connected_indices(x, j, visited, active_set);
+    }
+  }
+}
+
+} // namespace detail
+
+std::vector<std::set<Eigen::Index>> inline infer_diagonal_blocks(
+    const Eigen::MatrixXd &x) {
+  assert(x.rows() == x.cols());
+  if (x.rows() == 0) {
+    return {};
+  }
+  std::vector<std::set<Eigen::Index>> connected_sets;
+  detail::VectorXb visited(x.rows());
+  visited.fill(false);
+
+  for (Eigen::Index i = 0; i < x.rows(); ++i) {
+    if (!visited[i]) {
+      std::set<Eigen::Index> active_set = {i};
+      detail::connected_indices(x, i, &visited, &active_set);
+      connected_sets.emplace_back(active_set);
+    }
+  }
+
+  return connected_sets;
+}
+
+Eigen::PermutationMatrix<Eigen::Dynamic> inline to_permutation_matrix(
+    const std::vector<std::set<Eigen::Index>> &blocks) {
+  Eigen::Index n = 0;
+  for (const auto &block : blocks) {
+    n += cast::to_index(block.size());
+  }
+
+  Eigen::VectorXi permutation_inds(n);
+  int i = 0;
+  for (const auto &block : blocks) {
+    for (const auto &ind : block) {
+      permutation_inds[ind] = i;
+      ++i;
+    }
+  }
+  return Eigen::PermutationMatrix<Eigen::Dynamic>(permutation_inds);
+}
+
+Structured<BlockDiagonal> inline infer_block_diagonal_matrix(
+    const Eigen::MatrixXd &x) {
+  const auto block_inds = infer_diagonal_blocks(x);
+  const auto P = to_permutation_matrix(block_inds);
+
+  Structured<BlockDiagonal> output;
+  output.P_rows = P.transpose();
+  output.P_cols = P;
+
+  // D = P * x * P.T
+  // x = P.T * D * P
+  for (const auto &block : block_inds) {
+    std::vector<Eigen::Index> inds(block.begin(), block.end());
+    output.matrix.blocks.emplace_back(subset(x, inds, inds));
+  }
+  return output;
+}
+
+} // namespace linalg
+
+} // namespace albatross
+
+#endif // ALBATROSS_SRC_LINALG_INFER_STRUCTURE_HPP
diff --git a/include/albatross/src/linalg/operators.hpp b/include/albatross/src/linalg/operators.hpp
new file mode 100644
index 00000000..004802da
--- /dev/null
+++ b/include/albatross/src/linalg/operators.hpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2019 Swift Navigation Inc.
+ * Contact: Swift Navigation
+ *
+ * This source is subject to the license found in the file 'LICENSE' which must
+ * be distributed together with this source. All other rights reserved.
+ *
+ * THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
+ * EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef ALBATROSS_SRC_LINALG_OPERATORS_HPP
+#define ALBATROSS_SRC_LINALG_OPERATORS_HPP
+
+namespace albatross {
+
+template <typename MatrixType>
+inline Eigen::MatrixXd to_dense(const MatrixType &x) {
+  return x.to_dense();
+}
+
+template <typename Derived>
+inline Eigen::MatrixXd to_dense(const Eigen::DenseBase<Derived> &x) {
+  return x.derived();
+}
+
+template <typename FeatureType>
+inline auto operator*(const Eigen::PermutationMatrix<Eigen::Dynamic> &matrix,
+                      const std::vector<FeatureType> &features) {
+  std::vector<FeatureType> output(features.size());
+  const auto &indices = matrix.indices();
+  for (Eigen::Index i = 0; i < indices.size(); ++i) {
+    const std::size_t si = cast::to_size(i);
+    const std::size_t sj = cast::to_size(indices[i]);
+    output[sj] = features[si];
+  }
+  return output;
+}
+
+} // namespace albatross
+
+#endif // ALBATROSS_SRC_LINALG_OPERATORS_HPP
diff --git a/include/albatross/src/linalg/structured.hpp b/include/albatross/src/linalg/structured.hpp
new file mode 100644
index 00000000..8d61bd79
--- /dev/null
+++ b/include/albatross/src/linalg/structured.hpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2019 Swift Navigation Inc.
+ * Contact: Swift Navigation
+ *
+ * This source is subject to the license found in the file 'LICENSE' which must
+ * be distributed together with this source. All other rights reserved.
+ *
+ * THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
+ * EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef ALBATROSS_SRC_LINALG_STRUCTURED_HPP
+#define ALBATROSS_SRC_LINALG_STRUCTURED_HPP
+
+namespace albatross {
+
+/*
+ * Holds a structured representation of a matrix which often requires
+ * permuting rows and columns;
+ */
+template <typename MatrixType> struct Structured {
+
+  Eigen::Index rows() const { return matrix.rows(); }
+
+  Eigen::Index cols() const { return matrix.cols(); }
+
+  Eigen::MatrixXd to_dense() const {
+    return P_rows * albatross::to_dense(matrix) * P_cols;
+  }
+
+  MatrixType matrix;
+  Eigen::PermutationMatrix<Eigen::Dynamic> P_rows;
+  Eigen::PermutationMatrix<Eigen::Dynamic> P_cols;
+};
+
+template <typename MatrixType, typename RhsType>
+auto operator*(const Structured<MatrixType> &lhs, const RhsType &rhs) {
+  return lhs.P_rows * (lhs.matrix * (lhs.P_cols * rhs));
+}
+
+template <typename MatrixType>
+auto make_structured(MatrixType &&x,
+                     const Eigen::PermutationMatrix<Eigen::Dynamic> &P_rows,
+                     const Eigen::PermutationMatrix<Eigen::Dynamic> &P_cols) {
+  return Structured{x, P_rows, P_cols};
+}
+
+namespace structured {
+
+template <typename MatrixType> auto ldlt(const Structured<MatrixType> &x) {
+  return make_structured(x.matrix.ldlt(), x.P_rows, x.P_cols);
+}
+
+template <typename MatrixType, typename RhsType>
+auto sqrt_solve(const Structured<MatrixType> &x, const RhsType &rhs) {
+  assert(x.P_rows == x.P_cols);
+  return x.matrix.sqrt_solve(x.P_rows.transpose() * rhs);
+}
+
+} // namespace structured
+
+} // namespace albatross
+
+#endif // ALBATROSS_SRC_LINALG_STRUCTURED_HPP
diff --git a/include/albatross/src/models/sparse_gp.hpp b/include/albatross/src/models/sparse_gp.hpp
index 64a86692..68d20ddb 100644
--- a/include/albatross/src/models/sparse_gp.hpp
+++ b/include/albatross/src/models/sparse_gp.hpp
@@ -10,8 +10,8 @@
  * WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
  */
 
-#ifndef INCLUDE_ALBATROSS_MODELS_SPARSE_GP_H_
-#define INCLUDE_ALBATROSS_MODELS_SPARSE_GP_H_
+#ifndef ALBATROSS_SRC_MODELS_SPARSE_GP_HPP
+#define ALBATROSS_SRC_MODELS_SPARSE_GP_HPP
 
 namespace albatross {
 
@@ -74,7 +74,7 @@ template <typename FeatureType> struct SparseGPFit {};
 
 template <typename FeatureType> struct Fit<SparseGPFit<FeatureType>> {
 
   std::vector<FeatureType> train_features;
-  Eigen::SerializableLDLT train_covariance;
+  BlockDiagonalLDLT train_covariance;
   Eigen::MatrixXd sigma_R;
   Eigen::Matrix<Eigen::Index, Eigen::Dynamic, 1> permutation_indices;
   Eigen::VectorXd information;
@@ -83,7 +83,7 @@ template <typename FeatureType> struct Fit<SparseGPFit<FeatureType>> {
   Fit(){};
 
   Fit(const std::vector<FeatureType> &features_,
-      const Eigen::SerializableLDLT &train_covariance_,
+      const BlockDiagonalLDLT &train_covariance_,
       const Eigen::MatrixXd sigma_R_,
      const Eigen::Matrix<Eigen::Index, Eigen::Dynamic, 1> permutation_indices_,
       const Eigen::VectorXd &information_, Eigen::Index numerical_rank_)
@@ -296,11 +296,11 @@ class SparseGaussianProcessRegression
                     const MarginalDistribution &targets) const {
 
     BlockDiagonalLDLT A_ldlt;
-    Eigen::SerializableLDLT K_uu_ldlt;
     Eigen::MatrixXd K_fu;
     Eigen::VectorXd y;
+
     compute_internal_components(old_fit.train_features, features, targets,
-                                &A_ldlt, &K_uu_ldlt, &K_fu, &y);
+                                old_fit.train_covariance, &A_ldlt, &K_fu, &y);
 
     const Eigen::Index n_old = old_fit.sigma_R.rows();
     const Eigen::Index n_new = A_ldlt.rows();
@@ -351,12 +351,12 @@ class SparseGaussianProcessRegression
   //   Sigma = (B^T B)^-1
   //
   Eigen::ColPivHouseholderQR<Eigen::MatrixXd>
-  compute_sigma_qr(const Eigen::SerializableLDLT &K_uu_ldlt,
+  compute_sigma_qr(const BlockDiagonalLDLT &K_uu_ldlt,
                    const BlockDiagonalLDLT &A_ldlt,
                    const Eigen::MatrixXd &K_fu) const {
     Eigen::MatrixXd B(A_ldlt.rows() + K_uu_ldlt.rows(), K_uu_ldlt.rows());
     B.topRows(A_ldlt.rows()) = A_ldlt.sqrt_solve(K_fu);
-    B.bottomRows(K_uu_ldlt.rows()) = K_uu_ldlt.sqrt_transpose();
+    B.bottomRows(K_uu_ldlt.rows()) = K_uu_ldlt.sqrt_transpose().to_dense();
 
     return B.colPivHouseholderQr();
   };
@@ -368,16 +368,21 @@ class SparseGaussianProcessRegression
                  const MarginalDistribution &targets) const {
 
     // Determine the set of inducing points, u.
-    const auto u =
-        inducing_point_strategy_(this->covariance_function_, features);
-    ALBATROSS_ASSERT(u.size() > 0 && "Empty inducing points!");
+    auto u = inducing_point_strategy_(this->covariance_function_, features);
+    ALBATROSS_ASSERT(!u.empty() && "Empty inducing points!");
+
+    // Compute K_uu = P_cols.T * D * P_cols
+    // where D is a block diagonal.  Instead of storing the permutation
+    // we can just use D and reorder the inducing points
+    const auto structured_K_uu_ldlt = compute_kuu_ldlt(&u);
+    const auto &K_uu_ldlt = structured_K_uu_ldlt.matrix;
+    u = structured_K_uu_ldlt.P_cols * u;
 
     BlockDiagonalLDLT A_ldlt;
-    Eigen::SerializableLDLT K_uu_ldlt;
     Eigen::MatrixXd K_fu;
     Eigen::VectorXd y;
-    compute_internal_components(u, features, targets, &A_ldlt, &K_uu_ldlt,
-                                &K_fu, &y);
+    compute_internal_components(u, features, targets, K_uu_ldlt, &A_ldlt, &K_fu,
+                                &y);
 
     const auto B_qr = compute_sigma_qr(K_uu_ldlt, A_ldlt, K_fu);
     Eigen::VectorXd y_augmented = Eigen::VectorXd::Zero(B_qr.matrixR().rows());
@@ -401,14 +406,23 @@ class SparseGaussianProcessRegression
     new_fit.train_features = new_inducing_points;
 
-    const Eigen::MatrixXd K_zz =
-        this->covariance_function_(new_inducing_points, Base::threads_.get());
-    new_fit.train_covariance = Eigen::SerializableLDLT(K_zz);
+    auto K_zz = this->covariance_function_(new_fit.train_features,
+                                           Base::threads_.get());
+    K_zz.diagonal() +=
+        inducing_nugget_.value * Eigen::VectorXd::Ones(K_zz.rows());
+    const auto block_diag_K_zz = linalg::infer_block_diagonal_matrix(K_zz);
+    // Store only the inner block diagonal representation and reorder
+    // the inducing points to match;
+    new_fit.train_covariance = block_diag_K_zz.matrix.ldlt();
+
+    const auto &P = block_diag_K_zz.P_cols;
+    new_fit.train_features = P * new_fit.train_features;
+    JointDistribution prediction(P * prediction_.mean,
+                                 P * prediction_.covariance * P.transpose());
 
     // We're going to need to take the sqrt of the new covariance which
     // could be extremely small, so here we add a small nugget to avoid
     // numerical instability
-    JointDistribution prediction(prediction_);
     prediction.covariance.diagonal() += Eigen::VectorXd::Constant(
         cast::to_index(prediction.size()), 1, details::DEFAULT_NUGGET);
 
     new_fit.information = new_fit.train_covariance.solve(prediction.mean);
@@ -439,6 +453,7 @@ class SparseGaussianProcessRegression
     // We can then compute and store the QR decomposition of B
     // as we do in a normal fit.
     const Eigen::SerializableLDLT C_ldlt(prediction.covariance);
+    // think about this, probably a better way:
    const Eigen::MatrixXd sigma_inv_sqrt = C_ldlt.sqrt_solve(K_zz);
     const auto B_qr = sigma_inv_sqrt.colPivHouseholderQr();
 
@@ -526,15 +541,22 @@ class SparseGaussianProcessRegression
   template <typename FeatureType>
   double log_likelihood(const RegressionDataset<FeatureType> &dataset) const {
-    const auto u =
+    auto u =
         inducing_point_strategy_(this->covariance_function_, dataset.features);
 
+    // Compute K_uu = P_cols.T * D * P_cols
+    // where D is a block diagonal.  Instead of storing the permutation
+    // we can just use D and reorder the inducing points
+    const auto structured_K_uu_ldlt = compute_kuu_ldlt(&u);
+    const auto &K_uu_ldlt = structured_K_uu_ldlt.matrix;
+    u = structured_K_uu_ldlt.P_cols * u;
+
     BlockDiagonalLDLT A_ldlt;
-    Eigen::SerializableLDLT K_uu_ldlt;
     Eigen::MatrixXd K_fu;
     Eigen::VectorXd y;
-    compute_internal_components(u, dataset.features, dataset.targets, &A_ldlt,
-                                &K_uu_ldlt, &K_fu, &y);
+    compute_internal_components(u, dataset.features, dataset.targets, K_uu_ldlt,
+                                &A_ldlt, &K_fu, &y);
+
     const auto B_qr = compute_sigma_qr(K_uu_ldlt, A_ldlt, K_fu);
 
     // The log likelihood for y ~ N(0, K) is:
     //
@@ -596,6 +618,17 @@ class SparseGaussianProcessRegression
   }
 
 private:
+  template <typename InducingFeatureType>
+  Structured<BlockDiagonalLDLT>
+  compute_kuu_ldlt(std::vector<InducingFeatureType> *inducing_features) const {
+    auto K_uu =
+        this->covariance_function_(*inducing_features, Base::threads_.get());
+    K_uu.diagonal() +=
+        inducing_nugget_.value * Eigen::VectorXd::Ones(K_uu.rows());
+    const auto block_diag_K_uu = linalg::infer_block_diagonal_matrix(K_uu);
+    return structured::ldlt(block_diag_K_uu);
+  }
+
   // This method takes care of a lot of the common book keeping required to
   // setup the Sparse Gaussian Process problem. Namely, we want to get from
   // possibly unordered features to a structured representation
@@ -607,11 +640,10 @@ class SparseGaussianProcessRegression
       const std::vector<InducingFeatureType> &inducing_features,
       const std::vector<FeatureType> &out_of_order_features,
       const MarginalDistribution &out_of_order_targets,
-      BlockDiagonalLDLT *A_ldlt, Eigen::SerializableLDLT *K_uu_ldlt,
+      const BlockDiagonalLDLT &K_uu_ldlt, BlockDiagonalLDLT *A_ldlt,
       Eigen::MatrixXd *K_fu, Eigen::VectorXd *y) const {
 
     ALBATROSS_ASSERT(A_ldlt != nullptr);
-    ALBATROSS_ASSERT(K_uu_ldlt != nullptr);
     ALBATROSS_ASSERT(K_fu != nullptr);
     ALBATROSS_ASSERT(y != nullptr);
 
@@ -645,18 +677,12 @@ class SparseGaussianProcessRegression
     *K_fu = this->covariance_function_(features, inducing_features,
                                        Base::threads_.get());
 
-    auto K_uu =
-        this->covariance_function_(inducing_features, Base::threads_.get());
-
-    K_uu.diagonal() +=
-        inducing_nugget_.value * Eigen::VectorXd::Ones(K_uu.rows());
-
-    *K_uu_ldlt = K_uu.ldlt();
     // P is such that:
     //     Q_ff = K_fu K_uu^-1 K_uf
     //          = K_fu K_uu^-T/2 K_uu^-1/2 K_uf
     //          = P^T P
-    const Eigen::MatrixXd P = K_uu_ldlt->sqrt_solve(K_fu->transpose());
+    const Eigen::MatrixXd P =
+        K_uu_ldlt.sqrt_solve(K_fu->transpose(), Base::threads_.get());
 
     // We only need the diagonal blocks of Q_ff to get A
     BlockDiagonal Q_ff_diag;
@@ -752,4 +778,4 @@ auto sparse_gp_from_covariance(CovFunc covariance_function,
 
 } // namespace albatross
 
-#endif /* INCLUDE_ALBATROSS_MODELS_SPARSE_GP_H_ */
+#endif // ALBATROSS_SRC_MODELS_SPARSE_GP_HPP
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 5e555845..0ef0da92 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -23,6 +23,8 @@ swift_add_tool(albatross_unit_tests
   test_gp.cc
   test_group_by.cc
   test_indexing.cc
+  test_linalg_infer_structure.cc
+  test_linalg_structured.cc
   test_linalg_utils.cc
   test_map_utils.cc
   test_minimum_spanning_tree.cc
diff --git a/tests/test_block_utils.cc b/tests/test_block_utils.cc
index 030f9699..cc7eb3db 100644
--- a/tests/test_block_utils.cc
+++ b/tests/test_block_utils.cc
@@ -48,7 +48,7 @@ TEST(test_block_utils, test_to_dense) {
   auto block_diag = example.block;
   auto dense = example.dense;
 
-  const auto block_result = block_diag.toDense();
+  const auto block_result = block_diag.to_dense();
 
   EXPECT_LE((block_result - dense).norm(), 1e-6);
 }
diff --git a/tests/test_linalg_infer_structure.cc b/tests/test_linalg_infer_structure.cc
new file mode 100644
index 00000000..2afbf48f
--- /dev/null
+++ b/tests/test_linalg_infer_structure.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2018 Swift Navigation Inc.
+ * Contact: Swift Navigation
+ *
+ * This source is subject to the license found in the file 'LICENSE' which must
+ * be distributed together with this source. All other rights reserved.
+ *
+ * THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
+ * EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include
+#include
+
+#include
+#include
+
+namespace albatross {
+
+struct FullyIndependent {
+
+  Eigen::MatrixXd matrix() const { return Eigen::MatrixXd::Identity(5, 5); }
+
+  std::vector<std::set<Eigen::Index>> expected() const {
+    return std::vector<std::set<Eigen::Index>>{{0}, {1}, {2}, {3}, {4}};
+  }
+};
+
+struct FullyConnected {
+
+  Eigen::MatrixXd matrix() const {
+    Eigen::MatrixXd x = Eigen::MatrixXd::Identity(5, 5);
+    // Add an offset diagonal so everything is connected
+    for (Eigen::Index i = 1; i < x.rows(); ++i) {
+      x(i, i - 1) = 1;
+      x(i - 1, i) = 1;
+    }
+    return x;
+  }
+
+  std::vector<std::set<Eigen::Index>> expected() const {
+    return std::vector<std::set<Eigen::Index>>{{0, 1, 2, 3, 4}};
+  }
+};
+
+struct TwoBlocks {
+
+  Eigen::MatrixXd matrix() const {
+    Eigen::MatrixXd x = Eigen::MatrixXd::Identity(6, 6);
+    // Add an offset diagonal so everything is connected
+    for (Eigen::Index i = 1; i < x.rows(); ++i) {
+      // disconnect a row from the previous ones
+      if (i != 3) {
+        x(i, i - 1) = 1;
+        x(i - 1, i) = 1;
+      }
+    }
+    return x;
+  }
+
+  std::vector<std::set<Eigen::Index>> expected() const {
+    return std::vector<std::set<Eigen::Index>>{{0, 1, 2}, {3, 4, 5}};
+  }
+};
+
+struct ThreeBlocksEmptyRow {
+
+  Eigen::MatrixXd matrix() const {
+    auto x = TwoBlocks().matrix();
+    // completely zero out row/col 3
+    x.row(3).fill(0.);
+    x.col(3).fill(0.);
+
+    return x;
+  }
+
+  std::vector<std::set<Eigen::Index>> expected() const {
+    return std::vector<std::set<Eigen::Index>>{{0, 1, 2}, {3}, {4, 5}};
+  }
+};
+
+struct ThreeBlocksEmptyRowPermuted {
+
+  Eigen::MatrixXd matrix() const {
+    auto x = ThreeBlocksEmptyRow().matrix();
+    assert(x.rows() == 6);
+    Eigen::VectorXi indices(6);
+    indices << 2, 4, 3, 1, 0, 5;
+    Eigen::PermutationMatrix<Eigen::Dynamic> P(indices);
+    x = P * x * P.transpose();
+    return x;
+  }
+
+  std::vector<std::set<Eigen::Index>> expected() const {
+    return std::vector<std::set<Eigen::Index>>{{0, 5}, {1}, {2, 3, 4}};
+  }
+};
+
+struct EmptyMatrix {
+  Eigen::MatrixXd matrix() const { return Eigen::MatrixXd::Zero(0, 0); }
+
+  std::vector<std::set<Eigen::Index>> expected() const { return {}; }
+};
+
+struct SizeOne {
+  Eigen::MatrixXd matrix() const { return Eigen::MatrixXd::Identity(1, 1); }
+
+  std::vector<std::set<Eigen::Index>> expected() const { return {{0}}; }
+};
+
+template <typename TestCase>
+class InferStructureTest : public ::testing::Test {
+public:
+  TestCase test_case;
+};
+
+typedef ::testing::Types<FullyIndependent, FullyConnected, TwoBlocks,
+                         ThreeBlocksEmptyRow, ThreeBlocksEmptyRowPermuted,
+                         EmptyMatrix, SizeOne>
+    InferStructureTestCases;
+TYPED_TEST_SUITE(InferStructureTest, InferStructureTestCases);
+
+TYPED_TEST(InferStructureTest, test_expected) {
+  const auto actual = linalg::infer_diagonal_blocks(this->test_case.matrix());
+  EXPECT_EQ(this->test_case.expected(), actual);
+}
+
+TYPED_TEST(InferStructureTest, test_independent) {
+  const auto x = this->test_case.matrix();
+  const auto blocks = linalg::infer_diagonal_blocks(x);
+
+  // Make sure any values outside of blocks are actually 0
+  for (const auto &block : blocks) {
+    for (Eigen::Index i : block) {
+      for (Eigen::Index j = 0; j < x.cols(); ++j) {
+        if (block.find(j) == block.end()) {
+          EXPECT_EQ(x(i, j), 0);
+        }
+      }
+    }
+  }
+}
+
+TYPED_TEST(InferStructureTest, test_to_permutation_matrix) {
+  const auto x = this->test_case.matrix();
+  const auto blocks = linalg::infer_diagonal_blocks(x);
+  const auto P = linalg::to_permutation_matrix(blocks);
+  const Eigen::MatrixXd block_diag = P * x * P.transpose();
+
+  // If you permute the input matrix you should end up with
+  // a block diagonal matrix, so if you then infer the blocks
+  // you should get monotonically increasing indices.
+  const auto block_diag_blocks = linalg::infer_diagonal_blocks(block_diag);
+  Eigen::Index expected = 0;
+  for (const auto &block : block_diag_blocks) {
+    for (const auto &ind : block) {
+      EXPECT_EQ(ind, expected);
+      ++expected;
+    }
+  }
+}
+
+TYPED_TEST(InferStructureTest, test_to_structured_matrix) {
+  const auto x = this->test_case.matrix();
+  const auto structured_x = linalg::infer_block_diagonal_matrix(x);
+  EXPECT_EQ(x, structured_x.to_dense());
+}
+
+} // namespace albatross
\ No newline at end of file
diff --git a/tests/test_linalg_structured.cc b/tests/test_linalg_structured.cc
new file mode 100644
index 00000000..68586999
--- /dev/null
+++ b/tests/test_linalg_structured.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 Swift Navigation Inc.
+ * Contact: Swift Navigation
+ *
+ * This source is subject to the license found in the file 'LICENSE' which must
+ * be distributed together with this source. All other rights reserved.
+ *
+ * THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
+ * EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+namespace albatross {
+
+struct StructuredBlockDiagonal {
+
+  Eigen::MatrixXd dense() const {
+    Eigen::MatrixXd output(4, 4);
+    output.row(0) << 1, 0, 0.9, 0;
+    output.row(1) << 0, 1, 0, 0.7;
+    output.row(2) << 0.9, 0, 1, 0;
+    output.row(3) << 0, 0.7, 0, 1;
+    return output;
+  }
+
+  Structured<BlockDiagonal> structured() const {
+    Eigen::MatrixXd block_0(2, 2);
+    block_0 << 1, 0.7, 0.7, 1;
+    Eigen::MatrixXd block_1(2, 2);
+    block_1 << 1, 0.9, 0.9, 1;
+    BlockDiagonal block_diagonal;
+    block_diagonal.blocks.push_back(block_0);
+    block_diagonal.blocks.push_back(block_1);
+    Eigen::VectorXi indices(4);
+    indices << 1, 3, 2, 0;
+    Eigen::PermutationMatrix<Eigen::Dynamic> P(indices);
+    return Structured{block_diagonal, P, P.transpose()};
+  }
+};
+
+template <typename TestCase> class StructuredTest : public ::testing::Test {
+public:
+  TestCase test_case;
+};
+
+typedef ::testing::Types<StructuredBlockDiagonal> StructuredTestCases;
+TYPED_TEST_SUITE(StructuredTest, StructuredTestCases);
+
+TYPED_TEST(StructuredTest, test_to_dense_matches_test_case) {
+  const Eigen::MatrixXd dense = this->test_case.dense();
+  const auto structured = this->test_case.structured();
+  const Eigen::MatrixXd actual = to_dense(structured);
+  EXPECT_EQ(actual, dense);
+}
+
+TYPED_TEST(StructuredTest, test_to_dense_components) {
+  const Eigen::MatrixXd dense = this->test_case.dense();
+  const auto structured = this->test_case.structured();
+  Eigen::MatrixXd expected =
+      structured.P_rows * to_dense(structured.matrix) * structured.P_cols;
+
+  EXPECT_EQ(expected, dense);
+}
+
+TYPED_TEST(StructuredTest, test_reverse) {
+  const Eigen::MatrixXd dense = this->test_case.dense();
+  const auto structured = this->test_case.structured();
+  // Pr * A * Pc = X
+  // A = Pr.T * X * Pc.T
+  Eigen::MatrixXd reverse =
+      structured.P_rows.transpose() * dense * structured.P_cols.transpose();
+
+  EXPECT_EQ(reverse, to_dense(structured.matrix));
+}
+} // namespace albatross
\ No newline at end of file
diff --git a/tests/test_sparse_gp.cc b/tests/test_sparse_gp.cc
index eb57bb79..07b3dedf 100644
--- a/tests/test_sparse_gp.cc
+++ b/tests/test_sparse_gp.cc
@@ -355,7 +355,7 @@ TYPED_TEST(SparseGaussianProcessTest, test_rebase_inducing_points) {
   auto high_res_pred =
       high_res_fit.predict_with_measurement_noise(test_features).joint();
   // Increasing the inducing points shouldn't change much
-  EXPECT_LT((high_res_pred.mean - full_pred.mean).norm(), 1e-6);
+  EXPECT_LT((high_res_pred.mean - full_pred.mean).norm(), 1e-5);
 
   const auto low_high_res_fit =
       rebase_inducing_points(low_res_fit, high_res_features);