From 358f7cbb23c50be6b7a0c74ab96d46d8c4167c5a Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Wed, 17 Sep 2025 11:23:35 -0400 Subject: [PATCH 1/8] refactor: move indexed_db_futures dependency to workspace's Cargo.toml Signed-off-by: Michael Goldenberg --- Cargo.toml | 1 + crates/matrix-sdk-indexeddb/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index ef99abbb995..4ba9dbe2e2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,6 +51,7 @@ hkdf = "0.12.4" hmac = "0.12.1" http = "1.3.1" imbl = "6.1.0" +indexed_db_futures = "0.5.0" indexmap = "2.11.0" insta = { version = "1.43.1", features = ["json", "redactions"] } itertools = "0.14.0" diff --git a/crates/matrix-sdk-indexeddb/Cargo.toml b/crates/matrix-sdk-indexeddb/Cargo.toml index 5b72ec48aa0..8762f71b4c7 100644 --- a/crates/matrix-sdk-indexeddb/Cargo.toml +++ b/crates/matrix-sdk-indexeddb/Cargo.toml @@ -31,7 +31,7 @@ base64.workspace = true gloo-utils = { version = "0.2.0", features = ["serde"] } growable-bloom-filter = { workspace = true, optional = true } hkdf.workspace = true -indexed_db_futures = "0.5.0" +indexed_db_futures.workspace = true js-sys.workspace = true matrix-sdk-base = { workspace = true, features = ["js"], optional = true } matrix-sdk-crypto = { workspace = true, features = ["js"], optional = true } From 84e14da495ca99c8b84fc8460e3f6a8d8bd783bd Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 19 Sep 2025 09:58:55 -0400 Subject: [PATCH 2/8] refactor(indexeddb): add generic error type for easily creating backend errors for each store Signed-off-by: Michael Goldenberg --- crates/matrix-sdk-indexeddb/src/error.rs | 51 ++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/crates/matrix-sdk-indexeddb/src/error.rs b/crates/matrix-sdk-indexeddb/src/error.rs index b298f10d3ac..0aec55bb775 100644 --- a/crates/matrix-sdk-indexeddb/src/error.rs +++ b/crates/matrix-sdk-indexeddb/src/error.rs @@ -12,7 +12,16 @@ // See the 
License for the specific language governing permissions and // limitations under the License +#[cfg(feature = "event-cache-store")] +use matrix_sdk_base::event_cache::store::EventCacheStoreError; +#[cfg(feature = "media-store")] +use matrix_sdk_base::media::store::MediaStoreError; +#[cfg(feature = "state-store")] +use matrix_sdk_base::StoreError; use matrix_sdk_base::{SendOutsideWasm, SyncOutsideWasm}; +#[cfg(feature = "e2e-encryption")] +use matrix_sdk_crypto::CryptoStoreError; +use thiserror::Error; /// A trait that combines the necessary traits needed for asynchronous runtimes, /// but excludes them when running in a web environment - i.e., when @@ -21,3 +30,45 @@ pub trait AsyncErrorDeps: std::error::Error + SendOutsideWasm + SyncOutsideWasm impl AsyncErrorDeps for T where T: std::error::Error + SendOutsideWasm + SyncOutsideWasm + 'static {} + +/// A wrapper around [`String`] that derives [`Error`](std::error::Error). +/// This is useful when a particular error is not [`Send`] or [`Sync`] but +/// must be mapped into a higher-level error that requires those constraints, +/// e.g. [`StoreError::Backend`], [`CryptoStoreError::Backend`], etc. 
+#[derive(Debug, Error)] +#[error("{0}")] +pub struct GenericError(String); + +impl> From for GenericError { + fn from(value: S) -> Self { + Self(value.as_ref().to_owned()) + } +} + +#[cfg(feature = "e2e-encryption")] +impl From for CryptoStoreError { + fn from(value: GenericError) -> Self { + Self::backend(value) + } +} + +#[cfg(feature = "event-cache-store")] +impl From for EventCacheStoreError { + fn from(value: GenericError) -> Self { + Self::backend(value) + } +} + +#[cfg(feature = "media-store")] +impl From for MediaStoreError { + fn from(value: GenericError) -> Self { + Self::backend(value) + } +} + +#[cfg(feature = "state-store")] +impl From for StoreError { + fn from(value: GenericError) -> Self { + Self::backend(value) + } +} From 17e9899b5985f951726646264387ec719febb84d Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 26 Sep 2025 14:36:51 -0400 Subject: [PATCH 3/8] refactor(indexeddb): upgrade indexed_db_futures dependency Signed-off-by: Michael Goldenberg --- Cargo.lock | 284 +++++-- Cargo.toml | 3 +- crates/matrix-sdk-indexeddb/Cargo.toml | 7 +- .../src/crypto_store/migrations/mod.rs | 198 +++-- .../src/crypto_store/migrations/v0_to_v5.rs | 54 +- .../src/crypto_store/migrations/v10_to_v11.rs | 18 +- .../src/crypto_store/migrations/v11_to_v12.rs | 17 +- .../src/crypto_store/migrations/v12_to_v13.rs | 8 +- .../src/crypto_store/migrations/v13_to_v14.rs | 16 +- .../src/crypto_store/migrations/v5_to_v7.rs | 49 +- .../src/crypto_store/migrations/v7_to_v8.rs | 53 +- .../src/crypto_store/migrations/v8_to_v10.rs | 50 +- .../src/crypto_store/mod.rs | 666 +++++++++------ crates/matrix-sdk-indexeddb/src/error.rs | 3 + .../src/event_cache_store/error.rs | 18 +- .../src/event_cache_store/migrations.rs | 117 ++- .../src/event_cache_store/mod.rs | 12 +- .../src/event_cache_store/transaction.rs | 4 +- crates/matrix-sdk-indexeddb/src/lib.rs | 1 - .../src/media_store/error.rs | 20 +- .../src/media_store/migrations.rs | 93 ++- .../src/media_store/mod.rs | 13 
+- .../src/media_store/transaction.rs | 4 +- .../src/serializer/indexed_type/mod.rs | 20 +- .../src/serializer/safe_encode/traits.rs | 24 +- .../src/serializer/safe_encode/types.rs | 13 +- .../src/state_store/migrations.rs | 780 +++++++++--------- .../src/state_store/mod.rs | 574 +++++++------ .../src/transaction/mod.rs | 118 ++- 29 files changed, 1874 insertions(+), 1363 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f0061137ff..be6d46ed8a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,14 +4,14 @@ version = 4 [[package]] name = "accessory" -version = "1.3.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87537f9ae7cfa78d5b8ebd1a1db25959f5e737126be4d8eb44a5452fc4b63cde" +checksum = "28e416a3ab45838bac2ab2d81b1088d738d7b2d2c5272a54d39366565a29bd80" dependencies = [ "macroific", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -179,7 +179,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -238,7 +238,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn", + "syn 2.0.101", ] [[package]] @@ -350,7 +350,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -361,7 +361,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -594,7 +594,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 2.0.101", ] [[package]] @@ -852,7 +852,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1153,7 +1153,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1190,7 +1190,7 @@ checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1224,7 +1224,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn", + "syn 2.0.101", ] [[package]] @@ -1238,7 +1238,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn", + "syn 2.0.101", ] [[package]] @@ -1249,7 +1249,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1260,7 +1260,7 @@ checksum = "2b5be8a7a562d315a5b92a630c30cec6bcf663e6673f00fbb69cca66a6f521b9" dependencies = [ "darling_core 0.21.1", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1331,14 +1331,16 @@ dependencies = [ [[package]] name = "delegate-display" -version = "2.1.1" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98a85201f233142ac819bbf6226e36d0b5e129a47bd325084674261c82d4cd66" +checksum = "9926686c832494164c33a36bf65118f4bd6e704000b58c94681bf62e9ad67a74" dependencies = [ + "impartial-ord", + "itoa", "macroific", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1362,6 +1364,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive_builder" version = "0.20.2" @@ -1380,7 +1393,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1390,7 +1403,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn", + "syn 2.0.101", +] + +[[package]] +name = "derive_more" +version = 
"1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +dependencies = [ + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", +] + +[[package]] +name = "derive_more-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "unicode-xid", ] [[package]] @@ -1439,7 +1493,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1722,7 +1776,7 @@ checksum = "dd65f1b59dd22d680c7a626cc4a000c1e03d241c51c3e034d2bc9f1e90734f9b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1789,14 +1843,14 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fancy_constructor" -version = "1.3.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b19d0e43eae2bfbafe4931b5e79c73fb1a849ca15cd41a761a7b8587f9a1a2" +checksum = "28a27643a5d05f3a22f5afd6e0d0e6e354f92d37907006f97b84b9cb79082198" dependencies = [ "macroific", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -1966,7 +2020,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ 
"proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -2262,7 +2316,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -2554,7 +2608,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -2620,6 +2674,17 @@ dependencies = [ "bitmaps", ] +[[package]] +name = "impartial-ord" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab604ee7085efba6efc65e4ebca0e9533e3aff6cb501d7d77b211e3a781c6d5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "include_dir" version = "0.7.4" @@ -2647,19 +2712,39 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexed_db_futures" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43315957678a70eb21fb0d2384fe86dde0d6c859a01e24ce127eb65a0143d28c" +version = "0.6.4" +source = "git+https://github.com/mgoldenberg/rust-indexed-db?rev=2ac8f0bc1c53f8d8654efcdf9e32b6d359457398#2ac8f0bc1c53f8d8654efcdf9e32b6d359457398" dependencies = [ "accessory", "cfg-if", "delegate-display", + "derive_more 2.0.1", "fancy_constructor", + "futures-core", + "indexed_db_futures_macros_internal", "js-sys", - "uuid", + "sealed", + "serde", + "serde-wasm-bindgen", + "smallvec", + "thiserror 2.0.16", + "tokio", "wasm-bindgen", "wasm-bindgen-futures", + "wasm_evt_listener", "web-sys", + "web-time", +] + +[[package]] +name = "indexed_db_futures_macros_internal" +version = "1.0.0" +source = "git+https://github.com/mgoldenberg/rust-indexed-db?rev=2ac8f0bc1c53f8d8654efcdf9e32b6d359457398#2ac8f0bc1c53f8d8654efcdf9e32b6d359457398" +dependencies = [ + "macroific", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] @@ -2713,7 +2798,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] 
[[package]] @@ -2998,9 +3083,9 @@ checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" [[package]] name = "macroific" -version = "1.3.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05c00ac596022625d01047c421a0d97d7f09a18e429187b341c201cb631b9dd" +checksum = "89f276537b4b8f981bf1c13d79470980f71134b7bdcc5e6e911e910e556b0285" dependencies = [ "macroific_attr_parse", "macroific_core", @@ -3009,38 +3094,39 @@ dependencies = [ [[package]] name = "macroific_attr_parse" -version = "1.3.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd94d5da95b30ae6e10621ad02340909346ad91661f3f8c0f2b62345e46a2f67" +checksum = "ad4023761b45fcd36abed8fb7ae6a80456b0a38102d55e89a57d9a594a236be9" dependencies = [ - "cfg-if", "proc-macro2", "quote", - "syn", + "sealed", + "syn 2.0.101", ] [[package]] name = "macroific_core" -version = "1.0.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13198c120864097a565ccb3ff947672d969932b7975ebd4085732c9f09435e55" +checksum = "d0a7594d3c14916fa55bef7e9d18c5daa9ed410dd37504251e4b75bbdeec33e3" dependencies = [ "proc-macro2", "quote", - "syn", + "sealed", + "syn 2.0.101", ] [[package]] name = "macroific_macro" -version = "1.1.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c9853143cbed7f1e41dc39fee95f9b361bec65c8dc2a01bf609be01b61f5ae" +checksum = "4da6f2ed796261b0a74e2b52b42c693bb6dee1effba3a482c49592659f824b3b" dependencies = [ "macroific_attr_parse", "macroific_core", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -3098,7 +3184,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -3380,7 +3466,7 @@ version = "0.7.0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -3569,7 +3655,7 @@ name = "matrix-sdk-test-macros" 
version = "0.14.0" dependencies = [ "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -3933,7 +4019,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -4091,7 +4177,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -4278,7 +4364,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55" dependencies = [ "proc-macro2", - "syn", + "syn 2.0.101", ] [[package]] @@ -4356,7 +4442,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -4896,7 +4982,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn", + "syn 2.0.101", "toml 0.8.15", ] @@ -5096,7 +5182,18 @@ checksum = "7f81c2fde025af7e69b1d1420531c8a8811ca898919db177141a85313b1cb932" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", +] + +[[package]] +name = "sealed" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f968c5ea23d555e670b449c1c5e7b2fc399fdaec1d304a17cd48e288abc107" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] @@ -5276,7 +5373,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -5466,6 +5563,9 @@ name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +dependencies = [ + "serde", +] [[package]] name = "smawk" @@ -5594,7 +5694,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 2.0.101", ] [[package]] @@ -5607,7 +5707,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 2.0.101", ] [[package]] @@ 
-5616,6 +5716,17 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "syn" version = "2.0.101" @@ -5644,7 +5755,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -5854,7 +5965,7 @@ checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -5865,7 +5976,7 @@ checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -5981,7 +6092,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -6161,7 +6272,7 @@ source = "git+https://github.com/tokio-rs/tracing.git?rev=20f5b3d8ba057ca9c4ae00 dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -6362,6 +6473,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "uniffi" version = "0.28.0" @@ -6426,7 +6543,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "802d2051a700e3ec894c79f80d2705b69d85844dafbbe5d1a92776f8f48b563a" 
dependencies = [ "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -6458,7 +6575,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn", + "syn 2.0.101", "toml 0.5.11", "uniffi_meta", ] @@ -6751,7 +6868,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn", + "syn 2.0.101", "wasm-bindgen-shared", ] @@ -6786,7 +6903,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6821,7 +6938,7 @@ checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -6837,6 +6954,24 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasm_evt_listener" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc92d6378b411ed94839112a36d9dbc77143451d85b05dfb0cce93a78dab1963" +dependencies = [ + "accessory", + "derivative", + "derive_more 1.0.0", + "fancy_constructor", + "futures-core", + "js-sys", + "smallvec", + "tokio", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "web-sys" version = "0.3.77" @@ -6854,6 +6989,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", + "serde", "wasm-bindgen", ] @@ -6993,7 +7129,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -7004,7 +7140,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -7317,7 +7453,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", "synstructure", ] @@ -7338,7 +7474,7 @@ 
checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -7358,7 +7494,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", "synstructure", ] @@ -7379,7 +7515,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] @@ -7401,7 +7537,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 4ba9dbe2e2e..6c3369d7aa6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ hkdf = "0.12.4" hmac = "0.12.1" http = "1.3.1" imbl = "6.1.0" -indexed_db_futures = "0.5.0" +indexed_db_futures = "0.6.4" indexmap = "2.11.0" insta = { version = "1.43.1", features = ["json", "redactions"] } itertools = "0.14.0" @@ -201,6 +201,7 @@ lto = false [patch.crates-io] async-compat = { git = "https://github.com/element-hq/async-compat", rev = "5a27c8b290f1f1dcfc0c4ec22c464e38528aa591" } const_panic = { git = "https://github.com/jplatte/const_panic", rev = "9024a4cb3eac45c1d2d980f17aaee287b17be498" } +indexed_db_futures = { git = "https://github.com/mgoldenberg/rust-indexed-db", rev = "2ac8f0bc1c53f8d8654efcdf9e32b6d359457398" } # Needed to fix rotation log issue on Android (https://github.com/tokio-rs/tracing/issues/2937) tracing = { git = "https://github.com/tokio-rs/tracing.git", rev = "20f5b3d8ba057ca9c4ae00ad30dda3dce8a71c05" } tracing-core = { git = "https://github.com/tokio-rs/tracing.git", rev = "20f5b3d8ba057ca9c4ae00ad30dda3dce8a71c05" } diff --git a/crates/matrix-sdk-indexeddb/Cargo.toml b/crates/matrix-sdk-indexeddb/Cargo.toml index 8762f71b4c7..6e6b395df73 100644 --- a/crates/matrix-sdk-indexeddb/Cargo.toml +++ 
b/crates/matrix-sdk-indexeddb/Cargo.toml @@ -31,7 +31,12 @@ base64.workspace = true gloo-utils = { version = "0.2.0", features = ["serde"] } growable-bloom-filter = { workspace = true, optional = true } hkdf.workspace = true -indexed_db_futures.workspace = true +indexed_db_futures = { workspace = true, features = [ + "serde", + "cursors", + "indices", + "streams", +]} js-sys.workspace = true matrix-sdk-base = { workspace = true, features = ["js"], optional = true } matrix-sdk-crypto = { workspace = true, features = ["js"], optional = true } diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs index e88c29ab58f..8656de1ce21 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs @@ -14,9 +14,16 @@ use std::ops::Deref; -use indexed_db_futures::{prelude::*, web_sys::DomException}; +use indexed_db_futures::{ + database::Database, + error::{Error, OpenDbError}, + index::Index, + internals::SystemRepr, + object_store::ObjectStore, + prelude::*, + transaction::Transaction, +}; use tracing::info; -use wasm_bindgen::JsValue; use crate::{crypto_store::Result, serializer::SafeEncodeSerializer, IndexeddbCryptoStoreError}; @@ -32,7 +39,7 @@ mod v7_to_v8; mod v8_to_v10; struct MigrationDb { - db: IdbDatabase, + db: Database, next_version: u32, } @@ -42,12 +49,12 @@ impl MigrationDb { /// closing the DB when this object is dropped. 
async fn new(name: &str, next_version: u32) -> Result { info!("IndexeddbCryptoStore migrate data before v{next_version} starting"); - Ok(Self { db: IdbDatabase::open(name)?.await?, next_version }) + Ok(Self { db: Database::open(name).await?, next_version }) } } impl Deref for MigrationDb { - type Target = IdbDatabase; + type Target = Database; fn deref(&self) -> &Self::Target { &self.db @@ -58,7 +65,7 @@ impl Drop for MigrationDb { fn drop(&mut self) { let version = self.next_version; info!("IndexeddbCryptoStore migrate data before v{version} finished"); - self.db.close(); + self.db.as_sys().close(); } } @@ -101,7 +108,7 @@ const MAX_SUPPORTED_SCHEMA_VERSION: u32 = 99; pub async fn open_and_upgrade_db( name: &str, serializer: &SafeEncodeSerializer, -) -> Result { +) -> Result { // Move the DB version up from where it is to the latest version. // // Schema changes need to be separate from data migrations, so we often @@ -177,11 +184,11 @@ pub async fn open_and_upgrade_db( // `MAX_SUPPORTED_SCHEMA_VERSION` itself to the next multiple of 10). // Open and return the DB (we know it's at the latest version) - Ok(IdbDatabase::open(name)?.await?) + Ok(Database::open(name).await?) } async fn db_version(name: &str) -> Result { - let db = IdbDatabase::open(name)?.await?; + let db = Database::open(name).await?; let old_version = db.version() as u32; db.close(); Ok(old_version) @@ -197,50 +204,45 @@ type OldVersion = u32; /// * `version` - version we are upgrading to. /// * `f` - closure which will be called if the database is below the version /// given. It will be called with three arguments `(db, txn, oldver)`, where: -/// * `db` - the [`IdbDatabase`] -/// * `txn` - the database transaction: a [`IdbTransaction`] +/// * `db` - the [`Database`] +/// * `txn` - the database transaction: a [`Transaction`] /// * `oldver` - the version number before the upgrade. 
-async fn do_schema_upgrade(name: &str, version: u32, f: F) -> Result<(), DomException> +async fn do_schema_upgrade(name: &str, version: u32, f: F) -> Result<(), OpenDbError> where - F: Fn(&IdbDatabase, IdbTransaction<'_>, OldVersion) -> Result<(), JsValue> + 'static, + F: Fn(&Transaction<'_>, OldVersion) -> Result<(), Error> + 'static, { info!("IndexeddbCryptoStore upgrade schema -> v{version} starting"); - let mut db_req: OpenDbRequest = IdbDatabase::open_u32(name, version)?; - - db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| { - // Even if the web-sys bindings expose the version as a f64, the IndexedDB API - // works with an unsigned integer. - // See - let old_version = evt.old_version() as u32; - - // Run the upgrade code we were supplied - f(evt.db(), evt.transaction(), old_version) - })); - - let db = db_req.await?; + let db = Database::open(name) + .with_version(version) + .with_on_upgrade_needed(move |evt, tx| { + // Even if the web-sys bindings expose the version as a f64, the IndexedDB API + // works with an unsigned integer. 
+ // See + let old_version = evt.old_version() as u32; + + // Run the upgrade code we were supplied + f(tx, old_version) + }) + .await?; db.close(); info!("IndexeddbCryptoStore upgrade schema -> v{version} complete"); Ok(()) } fn add_nonunique_index<'a>( - object_store: &'a IdbObjectStore<'a>, + object_store: &'a ObjectStore<'a>, name: &str, key_path: &str, -) -> Result, DomException> { - let params = IdbIndexParameters::new(); - params.set_unique(false); - object_store.create_index_with_params(name, &IdbKeyPath::str(key_path), ¶ms) +) -> Result, Error> { + object_store.create_index(name, key_path.into()).with_unique(false).build() } fn add_unique_index<'a>( - object_store: &'a IdbObjectStore<'a>, + object_store: &'a ObjectStore<'a>, name: &str, key_path: &str, -) -> Result, DomException> { - let params = IdbIndexParameters::new(); - params.set_unique(true); - object_store.create_index_with_params(name, &IdbKeyPath::str(key_path), ¶ms) +) -> Result, Error> { + object_store.create_index(name, key_path.into()).with_unique(true).build() } #[cfg(all(test, target_family = "wasm"))] @@ -249,7 +251,9 @@ mod tests { use assert_matches::assert_matches; use gloo_utils::format::JsValueSerdeExt; - use indexed_db_futures::prelude::*; + use indexed_db_futures::{ + database::VersionChangeEvent, prelude::*, transaction::TransactionMode, + }; use matrix_sdk_common::js_tracing::make_tracing_subscriber; use matrix_sdk_crypto::{ olm::{InboundGroupSession, SenderData, SessionKey}, @@ -262,6 +266,7 @@ mod tests { use ruma::{room_id, OwnedRoomId, RoomId}; use serde::Serialize; use tracing_subscriber::util::SubscriberInitExt; + use wasm_bindgen::JsValue; use web_sys::console; use super::{v0_to_v5, v7::InboundGroupSessionIndexedDbObject2}; @@ -304,14 +309,21 @@ mod tests { // Check how long it takes to insert these records measure_performance("Inserting", "v8", NUM_RECORDS_FOR_PERF, || async { for (key, session_js) in objects.iter() { - store.add_key_val(key, 
session_js).unwrap().await.unwrap(); + store + .add(session_js) + .with_key(key) + .without_key_type() + .build() + .unwrap() + .await + .unwrap(); } }) .await; // Check how long it takes to count these records measure_performance("Counting", "v8", NUM_RECORDS_FOR_PERF, || async { - store.count().unwrap().await.unwrap(); + store.count().await.unwrap(); }) .await; } @@ -340,40 +352,51 @@ mod tests { // Check how long it takes to insert these records measure_performance("Inserting", "v10", NUM_RECORDS_FOR_PERF, || async { for (key, session_js) in objects.iter() { - store.add_key_val(key, session_js).unwrap().await.unwrap(); + store + .add(session_js) + .with_key(key) + .without_key_type() + .build() + .unwrap() + .await + .unwrap(); } }) .await; // Check how long it takes to count these records measure_performance("Counting", "v10", NUM_RECORDS_FOR_PERF, || async { - store.count().unwrap().await.unwrap(); + store.count().await.unwrap(); }) .await; } - async fn create_db(db_prefix: &str) -> IdbDatabase { + async fn create_db(db_prefix: &str) -> Database { let db_name = format!("{db_prefix}::matrix-sdk-crypto"); let store_name = format!("{db_prefix}_store"); - let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, 1).unwrap(); - db_req.set_on_upgrade_needed(Some( - move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - evt.db().create_object_store(&store_name)?; - Ok(()) - }, - )); - db_req.await.unwrap() + Database::open(&db_name) + .with_version(1u32) + .with_on_upgrade_needed( + move |_: VersionChangeEvent, tx: &Transaction<'_>| -> Result<(), Error> { + tx.db().create_object_store(&store_name).build()?; + Ok(()) + }, + ) + .build() + .unwrap() + .await + .unwrap() } - async fn create_transaction<'a>(db: &'a IdbDatabase, db_prefix: &str) -> IdbTransaction<'a> { + async fn create_transaction<'a>(db: &'a Database, db_prefix: &str) -> Transaction<'a> { let store_name = format!("{db_prefix}_store"); - db.transaction_on_one_with_mode(&store_name, 
IdbTransactionMode::Readwrite).unwrap() + db.transaction(&store_name).with_mode(TransactionMode::Readwrite).build().unwrap() } async fn create_store<'a>( - transaction: &'a IdbTransaction<'a>, + transaction: &'a Transaction<'a>, db_prefix: &str, - ) -> IdbObjectStore<'a> { + ) -> ObjectStore<'a> { let store_name = format!("{db_prefix}_store"); transaction.object_store(&store_name).unwrap() } @@ -520,7 +543,7 @@ mod tests { let db_name = format!("{db_prefix:0}::matrix-sdk-crypto"); // delete the db in case it was used in a previous run - let _ = IdbDatabase::delete_by_name(&db_name); + let _ = Database::delete_by_name(&db_name); // Given a DB with data in it as it was at v5 let room_id = room_id!("!test:localhost"); @@ -568,21 +591,21 @@ mod tests { store: &IndexeddbCryptoStore, fetched_backed_up_session: &InboundGroupSession, ) { - let db = IdbDatabase::open(&db_name).unwrap().await.unwrap(); + let db = Database::open(&db_name).build().unwrap().await.unwrap(); assert!(db.version() >= 10.0); - let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap(); + let transaction = db.transaction("inbound_group_sessions3").build().unwrap(); let raw_store = transaction.object_store("inbound_group_sessions3").unwrap(); let key = store.serializer.encode_key( keys::INBOUND_GROUP_SESSIONS_V3, (fetched_backed_up_session.room_id(), fetched_backed_up_session.session_id()), ); let idb_object: InboundGroupSessionIndexedDbObject = - serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap()) - .unwrap(); + serde_wasm_bindgen::from_value(raw_store.get(&key).await.unwrap().unwrap()).unwrap(); assert_eq!(idb_object.backed_up_to, -1); assert!(raw_store.index_names().find(|idx| idx == "backed_up_to").is_some()); + transaction.commit().await.unwrap(); db.close(); } @@ -591,16 +614,15 @@ mod tests { store: &IndexeddbCryptoStore, session: &InboundGroupSession, ) { - let db = IdbDatabase::open(&db_name).unwrap().await.unwrap(); + let db = 
Database::open(&db_name).build().unwrap().await.unwrap(); assert!(db.version() >= 12.0); - let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap(); + let transaction = db.transaction("inbound_group_sessions3").build().unwrap(); let raw_store = transaction.object_store("inbound_group_sessions3").unwrap(); let key = store .serializer .encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (session.room_id(), session.session_id())); let idb_object: InboundGroupSessionIndexedDbObject = - serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap()) - .unwrap(); + serde_wasm_bindgen::from_value(raw_store.get(&key).await.unwrap().unwrap()).unwrap(); assert_eq!( idb_object.session_id, @@ -623,6 +645,7 @@ mod tests { .find(|idx| idx == "inbound_group_session_sender_key_sender_data_type_idx") .is_some()); + transaction.commit().await.unwrap(); db.close(); } @@ -685,10 +708,9 @@ mod tests { let serializer = SafeEncodeSerializer::new(store_cipher.clone()); let txn = db - .transaction_on_one_with_mode( - old_keys::INBOUND_GROUP_SESSIONS_V1, - IdbTransactionMode::Readwrite, - ) + .transaction(old_keys::INBOUND_GROUP_SESSIONS_V1) + .with_mode(TransactionMode::Readwrite) + .build() .unwrap(); let sessions = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V1).unwrap(); for session in session_entries { @@ -701,9 +723,9 @@ mod tests { // Serialize the session with the old style of serialization, since that's what // we used at the time. let serialized_session = serialize_value_as_legacy(&store_cipher, &pickle); - sessions.put_key_val(&key, &serialized_session).unwrap(); + sessions.put(&serialized_session).with_key(key).build().unwrap(); } - txn.await.into_result().unwrap(); + txn.commit().await.unwrap(); // now close our DB, reopen it properly, and check that we can still read our // data. 
@@ -736,21 +758,23 @@ mod tests { let db_name = format!("{db_prefix:0}::matrix-sdk-crypto"); // delete the db in case it was used in a previous run - let _ = IdbDatabase::delete_by_name(&db_name); + let _ = Database::delete_by_name(&db_name).unwrap().await.unwrap(); // Given a DB with data in it as it was at v5 let db = create_v5_db(&db_name).await.unwrap(); let txn = db - .transaction_on_one_with_mode(keys::BACKUP_KEYS, IdbTransactionMode::Readwrite) + .transaction(keys::BACKUP_KEYS) + .with_mode(TransactionMode::Readwrite) + .build() .unwrap(); let store = txn.object_store(keys::BACKUP_KEYS).unwrap(); store - .put_key_val( - &JsValue::from_str(old_keys::BACKUP_KEY_V1), - &serialize_value_as_legacy(&store_cipher, &"1".to_owned()), - ) + .put(&serialize_value_as_legacy(&store_cipher, &"1".to_owned())) + .with_key(JsValue::from_str(old_keys::BACKUP_KEY_V1)) + .build() .unwrap(); + txn.commit().await.unwrap(); db.close(); // When I open a store based on that DB, triggering an upgrade @@ -762,9 +786,9 @@ mod tests { assert_eq!(backup_data.backup_version, Some("1".to_owned())); } - async fn create_v5_db(name: &str) -> std::result::Result { + async fn create_v5_db(name: &str) -> std::result::Result { v0_to_v5::schema_add(name).await?; - IdbDatabase::open_u32(name, 5)?.await + Database::open(name).with_version(5u32).build()?.await } /// Opening a db that has been upgraded to MAX_SUPPORTED_SCHEMA_VERSION @@ -811,23 +835,27 @@ mod tests { let db_name = format!("{db_prefix}::matrix-sdk-crypto"); // delete the db in case it was used in a previous run - let _ = IdbDatabase::delete_by_name(&db_name); + let _ = Database::delete_by_name(&db_name); // Open, and close, the store at the regular version. IndexeddbCryptoStore::open_with_store_cipher(&db_prefix, None).await.unwrap(); // Now upgrade to the given version, keeping a record of the previous version so // that we can double-check it. 
- let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, version).unwrap(); - let old_version: Rc<Cell<Option<u32>>> = Rc::new(Cell::new(None)); let old_version2 = old_version.clone(); - db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| { - old_version2.set(Some(evt.old_version() as u32)); - Ok(()) - })); - let db = db_req.await.unwrap(); + let db = Database::open(&db_name) + .with_version(version) + .with_on_upgrade_needed(move |evt: VersionChangeEvent, _: &Transaction<'_>| { + old_version2.set(Some(evt.old_version() as u32)); + Ok(()) + }) + .build() + .unwrap() + .await + .unwrap(); + assert_eq!( old_version.get(), Some(EXPECTED_SCHEMA_VERSION), diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs index 75b1dc69865..4e423069b13 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs @@ -15,8 +15,11 @@ //! Schema-only migrations adding various stores and indices, notably //! the first version of `inbound_group_sessions`. -use indexed_db_futures::IdbDatabase; -use web_sys::DomException; +use indexed_db_futures::{ + database::Database, + error::{Error, OpenDbError}, + Build, +}; use crate::crypto_store::{ keys, @@ -25,11 +28,12 @@ use crate::crypto_store::{ }; /// Perform schema migrations as needed, up to schema version 5. -pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 5, |db, _, old_version| { +pub(crate) async fn schema_add(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 5, |tx, old_version| { + let db = tx.db(); // An old_version of 1 could either mean actually the first version of the // schema, or a completely empty schema that has been created with a - // call to `IdbDatabase::open` with no explicit "version". So, to determine + // call to `Database::open` with no explicit "version". 
So, to determine // if we need to create the V1 stores, we actually check if the schema is empty. if db.object_store_names().next().is_none() { schema_add_v1(db)?; @@ -56,57 +60,57 @@ pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> { .await } -fn schema_add_v1(db: &IdbDatabase) -> Result<(), DomException> { - db.create_object_store(keys::CORE)?; - db.create_object_store(keys::SESSION)?; +fn schema_add_v1(db: &Database) -> Result<(), Error> { + db.create_object_store(keys::CORE).build()?; + db.create_object_store(keys::SESSION).build()?; - db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1)?; - db.create_object_store(keys::OUTBOUND_GROUP_SESSIONS)?; - db.create_object_store(keys::TRACKED_USERS)?; - db.create_object_store(keys::OLM_HASHES)?; - db.create_object_store(keys::DEVICES)?; + db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1).build()?; + db.create_object_store(keys::OUTBOUND_GROUP_SESSIONS).build()?; + db.create_object_store(keys::TRACKED_USERS).build()?; + db.create_object_store(keys::OLM_HASHES).build()?; + db.create_object_store(keys::DEVICES).build()?; - db.create_object_store(keys::IDENTITIES)?; - db.create_object_store(keys::BACKUP_KEYS)?; + db.create_object_store(keys::IDENTITIES).build()?; + db.create_object_store(keys::BACKUP_KEYS).build()?; Ok(()) } -fn schema_add_v2(db: &IdbDatabase) -> Result<(), DomException> { +fn schema_add_v2(db: &Database) -> Result<(), Error> { // We changed how we store inbound group sessions, the key used to // be a tuple of `(room_id, sender_key, session_id)` now it's a // tuple of `(room_id, session_id)` // // Let's just drop the whole object store. 
db.delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1)?; - db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1)?; + db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1).build()?; - db.create_object_store(keys::ROOM_SETTINGS)?; + db.create_object_store(keys::ROOM_SETTINGS).build()?; Ok(()) } -fn schema_add_v3(db: &IdbDatabase) -> Result<(), DomException> { +fn schema_add_v3(db: &Database) -> Result<(), Error> { // We changed the way we store outbound session. // ShareInfo changed from a struct to an enum with struct variant. // Let's just discard the existing outbounds db.delete_object_store(keys::OUTBOUND_GROUP_SESSIONS)?; - db.create_object_store(keys::OUTBOUND_GROUP_SESSIONS)?; + db.create_object_store(keys::OUTBOUND_GROUP_SESSIONS).build()?; // Support for MSC2399 withheld codes - db.create_object_store(keys::DIRECT_WITHHELD_INFO)?; + db.create_object_store(keys::DIRECT_WITHHELD_INFO).build()?; Ok(()) } -fn schema_add_v4(db: &IdbDatabase) -> Result<(), DomException> { - db.create_object_store(keys::SECRETS_INBOX)?; +fn schema_add_v4(db: &Database) -> Result<(), Error> { + db.create_object_store(keys::SECRETS_INBOX).build()?; Ok(()) } -fn schema_add_v5(db: &IdbDatabase) -> Result<(), DomException> { +fn schema_add_v5(db: &Database) -> Result<(), Error> { // Create a new store for outgoing secret requests - let object_store = db.create_object_store(keys::GOSSIP_REQUESTS)?; + let object_store = db.create_object_store(keys::GOSSIP_REQUESTS).build()?; add_nonunique_index(&object_store, keys::GOSSIP_REQUESTS_UNSENT_INDEX, "unsent")?; diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs index 201f9d824b1..8ae0cd5b971 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs @@ -15,9 +15,10 @@ //! 
Migration code that moves from `backup_keys.backup_key_v1` to //! `backup_keys.backup_version_v1`, switching to a new serialization format. -use indexed_db_futures::IdbQuerySource; +use indexed_db_futures::{ + error::OpenDbError, query_source::QuerySource, transaction::TransactionMode, Build, +}; use wasm_bindgen::JsValue; -use web_sys::{DomException, IdbTransactionMode}; use crate::{ crypto_store::{ @@ -34,10 +35,10 @@ pub(crate) async fn data_migrate( serializer: &SafeEncodeSerializer, ) -> crate::crypto_store::Result<()> { let db = MigrationDb::new(name, 11).await?; - let txn = db.transaction_on_one_with_mode(keys::BACKUP_KEYS, IdbTransactionMode::Readwrite)?; + let txn = db.transaction(keys::BACKUP_KEYS).with_mode(TransactionMode::Readwrite).build()?; let store = txn.object_store(keys::BACKUP_KEYS)?; - let bv = store.get(&JsValue::from_str(old_keys::BACKUP_KEY_V1))?.await?; + let bv = store.get(&JsValue::from_str(old_keys::BACKUP_KEY_V1)).await?; let Some(bv) = bv else { return Ok(()); @@ -50,14 +51,15 @@ pub(crate) async fn data_migrate( // Re-serialize as new format, then store in the new field. let serialized = serializer.serialize_value(&bv)?; - store.put_key_val(&JsValue::from_str(keys::BACKUP_VERSION_V1), &serialized)?.await?; - store.delete(&JsValue::from_str(old_keys::BACKUP_KEY_V1))?.await?; + store.put(&serialized).with_key(JsValue::from_str(keys::BACKUP_VERSION_V1)).await?; + store.delete(&JsValue::from_str(old_keys::BACKUP_KEY_V1)).await?; + txn.commit().await?; Ok(()) } /// Perform the schema upgrade v10 to v11, just bumping the schema version. -pub(crate) async fn schema_bump(name: &str) -> crate::crypto_store::Result<(), DomException> { +pub(crate) async fn schema_bump(name: &str) -> crate::crypto_store::Result<(), OpenDbError> { // Just bump the version number to 11 to demonstrate that we have run the data // changes from data_migrate. 
- do_schema_upgrade(name, 11, |_, _, _| Ok(())).await + do_schema_upgrade(name, 11, |_, _| Ok(())).await } diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs index 4da1bcedccd..256947904ef 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs @@ -12,21 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -use indexed_db_futures::IdbKeyPath; -use web_sys::DomException; +use indexed_db_futures::{error::OpenDbError, Build}; use crate::crypto_store::{keys, migrations::do_schema_upgrade, Result}; /// Perform the schema upgrade v11 to v12, adding an index on /// `(curve_key, sender_data_type, session_id)` to `inbound_group_sessions3`. -pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 12, |_, transaction, _| { +pub(crate) async fn schema_add(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 12, |transaction, _| { let object_store = transaction.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; - object_store.create_index( - keys::INBOUND_GROUP_SESSIONS_SENDER_KEY_INDEX, - &IdbKeyPath::str_sequence(&["sender_key", "sender_data_type", "session_id"]), - )?; + object_store + .create_index( + keys::INBOUND_GROUP_SESSIONS_SENDER_KEY_INDEX, + ["sender_key", "sender_data_type", "session_id"].into(), + ) + .build()?; Ok(()) }) diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs index 638c7835b15..e17a3a4f66c 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and 
limitations under the License. */ -use web_sys::DomException; +use indexed_db_futures::{error::OpenDbError, Build}; use crate::crypto_store::{keys, migrations::do_schema_upgrade, Result}; /// Perform the schema upgrade v12 to v13, adding the /// `received_room_key_bundles` store. -pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 13, |db, _, _| { - db.create_object_store(keys::RECEIVED_ROOM_KEY_BUNDLES)?; +pub(crate) async fn schema_add(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 13, |tx, _| { + tx.db().create_object_store(keys::RECEIVED_ROOM_KEY_BUNDLES).build()?; Ok(()) }) .await diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v13_to_v14.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v13_to_v14.rs index f544cb36600..ac9084fcf05 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v13_to_v14.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v13_to_v14.rs @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -use web_sys::{DomException, IdbTransactionMode}; +use indexed_db_futures::{error::OpenDbError, transaction::TransactionMode, Build}; use super::MigrationDb; use crate::{ @@ -24,25 +24,25 @@ use crate::{ pub(crate) async fn data_migrate(name: &str, _: &SafeEncodeSerializer) -> Result<()> { let db = MigrationDb::new(name, 14).await?; - let transaction = db.transaction_on_one_with_mode( - keys::RECEIVED_ROOM_KEY_BUNDLES, - IdbTransactionMode::Readwrite, - )?; + let transaction = db + .transaction(keys::RECEIVED_ROOM_KEY_BUNDLES) + .with_mode(TransactionMode::Readwrite) + .build()?; let store = transaction.object_store(keys::RECEIVED_ROOM_KEY_BUNDLES)?; // The schema didn't actually change, we just changed the objects that are // stored. So let us remove them. 
store.clear()?; - transaction.await.into_result()?; + transaction.commit().await?; Ok(()) } /// Perform the schema upgrade v13 to v14, just bumping the schema version since /// the schema didn't actually change. -pub(crate) async fn schema_bump(name: &str) -> Result<(), DomException> { +pub(crate) async fn schema_bump(name: &str) -> Result<(), OpenDbError> { // Just bump the version number to 14 to demonstrate that we have run the data // changes from data_migrate. - do_schema_upgrade(name, 14, |_, _, _| Ok(())).await + do_schema_upgrade(name, 14, |_, _| Ok(())).await } diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs index bd36367b8d6..fd3e803f4c0 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs @@ -19,10 +19,12 @@ //! Then we move the data into the new store. //! The migration 6->7 deletes the old store inbound_group_sessions. -use indexed_db_futures::IdbQuerySource; +use indexed_db_futures::{ + error::OpenDbError, query_source::QuerySource, transaction::TransactionMode, Build, +}; use matrix_sdk_crypto::olm::InboundGroupSession; use tracing::{debug, info}; -use web_sys::{DomException, IdbTransactionMode}; +use wasm_bindgen::JsValue; use crate::{ crypto_store::{ @@ -35,9 +37,10 @@ use crate::{ }; /// Perform the schema upgrade v5 to v6, creating `inbound_group_sessions2`. 
-pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 6, |db, _, _| { - let object_store = db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?; +pub(crate) async fn schema_add(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 6, |tx, _| { + let db = tx.db(); + let object_store = db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V2).build()?; add_nonunique_index( &object_store, @@ -55,25 +58,25 @@ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) let db = MigrationDb::new(name, 7).await?; // The new store has been made for inbound group sessions; time to populate it. - let txn = db.transaction_on_multi_with_mode( - &[old_keys::INBOUND_GROUP_SESSIONS_V1, old_keys::INBOUND_GROUP_SESSIONS_V2], - IdbTransactionMode::Readwrite, - )?; + let txn = db + .transaction([old_keys::INBOUND_GROUP_SESSIONS_V1, old_keys::INBOUND_GROUP_SESSIONS_V2]) + .with_mode(TransactionMode::Readwrite) + .build()?; let old_store = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V1)?; let new_store = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?; - let row_count = old_store.count()?.await?; + let row_count = old_store.count().await?; info!(row_count, "Migrating inbound group session data from v1 to v2"); - if let Some(cursor) = old_store.open_cursor()?.await? { + if let Some(mut cursor) = old_store.open_cursor().await? { let mut idx = 0; - loop { + while let Some(value) = cursor.next_record::().await? 
{ idx += 1; - let key = cursor.key().ok_or(matrix_sdk_crypto::CryptoStoreError::Backend( - "inbound_group_sessions v1 cursor has no key".into(), - ))?; - let value = cursor.value(); + let key = + cursor.key::()?.ok_or(matrix_sdk_crypto::CryptoStoreError::Backend( + "inbound_group_sessions v1 cursor has no key".into(), + ))?; if idx % 100 == 0 { debug!("Migrating session {idx} of {row_count}"); @@ -88,14 +91,10 @@ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) needs_backup: !igs.backed_up(), })?; - new_store.add_key_val(&key, &new_data)?; + new_store.add(&new_data).with_key(key).build()?; // We are done with the original data, so delete it now. cursor.delete()?; - - if !cursor.continue_cursor()?.await? { - break; - } } } @@ -104,13 +103,13 @@ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) // for more details. old_store.clear()?.await?; - Ok(txn.await.into_result()?) + Ok(txn.commit().await?) } /// Perform the schema upgrade v6 to v7, deleting `inbound_group_sessions`. -pub(crate) async fn schema_delete(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 7, |db, _, _| { - db.delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1)?; +pub(crate) async fn schema_delete(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 7, |tx, _| { + tx.db().delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1)?; Ok(()) }) .await diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs index 18675126767..e07f68ce876 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs @@ -15,10 +15,12 @@ //! Migration code that modifies the data inside inbound_group_sessions2, //! ensuring that the keys are correctly encoded for this new store name. 
-use indexed_db_futures::IdbQuerySource; +use indexed_db_futures::{ + error::OpenDbError, query_source::QuerySource, transaction::TransactionMode, Build, +}; use matrix_sdk_crypto::olm::InboundGroupSession; use tracing::{debug, info}; -use web_sys::{DomException, IdbTransactionMode}; +use wasm_bindgen::JsValue; use crate::{ crypto_store::{ @@ -36,32 +38,33 @@ use crate::{ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) -> Result<()> { let db = MigrationDb::new(name, 8).await?; - let txn = db.transaction_on_one_with_mode( - old_keys::INBOUND_GROUP_SESSIONS_V2, - IdbTransactionMode::Readwrite, - )?; + let txn = db + .transaction(old_keys::INBOUND_GROUP_SESSIONS_V2) + .with_mode(TransactionMode::Readwrite) + .build()?; let store = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?; - let row_count = store.count()?.await?; + let row_count = store.count().await?; info!(row_count, "Fixing inbound group session data keys"); // Iterate through all rows - if let Some(cursor) = store.open_cursor()?.await? { + if let Some(mut cursor) = store.open_cursor().await? { let mut idx = 0; let mut updated = 0; let mut deleted = 0; - loop { + while let Some(value) = cursor.next_record::().await? 
{ idx += 1; // Get the old key and session - let old_key = cursor.key().ok_or(matrix_sdk_crypto::CryptoStoreError::Backend( - "inbound_group_sessions2 cursor has no key".into(), - ))?; + let old_key = + cursor.key::()?.ok_or(matrix_sdk_crypto::CryptoStoreError::Backend( + "inbound_group_sessions2 cursor has no key".into(), + ))?; let idb_object: v7::InboundGroupSessionIndexedDbObject2 = - serde_wasm_bindgen::from_value(cursor.value())?; + serde_wasm_bindgen::from_value(value)?; let pickled_session = serializer.deserialize_value_from_bytes(&idb_object.pickled_session)?; let session = InboundGroupSession::from_pickle(pickled_session) @@ -86,37 +89,37 @@ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) cursor.delete()?; // Check for an existing entry with the new key - let new_value = store.get(&new_key)?.await?; + let new_value = store.get::(&new_key).await?; // If we found an existing entry, it is more up-to-date, so we don't need to do // anything more. // If we didn't find an existing entry, we must create one with the correct key if new_value.is_none() { - store.add_key_val(&new_key, &serde_wasm_bindgen::to_value(&idb_object)?)?; + store + .add(&serde_wasm_bindgen::to_value(&idb_object)?) + .with_key(new_key) + .build()?; updated += 1; } else { deleted += 1; } } + } - if !cursor.continue_cursor()?.await? { - debug!( - "Migrated {row_count} sessions: {updated} keys updated \ + debug!( + "Migrated {row_count} sessions: {updated} keys updated \ and {deleted} obsolete entries deleted." - ); - break; - } - } + ); } - txn.await.into_result()?; + txn.commit().await?; Ok(()) } /// Perform the schema upgrade v7 to v8, Just bumping the schema version. 
-pub(crate) async fn schema_bump(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 8, |_, _, _| { +pub(crate) async fn schema_bump(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 8, |_, _| { // Just bump the version number to 8 to demonstrate that we have run the data // changes from prepare_data_for_v8. Ok(()) diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs index cfb86aa5860..a7ff3ff0f69 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs @@ -15,10 +15,12 @@ //! Migration code that moves from inbound_group_sessions2 to //! inbound_group_sessions3, shrinking the values stored in each record. -use indexed_db_futures::IdbQuerySource; +use indexed_db_futures::{ + error::OpenDbError, query_source::QuerySource, transaction::TransactionMode, Build, +}; use matrix_sdk_crypto::olm::InboundGroupSession; use tracing::{debug, info}; -use web_sys::{DomException, IdbTransactionMode}; +use wasm_bindgen::JsValue; use crate::{ crypto_store::{ @@ -34,9 +36,10 @@ use crate::{ }; /// Perform the schema upgrade v8 to v9, creating `inbound_group_sessions3`. 
-pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 9, |db, _, _| { - let object_store = db.create_object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; +pub(crate) async fn schema_add(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 9, |tx, _| { + let db = tx.db(); + let object_store = db.create_object_store(keys::INBOUND_GROUP_SESSIONS_V3).build()?; add_nonunique_index( &object_store, @@ -62,21 +65,21 @@ pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> { pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) -> Result<()> { let db = MigrationDb::new(name, 10).await?; - let txn = db.transaction_on_multi_with_mode( - &[old_keys::INBOUND_GROUP_SESSIONS_V2, keys::INBOUND_GROUP_SESSIONS_V3], - IdbTransactionMode::Readwrite, - )?; + let txn = db + .transaction([old_keys::INBOUND_GROUP_SESSIONS_V2, keys::INBOUND_GROUP_SESSIONS_V3]) + .with_mode(TransactionMode::Readwrite) + .build()?; let inbound_group_sessions2 = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?; let inbound_group_sessions3 = txn.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; - let row_count = inbound_group_sessions2.count()?.await?; + let row_count = inbound_group_sessions2.count().await?; info!(row_count, "Shrinking inbound_group_session records"); // Iterate through all rows - if let Some(cursor) = inbound_group_sessions2.open_cursor()?.await? { + if let Some(mut cursor) = inbound_group_sessions2.open_cursor().await? { let mut idx = 0; - loop { + while let Some(value) = cursor.next_record::().await? 
{ idx += 1; if idx % 100 == 0 { @@ -85,7 +88,7 @@ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) // Deserialize the session from the old store let old_value: InboundGroupSessionIndexedDbObject2 = - serde_wasm_bindgen::from_value(cursor.value())?; + serde_wasm_bindgen::from_value(value)?; let session = InboundGroupSession::from_pickle( serializer.deserialize_value_from_bytes(&old_value.pickled_session)?, @@ -104,17 +107,16 @@ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) // Write it to the new store inbound_group_sessions3 - .add_key_val(&new_key, &serde_wasm_bindgen::to_value(&new_value)?)?; + .add(&serde_wasm_bindgen::to_value(&new_value)?) + .with_key(new_key) + .build()?; // We are done with the original data, so delete it now. cursor.delete()?; - - // Continue to the next record, or stop if we're done - if !cursor.continue_cursor()?.await? { - debug!("Migrated {idx} sessions."); - break; - } } + + // The cursor is exhausted; log how many sessions were migrated. + debug!("Migrated {idx} sessions."); } + + // Continue to the next record, or stop if we're done + debug!("Migrated {idx} sessions.") // We have finished with the old store. Clear it, since it is faster to @@ -122,14 +124,14 @@ pub(crate) async fn data_migrate(name: &str, serializer: &SafeEncodeSerializer) // for more details. inbound_group_sessions2.clear()?.await?; - txn.await.into_result()?; + txn.commit().await?; Ok(()) } /// Perform the schema upgrade v8 to v10, deleting `inbound_group_sessions2`. 
-pub(crate) async fn schema_delete(name: &str) -> Result<(), DomException> { - do_schema_upgrade(name, 10, |db, _, _| { - db.delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?; +pub(crate) async fn schema_delete(name: &str) -> Result<(), OpenDbError> { + do_schema_upgrade(name, 10, |tx, _| { + tx.db().delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?; Ok(()) }) .await diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs index b79a7eba1a1..1cad26b6ca4 100644 --- a/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs @@ -20,7 +20,15 @@ use std::{ use async_trait::async_trait; use gloo_utils::format::JsValueSerdeExt; use hkdf::Hkdf; -use indexed_db_futures::prelude::*; +use indexed_db_futures::{ + cursor::Cursor, + database::Database, + internals::SystemRepr, + object_store::ObjectStore, + prelude::*, + transaction::{Transaction, TransactionMode}, + KeyRange, +}; use js_sys::Array; use matrix_sdk_crypto::{ olm::{ @@ -48,10 +56,10 @@ use sha2::Sha256; use tokio::sync::Mutex; use tracing::{debug, warn}; use wasm_bindgen::JsValue; -use web_sys::IdbKeyRange; use crate::{ crypto_store::migrations::open_and_upgrade_db, + error::GenericError, serializer::{MaybeEncrypted, SafeEncodeSerializer, SafeEncodeSerializerError}, }; @@ -120,7 +128,7 @@ mod keys { pub struct IndexeddbCryptoStore { static_account: RwLock<Option<StaticAccountData>>, name: String, - pub(crate) inner: IdbDatabase, + pub(crate) inner: Database, serializer: SafeEncodeSerializer, save_changes_lock: Arc<Mutex<()>>, @@ -195,6 +203,58 @@ impl From<IndexeddbCryptoStoreError> for CryptoStoreError { } } +impl From<indexed_db_futures::error::DomException> for IndexeddbCryptoStoreError { + fn from(value: indexed_db_futures::error::DomException) -> Self { + web_sys::DomException::from(value).into() + } +} + +impl From<indexed_db_futures::error::SerialisationError> for IndexeddbCryptoStoreError { + fn from(value: indexed_db_futures::error::SerialisationError) -> Self { + 
Self::Serialization(serde::de::Error::custom(value.to_string())) + } +} + +impl From<indexed_db_futures::error::UnexpectedDataError> for IndexeddbCryptoStoreError { + fn from(value: indexed_db_futures::error::UnexpectedDataError) -> Self { + Self::CryptoStoreError(CryptoStoreError::backend(value)) + } +} + +impl From<GenericError> for IndexeddbCryptoStoreError { + fn from(value: GenericError) -> Self { + Self::CryptoStoreError(value.into()) + } +} + +impl From<indexed_db_futures::error::JSError> for IndexeddbCryptoStoreError { + fn from(value: indexed_db_futures::error::JSError) -> Self { + GenericError::from(value.to_string()).into() + } +} + +impl From<indexed_db_futures::error::Error> for IndexeddbCryptoStoreError { + fn from(value: indexed_db_futures::error::Error) -> Self { + use indexed_db_futures::error::Error; + match value { + Error::DomException(e) => e.into(), + Error::Serialisation(e) => e.into(), + Error::MissingData(e) => e.into(), + Error::Unknown(e) => e.into(), + } + } +} + +impl From<indexed_db_futures::error::OpenDbError> for IndexeddbCryptoStoreError { + fn from(value: indexed_db_futures::error::OpenDbError) -> Self { + use indexed_db_futures::error::OpenDbError; + match value { + OpenDbError::Base(error) => error.into(), + _ => GenericError::from(value.to_string()).into(), + } + } +} + type Result<T, E = IndexeddbCryptoStoreError> = std::result::Result<T, E>; /// Defines an operation to perform on the database. @@ -259,7 +319,7 @@ impl PendingIndexeddbChanges { } /// Applies all the pending operations to the store. - fn apply(self, tx: &IdbTransaction<'_>) -> Result<()> { + fn apply(self, tx: &Transaction<'_>) -> Result<()> { for (store, operations) in self.store_to_key_values { if operations.is_empty() { continue; @@ -268,10 +328,10 @@ impl PendingIndexeddbChanges { for op in operations { match op { PendingOperation::Put { key, value } => { - object_store.put_key_val(&key, &value)?; + object_store.put(&value).with_key(key).build()?; } PendingOperation::Delete(key) => { - object_store.delete(&key)?; + object_store.delete(&key).build()?; } } } @@ -410,8 +470,8 @@ impl IndexeddbCryptoStore { /// Delete the IndexedDB databases for the given name. 
#[cfg(test)] pub fn delete_stores(prefix: &str) -> Result<()> { - IdbDatabase::delete_by_name(&format!("{prefix:0}::matrix-sdk-crypto-meta"))?; - IdbDatabase::delete_by_name(&format!("{prefix:0}::matrix-sdk-crypto"))?; + Database::delete_by_name(&format!("{prefix:0}::matrix-sdk-crypto-meta"))?; + Database::delete_by_name(&format!("{prefix:0}::matrix-sdk-crypto"))?; Ok(()) } @@ -719,20 +779,17 @@ impl_crypto_store! { // TODO: #2000 should make this lock go away, or change its shape. let _guard = self.save_changes_lock.lock().await; - let stores: Vec<&str> = [ - (changes.account.is_some() , keys::CORE), - ] - .iter() - .filter_map(|(id, key)| if *id { Some(*key) } else { None }) - .collect(); + let stores: Vec<&str> = [(changes.account.is_some(), keys::CORE)] + .iter() + .filter_map(|(id, key)| if *id { Some(*key) } else { None }) + .collect(); if stores.is_empty() { // nothing to do, quit early return Ok(()); } - let tx = - self.inner.transaction_on_multi_with_mode(&stores, IdbTransactionMode::Readwrite)?; + let tx = self.inner.transaction(stores).with_mode(TransactionMode::Readwrite).build()?; let account_pickle = if let Some(account) = changes.account { *self.static_account.write().unwrap() = Some(account.static_data().clone()); @@ -743,10 +800,12 @@ impl_crypto_store! { if let Some(a) = &account_pickle { tx.object_store(keys::CORE)? - .put_key_val(&JsValue::from_str(keys::ACCOUNT), &self.serializer.serialize_value(&a)?)?; + .put(&self.serializer.serialize_value(&a)?) + .with_key(JsValue::from_str(keys::ACCOUNT)) + .build()?; } - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -767,12 +826,11 @@ impl_crypto_store! { return Ok(()); } - let tx = - self.inner.transaction_on_multi_with_mode(&stores, IdbTransactionMode::Readwrite)?; + let tx = self.inner.transaction(stores).with_mode(TransactionMode::Readwrite).build()?; indexeddb_changes.apply(&tx)?; - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -787,7 +845,8 @@ impl_crypto_store! 
{ let backed_up = s.backed_up(); if backed_up != backed_up_to_version.is_some() { warn!( - backed_up, backed_up_to_version, + backed_up, + backed_up_to_version, "Session backed-up flag does not correspond to backup version setting", ); } @@ -801,15 +860,20 @@ impl_crypto_store! { async fn load_tracked_users(&self) -> Result> { let tx = self .inner - .transaction_on_one_with_mode(keys::TRACKED_USERS, IdbTransactionMode::Readonly)?; + .transaction(keys::TRACKED_USERS) + .with_mode(TransactionMode::Readonly) + .build()?; let os = tx.object_store(keys::TRACKED_USERS)?; - let user_ids = os.get_all_keys()?.await?; + let user_ids = os.get_all_keys::().await?; let mut users = Vec::new(); - for user_id in user_ids.iter() { - let dirty: bool = - !matches!(os.get(&user_id)?.await?.map(|v| v.into_serde()), Some(Ok(false))); + for result in user_ids { + let user_id = result?; + let dirty: bool = !matches!( + os.get(&user_id).await?.map(|v: JsValue| v.into_serde()), + Some(Ok(false)) + ); let Some(Ok(user_id)) = user_id.as_string().map(UserId::parse) else { continue }; users.push(TrackedUser { user_id, dirty }); @@ -825,12 +889,11 @@ impl_crypto_store! { let account_info = self.get_static_account().ok_or(CryptoStoreError::AccountUnset)?; if let Some(value) = self .inner - .transaction_on_one_with_mode( - keys::OUTBOUND_GROUP_SESSIONS, - IdbTransactionMode::Readonly, - )? + .transaction(keys::OUTBOUND_GROUP_SESSIONS) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::OUTBOUND_GROUP_SESSIONS)? - .get(&self.serializer.encode_key(keys::OUTBOUND_GROUP_SESSIONS, room_id))? + .get(&self.serializer.encode_key(keys::OUTBOUND_GROUP_SESSIONS, room_id)) .await? { Ok(Some( @@ -851,11 +914,12 @@ impl_crypto_store! { request_id: &TransactionId, ) -> Result> { let jskey = self.serializer.encode_key(keys::GOSSIP_REQUESTS, request_id.as_str()); - self - .inner - .transaction_on_one_with_mode(keys::GOSSIP_REQUESTS, IdbTransactionMode::Readonly)? 
+ self.inner + .transaction(keys::GOSSIP_REQUESTS) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::GOSSIP_REQUESTS)? - .get_owned(jskey)? + .get(jskey) .await? .map(|val| self.deserialize_gossip_request(val)) .transpose() @@ -864,9 +928,11 @@ impl_crypto_store! { async fn load_account(&self) -> Result> { if let Some(pickle) = self .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readonly)? + .transaction(keys::CORE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::CORE)? - .get(&JsValue::from_str(keys::ACCOUNT))? + .get(&JsValue::from_str(keys::ACCOUNT)) .await? { let pickle = self.serializer.deserialize_value(pickle)?; @@ -884,9 +950,11 @@ impl_crypto_store! { async fn next_batch_token(&self) -> Result> { if let Some(serialized) = self .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readonly)? + .transaction(keys::CORE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::CORE)? - .get(&JsValue::from_str(keys::NEXT_BATCH_TOKEN))? + .get(&JsValue::from_str(keys::NEXT_BATCH_TOKEN)) .await? { let token = self.serializer.deserialize_value(serialized)?; @@ -899,9 +967,11 @@ impl_crypto_store! { async fn load_identity(&self) -> Result> { if let Some(pickle) = self .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readonly)? + .transaction(keys::CORE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::CORE)? - .get(&JsValue::from_str(keys::PRIVATE_IDENTITY))? + .get(&JsValue::from_str(keys::PRIVATE_IDENTITY)) .await? { let pickle = self.serializer.deserialize_value(pickle)?; @@ -916,27 +986,27 @@ impl_crypto_store! { } async fn get_sessions(&self, sender_key: &str) -> Result>> { - let device_keys = self.get_own_device() - .await? 
- .as_device_keys() - .clone(); + let device_keys = self.get_own_device().await?.as_device_keys().clone(); - let range = self.serializer.encode_to_range(keys::SESSION, sender_key)?; + let range = self.serializer.encode_to_range(keys::SESSION, sender_key); let sessions: Vec = self - .inner - .transaction_on_one_with_mode(keys::SESSION, IdbTransactionMode::Readonly)? - .object_store(keys::SESSION)? - .get_all_with_key(&range)? - .await? - .iter() - .filter_map(|f| self.serializer.deserialize_value(f).ok().map(|p| { - Session::from_pickle( - device_keys.clone(), - p, - ) - .map_err(|_| IndexeddbCryptoStoreError::CryptoStoreError(CryptoStoreError::AccountUnset)) - })) - .collect::>>()?; + .inner + .transaction(keys::SESSION) + .with_mode(TransactionMode::Readonly) + .build()? + .object_store(keys::SESSION)? + .get_all() + .with_query(&range) + .await? + .filter_map(Result::ok) + .filter_map(|f| { + self.serializer.deserialize_value(f).ok().map(|p| { + Session::from_pickle(device_keys.clone(), p).map_err(|_| { + IndexeddbCryptoStoreError::CryptoStoreError(CryptoStoreError::AccountUnset) + }) + }) + }) + .collect::>>()?; if sessions.is_empty() { Ok(None) @@ -950,15 +1020,15 @@ impl_crypto_store! { room_id: &RoomId, session_id: &str, ) -> Result> { - let key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (room_id, session_id)); + let key = + self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (room_id, session_id)); if let Some(value) = self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readonly, - )? + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::INBOUND_GROUP_SESSIONS_V3)? - .get(&key)? + .get(&key) .await? { Ok(Some(self.deserialize_inbound_group_session(value)?)) @@ -972,35 +1042,35 @@ impl_crypto_store! 
{ let transaction = self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readonly, - )?; + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readonly) + .build()?; let object_store = transaction.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; fetch_from_object_store_batched( object_store, |value| self.deserialize_inbound_group_session(value), - INBOUND_GROUP_SESSIONS_BATCH_SIZE - ).await + INBOUND_GROUP_SESSIONS_BATCH_SIZE, + ) + .await } async fn get_inbound_group_sessions_by_room_id( &self, room_id: &RoomId, ) -> Result> { - let range = self.serializer.encode_to_range(keys::INBOUND_GROUP_SESSIONS_V3, room_id)?; + let range = self.serializer.encode_to_range(keys::INBOUND_GROUP_SESSIONS_V3, room_id); Ok(self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readonly, - )? + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::INBOUND_GROUP_SESSIONS_V3)? - .get_all_with_key(&range)? + .get_all() + .with_query(&range) .await? - .into_iter() + .filter_map(Result::ok) .filter_map(|v| match self.deserialize_inbound_group_session(v) { Ok(session) => Some(session), Err(e) => { @@ -1018,32 +1088,38 @@ impl_crypto_store! { after_session_id: Option, limit: usize, ) -> Result> { - let sender_key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, sender_key.to_base64()); + let sender_key = + self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, sender_key.to_base64()); // The empty string is before all keys in Indexed DB - first batch starts there. 
- let after_session_id = after_session_id.map(|s| self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, s)).unwrap_or("".into()); + let after_session_id = after_session_id + .map(|s| self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, s)) + .unwrap_or("".into()); - let lower_bound: Array = [sender_key.clone(), (sender_data_type as u8).into(), after_session_id].iter().collect(); - let upper_bound: Array = [sender_key, ((sender_data_type as u8) + 1).into()].iter().collect(); - let key = IdbKeyRange::bound_with_lower_open_and_upper_open( - &lower_bound, - &upper_bound, - true, true - ).expect("Key was not valid!"); + let lower_bound: Array = + [sender_key.clone(), (sender_data_type as u8).into(), after_session_id] + .iter() + .collect(); + let upper_bound: Array = + [sender_key, ((sender_data_type as u8) + 1).into()].iter().collect(); + let key = KeyRange::Bound( + lower_bound, true, + upper_bound, true); let tx = self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readonly, - )?; + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readonly) + .build()?; let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; let idx = store.index(keys::INBOUND_GROUP_SESSIONS_SENDER_KEY_INDEX)?; - let serialized_sessions = idx.get_all_with_key_and_limit_owned(key, limit as u32)?.await?; + let serialized_sessions = + idx.get_all().with_query::(key).with_limit(limit as u32).await?; // Deserialize and decrypt after the transaction is complete. - let result = serialized_sessions.into_iter() + let result = serialized_sessions + .filter_map(Result::ok) .filter_map(|v| match self.deserialize_inbound_group_session(v) { Ok(session) => Some(session), Err(e) => { @@ -1056,17 +1132,20 @@ impl_crypto_store! 
{ Ok(result) } - async fn inbound_group_session_counts(&self, _backup_version: Option<&str>) -> Result { + async fn inbound_group_session_counts( + &self, + _backup_version: Option<&str>, + ) -> Result { let tx = self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readonly, - )?; + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readonly) + .build()?; let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; - let all = store.count()?.await? as usize; - let not_backed_up = store.index(keys::INBOUND_GROUP_SESSIONS_BACKUP_INDEX)?.count()?.await? as usize; - tx.await.into_result()?; + let all = store.count().await? as usize; + let not_backed_up = + store.index(keys::INBOUND_GROUP_SESSIONS_BACKUP_INDEX)?.count().await? as usize; + tx.commit().await?; Ok(RoomKeyCounts { total: all, backed_up: all - not_backed_up }) } @@ -1077,11 +1156,9 @@ impl_crypto_store! { ) -> Result> { let tx = self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readonly, - )?; - + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readonly) + .build()?; let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; let idx = store.index(keys::INBOUND_GROUP_SESSIONS_BACKUP_INDEX)?; @@ -1089,22 +1166,23 @@ impl_crypto_store! { // XXX ideally we would use `get_all_with_key_and_limit`, but that doesn't appear to be // exposed (https://github.com/Alorel/rust-indexed-db/issues/31). Instead we replicate // the behaviour with a cursor. - let Some(cursor) = idx.open_cursor()?.await? else { + let Some(mut cursor) = idx.open_cursor().await? else { return Ok(vec![]); }; let mut serialized_sessions = Vec::with_capacity(limit); for _ in 0..limit { - serialized_sessions.push(cursor.value()); - if !cursor.continue_cursor()?.await? { + let Some(value) = cursor.next_record().await? 
else { break; - } + }; + serialized_sessions.push(value) } - tx.await.into_result()?; + tx.commit().await?; // Deserialize and decrypt after the transaction is complete. - let result = serialized_sessions.into_iter() + let result = serialized_sessions + .into_iter() .filter_map(|v| match self.deserialize_inbound_group_session(v) { Ok(session) => Some(session), Err(e) => { @@ -1117,73 +1195,78 @@ impl_crypto_store! { Ok(result) } - async fn mark_inbound_group_sessions_as_backed_up(&self, + async fn mark_inbound_group_sessions_as_backed_up( + &self, _backup_version: &str, - room_and_session_ids: &[(&RoomId, &str)] + room_and_session_ids: &[(&RoomId, &str)], ) -> Result<()> { let tx = self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readwrite, - )?; + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readwrite) + .build()?; let object_store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?; for (room_id, session_id) in room_and_session_ids { - let key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (room_id, session_id)); - if let Some(idb_object_js) = object_store.get(&key)?.await? { - let mut idb_object: InboundGroupSessionIndexedDbObject = serde_wasm_bindgen::from_value(idb_object_js)?; + let key = + self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (room_id, session_id)); + if let Some(idb_object_js) = object_store.get(&key).await? { + let mut idb_object: InboundGroupSessionIndexedDbObject = + serde_wasm_bindgen::from_value(idb_object_js)?; idb_object.needs_backup = false; - object_store.put_key_val(&key, &serde_wasm_bindgen::to_value(&idb_object)?)?; + object_store + .put(&serde_wasm_bindgen::to_value(&idb_object)?) + .with_key(key) + .build()?; } else { warn!(?key, "Could not find inbound group session to mark it as backed up."); } } - Ok(tx.await.into_result()?) + Ok(tx.commit().await?) 
} async fn reset_backup_state(&self) -> Result<()> { let tx = self .inner - .transaction_on_one_with_mode( - keys::INBOUND_GROUP_SESSIONS_V3, - IdbTransactionMode::Readwrite, - )?; - - if let Some(cursor) = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?.open_cursor()?.await? { - loop { - let mut idb_object: InboundGroupSessionIndexedDbObject = serde_wasm_bindgen::from_value(cursor.value())?; + .transaction(keys::INBOUND_GROUP_SESSIONS_V3) + .with_mode(TransactionMode::Readwrite) + .build()?; + + if let Some(mut cursor) = + tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?.open_cursor().await? + { + while let Some(value) = cursor.next_record().await? { + let mut idb_object: InboundGroupSessionIndexedDbObject = + serde_wasm_bindgen::from_value(value)?; if !idb_object.needs_backup { idb_object.needs_backup = true; // We don't bother to update the encrypted `InboundGroupSession` object stored // inside `idb_object.data`, since that would require decryption and encryption. // Instead, it will be patched up by `deserialize_inbound_group_session`. let idb_object = serde_wasm_bindgen::to_value(&idb_object)?; - cursor.update(&idb_object)?.await?; - } - - if !cursor.continue_cursor()?.await? { - break; + cursor.update(&idb_object).await?; } } } - Ok(tx.await.into_result()?) + Ok(tx.commit().await?) } async fn save_tracked_users(&self, users: &[(&UserId, bool)]) -> Result<()> { let tx = self .inner - .transaction_on_one_with_mode(keys::TRACKED_USERS, IdbTransactionMode::Readwrite)?; + .transaction(keys::TRACKED_USERS) + .with_mode(TransactionMode::Readwrite) + .build()?; let os = tx.object_store(keys::TRACKED_USERS)?; for (user, dirty) in users { - os.put_key_val(&JsValue::from_str(user.as_str()), &JsValue::from(*dirty))?; + os.put(&JsValue::from(*dirty)).with_key(JsValue::from_str(user.as_str())).build()?; } - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -1193,11 +1276,12 @@ impl_crypto_store! 
{ device_id: &DeviceId, ) -> Result> { let key = self.serializer.encode_key(keys::DEVICES, (user_id, device_id)); - self - .inner - .transaction_on_one_with_mode(keys::DEVICES, IdbTransactionMode::Readonly)? + self.inner + .transaction(keys::DEVICES) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::DEVICES)? - .get(&key)? + .get(&key) .await? .map(|i| self.serializer.deserialize_value(i).map_err(Into::into)) .transpose() @@ -1207,14 +1291,17 @@ impl_crypto_store! { &self, user_id: &UserId, ) -> Result> { - let range = self.serializer.encode_to_range(keys::DEVICES, user_id)?; + let range = self.serializer.encode_to_range(keys::DEVICES, user_id); Ok(self .inner - .transaction_on_one_with_mode(keys::DEVICES, IdbTransactionMode::Readonly)? + .transaction(keys::DEVICES) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::DEVICES)? - .get_all_with_key(&range)? + .get_all() + .with_query(&range) .await? - .iter() + .filter_map(Result::ok) .filter_map(|d| { let d: DeviceData = self.serializer.deserialize_value(d).ok()?; Some((d.device_id().to_owned(), d)) @@ -1224,17 +1311,16 @@ impl_crypto_store! { async fn get_own_device(&self) -> Result { let account_info = self.get_static_account().ok_or(CryptoStoreError::AccountUnset)?; - Ok(self.get_device(&account_info.user_id, &account_info.device_id) - .await? - .unwrap()) + Ok(self.get_device(&account_info.user_id, &account_info.device_id).await?.unwrap()) } async fn get_user_identity(&self, user_id: &UserId) -> Result> { - self - .inner - .transaction_on_one_with_mode(keys::IDENTITIES, IdbTransactionMode::Readonly)? + self.inner + .transaction(keys::IDENTITIES) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::IDENTITIES)? - .get(&self.serializer.encode_key(keys::IDENTITIES, user_id))? + .get(&self.serializer.encode_key(keys::IDENTITIES, user_id)) .await? 
.map(|i| self.serializer.deserialize_value(i).map_err(Into::into)) .transpose() @@ -1243,9 +1329,11 @@ impl_crypto_store! { async fn is_message_known(&self, hash: &OlmMessageHash) -> Result { Ok(self .inner - .transaction_on_one_with_mode(keys::OLM_HASHES, IdbTransactionMode::Readonly)? + .transaction(keys::OLM_HASHES) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::OLM_HASHES)? - .get(&self.serializer.encode_key(keys::OLM_HASHES, (&hash.sender_key, &hash.hash)))? + .get::(&self.serializer.encode_key(keys::OLM_HASHES, (&hash.sender_key, &hash.hash))) .await? .is_some()) } @@ -1253,34 +1341,38 @@ impl_crypto_store! { async fn get_secrets_from_inbox( &self, secret_name: &SecretName, - ) -> Result> { - let range = self.serializer.encode_to_range(keys::SECRETS_INBOX, secret_name.as_str())?; + ) -> Result> { + let range = self.serializer.encode_to_range(keys::SECRETS_INBOX, secret_name.as_str()); - self - .inner - .transaction_on_one_with_mode(keys::SECRETS_INBOX, IdbTransactionMode::Readonly)? + self.inner + .transaction(keys::SECRETS_INBOX) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::SECRETS_INBOX)? - .get_all_with_key(&range)? + .get_all() + .with_query(&range) .await? - .iter() - .map(|d| { + .map(|result| { + let d = result?; let secret = self.serializer.deserialize_value(d)?; Ok(secret) - }).collect() + }) + .collect() } #[allow(clippy::unused_async)] // Mandated by trait on wasm. - async fn delete_secrets_from_inbox( - &self, - secret_name: &SecretName, - ) -> Result<()> { - let range = self.serializer.encode_to_range(keys::SECRETS_INBOX, secret_name.as_str())?; - - self - .inner - .transaction_on_one_with_mode(keys::SECRETS_INBOX, IdbTransactionMode::Readwrite)? 
+ async fn delete_secrets_from_inbox(&self, secret_name: &SecretName) -> Result<()> { + let range = self.serializer.encode_to_range(keys::SECRETS_INBOX, secret_name.as_str()); + + let transaction = self.inner + .transaction(keys::SECRETS_INBOX) + .with_mode(TransactionMode::Readwrite) + .build()?; + transaction .object_store(keys::SECRETS_INBOX)? - .delete(&range)?; + .delete(&range) + .build()?; + transaction.commit().await?; Ok(()) } @@ -1293,13 +1385,10 @@ impl_crypto_store! { let val = self .inner - .transaction_on_one_with_mode( - keys::GOSSIP_REQUESTS, - IdbTransactionMode::Readonly, - )? + .transaction(keys::GOSSIP_REQUESTS).with_mode( TransactionMode::Readonly).build()? .object_store(keys::GOSSIP_REQUESTS)? .index(keys::GOSSIP_REQUESTS_BY_INFO_INDEX)? - .get_owned(key)? + .get(key) .await?; if let Some(val) = val { @@ -1313,15 +1402,14 @@ impl_crypto_store! { async fn get_unsent_secret_requests(&self) -> Result> { let results = self .inner - .transaction_on_one_with_mode( - keys::GOSSIP_REQUESTS, - IdbTransactionMode::Readonly, - )? + .transaction(keys::GOSSIP_REQUESTS) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::GOSSIP_REQUESTS)? .index(keys::GOSSIP_REQUESTS_UNSENT_INDEX)? - .get_all()? + .get_all() .await? - .iter() + .filter_map(Result::ok) .filter_map(|val| self.deserialize_gossip_request(val).ok()) .collect(); @@ -1330,26 +1418,32 @@ impl_crypto_store! 
{ async fn delete_outgoing_secret_requests(&self, request_id: &TransactionId) -> Result<()> { let jskey = self.serializer.encode_key(keys::GOSSIP_REQUESTS, request_id); - let tx = self.inner.transaction_on_one_with_mode(keys::GOSSIP_REQUESTS, IdbTransactionMode::Readwrite)?; - tx.object_store(keys::GOSSIP_REQUESTS)?.delete_owned(jskey)?; - tx.await.into_result().map_err(|e| e.into()) + let tx = self + .inner + .transaction(keys::GOSSIP_REQUESTS) + .with_mode(TransactionMode::Readwrite) + .build()?; + tx.object_store(keys::GOSSIP_REQUESTS)?.delete(jskey).build()?; + tx.commit().await.map_err(|e| e.into()) } async fn load_backup_keys(&self) -> Result { let key = { let tx = self .inner - .transaction_on_one_with_mode(keys::BACKUP_KEYS, IdbTransactionMode::Readonly)?; + .transaction(keys::BACKUP_KEYS) + .with_mode(TransactionMode::Readonly) + .build()?; let store = tx.object_store(keys::BACKUP_KEYS)?; let backup_version = store - .get(&JsValue::from_str(keys::BACKUP_VERSION_V1))? + .get(&JsValue::from_str(keys::BACKUP_VERSION_V1)) .await? .map(|i| self.serializer.deserialize_value(i)) .transpose()?; let decryption_key = store - .get(&JsValue::from_str(keys::RECOVERY_KEY_V1))? + .get(&JsValue::from_str(keys::RECOVERY_KEY_V1)) .await? .map(|i| self.serializer.deserialize_value(i)) .transpose()?; @@ -1360,13 +1454,14 @@ impl_crypto_store! { Ok(key) } - - async fn load_dehydrated_device_pickle_key(&self) -> Result> { - if let Some(pickle) = self + async fn load_dehydrated_device_pickle_key(&self) -> Result> { + if let Some(pickle) = self .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readonly)? + .transaction(keys::CORE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::CORE)? - .get(&JsValue::from_str(keys::DEHYDRATION_PICKLE_KEY))? + .get(&JsValue::from_str(keys::DEHYDRATION_PICKLE_KEY)) .await? { let pickle: DehydratedDeviceKey = self.serializer.deserialize_value(pickle)?; @@ -1390,12 +1485,9 @@ impl_crypto_store! 
{ let key = self.serializer.encode_key(keys::DIRECT_WITHHELD_INFO, (session_id, room_id)); if let Some(pickle) = self .inner - .transaction_on_one_with_mode( - keys::DIRECT_WITHHELD_INFO, - IdbTransactionMode::Readonly, - )? + .transaction(keys::DIRECT_WITHHELD_INFO).with_mode( TransactionMode::Readonly).build()? .object_store(keys::DIRECT_WITHHELD_INFO)? - .get(&key)? + .get(&key) .await? { let info = self.serializer.deserialize_value(pickle)?; @@ -1407,23 +1499,30 @@ impl_crypto_store! { async fn get_room_settings(&self, room_id: &RoomId) -> Result> { let key = self.serializer.encode_key(keys::ROOM_SETTINGS, room_id); - self - .inner - .transaction_on_one_with_mode(keys::ROOM_SETTINGS, IdbTransactionMode::Readonly)? + self.inner + .transaction(keys::ROOM_SETTINGS) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::ROOM_SETTINGS)? - .get(&key)? + .get(&key) .await? .map(|v| self.serializer.deserialize_value(v).map_err(Into::into)) .transpose() } - async fn get_received_room_key_bundle_data(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + async fn get_received_room_key_bundle_data( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { let key = self.serializer.encode_key(keys::RECEIVED_ROOM_KEY_BUNDLES, (room_id, user_id)); let result = self .inner - .transaction_on_one_with_mode(keys::RECEIVED_ROOM_KEY_BUNDLES, IdbTransactionMode::Readonly)? + .transaction(keys::RECEIVED_ROOM_KEY_BUNDLES) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::RECEIVED_ROOM_KEY_BUNDLES)? - .get(&key)? + .get(&key) .await? .map(|v| self.serializer.deserialize_value(v)) .transpose()?; @@ -1432,11 +1531,12 @@ impl_crypto_store! { } async fn get_custom_value(&self, key: &str) -> Result>> { - self - .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readonly)? + self.inner + .transaction(keys::CORE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::CORE)? - .get(&JsValue::from_str(key))? 
+ .get(&JsValue::from_str(key)) .await? .map(|v| self.serializer.deserialize_value(v).map_err(Into::into)) .transpose() @@ -1444,21 +1544,30 @@ impl_crypto_store! { #[allow(clippy::unused_async)] // Mandated by trait on wasm. async fn set_custom_value(&self, key: &str, value: Vec) -> Result<()> { - self - .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readwrite)? + let transaction = self.inner + .transaction(keys::CORE) + .with_mode(TransactionMode::Readwrite) + .build()?; + transaction .object_store(keys::CORE)? - .put_key_val(&JsValue::from_str(key), &self.serializer.serialize_value(&value)?)?; + .put(&self.serializer.serialize_value(&value)?) + .with_key(JsValue::from_str(key)) + .build()?; + transaction.commit().await?; Ok(()) } #[allow(clippy::unused_async)] // Mandated by trait on wasm. async fn remove_custom_value(&self, key: &str) -> Result<()> { - self - .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readwrite)? + let transaction = self.inner + .transaction(keys::CORE) + .with_mode(TransactionMode::Readwrite) + .build()?; + transaction .object_store(keys::CORE)? - .delete(&JsValue::from_str(key))?; + .delete(&JsValue::from_str(key)) + .build()?; + transaction.commit().await?; Ok(()) } @@ -1470,11 +1579,9 @@ impl_crypto_store! { ) -> Result { // As of 2023-06-23, the code below hasn't been tested yet. let key = JsValue::from_str(key); - let txn = self - .inner - .transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readwrite)?; - let object_store = txn - .object_store(keys::CORE)?; + let txn = + self.inner.transaction(keys::CORE).with_mode(TransactionMode::Readwrite).build()?; + let object_store = txn.object_store(keys::CORE)?; #[derive(serde::Deserialize, serde::Serialize)] struct Lease { @@ -1485,19 +1592,34 @@ impl_crypto_store! 
{ let now_ts: u64 = MilliSecondsSinceUnixEpoch::now().get().into(); let expiration_ts = now_ts + lease_duration_ms as u64; - let prev = object_store.get(&key)?.await?; + let prev = object_store.get(&key).await?; match prev { Some(prev) => { let lease: Lease = self.serializer.deserialize_value(prev)?; if lease.holder == holder || lease.expiration_ts < now_ts { - object_store.put_key_val(&key, &self.serializer.serialize_value(&Lease { holder: holder.to_owned(), expiration_ts })?)?; + object_store + .put( + &self.serializer.serialize_value(&Lease { + holder: holder.to_owned(), + expiration_ts, + })?, + ) + .with_key(key) + .build()?; Ok(true) } else { Ok(false) } } None => { - object_store.put_key_val(&key, &self.serializer.serialize_value(&Lease { holder: holder.to_owned(), expiration_ts })?)?; + object_store + .put( + &self + .serializer + .serialize_value(&Lease { holder: holder.to_owned(), expiration_ts })?, + ) + .with_key(key) + .build()?; Ok(true) } } @@ -1508,30 +1630,29 @@ impl Drop for IndexeddbCryptoStore { fn drop(&mut self) { // Must release the database access manually as it's not done when // dropping it. - self.inner.close(); + self.inner.as_sys().close(); } } /// Open the meta store. /// /// The meta store contains details about the encryption of the main store. -async fn open_meta_db(prefix: &str) -> Result { +async fn open_meta_db(prefix: &str) -> Result { let name = format!("{prefix:0}::matrix-sdk-crypto-meta"); debug!("IndexedDbCryptoStore: Opening meta-store {name}"); - let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, 1)?; - db_req.set_on_upgrade_needed(Some(|evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - let old_version = evt.old_version() as u32; - if old_version < 1 { - // migrating to version 1 - let db = evt.db(); - - db.create_object_store("matrix-sdk-crypto")?; - } - Ok(()) - })); - - Ok(db_req.await?) 
+ Database::open(&name) + .with_version(1u32) + .with_on_upgrade_needed(|evt, tx| { + let old_version = evt.old_version() as u32; + if old_version < 1 { + // migrating to version 1 + tx.db().create_object_store("matrix-sdk-crypto").build()?; + } + Ok(()) + }) + .await + .map_err(Into::into) } /// Load the serialised store cipher from the meta store. @@ -1544,16 +1665,16 @@ async fn open_meta_db(prefix: &str) -> Result Result>, IndexeddbCryptoStoreError> { - let tx: IdbTransaction<'_> = - meta_db.transaction_on_one_with_mode("matrix-sdk-crypto", IdbTransactionMode::Readonly)?; + let tx: Transaction<'_> = + meta_db.transaction("matrix-sdk-crypto").with_mode(TransactionMode::Readonly).build()?; let ob = tx.object_store("matrix-sdk-crypto")?; let store_cipher: Option> = ob - .get(&JsValue::from_str(keys::STORE_CIPHER))? + .get(&JsValue::from_str(keys::STORE_CIPHER)) .await? - .map(|k| k.into_serde()) + .map(|k: JsValue| k.into_serde()) .transpose()?; Ok(store_cipher) } @@ -1565,15 +1686,17 @@ async fn load_store_cipher( /// * `meta_db`: Connection to the meta store, as returned by [`open_meta_db`]. /// * `store_cipher`: The serialised `StoreCipher` object. async fn save_store_cipher( - db: &IdbDatabase, + db: &Database, export: &Vec, ) -> Result<(), IndexeddbCryptoStoreError> { - let tx: IdbTransaction<'_> = - db.transaction_on_one_with_mode("matrix-sdk-crypto", IdbTransactionMode::Readwrite)?; + let tx: Transaction<'_> = + db.transaction("matrix-sdk-crypto").with_mode(TransactionMode::Readwrite).build()?; let ob = tx.object_store("matrix-sdk-crypto")?; - ob.put_key_val(&JsValue::from_str(keys::STORE_CIPHER), &JsValue::from_serde(&export)?)?; - tx.await.into_result()?; + ob.put(&JsValue::from_serde(&export)?) 
+ .with_key(JsValue::from_str(keys::STORE_CIPHER)) + .build()?; + tx.commit().await?; Ok(()) } @@ -1594,7 +1717,7 @@ async fn import_store_cipher_with_key( chacha_key: &[u8; 32], original_key: &[u8], serialised_cipher: &[u8], - db: &IdbDatabase, + db: &Database, ) -> Result { let cipher = match StoreCipher::import_with_key(chacha_key, serialised_cipher) { Ok(cipher) => cipher, @@ -1626,7 +1749,7 @@ async fn import_store_cipher_with_key( /// the supplied function, and stuff the transformed items into a single /// vector to return. async fn fetch_from_object_store_batched( - object_store: IdbObjectStore<'_>, + object_store: ObjectStore<'_>, f: F, batch_size: usize, ) -> Result> @@ -1649,9 +1772,8 @@ where // Get hold of a cursor for this batch. (This should not panic in expect() // because we always use "", or the result of cursor.key(), both of // which are valid keys.) - let after_latest_key = - IdbKeyRange::lower_bound_with_open(&latest_key, true).expect("Key was not valid!"); - let cursor = object_store.open_cursor_with_range(&after_latest_key)?.await?; + let after_latest_key = KeyRange::LowerBound(&latest_key, true); + let cursor = object_store.open_cursor().with_query(&after_latest_key).await?; // Fetch batch_size records into result let next_key = fetch_batch(cursor, batch_size, &f, &mut result).await?; @@ -1671,16 +1793,16 @@ where /// and return the last key we processed, or None if /// we reached the end of the cursor. async fn fetch_batch( - cursor: Option>, + cursor: Option>, batch_size: usize, f: &F, result: &mut Vec, ) -> Result> where F: Fn(JsValue) -> Result, - Q: IdbQuerySource, + Q: QuerySource, { - let Some(cursor) = cursor else { + let Some(mut cursor) = cursor else { // Cursor was None - there are no more records return Ok(None); }; @@ -1688,8 +1810,12 @@ where let mut latest_key = None; for _ in 0..batch_size { + let Some(value) = cursor.next_record().await? 
else { + return Ok(None); + }; + // Process the record - let processed = f(cursor.value()); + let processed = f(value); if let Ok(processed) = processed { result.push(processed); } @@ -1697,15 +1823,9 @@ where // Remember that we have processed this record, so if we hit // the end of the batch, the next batch can start after this one - if let Some(key) = cursor.key() { + if let Some(key) = cursor.key()? { latest_key = Some(key); } - - // Move on to the next record - let more_records = cursor.continue_cursor()?.await?; - if !more_records { - return Ok(None); - } } // We finished the batch but there are more records - diff --git a/crates/matrix-sdk-indexeddb/src/error.rs b/crates/matrix-sdk-indexeddb/src/error.rs index 0aec55bb775..0bd42147ae1 100644 --- a/crates/matrix-sdk-indexeddb/src/error.rs +++ b/crates/matrix-sdk-indexeddb/src/error.rs @@ -18,6 +18,7 @@ use matrix_sdk_base::event_cache::store::EventCacheStoreError; use matrix_sdk_base::media::store::MediaStoreError; #[cfg(feature = "state-store")] use matrix_sdk_base::StoreError; +#[cfg(any(feature = "event-cache-store", feature = "media-store"))] use matrix_sdk_base::{SendOutsideWasm, SyncOutsideWasm}; #[cfg(feature = "e2e-encryption")] use matrix_sdk_crypto::CryptoStoreError; @@ -26,8 +27,10 @@ use thiserror::Error; /// A trait that combines the necessary traits needed for asynchronous runtimes, /// but excludes them when running in a web environment - i.e., when /// `#[cfg(target_family = "wasm")]`. 
+#[cfg(any(feature = "event-cache-store", feature = "media-store"))] pub trait AsyncErrorDeps: std::error::Error + SendOutsideWasm + SyncOutsideWasm + 'static {} +#[cfg(any(feature = "event-cache-store", feature = "media-store"))] impl AsyncErrorDeps for T where T: std::error::Error + SendOutsideWasm + SyncOutsideWasm + 'static {} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs index 038294d2637..34db417bf1a 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs @@ -16,10 +16,12 @@ use matrix_sdk_base::event_cache::store::EventCacheStoreError; use serde::de::Error; use thiserror::Error; -use crate::transaction::TransactionError; +use crate::{error::GenericError, transaction::TransactionError}; #[derive(Debug, Error)] pub enum IndexeddbEventCacheStoreError { + #[error("unable to open database: {0}")] + UnableToOpenDatabase(String), #[error("DomException {name} ({code}): {message}")] DomException { name: String, message: String, code: u16 }, #[error("chunks contain disjoint lists")] @@ -44,11 +46,24 @@ impl From for IndexeddbEventCacheStoreError { } } +impl From for IndexeddbEventCacheStoreError { + fn from(value: indexed_db_futures::error::OpenDbError) -> Self { + use indexed_db_futures::error::OpenDbError::*; + match value { + VersionZero | UnsupportedEnvironment | NullFactory => { + Self::UnableToOpenDatabase(value.to_string()) + } + Base(e) => TransactionError::from(e).into(), + } + } +} + impl From for EventCacheStoreError { fn from(value: IndexeddbEventCacheStoreError) -> Self { use IndexeddbEventCacheStoreError::*; match value { + UnableToOpenDatabase(e) => GenericError::from(e).into(), DomException { .. } | ChunksContainCycle | ChunksContainDisjointLists @@ -67,6 +82,7 @@ impl From for EventCacheStoreError { DomException { .. 
} => Self::InvalidData { details: value.to_string() }, Serialization(e) => Self::Serialization(serde_json::Error::custom(e.to_string())), ItemIsNotUnique | ItemNotFound => Self::InvalidData { details: value.to_string() }, + Backend(e) => GenericError::from(e.to_string()).into(), } } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 451b2f81d51..dedd6426bed 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -13,12 +13,10 @@ // limitations under the License use indexed_db_futures::{ - idb_object_store::IdbObjectStoreParameters, request::IdbOpenDbRequestLike, IdbDatabase, - IdbVersionChangeEvent, + database::Database, + error::{DomException, Error, OpenDbError}, }; use thiserror::Error; -use wasm_bindgen::JsValue; -use web_sys::{DomException, IdbIndexParameters}; /// The current version and keys used in the database. pub mod current { @@ -31,20 +29,20 @@ pub mod current { /// Opens a connection to the IndexedDB database and takes care of upgrading it /// if necessary. #[allow(unused)] -pub async fn open_and_upgrade_db(name: &str) -> Result { - let mut request = IdbDatabase::open_u32(name, current::VERSION as u32)?; - request.set_on_upgrade_needed(Some(|event: &IdbVersionChangeEvent| -> Result<(), JsValue> { - let mut version = - Version::try_from(event.old_version() as u32).map_err(DomException::from)?; - while version < current::VERSION { - version = match version.upgrade(event.db())? { - Some(next) => next, - None => current::VERSION, /* No more upgrades to apply, jump forward! 
*/ - }; - } - Ok(()) - })); - request.await +pub async fn open_and_upgrade_db(name: &str) -> Result { + Database::open(name) + .with_version(current::VERSION as u32) + .with_on_upgrade_needed(|event, transaction| { + let mut version = Version::try_from(event.old_version() as u32)?; + while version < current::VERSION { + version = match version.upgrade(transaction.db())? { + Some(next) => next, + None => current::VERSION, /* No more upgrades to apply, jump forward! */ + }; + } + Ok(()) + }) + .await } /// Represents the version of the IndexedDB database. @@ -59,7 +57,7 @@ pub enum Version { impl Version { /// Upgrade the database to the next version, if one exists. - pub fn upgrade(self, db: &IdbDatabase) -> Result, DomException> { + pub fn upgrade(self, db: &Database) -> Result, Error> { match self { Self::V0 => v0::upgrade(db).map(Some), Self::V1 => Ok(None), @@ -83,12 +81,12 @@ impl TryFrom for Version { } } -impl From for DomException { +impl From for Error { fn from(value: UnknownVersionError) -> Self { let message = format!("unknown version: {}", value.0); let name = "UnknownVersionError"; - match DomException::new_with_message_and_name(&message, name) { - Ok(inner) => inner, + match web_sys::DomException::new_with_message_and_name(&message, name) { + Ok(inner) => Self::DomException(DomException::DataError(inner)), Err(err) => err.into(), } } @@ -98,13 +96,15 @@ pub mod v0 { use super::*; /// Upgrade database from `v0` to `v1` - pub fn upgrade(db: &IdbDatabase) -> Result { + pub fn upgrade(db: &Database) -> Result { v1::create_object_stores(db)?; Ok(Version::V1) } } pub mod v1 { + use indexed_db_futures::Build; + use super::*; pub mod keys { @@ -131,7 +131,7 @@ pub mod v1 { } /// Create all object stores and indices for v1 database - pub fn create_object_stores(db: &IdbDatabase) -> Result<(), DomException> { + pub fn create_object_stores(db: &Database) -> Result<(), Error> { create_lease_object_store(db)?; create_linked_chunks_object_store(db)?; 
create_events_object_store(db)?; @@ -140,10 +140,11 @@ pub mod v1 { } /// Create an object store tracking leases on time-based locks - fn create_lease_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::LEASES_KEY_PATH.into())); - let _ = db.create_object_store_with_params(keys::LEASES, &object_store_params)?; + fn create_lease_object_store(db: &Database) -> Result<(), Error> { + let _ = db + .create_object_store(keys::LEASES) + .with_key_path(keys::LEASES_KEY_PATH.into()) + .build()?; Ok(()) } @@ -151,13 +152,13 @@ pub mod v1 { /// /// * Primary Key - `id` /// * Index - `is_last` - tracks the last chunk in linked chunks - fn create_linked_chunks_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::LINKED_CHUNKS_KEY_PATH.into())); - let linked_chunks = - db.create_object_store_with_params(keys::LINKED_CHUNKS, &object_store_params)?; - linked_chunks - .create_index(keys::LINKED_CHUNKS_NEXT, &keys::LINKED_CHUNKS_NEXT_KEY_PATH.into())?; + fn create_linked_chunks_object_store(db: &Database) -> Result<(), Error> { + let _ = db + .create_object_store(keys::LINKED_CHUNKS) + .with_key_path(keys::LINKED_CHUNKS_KEY_PATH.into()) + .build()? 
+ .create_index(keys::LINKED_CHUNKS_NEXT, keys::LINKED_CHUNKS_NEXT_KEY_PATH.into()) + .build()?; Ok(()) } @@ -169,39 +170,31 @@ pub mod v1 { /// chunks /// * Index - `relation` - tracks any event to which the given event is /// related - fn create_events_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::EVENTS_KEY_PATH.into())); - let events = db.create_object_store_with_params(keys::EVENTS, &object_store_params)?; - - let events_room_params = IdbIndexParameters::new(); - events_room_params.set_unique(true); - events.create_index_with_params( - keys::EVENTS_ROOM, - &keys::EVENTS_ROOM_KEY_PATH.into(), - &events_room_params, - )?; - - let events_position_params = IdbIndexParameters::new(); - events_position_params.set_unique(true); - events.create_index_with_params( - keys::EVENTS_POSITION, - &keys::EVENTS_POSITION_KEY_PATH.into(), - &events_position_params, - )?; - - events.create_index(keys::EVENTS_RELATION, &keys::EVENTS_RELATION_KEY_PATH.into())?; - + fn create_events_object_store(db: &Database) -> Result<(), Error> { + let events = db + .create_object_store(keys::EVENTS) + .with_key_path(keys::EVENTS_KEY_PATH.into()) + .build()?; + let _ = events + .create_index(keys::EVENTS_ROOM, keys::EVENTS_ROOM_KEY_PATH.into()) + .with_unique(true) + .build()?; + let _ = events + .create_index(keys::EVENTS_POSITION, keys::EVENTS_POSITION_KEY_PATH.into()) + .with_unique(true) + .build()?; + let _ = events + .create_index(keys::EVENTS_RELATION, keys::EVENTS_RELATION_KEY_PATH.into()) + .build()?; Ok(()) } /// Create an object store for tracking information about gaps. 
/// /// * Primary Key - `id` - fn create_gaps_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::GAPS_KEY_PATH.into())); - let _ = db.create_object_store_with_params(keys::GAPS, &object_store_params)?; + fn create_gaps_object_store(db: &Database) -> Result<(), Error> { + let _ = + db.create_object_store(keys::GAPS).with_key_path(keys::GAPS_KEY_PATH.into()).build()?; Ok(()) } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 44ba91763fb..39889fff8c4 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -16,7 +16,7 @@ use std::{rc::Rc, time::Duration}; -use indexed_db_futures::IdbDatabase; +use indexed_db_futures::{database::Database, Build}; use matrix_sdk_base::{ event_cache::{store::EventCacheStore, Event, Gap}, linked_chunk::{ @@ -61,7 +61,7 @@ pub use error::IndexeddbEventCacheStoreError; #[derive(Debug, Clone)] pub struct IndexeddbEventCacheStore { // A handle to the IndexedDB database - inner: Rc, + inner: Rc, // A serializer with functionality tailored to `IndexeddbEventCacheStore` serializer: IndexedTypeSerializer, } @@ -82,7 +82,11 @@ impl IndexeddbEventCacheStore { mode: IdbTransactionMode, ) -> Result, IndexeddbEventCacheStoreError> { Ok(IndexeddbEventCacheStoreTransaction::new( - self.inner.transaction_on_multi_with_mode(stores, mode)?, + self.inner + .transaction(stores) + .with_mode(mode) + .build() + .map_err(TransactionError::from)?, &self.serializer, )) } @@ -120,7 +124,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { expiration: now + Duration::from_millis(lease_duration_ms.into()), }) .await?; - + transaction.commit().await?; Ok(true) } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs 
b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs index 73e2048fb17..e404ef4ace1 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs @@ -14,7 +14,7 @@ use std::ops::Deref; -use indexed_db_futures::prelude::IdbTransaction; +use indexed_db_futures::transaction as inner; use matrix_sdk_base::{ event_cache::{Event as RawEvent, Gap as RawGap}, linked_chunk::{ChunkContent, ChunkIdentifier, LinkedChunkId, RawChunk}, @@ -54,7 +54,7 @@ impl<'a> Deref for IndexeddbEventCacheStoreTransaction<'a> { } impl<'a> IndexeddbEventCacheStoreTransaction<'a> { - pub fn new(transaction: IdbTransaction<'a>, serializer: &'a IndexedTypeSerializer) -> Self { + pub fn new(transaction: inner::Transaction<'a>, serializer: &'a IndexedTypeSerializer) -> Self { Self { transaction: Transaction::new(transaction, serializer) } } diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index c9e5c1de83b..f161d319dd8 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -6,7 +6,6 @@ use thiserror::Error; #[cfg(feature = "e2e-encryption")] mod crypto_store; -#[cfg(any(feature = "event-cache-store", feature = "media-store"))] mod error; #[cfg(feature = "event-cache-store")] mod event_cache_store; diff --git a/crates/matrix-sdk-indexeddb/src/media_store/error.rs b/crates/matrix-sdk-indexeddb/src/media_store/error.rs index ce52a25d622..c57f8aa7186 100644 --- a/crates/matrix-sdk-indexeddb/src/media_store/error.rs +++ b/crates/matrix-sdk-indexeddb/src/media_store/error.rs @@ -16,16 +16,16 @@ use matrix_sdk_base::media::store::{MediaStore, MediaStoreError, MemoryMediaStor use serde::de::Error; use thiserror::Error; -use crate::transaction::TransactionError; +use crate::{error::GenericError, transaction::TransactionError}; #[derive(Debug, Error)] pub enum IndexeddbMediaStoreError { + #[error("unable to open database: 
{0}")] + UnableToOpenDatabase(String), #[error("media store: {0}")] MemoryStore(::Error), - #[error("transaction: {0}")] Transaction(#[from] TransactionError), - #[error("DomException {name} ({code}): {message}")] DomException { name: String, message: String, code: u16 }, } @@ -35,6 +35,7 @@ impl From for MediaStoreError { use IndexeddbMediaStoreError::*; match value { + UnableToOpenDatabase(e) => GenericError::from(e).into(), DomException { .. } => Self::InvalidData { details: value.to_string() }, Transaction(inner) => inner.into(), MemoryStore(error) => error, @@ -48,6 +49,18 @@ impl From for IndexeddbMediaStoreError { } } +impl From for IndexeddbMediaStoreError { + fn from(value: indexed_db_futures::error::OpenDbError) -> Self { + use indexed_db_futures::error::OpenDbError::*; + match value { + VersionZero | UnsupportedEnvironment | NullFactory => { + Self::UnableToOpenDatabase(value.to_string()) + } + Base(e) => TransactionError::from(e).into(), + } + } +} + impl From for MediaStoreError { fn from(value: TransactionError) -> Self { use TransactionError::*; @@ -56,6 +69,7 @@ impl From for MediaStoreError { DomException { .. 
} => Self::InvalidData { details: value.to_string() }, Serialization(e) => Self::Serialization(serde_json::Error::custom(e.to_string())), ItemIsNotUnique | ItemNotFound => Self::InvalidData { details: value.to_string() }, + Backend(e) => GenericError::from(e.to_string()).into(), } } } diff --git a/crates/matrix-sdk-indexeddb/src/media_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/media_store/migrations.rs index 72e495a7082..cbbee415b13 100644 --- a/crates/matrix-sdk-indexeddb/src/media_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/media_store/migrations.rs @@ -13,12 +13,10 @@ // limitations under the License use indexed_db_futures::{ - idb_object_store::IdbObjectStoreParameters, request::IdbOpenDbRequestLike, IdbDatabase, - IdbVersionChangeEvent, + database::Database, + error::{DomException, Error, OpenDbError}, }; use thiserror::Error; -use wasm_bindgen::JsValue; -use web_sys::DomException; /// The current version and keys used in the database. pub mod current { @@ -31,20 +29,20 @@ pub mod current { /// Opens a connection to the IndexedDB database and takes care of upgrading it /// if necessary. #[allow(unused)] -pub async fn open_and_upgrade_db(name: &str) -> Result { - let mut request = IdbDatabase::open_u32(name, current::VERSION as u32)?; - request.set_on_upgrade_needed(Some(|event: &IdbVersionChangeEvent| -> Result<(), JsValue> { - let mut version = - Version::try_from(event.old_version() as u32).map_err(DomException::from)?; - while version < current::VERSION { - version = match version.upgrade(event.db())? { - Some(next) => next, - None => current::VERSION, /* No more upgrades to apply, jump forward! 
*/ - }; - } - Ok(()) - })); - request.await +pub async fn open_and_upgrade_db(name: &str) -> Result { + Database::open(name) + .with_version(current::VERSION as u32) + .with_on_upgrade_needed(|event, transaction| { + let mut version = Version::try_from(event.old_version() as u32)?; + while version < current::VERSION { + version = match version.upgrade(transaction.db())? { + Some(next) => next, + None => current::VERSION, /* No more upgrades to apply, jump forward! */ + }; + } + Ok(()) + }) + .await } /// Represents the version of the IndexedDB database. @@ -59,7 +57,7 @@ pub enum Version { impl Version { /// Upgrade the database to the next version, if one exists. - pub fn upgrade(self, db: &IdbDatabase) -> Result, DomException> { + pub fn upgrade(self, db: &Database) -> Result, Error> { match self { Self::V0 => v0::upgrade(db).map(Some), Self::V1 => Ok(None), @@ -83,12 +81,12 @@ impl TryFrom for Version { } } -impl From for DomException { +impl From for Error { fn from(value: UnknownVersionError) -> Self { let message = format!("unknown version: {}", value.0); let name = "UnknownVersionError"; - match DomException::new_with_message_and_name(&message, name) { - Ok(inner) => inner, + match web_sys::DomException::new_with_message_and_name(&message, name) { + Ok(inner) => Self::DomException(DomException::DataError(inner)), Err(err) => err.into(), } } @@ -98,13 +96,15 @@ pub mod v0 { use super::*; /// Upgrade database from `v0` to `v1` - pub fn upgrade(db: &IdbDatabase) -> Result { + pub fn upgrade(db: &Database) -> Result { v1::create_object_stores(db)?; Ok(Version::V1) } } pub mod v1 { + use indexed_db_futures::Build; + use super::*; pub mod keys { @@ -126,7 +126,7 @@ pub mod v1 { } /// Create all object stores and indices for v1 database - pub fn create_object_stores(db: &IdbDatabase) -> Result<(), DomException> { + pub fn create_object_stores(db: &Database) -> Result<(), Error> { create_core_object_store(db)?; create_lease_object_store(db)?; 
create_media_object_store(db)?; @@ -136,18 +136,18 @@ pub mod v1 { /// Create an object store for tracking miscellaneous information /// /// * Primary Key - `id` - fn create_core_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::CORE_KEY_PATH.into())); - let _ = db.create_object_store_with_params(keys::CORE, &object_store_params)?; + fn create_core_object_store(db: &Database) -> Result<(), Error> { + let _ = + db.create_object_store(keys::CORE).with_key_path(keys::CORE_KEY_PATH.into()).build()?; Ok(()) } /// Create an object store tracking leases on time-based locks - fn create_lease_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::LEASES_KEY_PATH.into())); - let _ = db.create_object_store_with_params(keys::LEASES, &object_store_params)?; + fn create_lease_object_store(db: &Database) -> Result<(), Error> { + let _ = db + .create_object_store(keys::LEASES) + .with_key_path(keys::LEASES_KEY_PATH.into()) + .build()?; Ok(()) } @@ -164,17 +164,24 @@ pub mod v1 { /// /// [1]: ruma::MxcUri /// [2]: matrix_sdk_base::media::store::MediaRetentionPolicy - fn create_media_object_store(db: &IdbDatabase) -> Result<(), DomException> { - let mut object_store_params = IdbObjectStoreParameters::new(); - object_store_params.key_path(Some(&keys::MEDIA_KEY_PATH.into())); - let media = db.create_object_store_with_params(keys::MEDIA, &object_store_params)?; - media.create_index(keys::MEDIA_URI, &keys::MEDIA_URI_KEY_PATH.into())?; - media.create_index(keys::MEDIA_CONTENT_SIZE, &keys::MEDIA_CONTENT_SIZE_KEY_PATH.into())?; - media.create_index(keys::MEDIA_LAST_ACCESS, &keys::MEDIA_LAST_ACCESS_KEY_PATH.into())?; - media.create_index( - keys::MEDIA_RETENTION_METADATA, - &keys::MEDIA_RETENTION_METADATA_KEY_PATH.into(), - )?; + fn create_media_object_store(db: 
&Database) -> Result<(), Error> { + let media = db + .create_object_store(keys::MEDIA) + .with_key_path(keys::MEDIA_KEY_PATH.into()) + .build()?; + let _ = media.create_index(keys::MEDIA_URI, keys::MEDIA_URI_KEY_PATH.into()).build()?; + let _ = media + .create_index(keys::MEDIA_CONTENT_SIZE, keys::MEDIA_CONTENT_SIZE_KEY_PATH.into()) + .build()?; + let _ = media + .create_index(keys::MEDIA_LAST_ACCESS, keys::MEDIA_LAST_ACCESS_KEY_PATH.into()) + .build()?; + let _ = media + .create_index( + keys::MEDIA_RETENTION_METADATA, + keys::MEDIA_RETENTION_METADATA_KEY_PATH.into(), + ) + .build()?; Ok(()) } } diff --git a/crates/matrix-sdk-indexeddb/src/media_store/mod.rs b/crates/matrix-sdk-indexeddb/src/media_store/mod.rs index 0dfc4f07eb9..6f830c06175 100644 --- a/crates/matrix-sdk-indexeddb/src/media_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/media_store/mod.rs @@ -28,7 +28,7 @@ use std::{rc::Rc, time::Duration}; pub use builder::IndexeddbMediaStoreBuilder; pub use error::IndexeddbMediaStoreError; -use indexed_db_futures::IdbDatabase; +use indexed_db_futures::{database::Database, Build}; use matrix_sdk_base::{ media::{ store::{ @@ -49,6 +49,7 @@ use crate::{ types::{Lease, Media, MediaMetadata}, }, serializer::{Indexed, IndexedTypeSerializer}, + transaction::TransactionError, }; /// A type for providing an IndexedDB implementation of [`MediaStore`][1]. 
@@ -59,7 +60,7 @@ use crate::{ #[derive(Debug, Clone)] pub struct IndexeddbMediaStore { // A handle to the IndexedDB database - inner: Rc, + inner: Rc, // A serializer with functionality tailored to `IndexeddbMediaStore` serializer: IndexedTypeSerializer, // A service for conveniently delegating media-related queries to an `MediaStoreInner` @@ -89,7 +90,11 @@ impl IndexeddbMediaStore { mode: IdbTransactionMode, ) -> Result, IndexeddbMediaStoreError> { Ok(IndexeddbMediaStoreTransaction::new( - self.inner.transaction_on_multi_with_mode(stores, mode)?, + self.inner + .transaction(stores) + .with_mode(mode) + .build() + .map_err(TransactionError::from)?, &self.serializer, )) } @@ -127,7 +132,7 @@ impl MediaStore for IndexeddbMediaStore { expiration: now + Duration::from_millis(lease_duration_ms.into()), }) .await?; - + transaction.commit().await?; Ok(true) } diff --git a/crates/matrix-sdk-indexeddb/src/media_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/media_store/transaction.rs index 3f4aa9275c8..9daceb7f400 100644 --- a/crates/matrix-sdk-indexeddb/src/media_store/transaction.rs +++ b/crates/matrix-sdk-indexeddb/src/media_store/transaction.rs @@ -14,7 +14,7 @@ use std::ops::Deref; -use indexed_db_futures::prelude::IdbTransaction; +use indexed_db_futures::transaction as inner; use matrix_sdk_base::media::{store::MediaRetentionPolicy, MediaRequestParameters}; use ruma::MxcUri; @@ -45,7 +45,7 @@ impl<'a> Deref for IndexeddbMediaStoreTransaction<'a> { } impl<'a> IndexeddbMediaStoreTransaction<'a> { - pub fn new(transaction: IdbTransaction<'a>, serializer: &'a IndexedTypeSerializer) -> Self { + pub fn new(transaction: inner::Transaction<'a>, serializer: &'a IndexedTypeSerializer) -> Self { Self { transaction: Transaction::new(transaction, serializer) } } diff --git a/crates/matrix-sdk-indexeddb/src/serializer/indexed_type/mod.rs b/crates/matrix-sdk-indexeddb/src/serializer/indexed_type/mod.rs index 5e18d115a90..29854bf7ca9 100644 --- 
a/crates/matrix-sdk-indexeddb/src/serializer/indexed_type/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/serializer/indexed_type/mod.rs @@ -23,12 +23,12 @@ pub mod range; pub mod traits; use gloo_utils::format::JsValueSerdeExt; +use indexed_db_futures::KeyRange; use range::IndexedKeyRange; use serde::{de::DeserializeOwned, Serialize}; use thiserror::Error; use traits::{Indexed, IndexedKey}; use wasm_bindgen::JsValue; -use web_sys::IdbKeyRange; use crate::serializer::SafeEncodeSerializer; @@ -99,21 +99,15 @@ impl IndexedTypeSerializer { /// /// Note that the particular key which is encoded is defined by the type /// `K`. - pub fn encode_key_range( - &self, - range: impl Into>, - ) -> Result + pub fn encode_key_range(&self, range: impl Into>) -> KeyRange where T: Indexed, K: Serialize, { - use serde_wasm_bindgen::to_value; - Ok(match range.into() { - IndexedKeyRange::Only(key) => IdbKeyRange::only(&to_value(&key)?)?, - IndexedKeyRange::Bound(lower, upper) => { - IdbKeyRange::bound(&to_value(&lower)?, &to_value(&upper)?)? - } - }) + match range.into() { + IndexedKeyRange::Only(key) => KeyRange::Only(key), + IndexedKeyRange::Bound(lower, upper) => KeyRange::Bound(lower, false, upper, false), + } } /// Encodes a key component range for an [`Indexed`] type. 
@@ -123,7 +117,7 @@ impl IndexedTypeSerializer { pub fn encode_key_component_range<'a, T, K>( &self, range: impl Into>>, - ) -> Result + ) -> KeyRange where T: Indexed, K: IndexedKey + Serialize, diff --git a/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/traits.rs b/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/traits.rs index 7f8917fe40a..aa4bf32c928 100644 --- a/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/traits.rs +++ b/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/traits.rs @@ -5,6 +5,7 @@ use base64::{ engine::{general_purpose, GeneralPurpose}, Engine, }; +use indexed_db_futures::KeyRange; use matrix_sdk_store_encryption::StoreCipher; use ruma::{ events::{ @@ -14,7 +15,6 @@ use ruma::{ UserId, }; use wasm_bindgen::JsValue; -use web_sys::IdbKeyRange; /// ASCII Group Separator, for elements in the keys pub const KEY_SEPARATOR: &str = "\u{001D}"; @@ -53,26 +53,28 @@ pub trait SafeEncode { /// Encode self into a IdbKeyRange for searching all keys that are /// prefixed with this key, followed by `KEY_SEPARATOR`. Internally /// uses `as_encoded_string` to ensure the given key is escaped properly. 
- fn encode_to_range(&self) -> Result { + fn encode_to_range(&self) -> KeyRange { let key = self.as_encoded_string(); - IdbKeyRange::bound( - &JsValue::from([&key, KEY_SEPARATOR].concat()), - &JsValue::from([&key, RANGE_END].concat()), + KeyRange::Bound( + JsValue::from([&key, KEY_SEPARATOR].concat()), + false, + JsValue::from([&key, RANGE_END].concat()), + false, ) - .map_err(|e| e.as_string().unwrap_or_else(|| "Creating key range failed".to_owned())) } fn encode_to_range_secure( &self, table_name: &str, store_cipher: &StoreCipher, - ) -> Result { + ) -> KeyRange { let key = self.as_secure_string(table_name, store_cipher); - IdbKeyRange::bound( - &JsValue::from([&key, KEY_SEPARATOR].concat()), - &JsValue::from([&key, RANGE_END].concat()), + KeyRange::Bound( + JsValue::from([&key, KEY_SEPARATOR].concat()), + false, + JsValue::from([&key, RANGE_END].concat()), + false, ) - .map_err(|e| e.as_string().unwrap_or_else(|| "Creating key range failed".to_owned())) } } diff --git a/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/types.rs b/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/types.rs index c7ab2fb0828..7f7b953f02f 100644 --- a/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/types.rs +++ b/crates/matrix-sdk-indexeddb/src/serializer/safe_encode/types.rs @@ -20,11 +20,11 @@ use base64::{ Engine, }; use gloo_utils::format::JsValueSerdeExt; +use indexed_db_futures::KeyRange; use matrix_sdk_crypto::CryptoStoreError; use matrix_sdk_store_encryption::{EncryptedValueBase64, StoreCipher}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use wasm_bindgen::JsValue; -use web_sys::IdbKeyRange; use zeroize::Zeroizing; use crate::serializer::safe_encode::traits::SafeEncode; @@ -146,11 +146,7 @@ impl SafeEncodeSerializer { } } - pub fn encode_to_range( - &self, - table_name: &str, - key: T, - ) -> Result + pub fn encode_to_range(&self, table_name: &str, key: T) -> KeyRange where T: SafeEncode, { @@ -158,11 +154,6 @@ impl SafeEncodeSerializer { 
Some(cipher) => key.encode_to_range_secure(table_name, cipher), None => key.encode_to_range(), } - .map_err(|e| SafeEncodeSerializerError::DomException { - code: 0, - name: "IdbKeyRangeMakeError".to_owned(), - message: e, - }) } /// Encode the value for storage as a value in indexeddb. diff --git a/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs index 8eed725ed3a..d066d12d236 100644 --- a/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs @@ -18,7 +18,14 @@ use std::{ }; use gloo_utils::format::JsValueSerdeExt; -use indexed_db_futures::{prelude::*, request::OpenDbRequest, IdbDatabase, IdbVersionChangeEvent}; +use indexed_db_futures::{ + database::{Database, VersionChangeEvent}, + error::Error, + future::OpenDbRequest, + object_store::ObjectStore, + prelude::*, + transaction::{Transaction, TransactionMode}, +}; use js_sys::Date as JsDate; use matrix_sdk_base::{ deserialized_responses::SyncOrStrippedState, store::migration_helpers::RoomInfoV1, @@ -38,7 +45,6 @@ use ruma::{ use serde::{Deserialize, Serialize}; use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue}; use wasm_bindgen::JsValue; -use web_sys::IdbTransactionMode; use super::{ deserialize_value, encode_key, encode_to_range, keys, serialize_value, Result, RoomMember, @@ -82,35 +88,41 @@ mod old_keys { pub async fn upgrade_meta_db( meta_name: &str, passphrase: Option<&str>, -) -> Result<(IdbDatabase, Option>)> { +) -> Result<(Database, Option>)> { // Meta database. 
- let mut db_req: OpenDbRequest = IdbDatabase::open_u32(meta_name, CURRENT_META_DB_VERSION)?; - db_req.set_on_upgrade_needed(Some(|evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - let db = evt.db(); - let old_version = evt.old_version() as u32; - - if old_version < 1 { - db.create_object_store(keys::INTERNAL_STATE)?; - } + let db_req: OpenDbRequest = Database::open(meta_name) + .with_version(CURRENT_META_DB_VERSION) + .with_on_upgrade_needed( + |evt: VersionChangeEvent, tx: &Transaction<'_>| -> Result<(), Error> { + let db = tx.db(); + let old_version = evt.old_version() as u32; + + if old_version < 1 { + db.create_object_store(keys::INTERNAL_STATE).build()?; + } - if old_version < 2 { - db.create_object_store(keys::BACKUPS_META)?; - } + if old_version < 2 { + db.create_object_store(keys::BACKUPS_META).build()?; + } - Ok(()) - })); + Ok(()) + }, + ) + .build()?; - let meta_db: IdbDatabase = db_req.await?; + let meta_db: Database = db_req.await?; let store_cipher = if let Some(passphrase) = passphrase { - let tx: IdbTransaction<'_> = meta_db - .transaction_on_one_with_mode(keys::INTERNAL_STATE, IdbTransactionMode::Readwrite)?; + let tx: Transaction<'_> = meta_db + .transaction(keys::INTERNAL_STATE) + .with_mode(TransactionMode::Readwrite) + .build()?; let ob = tx.object_store(keys::INTERNAL_STATE)?; let cipher = if let Some(StoreKeyWrapper(inner)) = ob - .get(&JsValue::from_str(keys::STORE_KEY))? + .get(&JsValue::from_str(keys::STORE_KEY)) .await? - .map(|v| v.into_serde()) + .map(|v: JsValue| v.into_serde()) .transpose()? { StoreCipher::import(passphrase, &inner)? 
@@ -120,14 +132,11 @@ pub async fn upgrade_meta_db( let export = cipher.export(passphrase)?; #[cfg(test)] let export = cipher._insecure_export_fast_for_testing(passphrase)?; - ob.put_key_val( - &JsValue::from_str(keys::STORE_KEY), - &JsValue::from_serde(&StoreKeyWrapper(export))?, - )?; + ob.put(&StoreKeyWrapper(export)).with_key(keys::STORE_KEY.to_owned()).serde()?.await?; cipher }; - tx.await.into_result()?; + tx.commit().await?; Some(Arc::new(cipher)) } else { None @@ -151,9 +160,9 @@ pub async fn upgrade_inner_db( name: &str, store_cipher: Option<&StoreCipher>, migration_strategy: MigrationConflictStrategy, - meta_db: &IdbDatabase, -) -> Result { - let mut db = IdbDatabase::open(name)?.await?; + meta_db: &Database, +) -> Result { + let mut db = Database::open(name).await?; // Even if the web-sys bindings expose the version as a f64, the IndexedDB API // works with an unsigned integer. @@ -247,19 +256,21 @@ pub async fn upgrade_inner_db( db.close(); - let mut db_req: OpenDbRequest = IdbDatabase::open_u32(name, CURRENT_DB_VERSION)?; - db_req.set_on_upgrade_needed(Some( - move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - // Sanity check. - // There should be no upgrade needed since the database should have already been - // upgraded to the latest version. - panic!( - "Opening database that was not fully upgraded: \ + let db_req: OpenDbRequest = Database::open(name) + .with_version(CURRENT_DB_VERSION) + .with_on_upgrade_needed( + move |evt: VersionChangeEvent, _: &Transaction<'_>| -> Result<(), Error> { + // Sanity check. + // There should be no upgrade needed since the database should have already been + // upgraded to the latest version. 
+ panic!( + "Opening database that was not fully upgraded: \ DB version: {}; latest version: {CURRENT_DB_VERSION}", - evt.old_version() - ) - }, - )); + evt.old_version() + ) + }, + ) + .build()?; db = db_req.await?; } @@ -269,41 +280,45 @@ pub async fn upgrade_inner_db( /// Apply the given migration by upgrading the database with the given name to /// the given version. async fn apply_migration( - db: IdbDatabase, + db: Database, version: u32, migration: OngoingMigration, -) -> Result { +) -> Result { let name = db.name(); db.close(); - let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, version)?; - db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - // Changing the format can only happen in the upgrade procedure - for store in &migration.drop_stores { - evt.db().delete_object_store(store)?; - } - for store in &migration.create_stores { - evt.db().create_object_store(store)?; - } + let db_req: OpenDbRequest = Database::open(&name) + .with_version(version) + .with_on_upgrade_needed( + move |_: VersionChangeEvent, tx: &Transaction<'_>| -> Result<(), Error> { + // Changing the format can only happen in the upgrade procedure + for store in &migration.drop_stores { + tx.db().delete_object_store(store)?; + } + for store in &migration.create_stores { + tx.db().create_object_store(store).build()?; + } - Ok(()) - })); + Ok(()) + }, + ) + .build()?; let db = db_req.await?; // Finally, we can add data to the newly created tables if needed. 
if !migration.data.is_empty() { let stores: Vec<_> = migration.data.keys().copied().collect(); - let tx = db.transaction_on_multi_with_mode(&stores, IdbTransactionMode::Readwrite)?; + let tx = db.transaction(stores).with_mode(TransactionMode::Readwrite).build()?; for (name, data) in migration.data { let store = tx.object_store(name)?; for (key, value) in data { - store.put_key_val(&key, &value)?; + store.put(&value).with_key(key).await?; } } - tx.await.into_result()?; + tx.commit().await?; } Ok(db) @@ -333,57 +348,61 @@ pub const V1_STORES: &[&str] = &[ old_keys::SYNC_TOKEN, ]; -async fn backup_v1(source: &IdbDatabase, meta: &IdbDatabase) -> Result<()> { +async fn backup_v1(source: &Database, meta: &Database) -> Result<()> { let now = JsDate::now(); let backup_name = format!("backup-{}-{now}", source.name()); - let mut db_req: OpenDbRequest = IdbDatabase::open_f64(&backup_name, source.version())?; - db_req.set_on_upgrade_needed(Some(move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - // migrating to version 1 - let db = evt.db(); - for name in V1_STORES { - db.create_object_store(name)?; - } - Ok(()) - })); + let db_req: OpenDbRequest = Database::open(&backup_name) + .with_version(source.version()) + .with_on_upgrade_needed( + move |_: VersionChangeEvent, tx: &Transaction<'_>| -> Result<(), Error> { + // migrating to version 1 + let db = tx.db(); + for name in V1_STORES { + db.create_object_store(name).build()?; + } + Ok(()) + }, + ) + .build()?; let target = db_req.await?; for name in V1_STORES { - let source_tx = source.transaction_on_one_with_mode(name, IdbTransactionMode::Readonly)?; + let source_tx = source.transaction(*name).with_mode(TransactionMode::Readonly).build()?; let source_obj = source_tx.object_store(name)?; - let Some(curs) = source_obj.open_cursor()?.await? else { + let Some(mut curs) = source_obj.open_cursor().await? 
else { continue; }; - let data = curs.into_vec(0).await?; + let mut data = vec![]; + while let Some(value) = curs.next_record::().await? { + if let Some(key) = curs.key::()? { + data.push((key, value)); + } + } - let target_tx = target.transaction_on_one_with_mode(name, IdbTransactionMode::Readwrite)?; + let target_tx = target.transaction(*name).with_mode(TransactionMode::Readwrite).build()?; let target_obj = target_tx.object_store(name)?; - for kv in data { - target_obj.put_key_val(kv.key(), kv.value())?; + for (key, value) in data { + target_obj.put(value).with_key(key).await?; } - target_tx.await.into_result()?; + target_tx.commit().await?; } - let tx = - meta.transaction_on_one_with_mode(keys::BACKUPS_META, IdbTransactionMode::Readwrite)?; + let tx = meta.transaction(keys::BACKUPS_META).with_mode(TransactionMode::Readwrite).build()?; let backup_store = tx.object_store(keys::BACKUPS_META)?; - backup_store.put_key_val(&JsValue::from_f64(now), &JsValue::from_str(&backup_name))?; + backup_store.put(&backup_name).with_key(now).await?; - tx.await; + tx.commit().await?; - source.close(); target.close(); Ok(()) } -async fn v3_fix_store( - store: &IdbObjectStore<'_>, - store_cipher: Option<&StoreCipher>, -) -> Result<()> { +async fn v3_fix_store(store: &ObjectStore<'_>, store_cipher: Option<&StoreCipher>) -> Result<()> { fn maybe_fix_json(raw_json: &RawJsonValue) -> Result> { let json = raw_json.get(); @@ -400,18 +419,14 @@ async fn v3_fix_store( Ok(None) } - let cursor = store.open_cursor()?.await?; + let cursor = store.open_cursor().await?; - if let Some(cursor) = cursor { - loop { - let raw_json: Box = deserialize_value(store_cipher, &cursor.value())?; + if let Some(mut cursor) = cursor { + while let Some(value) = cursor.next_record().await? { + let raw_json: Box = deserialize_value(store_cipher, &value)?; if let Some(fixed_json) = maybe_fix_json(&raw_json)? 
{ - cursor.update(&serialize_value(store_cipher, &fixed_json)?)?.await?; - } - - if !cursor.continue_cursor()?.await? { - break; + cursor.update(&serialize_value(store_cipher, &fixed_json)?).await?; } } } @@ -420,35 +435,35 @@ async fn v3_fix_store( } /// Fix serialized redacted state events. -async fn migrate_to_v3(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> Result { - let tx = db.transaction_on_multi_with_mode( - &[keys::ROOM_STATE, keys::ROOM_INFOS], - IdbTransactionMode::Readwrite, - )?; +async fn migrate_to_v3(db: Database, store_cipher: Option<&StoreCipher>) -> Result { + let tx = db + .transaction([keys::ROOM_STATE, keys::ROOM_INFOS]) + .with_mode(TransactionMode::Readwrite) + .build()?; v3_fix_store(&tx.object_store(keys::ROOM_STATE)?, store_cipher).await?; v3_fix_store(&tx.object_store(keys::ROOM_INFOS)?, store_cipher).await?; - tx.await.into_result()?; + tx.commit().await?; let name = db.name(); db.close(); // Update the version of the database. - Ok(IdbDatabase::open_u32(&name, 3)?.await?) + Ok(Database::open(&name).with_version(3u32).await?) } /// Move the content of the SYNC_TOKEN and SESSION stores to the new KV store. 
-async fn migrate_to_v4(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> Result { - let tx = db.transaction_on_multi_with_mode( - &[old_keys::SYNC_TOKEN, old_keys::SESSION], - IdbTransactionMode::Readonly, - )?; +async fn migrate_to_v4(db: Database, store_cipher: Option<&StoreCipher>) -> Result { + let tx = db + .transaction([old_keys::SYNC_TOKEN, old_keys::SESSION]) + .with_mode(TransactionMode::Readonly) + .build()?; let mut values = Vec::new(); // Sync token let sync_token_store = tx.object_store(old_keys::SYNC_TOKEN)?; - let sync_token = sync_token_store.get(&JsValue::from_str(old_keys::SYNC_TOKEN))?.await?; + let sync_token = sync_token_store.get(&JsValue::from_str(old_keys::SYNC_TOKEN)).await?; if let Some(sync_token) = sync_token { values.push(( @@ -459,17 +474,17 @@ async fn migrate_to_v4(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R // Filters let session_store = tx.object_store(old_keys::SESSION)?; - let range = - encode_to_range(store_cipher, StateStoreDataKey::FILTER, StateStoreDataKey::FILTER)?; - if let Some(cursor) = session_store.open_cursor_with_range(&range)?.await? { - while let Some(key) = cursor.key() { - let value = cursor.value(); + let range = encode_to_range(store_cipher, StateStoreDataKey::FILTER, StateStoreDataKey::FILTER); + if let Some(mut cursor) = session_store.open_cursor().with_query(&range).await? { + while let Some(value) = cursor.next_record().await? { + let Some(key) = cursor.key()? else { + break; + }; values.push((key, value)); - cursor.continue_cursor()?.await?; } } - tx.await.into_result()?; + tx.commit().await?; let mut data = HashMap::new(); if !values.is_empty() { @@ -485,33 +500,34 @@ async fn migrate_to_v4(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R } /// Move the member events with other state events. 
-async fn migrate_to_v5(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> Result { - let tx = db.transaction_on_multi_with_mode( - &[ +async fn migrate_to_v5(db: Database, store_cipher: Option<&StoreCipher>) -> Result { + let tx = db + .transaction([ old_keys::MEMBERS, old_keys::STRIPPED_MEMBERS, keys::ROOM_STATE, keys::STRIPPED_ROOM_STATE, keys::ROOM_INFOS, old_keys::STRIPPED_ROOM_INFOS, - ], - IdbTransactionMode::Readwrite, - )?; + ]) + .with_mode(TransactionMode::Readwrite) + .build()?; let members_store = tx.object_store(old_keys::MEMBERS)?; let state_store = tx.object_store(keys::ROOM_STATE)?; let room_infos = tx .object_store(keys::ROOM_INFOS)? - .get_all()? + .get_all() .await? - .iter() + .filter_map(Result::ok) .filter_map(|f| deserialize_value::(store_cipher, &f).ok()) .collect::>(); for room_info in room_infos { let room_id = room_info.room_id(); - let range = encode_to_range(store_cipher, old_keys::MEMBERS, room_id)?; - for value in members_store.get_all_with_key(&range)?.await?.iter() { + let range = encode_to_range(store_cipher, old_keys::MEMBERS, room_id); + for result in members_store.get_all().with_query(&range).await? { + let value = result?; let raw_member_event = deserialize_value::>(store_cipher, &value)?; let state_key = raw_member_event.get_field::("state_key")?.unwrap_or_default(); @@ -521,7 +537,7 @@ async fn migrate_to_v5(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R (room_id, StateEventType::RoomMember, state_key), ); - state_store.add_key_val(&key, &value)?; + state_store.add(&value).with_key(key).build()?.await?; } } @@ -529,16 +545,17 @@ async fn migrate_to_v5(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R let stripped_state_store = tx.object_store(keys::STRIPPED_ROOM_STATE)?; let stripped_room_infos = tx .object_store(old_keys::STRIPPED_ROOM_INFOS)? - .get_all()? + .get_all() .await? 
- .iter() + .filter_map(Result::ok) .filter_map(|f| deserialize_value::(store_cipher, &f).ok()) .collect::>(); for room_info in stripped_room_infos { let room_id = room_info.room_id(); - let range = encode_to_range(store_cipher, old_keys::STRIPPED_MEMBERS, room_id)?; - for value in stripped_members_store.get_all_with_key(&range)?.await?.iter() { + let range = encode_to_range(store_cipher, old_keys::STRIPPED_MEMBERS, room_id); + for result in stripped_members_store.get_all().with_query(&range).await? { + let value = result?; let raw_member_event = deserialize_value::>(store_cipher, &value)?; let state_key = raw_member_event.get_field::("state_key")?.unwrap_or_default(); @@ -548,11 +565,11 @@ async fn migrate_to_v5(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R (room_id, StateEventType::RoomMember, state_key), ); - stripped_state_store.add_key_val(&key, &value)?; + stripped_state_store.add(&value).with_key(key).build()?.await?; } } - tx.await.into_result()?; + tx.commit().await?; let migration = OngoingMigration { drop_stores: [old_keys::MEMBERS, old_keys::STRIPPED_MEMBERS].into_iter().collect(), @@ -563,25 +580,25 @@ async fn migrate_to_v5(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R } /// Remove the old user IDs stores and populate the new ones. -async fn migrate_to_v6(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> Result { +async fn migrate_to_v6(db: Database, store_cipher: Option<&StoreCipher>) -> Result { // We only have joined and invited user IDs in the old store, so instead we will // use the room member events to populate the new store. - let tx = db.transaction_on_multi_with_mode( - &[ + let tx = db + .transaction([ keys::ROOM_STATE, keys::ROOM_INFOS, keys::STRIPPED_ROOM_STATE, old_keys::STRIPPED_ROOM_INFOS, - ], - IdbTransactionMode::Readonly, - )?; + ]) + .with_mode(TransactionMode::Readonly) + .build()?; let state_store = tx.object_store(keys::ROOM_STATE)?; let room_infos = tx .object_store(keys::ROOM_INFOS)? 
- .get_all()? + .get_all() .await? - .iter() + .filter_map(Result::ok) .filter_map(|f| deserialize_value::(store_cipher, &f).ok()) .collect::>(); let mut values = Vec::new(); @@ -589,8 +606,9 @@ async fn migrate_to_v6(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R for room_info in room_infos { let room_id = room_info.room_id(); let range = - encode_to_range(store_cipher, keys::ROOM_STATE, (room_id, StateEventType::RoomMember))?; - for value in state_store.get_all_with_key(&range)?.await?.iter() { + encode_to_range(store_cipher, keys::ROOM_STATE, (room_id, StateEventType::RoomMember)); + for result in state_store.get_all().with_query(&range).await? { + let value = result?; let member_event = deserialize_value::>(store_cipher, &value)? .deserialize()?; let key = encode_key(store_cipher, keys::USER_IDS, (room_id, member_event.state_key())); @@ -603,9 +621,9 @@ async fn migrate_to_v6(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R let stripped_state_store = tx.object_store(keys::STRIPPED_ROOM_STATE)?; let stripped_room_infos = tx .object_store(old_keys::STRIPPED_ROOM_INFOS)? - .get_all()? + .get_all() .await? - .iter() + .filter_map(Result::ok) .filter_map(|f| deserialize_value::(store_cipher, &f).ok()) .collect::>(); let mut stripped_values = Vec::new(); @@ -616,8 +634,9 @@ async fn migrate_to_v6(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R store_cipher, keys::STRIPPED_ROOM_STATE, (room_id, StateEventType::RoomMember), - )?; - for value in stripped_state_store.get_all_with_key(&range)?.await?.iter() { + ); + for result in stripped_state_store.get_all().with_query(&range).await? { + let value = result?; let stripped_member_event = deserialize_value::>(store_cipher, &value)? 
.deserialize()?; @@ -632,7 +651,7 @@ async fn migrate_to_v6(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R } } - tx.await.into_result()?; + tx.commit().await?; let mut data = HashMap::new(); if !values.is_empty() { @@ -657,17 +676,17 @@ async fn migrate_to_v6(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R /// Remove the stripped room infos store and migrate the data with the other /// room infos, as well as . -async fn migrate_to_v7(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> Result { - let tx = db.transaction_on_multi_with_mode( - &[old_keys::STRIPPED_ROOM_INFOS], - IdbTransactionMode::Readonly, - )?; +async fn migrate_to_v7(db: Database, store_cipher: Option<&StoreCipher>) -> Result { + let tx = db + .transaction([old_keys::STRIPPED_ROOM_INFOS]) + .with_mode(TransactionMode::Readonly) + .build()?; let room_infos = tx .object_store(old_keys::STRIPPED_ROOM_INFOS)? - .get_all()? + .get_all() .await? - .iter() + .filter_map(Result::ok) .filter_map(|value| { deserialize_value::(store_cipher, &value) .ok() @@ -675,7 +694,7 @@ async fn migrate_to_v7(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R }) .collect::>(); - tx.await.into_result()?; + tx.commit().await?; let mut data = HashMap::new(); if !room_infos.is_empty() { @@ -691,21 +710,21 @@ async fn migrate_to_v7(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R } /// Change the format of the room infos. 
-async fn migrate_to_v8(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> Result { - let tx = db.transaction_on_multi_with_mode( - &[keys::ROOM_STATE, keys::STRIPPED_ROOM_STATE, keys::ROOM_INFOS], - IdbTransactionMode::Readwrite, - )?; +async fn migrate_to_v8(db: Database, store_cipher: Option<&StoreCipher>) -> Result { + let tx = db + .transaction([keys::ROOM_STATE, keys::STRIPPED_ROOM_STATE, keys::ROOM_INFOS]) + .with_mode(TransactionMode::Readwrite) + .build()?; let room_state_store = tx.object_store(keys::ROOM_STATE)?; let stripped_room_state_store = tx.object_store(keys::STRIPPED_ROOM_STATE)?; let room_infos_store = tx.object_store(keys::ROOM_INFOS)?; let room_infos_v1 = room_infos_store - .get_all()? + .get_all() + .build()? .await? - .iter() - .map(|value| deserialize_value::(store_cipher, &value)) + .map(|value| deserialize_value::(store_cipher, &value?)) .collect::, _>>()?; for room_info_v1 in room_infos_v1 { @@ -714,7 +733,7 @@ async fn migrate_to_v8(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R store_cipher, keys::STRIPPED_ROOM_STATE, (room_info_v1.room_id(), &StateEventType::RoomCreate, ""), - ))? + )) .await? .map(|f| deserialize_value(store_cipher, &f)) .transpose()? @@ -726,7 +745,7 @@ async fn migrate_to_v8(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R store_cipher, keys::ROOM_STATE, (room_info_v1.room_id(), &StateEventType::RoomCreate, ""), - ))? + )) .await? .map(|f| deserialize_value(store_cipher, &f)) .transpose()? @@ -734,23 +753,23 @@ async fn migrate_to_v8(db: IdbDatabase, store_cipher: Option<&StoreCipher>) -> R }; let room_info = room_info_v1.migrate(create.as_ref()); - room_infos_store.put_key_val( - &encode_key(store_cipher, keys::ROOM_INFOS, room_info.room_id()), - &serialize_value(store_cipher, &room_info)?, - )?; + room_infos_store + .put(&serialize_value(store_cipher, &room_info)?) 
+ .with_key(encode_key(store_cipher, keys::ROOM_INFOS, room_info.room_id())) + .build()?; } - tx.await.into_result()?; + tx.commit().await?; let name = db.name(); db.close(); // Update the version of the database. - Ok(IdbDatabase::open_u32(&name, 8)?.await?) + Ok(Database::open(&name).with_version(8u32).build()?.await?) } /// Add the new [`keys::ROOM_SEND_QUEUE`] table. -async fn migrate_to_v9(db: IdbDatabase) -> Result { +async fn migrate_to_v9(db: Database) -> Result { let migration = OngoingMigration { drop_stores: [].into(), create_stores: [keys::ROOM_SEND_QUEUE].into_iter().collect(), @@ -760,7 +779,7 @@ async fn migrate_to_v9(db: IdbDatabase) -> Result { } /// Add the new [`keys::DEPENDENT_SEND_QUEUE`] table. -async fn migrate_to_v10(db: IdbDatabase) -> Result { +async fn migrate_to_v10(db: Database) -> Result { let migration = OngoingMigration { drop_stores: [].into(), create_stores: [keys::DEPENDENT_SEND_QUEUE].into_iter().collect(), @@ -770,7 +789,7 @@ async fn migrate_to_v10(db: IdbDatabase) -> Result { } /// Drop the [`old_keys::MEDIA`] table. -async fn migrate_to_v11(db: IdbDatabase) -> Result { +async fn migrate_to_v11(db: Database) -> Result { let migration = OngoingMigration { drop_stores: [old_keys::MEDIA].into(), create_stores: Default::default(), @@ -781,26 +800,26 @@ async fn migrate_to_v11(db: IdbDatabase) -> Result { /// The format of data serialized into the send queue and dependent send queue /// tables have changed, clear both. 
-async fn migrate_to_v12(db: IdbDatabase) -> Result { - let store_keys = &[keys::DEPENDENT_SEND_QUEUE, keys::ROOM_SEND_QUEUE]; - let tx = db.transaction_on_multi_with_mode(store_keys, IdbTransactionMode::Readwrite)?; +async fn migrate_to_v12(db: Database) -> Result { + let store_keys = [keys::DEPENDENT_SEND_QUEUE, keys::ROOM_SEND_QUEUE]; + let tx = db.transaction(store_keys).with_mode(TransactionMode::Readwrite).build()?; for store_name in store_keys { let store = tx.object_store(store_name)?; store.clear()?; } - tx.await.into_result()?; + tx.commit().await?; let name = db.name(); db.close(); // Update the version of the database. - Ok(IdbDatabase::open_u32(&name, 12)?.await?) + Ok(Database::open(&name).with_version(12u32).build()?.await?) } /// Add the thread subscriptions table. -async fn migrate_to_v13(db: IdbDatabase) -> Result { +async fn migrate_to_v13(db: Database) -> Result { let migration = OngoingMigration { drop_stores: [].into(), create_stores: [keys::THREAD_SUBSCRIPTIONS].into_iter().collect(), @@ -812,7 +831,7 @@ async fn migrate_to_v13(db: IdbDatabase) -> Result { /// Empty the thread subscriptions table, because the serialized format has /// changed (from storing only the subscription to storing the /// `StoredThreadSubscription`). 
-async fn migrate_to_v14(db: IdbDatabase) -> Result { +async fn migrate_to_v14(db: Database) -> Result { let migration = OngoingMigration { drop_stores: [keys::THREAD_SUBSCRIPTIONS].into_iter().collect(), create_stores: [keys::THREAD_SUBSCRIPTIONS].into_iter().collect(), @@ -827,7 +846,13 @@ mod tests { use assert_matches::assert_matches; use assert_matches2::assert_let; - use indexed_db_futures::prelude::*; + use indexed_db_futures::{ + database::{Database, VersionChangeEvent}, + error::Error, + object_store::ObjectStore, + prelude::*, + transaction::{Transaction, TransactionMode}, + }; use matrix_sdk_base::{ deserialized_responses::RawMemberEvent, store::{RoomLoadSettings, StateStoreExt}, @@ -861,70 +886,73 @@ mod tests { const CUSTOM_DATA_KEY: &[u8] = b"custom_data_key"; const CUSTOM_DATA: &[u8] = b"some_custom_data"; - pub async fn create_fake_db(name: &str, version: u32) -> Result { - let mut db_req: OpenDbRequest = IdbDatabase::open_u32(name, version)?; - db_req.set_on_upgrade_needed(Some( - move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { - let db = evt.db(); - - // Stores common to all versions. - let common_stores = &[ - keys::ACCOUNT_DATA, - keys::PROFILES, - keys::DISPLAY_NAMES, - keys::ROOM_STATE, - keys::ROOM_INFOS, - keys::PRESENCE, - keys::ROOM_ACCOUNT_DATA, - keys::STRIPPED_ROOM_STATE, - keys::ROOM_USER_RECEIPTS, - keys::ROOM_EVENT_RECEIPTS, - keys::CUSTOM, - ]; - - for name in common_stores { - db.create_object_store(name)?; - } + pub async fn create_fake_db(name: &str, version: u32) -> Result { + Database::open(name) + .with_version(version) + .with_on_upgrade_needed( + move |_: VersionChangeEvent, tx: &Transaction<'_>| -> Result<(), Error> { + let db = tx.db(); + + // Stores common to all versions. 
+ let common_stores = &[ + keys::ACCOUNT_DATA, + keys::PROFILES, + keys::DISPLAY_NAMES, + keys::ROOM_STATE, + keys::ROOM_INFOS, + keys::PRESENCE, + keys::ROOM_ACCOUNT_DATA, + keys::STRIPPED_ROOM_STATE, + keys::ROOM_USER_RECEIPTS, + keys::ROOM_EVENT_RECEIPTS, + keys::CUSTOM, + ]; + + for name in common_stores { + db.create_object_store(name).build()?; + } - if version < 4 { - for name in [old_keys::SYNC_TOKEN, old_keys::SESSION] { - db.create_object_store(name)?; + if version < 4 { + for name in [old_keys::SYNC_TOKEN, old_keys::SESSION] { + db.create_object_store(name).build()?; + } } - } - if version >= 4 { - db.create_object_store(keys::KV)?; - } - if version < 5 { - for name in [old_keys::MEMBERS, old_keys::STRIPPED_MEMBERS] { - db.create_object_store(name)?; + if version >= 4 { + db.create_object_store(keys::KV).build()?; } - } - if version < 6 { - for name in [ - old_keys::INVITED_USER_IDS, - old_keys::JOINED_USER_IDS, - old_keys::STRIPPED_INVITED_USER_IDS, - old_keys::STRIPPED_JOINED_USER_IDS, - ] { - db.create_object_store(name)?; + if version < 5 { + for name in [old_keys::MEMBERS, old_keys::STRIPPED_MEMBERS] { + db.create_object_store(name).build()?; + } } - } - if version >= 6 { - for name in [keys::USER_IDS, keys::STRIPPED_USER_IDS] { - db.create_object_store(name)?; + if version < 6 { + for name in [ + old_keys::INVITED_USER_IDS, + old_keys::JOINED_USER_IDS, + old_keys::STRIPPED_INVITED_USER_IDS, + old_keys::STRIPPED_JOINED_USER_IDS, + ] { + db.create_object_store(name).build()?; + } + } + if version >= 6 { + for name in [keys::USER_IDS, keys::STRIPPED_USER_IDS] { + db.create_object_store(name).build()?; + } + } + if version < 7 { + db.create_object_store(old_keys::STRIPPED_ROOM_INFOS).build()?; + } + if version < 11 { + db.create_object_store(old_keys::MEDIA).build()?; } - } - if version < 7 { - db.create_object_store(old_keys::STRIPPED_ROOM_INFOS)?; - } - if version < 11 { - db.create_object_store(old_keys::MEDIA)?; - } - Ok(()) - }, - )); - 
db_req.await.map_err(Into::into) + Ok(()) + }, + ) + .build()? + .await + .map_err(Into::into) } fn room_info_v1_json( @@ -996,14 +1024,13 @@ mod tests { // Create and populate db. { let db = create_fake_db(&name, 1).await?; - let tx = - db.transaction_on_one_with_mode(keys::CUSTOM, IdbTransactionMode::Readwrite)?; + let tx = db.transaction(keys::CUSTOM).with_mode(TransactionMode::Readwrite).build()?; let custom = tx.object_store(keys::CUSTOM)?; let jskey = JsValue::from_str( core::str::from_utf8(CUSTOM_DATA_KEY).map_err(StoreError::Codec)?, ); - custom.put_key_val(&jskey, &serialize_value(None, &CUSTOM_DATA)?)?; - tx.await.into_result()?; + custom.put(&serialize_value(None, &CUSTOM_DATA)?).with_key(jskey).await?; + tx.commit().await?; db.close(); } @@ -1031,14 +1058,13 @@ mod tests { // Create and populate db. { let db = create_fake_db(&name, 1).await?; - let tx = - db.transaction_on_one_with_mode(keys::CUSTOM, IdbTransactionMode::Readwrite)?; + let tx = db.transaction(keys::CUSTOM).with_mode(TransactionMode::Readwrite).build()?; let custom = tx.object_store(keys::CUSTOM)?; let jskey = JsValue::from_str( core::str::from_utf8(CUSTOM_DATA_KEY).map_err(StoreError::Codec)?, ); - custom.put_key_val(&jskey, &serialize_value(None, &CUSTOM_DATA)?)?; - tx.await.into_result()?; + custom.put(&serialize_value(None, &CUSTOM_DATA)?).with_key(jskey).await?; + tx.commit().await?; db.close(); } @@ -1069,14 +1095,13 @@ mod tests { // Create and populate db. 
{ let db = create_fake_db(&name, 1).await?; - let tx = - db.transaction_on_one_with_mode(keys::CUSTOM, IdbTransactionMode::Readwrite)?; + let tx = db.transaction(keys::CUSTOM).with_mode(TransactionMode::Readwrite).build()?; let custom = tx.object_store(keys::CUSTOM)?; let jskey = JsValue::from_str( core::str::from_utf8(CUSTOM_DATA_KEY).map_err(StoreError::Codec)?, ); - custom.put_key_val(&jskey, &serialize_value(None, &CUSTOM_DATA)?)?; - tx.await.into_result()?; + custom.put(&serialize_value(None, &CUSTOM_DATA)?).with_key(jskey).await?; + tx.commit().await?; db.close(); } @@ -1110,14 +1135,13 @@ mod tests { // Create and populate db. { let db = create_fake_db(&name, 1).await?; - let tx = - db.transaction_on_one_with_mode(keys::CUSTOM, IdbTransactionMode::Readwrite)?; + let tx = db.transaction(keys::CUSTOM).with_mode(TransactionMode::Readwrite).build()?; let custom = tx.object_store(keys::CUSTOM)?; let jskey = JsValue::from_str( core::str::from_utf8(CUSTOM_DATA_KEY).map_err(StoreError::Codec)?, ); - custom.put_key_val(&jskey, &serialize_value(None, &CUSTOM_DATA)?)?; - tx.await.into_result()?; + custom.put(&serialize_value(None, &CUSTOM_DATA)?).with_key(jskey).await?; + tx.commit().await?; db.close(); } @@ -1167,11 +1191,11 @@ mod tests { { let db = create_fake_db(&name, 2).await?; let tx = - db.transaction_on_one_with_mode(keys::ROOM_STATE, IdbTransactionMode::Readwrite)?; + db.transaction(keys::ROOM_STATE).with_mode(TransactionMode::Readwrite).build()?; let state = tx.object_store(keys::ROOM_STATE)?; let key: JsValue = (room_id, StateEventType::RoomTopic, "").as_encoded_string().into(); - state.put_key_val(&key, &serialize_value(None, &wrong_redacted_state_event)?)?; - tx.await.into_result()?; + state.put(&serialize_value(None, &wrong_redacted_state_event)?).with_key(key).await?; + tx.commit().await?; db.close(); } @@ -1201,28 +1225,36 @@ mod tests { // Populate DB with old table. 
{ let db = create_fake_db(&name, 3).await?; - let tx = db.transaction_on_multi_with_mode( - &[old_keys::SYNC_TOKEN, old_keys::SESSION], - IdbTransactionMode::Readwrite, - )?; + let tx = db + .transaction([old_keys::SYNC_TOKEN, old_keys::SESSION]) + .with_mode(TransactionMode::Readwrite) + .build()?; let sync_token_store = tx.object_store(old_keys::SYNC_TOKEN)?; - sync_token_store.put_key_val( - &JsValue::from_str(old_keys::SYNC_TOKEN), - &serialize_value(None, &sync_token)?, - )?; + sync_token_store + .put(&serialize_value(None, &sync_token)?) + .with_key(JsValue::from_str(old_keys::SYNC_TOKEN)) + .build()?; let session_store = tx.object_store(old_keys::SESSION)?; - session_store.put_key_val( - &encode_key(None, StateStoreDataKey::FILTER, (StateStoreDataKey::FILTER, filter_1)), - &serialize_value(None, &filter_1_id)?, - )?; - session_store.put_key_val( - &encode_key(None, StateStoreDataKey::FILTER, (StateStoreDataKey::FILTER, filter_2)), - &serialize_value(None, &filter_2_id)?, - )?; + session_store + .put(&serialize_value(None, &filter_1_id)?) + .with_key(encode_key( + None, + StateStoreDataKey::FILTER, + (StateStoreDataKey::FILTER, filter_1), + )) + .build()?; + session_store + .put(&serialize_value(None, &filter_2_id)?) + .with_key(encode_key( + None, + StateStoreDataKey::FILTER, + (StateStoreDataKey::FILTER, filter_2), + )) + .build()?; - tx.await.into_result()?; + tx.commit().await?; db.close(); } @@ -1274,42 +1306,46 @@ mod tests { // Populate DB with old table. 
{ let db = create_fake_db(&name, 4).await?; - let tx = db.transaction_on_multi_with_mode( - &[ + let tx = db + .transaction([ old_keys::MEMBERS, keys::ROOM_INFOS, old_keys::STRIPPED_MEMBERS, old_keys::STRIPPED_ROOM_INFOS, - ], - IdbTransactionMode::Readwrite, - )?; + ]) + .with_mode(TransactionMode::Readwrite) + .build()?; let members_store = tx.object_store(old_keys::MEMBERS)?; - members_store.put_key_val( - &encode_key(None, old_keys::MEMBERS, (room_id, user_id)), - &serialize_value(None, &member_event)?, - )?; + members_store + .put(&serialize_value(None, &member_event)?) + .with_key(encode_key(None, old_keys::MEMBERS, (room_id, user_id))) + .build()?; let room_infos_store = tx.object_store(keys::ROOM_INFOS)?; let room_info = room_info_v1_json(room_id, RoomState::Joined, None, None); - room_infos_store.put_key_val( - &encode_key(None, keys::ROOM_INFOS, room_id), - &serialize_value(None, &room_info)?, - )?; + room_infos_store + .put(&serialize_value(None, &room_info)?) + .with_key(encode_key(None, keys::ROOM_INFOS, room_id)) + .build()?; let stripped_members_store = tx.object_store(old_keys::STRIPPED_MEMBERS)?; - stripped_members_store.put_key_val( - &encode_key(None, old_keys::STRIPPED_MEMBERS, (stripped_room_id, stripped_user_id)), - &serialize_value(None, &stripped_member_event)?, - )?; + stripped_members_store + .put(&serialize_value(None, &stripped_member_event)?) + .with_key(encode_key( + None, + old_keys::STRIPPED_MEMBERS, + (stripped_room_id, stripped_user_id), + )) + .build()?; let stripped_room_infos_store = tx.object_store(old_keys::STRIPPED_ROOM_INFOS)?; let stripped_room_info = room_info_v1_json(stripped_room_id, RoomState::Invited, None, None); - stripped_room_infos_store.put_key_val( - &encode_key(None, old_keys::STRIPPED_ROOM_INFOS, stripped_room_id), - &serialize_value(None, &stripped_room_info)?, - )?; + stripped_room_infos_store + .put(&serialize_value(None, &stripped_room_info)?) 
+ .with_key(encode_key(None, old_keys::STRIPPED_ROOM_INFOS, stripped_room_id)) + .build()?; - tx.await.into_result()?; + tx.commit().await?; db.close(); } @@ -1352,8 +1388,8 @@ mod tests { // Populate DB with old table. { let db = create_fake_db(&name, 5).await?; - let tx = db.transaction_on_multi_with_mode( - &[ + let tx = db + .transaction([ keys::ROOM_STATE, keys::ROOM_INFOS, keys::STRIPPED_ROOM_STATE, @@ -1362,82 +1398,82 @@ mod tests { old_keys::JOINED_USER_IDS, old_keys::STRIPPED_INVITED_USER_IDS, old_keys::STRIPPED_JOINED_USER_IDS, - ], - IdbTransactionMode::Readwrite, - )?; + ]) + .with_mode(TransactionMode::Readwrite) + .build()?; let state_store = tx.object_store(keys::ROOM_STATE)?; - state_store.put_key_val( - &encode_key( + state_store + .put(&serialize_value(None, &invite_member_event)?) + .with_key(encode_key( None, keys::ROOM_STATE, (room_id, StateEventType::RoomMember, invite_user_id), - ), - &serialize_value(None, &invite_member_event)?, - )?; - state_store.put_key_val( - &encode_key( + )) + .build()?; + state_store + .put(&serialize_value(None, &ban_member_event)?) + .with_key(encode_key( None, keys::ROOM_STATE, (room_id, StateEventType::RoomMember, ban_user_id), - ), - &serialize_value(None, &ban_member_event)?, - )?; + )) + .build()?; let room_infos_store = tx.object_store(keys::ROOM_INFOS)?; let room_info = room_info_v1_json(room_id, RoomState::Joined, None, None); - room_infos_store.put_key_val( - &encode_key(None, keys::ROOM_INFOS, room_id), - &serialize_value(None, &room_info)?, - )?; + room_infos_store + .put(&serialize_value(None, &room_info)?) + .with_key(encode_key(None, keys::ROOM_INFOS, room_id)) + .build()?; let stripped_state_store = tx.object_store(keys::STRIPPED_ROOM_STATE)?; - stripped_state_store.put_key_val( - &encode_key( + stripped_state_store + .put(&serialize_value(None, &stripped_member_event)?) 
+ .with_key(encode_key( None, keys::STRIPPED_ROOM_STATE, (stripped_room_id, StateEventType::RoomMember, stripped_user_id), - ), - &serialize_value(None, &stripped_member_event)?, - )?; + )) + .build()?; let stripped_room_infos_store = tx.object_store(old_keys::STRIPPED_ROOM_INFOS)?; let stripped_room_info = room_info_v1_json(stripped_room_id, RoomState::Invited, None, None); - stripped_room_infos_store.put_key_val( - &encode_key(None, old_keys::STRIPPED_ROOM_INFOS, stripped_room_id), - &serialize_value(None, &stripped_room_info)?, - )?; + stripped_room_infos_store + .put(&serialize_value(None, &stripped_room_info)?) + .with_key(encode_key(None, old_keys::STRIPPED_ROOM_INFOS, stripped_room_id)) + .build()?; // Populate the old user IDs stores to check the data is not reused. let joined_user_id = user_id!("@joined_user:localhost"); - tx.object_store(old_keys::JOINED_USER_IDS)?.put_key_val( - &encode_key(None, old_keys::JOINED_USER_IDS, (room_id, joined_user_id)), - &serialize_value(None, &joined_user_id)?, - )?; + tx.object_store(old_keys::JOINED_USER_IDS)? + .put(&serialize_value(None, &joined_user_id)?) + .with_key(encode_key(None, old_keys::JOINED_USER_IDS, (room_id, joined_user_id))) + .build()?; let invited_user_id = user_id!("@invited_user:localhost"); - tx.object_store(old_keys::INVITED_USER_IDS)?.put_key_val( - &encode_key(None, old_keys::INVITED_USER_IDS, (room_id, invited_user_id)), - &serialize_value(None, &invited_user_id)?, - )?; + tx.object_store(old_keys::INVITED_USER_IDS)? + .put(&serialize_value(None, &invited_user_id)?) + .with_key(encode_key(None, old_keys::INVITED_USER_IDS, (room_id, invited_user_id))) + .build()?; let stripped_joined_user_id = user_id!("@stripped_joined_user:localhost"); - tx.object_store(old_keys::STRIPPED_JOINED_USER_IDS)?.put_key_val( - &encode_key( + tx.object_store(old_keys::STRIPPED_JOINED_USER_IDS)? + .put(&serialize_value(None, &stripped_joined_user_id)?) 
+ .with_key(encode_key( None, old_keys::STRIPPED_JOINED_USER_IDS, (room_id, stripped_joined_user_id), - ), - &serialize_value(None, &stripped_joined_user_id)?, - )?; + )) + .build()?; let stripped_invited_user_id = user_id!("@stripped_invited_user:localhost"); - tx.object_store(old_keys::STRIPPED_INVITED_USER_IDS)?.put_key_val( - &encode_key( + tx.object_store(old_keys::STRIPPED_INVITED_USER_IDS)? + .put(&serialize_value(None, &stripped_invited_user_id)?) + .with_key(encode_key( None, old_keys::STRIPPED_INVITED_USER_IDS, (room_id, stripped_invited_user_id), - ), - &serialize_value(None, &stripped_invited_user_id)?, - )?; + )) + .build()?; - tx.await.into_result()?; + tx.commit().await?; db.close(); } @@ -1484,27 +1520,27 @@ mod tests { // Populate DB with old table. { let db = create_fake_db(&name, 6).await?; - let tx = db.transaction_on_multi_with_mode( - &[keys::ROOM_INFOS, old_keys::STRIPPED_ROOM_INFOS], - IdbTransactionMode::Readwrite, - )?; + let tx = db + .transaction([keys::ROOM_INFOS, old_keys::STRIPPED_ROOM_INFOS]) + .with_mode(TransactionMode::Readwrite) + .build()?; let room_infos_store = tx.object_store(keys::ROOM_INFOS)?; let room_info = room_info_v1_json(room_id, RoomState::Joined, None, None); - room_infos_store.put_key_val( - &encode_key(None, keys::ROOM_INFOS, room_id), - &serialize_value(None, &room_info)?, - )?; + room_infos_store + .put(&serialize_value(None, &room_info)?) + .with_key(encode_key(None, keys::ROOM_INFOS, room_id)) + .build()?; let stripped_room_infos_store = tx.object_store(old_keys::STRIPPED_ROOM_INFOS)?; let stripped_room_info = room_info_v1_json(stripped_room_id, RoomState::Invited, None, None); - stripped_room_infos_store.put_key_val( - &encode_key(None, old_keys::STRIPPED_ROOM_INFOS, stripped_room_id), - &serialize_value(None, &stripped_room_info)?, - )?; + stripped_room_infos_store + .put(&serialize_value(None, &stripped_room_info)?) 
+ .with_key(encode_key(None, old_keys::STRIPPED_ROOM_INFOS, stripped_room_id)) + .build()?; - tx.await.into_result()?; + tx.commit().await?; db.close(); } @@ -1518,8 +1554,8 @@ mod tests { // Add a room in version 7 format of the state store. fn add_room_v7( - room_infos_store: &IdbObjectStore<'_>, - room_state_store: &IdbObjectStore<'_>, + room_infos_store: &ObjectStore<'_>, + room_state_store: &ObjectStore<'_>, room_id: &RoomId, name: Option<&str>, create_creator: Option, @@ -1528,10 +1564,10 @@ mod tests { let room_info_json = room_info_v1_json(room_id, RoomState::Joined, name, create_creator.as_deref()); - room_infos_store.put_key_val( - &encode_key(None, keys::ROOM_INFOS, room_id), - &serialize_value(None, &room_info_json)?, - )?; + room_infos_store + .put(&serialize_value(None, &room_info_json)?) + .with_key(encode_key(None, keys::ROOM_INFOS, room_id)) + .build()?; // Test with or without `m.room.create` event in the room state. let Some(create_sender) = create_sender else { @@ -1554,10 +1590,14 @@ mod tests { "unsigned": {}, }); - room_state_store.put_key_val( - &encode_key(None, keys::ROOM_STATE, (room_id, &StateEventType::RoomCreate, "")), - &serialize_value(None, &create_event)?, - )?; + room_state_store + .put(&serialize_value(None, &create_event)?) + .with_key(encode_key( + None, + keys::ROOM_STATE, + (room_id, &StateEventType::RoomCreate, ""), + )) + .build()?; Ok(()) } @@ -1584,10 +1624,10 @@ mod tests { // Create and populate db. 
{ let db = create_fake_db(&name, 6).await?; - let tx = db.transaction_on_multi_with_mode( - &[keys::ROOM_INFOS, keys::ROOM_STATE], - IdbTransactionMode::Readwrite, - )?; + let tx = db + .transaction([keys::ROOM_INFOS, keys::ROOM_STATE]) + .with_mode(TransactionMode::Readwrite) + .build()?; let room_infos_store = tx.object_store(keys::ROOM_INFOS)?; let room_state_store = tx.object_store(keys::ROOM_STATE)?; @@ -1610,7 +1650,7 @@ mod tests { Some(&room_c_create_sender), )?; - tx.await.into_result()?; + tx.commit().await?; db.close(); } diff --git a/crates/matrix-sdk-indexeddb/src/state_store/mod.rs b/crates/matrix-sdk-indexeddb/src/state_store/mod.rs index e0eaa2f9437..99107b9e6ed 100644 --- a/crates/matrix-sdk-indexeddb/src/state_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/state_store/mod.rs @@ -18,11 +18,13 @@ use std::{ sync::Arc, }; -use anyhow::anyhow; use async_trait::async_trait; use gloo_utils::format::JsValueSerdeExt; use growable_bloom_filter::GrowableBloom; -use indexed_db_futures::prelude::*; +use indexed_db_futures::{ + cursor::CursorDirection, database::Database, error::OpenDbError, prelude::*, + transaction::TransactionMode, KeyRange, +}; use matrix_sdk_base::{ deserialized_responses::{DisplayName, RawAnySyncOrStrippedState}, store::{ @@ -50,16 +52,15 @@ use ruma::{ CanonicalJsonObject, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedMxcUri, OwnedRoomId, OwnedTransactionId, OwnedUserId, RoomId, TransactionId, UserId, }; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde::{de::DeserializeOwned, ser::Error, Deserialize, Serialize}; use tracing::{debug, warn}; use wasm_bindgen::JsValue; -use web_sys::IdbKeyRange; mod migrations; pub use self::migrations::MigrationConflictStrategy; use self::migrations::{upgrade_inner_db, upgrade_meta_db}; -use crate::serializer::safe_encode::traits::SafeEncode; +use crate::{error::GenericError, serializer::safe_encode::traits::SafeEncode}; #[derive(Debug, thiserror::Error)] pub enum 
IndexeddbStateStoreError { @@ -78,6 +79,12 @@ pub enum IndexeddbStateStoreError { MigrationConflict { name: String, old_version: u32, new_version: u32 }, } +impl From for IndexeddbStateStoreError { + fn from(value: GenericError) -> Self { + Self::StoreError(value.into()) + } +} + impl From for IndexeddbStateStoreError { fn from(frm: web_sys::DomException) -> IndexeddbStateStoreError { IndexeddbStateStoreError::DomException { @@ -99,6 +106,51 @@ impl From for StoreError { } } +impl From for IndexeddbStateStoreError { + fn from(value: indexed_db_futures::error::DomException) -> Self { + web_sys::DomException::from(value).into() + } +} + +impl From for IndexeddbStateStoreError { + fn from(value: indexed_db_futures::error::SerialisationError) -> Self { + Self::Json(serde_json::Error::custom(value.to_string())) + } +} + +impl From for IndexeddbStateStoreError { + fn from(value: indexed_db_futures::error::UnexpectedDataError) -> Self { + IndexeddbStateStoreError::StoreError(StoreError::backend(value)) + } +} + +impl From for IndexeddbStateStoreError { + fn from(value: indexed_db_futures::error::JSError) -> Self { + GenericError::from(value.to_string()).into() + } +} + +impl From for IndexeddbStateStoreError { + fn from(value: indexed_db_futures::error::Error) -> Self { + use indexed_db_futures::error::Error; + match value { + Error::DomException(e) => e.into(), + Error::Serialisation(e) => e.into(), + Error::MissingData(e) => e.into(), + Error::Unknown(e) => e.into(), + } + } +} + +impl From for IndexeddbStateStoreError { + fn from(value: OpenDbError) -> Self { + match value { + OpenDbError::Base(error) => error.into(), + _ => GenericError::from(value.to_string()).into(), + } + } +} + mod keys { pub const INTERNAL_STATE: &str = "matrix-sdk-state"; pub const BACKUPS_META: &str = "backups"; @@ -200,7 +252,7 @@ fn encode_to_range( store_cipher: Option<&StoreCipher>, table_name: &str, key: T, -) -> Result +) -> KeyRange where T: SafeEncode, { @@ -208,7 +260,6 @@ where 
Some(cipher) => key.encode_to_range_secure(table_name, cipher), None => key.encode_to_range(), } - .map_err(|e| IndexeddbStateStoreError::StoreError(StoreError::Backend(anyhow!(e).into()))) } /// Builder for [`IndexeddbStateStore`]. @@ -266,8 +317,8 @@ impl IndexeddbStateStoreBuilder { pub struct IndexeddbStateStore { name: String, - pub(crate) inner: IdbDatabase, - pub(crate) meta: IdbDatabase, + pub(crate) inner: Database, + pub(crate) meta: Database, pub(crate) store_cipher: Option>, } @@ -300,22 +351,32 @@ impl IndexeddbStateStore { pub async fn has_backups(&self) -> Result { Ok(self .meta - .transaction_on_one_with_mode(keys::BACKUPS_META, IdbTransactionMode::Readonly)? + .transaction(keys::BACKUPS_META) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::BACKUPS_META)? - .count()? + .count() .await? > 0) } /// What's the database name of the latest backup< pub async fn latest_backup(&self) -> Result> { - Ok(self + if let Some(mut cursor) = self .meta - .transaction_on_one_with_mode(keys::BACKUPS_META, IdbTransactionMode::Readonly)? + .transaction(keys::BACKUPS_META) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::BACKUPS_META)? - .open_cursor_with_direction(IdbCursorDirection::Prev)? + .open_cursor() + .with_direction(CursorDirection::Prev) .await? - .and_then(|c| c.value().as_string())) + { + if let Some(record) = cursor.next_record::().await? { + return Ok(record.as_string()); + } + } + Ok(None) } /// Encrypt (if needs be) then JSON-serialize a value. 
@@ -335,7 +396,7 @@ impl IndexeddbStateStore { encode_key(self.store_cipher.as_deref(), table_name, key) } - fn encode_to_range(&self, table_name: &str, key: T) -> Result + fn encode_to_range(&self, table_name: &str, key: T) -> KeyRange where T: SafeEncode, { @@ -352,35 +413,30 @@ impl IndexeddbStateStore { ) -> Result> { let store_name = if stripped { keys::STRIPPED_USER_IDS } else { keys::USER_IDS }; - let tx = - self.inner.transaction_on_one_with_mode(store_name, IdbTransactionMode::Readonly)?; + let tx = self.inner.transaction(store_name).with_mode(TransactionMode::Readonly).build()?; let store = tx.object_store(store_name)?; - let range = self.encode_to_range(store_name, room_id)?; + let range = self.encode_to_range(store_name, room_id); let user_ids = if memberships.is_empty() { // It should be faster to just get all user IDs in this case. store - .get_all_with_key(&range)? + .get_all() + .with_query(&range) .await? - .iter() + .filter_map(Result::ok) .filter_map(|f| self.deserialize_value::(&f).ok().map(|m| m.user_id)) .collect::>() } else { let mut user_ids = Vec::new(); - let cursor = store.open_cursor_with_range(&range)?.await?; + let cursor = store.open_cursor().with_query(&range).await?; - if let Some(cursor) = cursor { - loop { - let value = cursor.value(); + if let Some(mut cursor) = cursor { + while let Some(value) = cursor.next_record().await? { let member = self.deserialize_value::(&value)?; if memberships.matches(&member.membership) { user_ids.push(member.user_id); } - - if !cursor.continue_cursor()?.await? { - break; - } } } @@ -392,9 +448,11 @@ impl IndexeddbStateStore { async fn get_custom_value_for_js(&self, jskey: &JsValue) -> Result>> { self.inner - .transaction_on_one_with_mode(keys::CUSTOM, IdbTransactionMode::Readonly)? + .transaction(keys::CUSTOM) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::CUSTOM)? - .get(jskey)? + .get(jskey) .await? 
.map(|f| self.deserialize_value(&f)) .transpose() @@ -552,9 +610,11 @@ impl_state_store!({ let value = self .inner - .transaction_on_one_with_mode(keys::KV, IdbTransactionMode::Readonly)? + .transaction(keys::KV) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::KV)? - .get(&encoded_key)? + .get(&encoded_key) .await?; let value = match key { @@ -646,14 +706,13 @@ impl_state_store!({ ), }; - let tx = - self.inner.transaction_on_one_with_mode(keys::KV, IdbTransactionMode::Readwrite)?; + let tx = self.inner.transaction(keys::KV).with_mode(TransactionMode::Readwrite).build()?; let obj = tx.object_store(keys::KV)?; - obj.put_key_val(&encoded_key, &serialized_value?)?; + obj.put(&serialized_value?).with_key(encoded_key).build()?; - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -661,13 +720,12 @@ impl_state_store!({ async fn remove_kv_data(&self, key: StateStoreDataKey<'_>) -> Result<()> { let encoded_key = self.encode_kv_data_key(key); - let tx = - self.inner.transaction_on_one_with_mode(keys::KV, IdbTransactionMode::Readwrite)?; + let tx = self.inner.transaction(keys::KV).with_mode(TransactionMode::Readwrite).build()?; let obj = tx.object_store(keys::KV)?; - obj.delete(&encoded_key)?; + obj.delete(&encoded_key).build()?; - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -721,14 +779,13 @@ impl_state_store!({ } let stores: Vec<&'static str> = stores.into_iter().collect(); - let tx = - self.inner.transaction_on_multi_with_mode(&stores, IdbTransactionMode::Readwrite)?; + let tx = self.inner.transaction(stores).with_mode(TransactionMode::Readwrite).build()?; if let Some(s) = &changes.sync_token { - tx.object_store(keys::KV)?.put_key_val( - &self.encode_kv_data_key(StateStoreDataKey::SyncToken), - &self.serialize_value(s)?, - )?; + tx.object_store(keys::KV)? + .put(&self.serialize_value(s)?) 
+ .with_key(self.encode_kv_data_key(StateStoreDataKey::SyncToken)) + .build()?; } if !changes.ambiguity_maps.is_empty() { @@ -745,7 +802,7 @@ impl_state_store!({ ), ); - store.put_key_val(&key, &self.serialize_value(&map)?)?; + store.put(&self.serialize_value(&map)?).with_key(key).build()?; } } } @@ -753,10 +810,10 @@ impl_state_store!({ if !changes.account_data.is_empty() { let store = tx.object_store(keys::ACCOUNT_DATA)?; for (event_type, event) in &changes.account_data { - store.put_key_val( - &self.encode_key(keys::ACCOUNT_DATA, event_type), - &self.serialize_value(&event)?, - )?; + store + .put(&self.serialize_value(&event)?) + .with_key(self.encode_key(keys::ACCOUNT_DATA, event_type)) + .build()?; } } @@ -765,7 +822,7 @@ impl_state_store!({ for (room, events) in &changes.room_account_data { for (event_type, event) in events { let key = self.encode_key(keys::ROOM_ACCOUNT_DATA, (room, event_type)); - store.put_key_val(&key, &self.serialize_value(&event)?)?; + store.put(&self.serialize_value(&event)?).with_key(key).build()?; } } } @@ -780,7 +837,7 @@ impl_state_store!({ for (room, user_ids) in &changes.profiles_to_delete { for user_id in user_ids { let key = self.encode_key(keys::PROFILES, (room, user_id)); - profiles.delete(&key)?; + profiles.delete(&key).build()?; } } @@ -790,8 +847,11 @@ impl_state_store!({ for (event_type, events) in event_types { for (state_key, raw_event) in events { let key = self.encode_key(keys::ROOM_STATE, (room, event_type, state_key)); - state.put_key_val(&key, &self.serialize_value(&raw_event)?)?; - stripped_state.delete(&key)?; + state + .put(&self.serialize_value(&raw_event)?) 
+ .with_key(key.clone()) + .build()?; + stripped_state.delete(&key).build()?; if *event_type == StateEventType::RoomMember { let event = @@ -808,20 +868,21 @@ impl_state_store!({ let key = (room, state_key); stripped_user_ids - .delete(&self.encode_key(keys::STRIPPED_USER_IDS, key))?; + .delete(&self.encode_key(keys::STRIPPED_USER_IDS, key)) + .build()?; - user_ids.put_key_val_owned( - self.encode_key(keys::USER_IDS, key), - &self.serialize_value(&RoomMember::from(&event))?, - )?; + user_ids + .put(&self.serialize_value(&RoomMember::from(&event))?) + .with_key(self.encode_key(keys::USER_IDS, key)) + .build()?; if let Some(profile) = profile_changes.and_then(|p| p.get(event.state_key())) { - profiles.put_key_val_owned( - self.encode_key(keys::PROFILES, key), - &self.serialize_value(&profile)?, - )?; + profiles + .put(&self.serialize_value(&profile)?) + .with_key(self.encode_key(keys::PROFILES, key)) + .build()?; } } } @@ -832,20 +893,20 @@ impl_state_store!({ if !changes.room_infos.is_empty() { let room_infos = tx.object_store(keys::ROOM_INFOS)?; for (room_id, room_info) in &changes.room_infos { - room_infos.put_key_val( - &self.encode_key(keys::ROOM_INFOS, room_id), - &self.serialize_value(&room_info)?, - )?; + room_infos + .put(&self.serialize_value(&room_info)?) + .with_key(self.encode_key(keys::ROOM_INFOS, room_id)) + .build()?; } } if !changes.presence.is_empty() { let store = tx.object_store(keys::PRESENCE)?; for (sender, event) in &changes.presence { - store.put_key_val( - &self.encode_key(keys::PRESENCE, sender), - &self.serialize_value(&event)?, - )?; + store + .put(&self.serialize_value(&event)?) 
+ .with_key(self.encode_key(keys::PRESENCE, sender)) + .build()?; } } @@ -858,7 +919,7 @@ impl_state_store!({ for (state_key, raw_event) in events { let key = self .encode_key(keys::STRIPPED_ROOM_STATE, (room, event_type, state_key)); - store.put_key_val(&key, &self.serialize_value(&raw_event)?)?; + store.put(&self.serialize_value(&raw_event)?).with_key(key).build()?; if *event_type == StateEventType::RoomMember { let event = match raw_event @@ -878,10 +939,10 @@ impl_state_store!({ let key = (room, state_key); - user_ids.put_key_val_owned( - self.encode_key(keys::STRIPPED_USER_IDS, key), - &self.serialize_value(&RoomMember::from(&event))?, - )?; + user_ids + .put(&self.serialize_value(&RoomMember::from(&event))?) + .with_key(self.encode_key(keys::STRIPPED_USER_IDS, key)) + .build()?; } } } @@ -908,7 +969,7 @@ impl_state_store!({ }; if let Some((old_event, _)) = - room_user_receipts.get(&key)?.await?.and_then(|f| { + room_user_receipts.get(&key).await?.and_then(|f| { self.deserialize_value::<(OwnedEventId, Receipt)>(&f).ok() }) { @@ -922,11 +983,13 @@ impl_state_store!({ (room, receipt_type, old_event, user_id), ), }; - room_event_receipts.delete(&key)?; + room_event_receipts.delete(&key).build()?; } room_user_receipts - .put_key_val(&key, &self.serialize_value(&(event_id, receipt))?)?; + .put(&self.serialize_value(&(event_id, receipt))?) + .with_key(key) + .build()?; // Add the receipt to the room event receipts let key = match receipt.thread.as_str() { @@ -940,7 +1003,9 @@ impl_state_store!({ ), }; room_event_receipts - .put_key_val(&key, &self.serialize_value(&(user_id, receipt))?)?; + .put(&self.serialize_value(&(user_id, receipt))?) + .with_key(key) + .build()?; } } } @@ -952,20 +1017,25 @@ impl_state_store!({ let room_info = tx.object_store(keys::ROOM_INFOS)?; for (room_id, redactions) in &changes.redactions { - let range = self.encode_to_range(keys::ROOM_STATE, room_id)?; - let Some(cursor) = state.open_cursor_with_range(&range)?.await? 
else { continue }; + let range = self.encode_to_range(keys::ROOM_STATE, room_id); + let Some(mut cursor) = state.open_cursor().with_query(&range).await? else { + continue; + }; let mut redaction_rules = None; - while let Some(key) = cursor.key() { - let raw_evt = - self.deserialize_value::>(&cursor.value())?; + while let Some(value) = cursor.next_record().await? { + let Some(key) = cursor.key::()? else { + break; + }; + + let raw_evt = self.deserialize_value::>(&value)?; if let Ok(Some(event_id)) = raw_evt.get_field::("event_id") { if let Some(redaction) = redactions.get(&event_id) { let redaction_rules = { if redaction_rules.is_none() { redaction_rules.replace(room_info - .get(&self.encode_key(keys::ROOM_INFOS, room_id))? + .get(&self.encode_key(keys::ROOM_INFOS, room_id)) .await? .and_then(|f| self.deserialize_value::(&f).ok()) .map(|info| info.room_version_rules_or_default()) @@ -984,24 +1054,23 @@ impl_state_store!({ Some(RedactedBecause::from_raw_event(redaction)?), ) .map_err(StoreError::Redaction)?; - state.put_key_val(&key, &self.serialize_value(&redacted)?)?; + state.put(&self.serialize_value(&redacted)?).with_key(key).build()?; } } - - // move forward. - cursor.advance(1)?.await?; } } } - tx.await.into_result().map_err(|e| e.into()) + tx.commit().await.map_err(|e| e.into()) } async fn get_presence_event(&self, user_id: &UserId) -> Result>> { self.inner - .transaction_on_one_with_mode(keys::PRESENCE, IdbTransactionMode::Readonly)? + .transaction(keys::PRESENCE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::PRESENCE)? - .get(&self.encode_key(keys::PRESENCE, user_id))? + .get(&self.encode_key(keys::PRESENCE, user_id)) .await? 
.map(|f| self.deserialize_value(&f)) .transpose() @@ -1015,16 +1084,15 @@ impl_state_store!({ return Ok(Vec::new()); } - let txn = self - .inner - .transaction_on_one_with_mode(keys::PRESENCE, IdbTransactionMode::Readonly)?; + let txn = + self.inner.transaction(keys::PRESENCE).with_mode(TransactionMode::Readonly).build()?; let store = txn.object_store(keys::PRESENCE)?; let mut events = Vec::with_capacity(user_ids.len()); for user_id in user_ids { if let Some(event) = store - .get(&self.encode_key(keys::PRESENCE, user_id))? + .get(&self.encode_key(keys::PRESENCE, user_id)) .await? .map(|f| self.deserialize_value(&f)) .transpose()? @@ -1055,14 +1123,17 @@ impl_state_store!({ event_type: StateEventType, ) -> Result> { let stripped_range = - self.encode_to_range(keys::STRIPPED_ROOM_STATE, (room_id, &event_type))?; + self.encode_to_range(keys::STRIPPED_ROOM_STATE, (room_id, &event_type)); let stripped_events = self .inner - .transaction_on_one_with_mode(keys::STRIPPED_ROOM_STATE, IdbTransactionMode::Readonly)? + .transaction(keys::STRIPPED_ROOM_STATE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::STRIPPED_ROOM_STATE)? - .get_all_with_key(&stripped_range)? + .get_all() + .with_query(&stripped_range) .await? - .iter() + .filter_map(Result::ok) .filter_map(|f| { self.deserialize_value(&f).ok().map(RawAnySyncOrStrippedState::Stripped) }) @@ -1072,14 +1143,17 @@ impl_state_store!({ return Ok(stripped_events); } - let range = self.encode_to_range(keys::ROOM_STATE, (room_id, event_type))?; + let range = self.encode_to_range(keys::ROOM_STATE, (room_id, event_type)); Ok(self .inner - .transaction_on_one_with_mode(keys::ROOM_STATE, IdbTransactionMode::Readonly)? + .transaction(keys::ROOM_STATE) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::ROOM_STATE)? - .get_all_with_key(&range)? + .get_all() + .with_query(&range) .await? 
- .iter() + .filter_map(Result::ok) .filter_map(|f| self.deserialize_value(&f).ok().map(RawAnySyncOrStrippedState::Sync)) .collect::>()) } @@ -1097,10 +1171,11 @@ impl_state_store!({ let mut events = Vec::with_capacity(state_keys.len()); { - let txn = self.inner.transaction_on_one_with_mode( - keys::STRIPPED_ROOM_STATE, - IdbTransactionMode::Readonly, - )?; + let txn = self + .inner + .transaction(keys::STRIPPED_ROOM_STATE) + .with_mode(TransactionMode::Readonly) + .build()?; let store = txn.object_store(keys::STRIPPED_ROOM_STATE)?; for state_key in state_keys { @@ -1109,7 +1184,7 @@ impl_state_store!({ .get(&self.encode_key( keys::STRIPPED_ROOM_STATE, (room_id, &event_type, state_key), - ))? + )) .await? .map(|f| self.deserialize_value(&f)) .transpose()? @@ -1125,12 +1200,14 @@ impl_state_store!({ let txn = self .inner - .transaction_on_one_with_mode(keys::ROOM_STATE, IdbTransactionMode::Readonly)?; + .transaction(keys::ROOM_STATE) + .with_mode(TransactionMode::Readonly) + .build()?; let store = txn.object_store(keys::ROOM_STATE)?; for state_key in state_keys { if let Some(event) = store - .get(&self.encode_key(keys::ROOM_STATE, (room_id, &event_type, state_key)))? + .get(&self.encode_key(keys::ROOM_STATE, (room_id, &event_type, state_key))) .await? .map(|f| self.deserialize_value(&f)) .transpose()? @@ -1148,9 +1225,11 @@ impl_state_store!({ user_id: &UserId, ) -> Result> { self.inner - .transaction_on_one_with_mode(keys::PROFILES, IdbTransactionMode::Readonly)? + .transaction(keys::PROFILES) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::PROFILES)? - .get(&self.encode_key(keys::PROFILES, (room_id, user_id)))? + .get(&self.encode_key(keys::PROFILES, (room_id, user_id))) .await? 
.map(|f| self.deserialize_value(&f)) .transpose() @@ -1165,15 +1244,14 @@ impl_state_store!({ return Ok(BTreeMap::new()); } - let txn = self - .inner - .transaction_on_one_with_mode(keys::PROFILES, IdbTransactionMode::Readonly)?; + let txn = + self.inner.transaction(keys::PROFILES).with_mode(TransactionMode::Readonly).build()?; let store = txn.object_store(keys::PROFILES)?; let mut profiles = BTreeMap::new(); for user_id in user_ids { if let Some(profile) = store - .get(&self.encode_key(keys::PROFILES, (room_id, user_id)))? + .get(&self.encode_key(keys::PROFILES, (room_id, user_id))) .await? .map(|f| self.deserialize_value(&f)) .transpose()? @@ -1188,20 +1266,21 @@ impl_state_store!({ async fn get_room_infos(&self, room_load_settings: &RoomLoadSettings) -> Result> { let transaction = self .inner - .transaction_on_one_with_mode(keys::ROOM_INFOS, IdbTransactionMode::Readonly)?; + .transaction(keys::ROOM_INFOS) + .with_mode(TransactionMode::Readonly) + .build()?; let object_store = transaction.object_store(keys::ROOM_INFOS)?; Ok(match room_load_settings { RoomLoadSettings::All => object_store - .get_all()? + .get_all() .await? - .iter() - .map(|room_info| self.deserialize_value::(&room_info)) + .map(|room_info| self.deserialize_value::(&room_info?)) .collect::>()?, RoomLoadSettings::One(room_id) => { - match object_store.get(&self.encode_key(keys::ROOM_INFOS, room_id))?.await? { + match object_store.get(&self.encode_key(keys::ROOM_INFOS, room_id)).await? { Some(room_info) => vec![self.deserialize_value::(&room_info)?], None => vec![], } @@ -1215,7 +1294,9 @@ impl_state_store!({ display_name: &DisplayName, ) -> Result> { self.inner - .transaction_on_one_with_mode(keys::DISPLAY_NAMES, IdbTransactionMode::Readonly)? + .transaction(keys::DISPLAY_NAMES) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::DISPLAY_NAMES)? 
.get(&self.encode_key( keys::DISPLAY_NAMES, @@ -1223,7 +1304,7 @@ impl_state_store!({ room_id, display_name.as_normalized_str().unwrap_or_else(|| display_name.as_raw_str()), ), - ))? + )) .await? .map(|f| self.deserialize_value::>(&f)) .unwrap_or_else(|| Ok(Default::default())) @@ -1242,7 +1323,9 @@ impl_state_store!({ let txn = self .inner - .transaction_on_one_with_mode(keys::DISPLAY_NAMES, IdbTransactionMode::Readonly)?; + .transaction(keys::DISPLAY_NAMES) + .with_mode(TransactionMode::Readonly) + .build()?; let store = txn.object_store(keys::DISPLAY_NAMES)?; for display_name in display_names { @@ -1257,7 +1340,7 @@ impl_state_store!({ .unwrap_or_else(|| display_name.as_raw_str()), ), ), - )? + ) .await? .map(|f| self.deserialize_value::>(&f)) .transpose()? @@ -1274,9 +1357,11 @@ impl_state_store!({ event_type: GlobalAccountDataEventType, ) -> Result>> { self.inner - .transaction_on_one_with_mode(keys::ACCOUNT_DATA, IdbTransactionMode::Readonly)? + .transaction(keys::ACCOUNT_DATA) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::ACCOUNT_DATA)? - .get(&self.encode_key(keys::ACCOUNT_DATA, event_type))? + .get(&self.encode_key(keys::ACCOUNT_DATA, event_type)) .await? .map(|f| self.deserialize_value(&f)) .transpose() @@ -1288,9 +1373,11 @@ impl_state_store!({ event_type: RoomAccountDataEventType, ) -> Result>> { self.inner - .transaction_on_one_with_mode(keys::ROOM_ACCOUNT_DATA, IdbTransactionMode::Readonly)? + .transaction(keys::ROOM_ACCOUNT_DATA) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::ROOM_ACCOUNT_DATA)? - .get(&self.encode_key(keys::ROOM_ACCOUNT_DATA, (room_id, event_type)))? + .get(&self.encode_key(keys::ROOM_ACCOUNT_DATA, (room_id, event_type))) .await? 
.map(|f| self.deserialize_value(&f)) .transpose() @@ -1309,9 +1396,11 @@ impl_state_store!({ None => self.encode_key(keys::ROOM_USER_RECEIPTS, (room_id, receipt_type, user_id)), }; self.inner - .transaction_on_one_with_mode(keys::ROOM_USER_RECEIPTS, IdbTransactionMode::Readonly)? + .transaction(keys::ROOM_USER_RECEIPTS) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::ROOM_USER_RECEIPTS)? - .get(&key)? + .get(&key) .await? .map(|f| self.deserialize_value(&f)) .transpose() @@ -1332,17 +1421,19 @@ impl_state_store!({ None => { self.encode_to_range(keys::ROOM_EVENT_RECEIPTS, (room_id, receipt_type, event_id)) } - }?; - let tx = self.inner.transaction_on_one_with_mode( - keys::ROOM_EVENT_RECEIPTS, - IdbTransactionMode::Readonly, - )?; + }; + let tx = self + .inner + .transaction(keys::ROOM_EVENT_RECEIPTS) + .with_mode(TransactionMode::Readonly) + .build()?; let store = tx.object_store(keys::ROOM_EVENT_RECEIPTS)?; Ok(store - .get_all_with_key(&range)? + .get_all() + .with_query(&range) .await? - .iter() + .filter_map(Result::ok) .filter_map(|f| self.deserialize_value(&f).ok()) .collect::>()) } @@ -1358,11 +1449,14 @@ impl_state_store!({ let prev = self.get_custom_value_for_js(&jskey).await?; let tx = - self.inner.transaction_on_one_with_mode(keys::CUSTOM, IdbTransactionMode::Readwrite)?; + self.inner.transaction(keys::CUSTOM).with_mode(TransactionMode::Readwrite).build()?; - tx.object_store(keys::CUSTOM)?.put_key_val(&jskey, &self.serialize_value(&value)?)?; + tx.object_store(keys::CUSTOM)? + .put(&self.serialize_value(&value)?) 
+ .with_key(jskey) + .build()?; - tx.await.into_result().map_err(IndexeddbStateStoreError::from)?; + tx.commit().await.map_err(IndexeddbStateStoreError::from)?; Ok(prev) } @@ -1372,11 +1466,11 @@ impl_state_store!({ let prev = self.get_custom_value_for_js(&jskey).await?; let tx = - self.inner.transaction_on_one_with_mode(keys::CUSTOM, IdbTransactionMode::Readwrite)?; + self.inner.transaction(keys::CUSTOM).with_mode(TransactionMode::Readwrite).build()?; - tx.object_store(keys::CUSTOM)?.delete(&jskey)?; + tx.object_store(keys::CUSTOM)?.delete(&jskey).build()?; - tx.await.into_result().map_err(IndexeddbStateStoreError::from)?; + tx.commit().await.map_err(IndexeddbStateStoreError::from)?; Ok(prev) } @@ -1406,23 +1500,22 @@ impl_state_store!({ v }; - let tx = self - .inner - .transaction_on_multi_with_mode(&all_stores, IdbTransactionMode::Readwrite)?; + let tx = + self.inner.transaction(all_stores).with_mode(TransactionMode::Readwrite).build()?; for store_name in direct_stores { - tx.object_store(store_name)?.delete(&self.encode_key(store_name, room_id))?; + tx.object_store(store_name)?.delete(&self.encode_key(store_name, room_id)).build()?; } for store_name in prefixed_stores { let store = tx.object_store(store_name)?; - let range = self.encode_to_range(store_name, room_id)?; - for key in store.get_all_keys_with_key(&range)?.await?.iter() { - store.delete(&key)?; + let range = self.encode_to_range(store_name, room_id); + for key in store.get_all_keys::().with_query(&range).await? { + store.delete(&key?).build()?; } } - tx.await.into_result().map_err(|e| e.into()) + tx.commit().await.map_err(|e| e.into()) } async fn get_user_ids( @@ -1449,7 +1542,9 @@ impl_state_store!({ let tx = self .inner - .transaction_on_one_with_mode(keys::ROOM_SEND_QUEUE, IdbTransactionMode::Readwrite)?; + .transaction(keys::ROOM_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; @@ -1457,7 +1552,7 @@ impl_state_store!({ // ids. 
// Reload the previous vector for this room, or create an empty one. - let prev = obj.get(&encoded_key)?.await?; + let prev = obj.get(&encoded_key).await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), @@ -1477,9 +1572,9 @@ impl_state_store!({ }); // Save the new vector into db. - obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -1494,7 +1589,9 @@ impl_state_store!({ let tx = self .inner - .transaction_on_one_with_mode(keys::ROOM_SEND_QUEUE, IdbTransactionMode::Readwrite)?; + .transaction(keys::ROOM_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; @@ -1502,7 +1599,7 @@ impl_state_store!({ // ids. // Reload the previous vector for this room, or create an empty one. - let prev = obj.get(&encoded_key)?.await?; + let prev = obj.get(&encoded_key).await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), @@ -1519,8 +1616,8 @@ impl_state_store!({ entry.event = None; // Save the new vector into db. - obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; - tx.await.into_result()?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; + tx.commit().await?; Ok(true) } else { @@ -1535,10 +1632,11 @@ impl_state_store!({ ) -> Result { let encoded_key = self.encode_key(keys::ROOM_SEND_QUEUE, room_id); - let tx = self.inner.transaction_on_multi_with_mode( - &[keys::ROOM_SEND_QUEUE, keys::DEPENDENT_SEND_QUEUE], - IdbTransactionMode::Readwrite, - )?; + let tx = self + .inner + .transaction([keys::ROOM_SEND_QUEUE, keys::DEPENDENT_SEND_QUEUE]) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; @@ -1546,18 +1644,18 @@ impl_state_store!({ // ids. // Reload the previous vector for this room. - if let Some(val) = obj.get(&encoded_key)?.await? 
{ + if let Some(val) = obj.get(&encoded_key).await? { let mut prev = self.deserialize_value::>(&val)?; if let Some(pos) = prev.iter().position(|item| item.transaction_id == transaction_id) { prev.remove(pos); if prev.is_empty() { - obj.delete(&encoded_key)?; + obj.delete(&encoded_key).build()?; } else { - obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; } - tx.await.into_result()?; + tx.commit().await?; return Ok(true); } } @@ -1572,9 +1670,11 @@ impl_state_store!({ // ids. let prev = self .inner - .transaction_on_one_with_mode(keys::ROOM_SEND_QUEUE, IdbTransactionMode::Readwrite)? + .transaction(keys::ROOM_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()? .object_store(keys::ROOM_SEND_QUEUE)? - .get(&encoded_key)? + .get(&encoded_key) .await?; let mut prev = prev.map_or_else( @@ -1598,22 +1698,24 @@ impl_state_store!({ let tx = self .inner - .transaction_on_one_with_mode(keys::ROOM_SEND_QUEUE, IdbTransactionMode::Readwrite)?; + .transaction(keys::ROOM_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; - if let Some(val) = obj.get(&encoded_key)?.await? { + if let Some(val) = obj.get(&encoded_key).await? 
{ let mut prev = self.deserialize_value::>(&val)?; if let Some(request) = prev.iter_mut().find(|item| item.transaction_id == transaction_id) { request.is_wedged = None; request.error = error; - obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; } } - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -1621,15 +1723,16 @@ impl_state_store!({ async fn load_rooms_with_unsent_requests(&self) -> Result> { let tx = self .inner - .transaction_on_one_with_mode(keys::ROOM_SEND_QUEUE, IdbTransactionMode::Readwrite)?; + .transaction(keys::ROOM_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; let all_entries = obj - .get_all()? + .get_all() .await? - .into_iter() - .map(|item| self.deserialize_value::>(&item)) + .map(|item| self.deserialize_value::>(&item?)) .collect::>, _>>()? .into_iter() .flat_map(|vec| vec.into_iter().map(|item| item.room_id)) @@ -1648,16 +1751,17 @@ impl_state_store!({ ) -> Result<()> { let encoded_key = self.encode_key(keys::DEPENDENT_SEND_QUEUE, room_id); - let tx = self.inner.transaction_on_one_with_mode( - keys::DEPENDENT_SEND_QUEUE, - IdbTransactionMode::Readwrite, - )?; + let tx = self + .inner + .transaction(keys::DEPENDENT_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::DEPENDENT_SEND_QUEUE)?; // We store an encoded vector of the dependent requests. // Reload the previous vector for this room, or create an empty one. - let prev = obj.get(&encoded_key)?.await?; + let prev = obj.get(&encoded_key).await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), @@ -1674,9 +1778,9 @@ impl_state_store!({ }); // Save the new vector into db. 
- obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -1689,16 +1793,17 @@ impl_state_store!({ ) -> Result { let encoded_key = self.encode_key(keys::DEPENDENT_SEND_QUEUE, room_id); - let tx = self.inner.transaction_on_one_with_mode( - keys::DEPENDENT_SEND_QUEUE, - IdbTransactionMode::Readwrite, - )?; + let tx = self + .inner + .transaction(keys::DEPENDENT_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::DEPENDENT_SEND_QUEUE)?; // We store an encoded vector of the dependent requests. // Reload the previous vector for this room, or create an empty one. - let prev = obj.get(&encoded_key)?.await?; + let prev = obj.get(&encoded_key).await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), @@ -1716,8 +1821,8 @@ impl_state_store!({ } if found { - obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; - tx.await.into_result()?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; + tx.commit().await?; } Ok(found) @@ -1731,16 +1836,17 @@ impl_state_store!({ ) -> Result { let encoded_key = self.encode_key(keys::DEPENDENT_SEND_QUEUE, room_id); - let tx = self.inner.transaction_on_one_with_mode( - keys::DEPENDENT_SEND_QUEUE, - IdbTransactionMode::Readwrite, - )?; + let tx = self + .inner + .transaction(keys::DEPENDENT_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::DEPENDENT_SEND_QUEUE)?; // We store an encoded vector of the dependent requests. // Reload the previous vector for this room, or create an empty one. 
- let prev = obj.get(&encoded_key)?.await?; + let prev = obj.get(&encoded_key).await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), @@ -1755,8 +1861,8 @@ impl_state_store!({ } if num_updated > 0 { - obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; - tx.await.into_result()?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; + tx.commit().await?; } Ok(num_updated) @@ -1769,27 +1875,28 @@ impl_state_store!({ ) -> Result { let encoded_key = self.encode_key(keys::DEPENDENT_SEND_QUEUE, room_id); - let tx = self.inner.transaction_on_one_with_mode( - keys::DEPENDENT_SEND_QUEUE, - IdbTransactionMode::Readwrite, - )?; + let tx = self + .inner + .transaction(keys::DEPENDENT_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::DEPENDENT_SEND_QUEUE)?; // We store an encoded vector of the dependent requests. // Reload the previous vector for this room. - if let Some(val) = obj.get(&encoded_key)?.await? { + if let Some(val) = obj.get(&encoded_key).await? { let mut prev = self.deserialize_value::>(&val)?; if let Some(pos) = prev.iter().position(|item| item.own_transaction_id == *txn_id) { prev.remove(pos); if prev.is_empty() { - obj.delete(&encoded_key)?; + obj.delete(&encoded_key).build()?; } else { - obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; + obj.put(&self.serialize_value(&prev)?).with_key(encoded_key).build()?; } - tx.await.into_result()?; + tx.commit().await?; return Ok(true); } } @@ -1806,12 +1913,11 @@ impl_state_store!({ // We store an encoded vector of the dependent requests. let prev = self .inner - .transaction_on_one_with_mode( - keys::DEPENDENT_SEND_QUEUE, - IdbTransactionMode::Readwrite, - )? + .transaction(keys::DEPENDENT_SEND_QUEUE) + .with_mode(TransactionMode::Readwrite) + .build()? .object_store(keys::DEPENDENT_SEND_QUEUE)? - .get(&encoded_key)? 
+ .get(&encoded_key) .await?; prev.map_or_else( @@ -1828,16 +1934,17 @@ impl_state_store!({ ) -> Result<()> { let encoded_key = self.encode_key(keys::THREAD_SUBSCRIPTIONS, (room, thread_id)); - let tx = self.inner.transaction_on_one_with_mode( - keys::THREAD_SUBSCRIPTIONS, - IdbTransactionMode::Readwrite, - )?; + let tx = self + .inner + .transaction(keys::THREAD_SUBSCRIPTIONS) + .with_mode(TransactionMode::Readwrite) + .build()?; let obj = tx.object_store(keys::THREAD_SUBSCRIPTIONS)?; let mut new = PersistedThreadSubscription::from(subscription); // See if there's a previous subscription. - if let Some(previous_value) = obj.get(&encoded_key)?.await? { + if let Some(previous_value) = obj.get(&encoded_key).await? { let previous: PersistedThreadSubscription = self.deserialize_value(&previous_value)?; // If the previous status is the same as the new one, don't do anything. @@ -1850,9 +1957,9 @@ impl_state_store!({ } let serialized_value = self.serialize_value(&new); - obj.put_key_val(&encoded_key, &serialized_value?)?; + obj.put(&serialized_value?).with_key(encoded_key).build()?; - tx.await.into_result()?; + tx.commit().await?; Ok(()) } @@ -1866,9 +1973,11 @@ impl_state_store!({ let js_value = self .inner - .transaction_on_one_with_mode(keys::THREAD_SUBSCRIPTIONS, IdbTransactionMode::Readonly)? + .transaction(keys::THREAD_SUBSCRIPTIONS) + .with_mode(TransactionMode::Readonly) + .build()? .object_store(keys::THREAD_SUBSCRIPTIONS)? - .get(&encoded_key)? + .get(&encoded_key) .await?; let Some(js_value) = js_value else { @@ -1893,14 +2002,13 @@ impl_state_store!({ async fn remove_thread_subscription(&self, room: &RoomId, thread_id: &EventId) -> Result<()> { let encoded_key = self.encode_key(keys::THREAD_SUBSCRIPTIONS, (room, thread_id)); - self.inner - .transaction_on_one_with_mode( - keys::THREAD_SUBSCRIPTIONS, - IdbTransactionMode::Readwrite, - )? - .object_store(keys::THREAD_SUBSCRIPTIONS)? - .delete(&encoded_key)? 
- .await?; + let transaction = self + .inner + .transaction(keys::THREAD_SUBSCRIPTIONS) + .with_mode(TransactionMode::Readwrite) + .build()?; + transaction.object_store(keys::THREAD_SUBSCRIPTIONS)?.delete(&encoded_key).await?; + transaction.commit().await?; Ok(()) } diff --git a/crates/matrix-sdk-indexeddb/src/transaction/mod.rs b/crates/matrix-sdk-indexeddb/src/transaction/mod.rs index bca6f4577ef..4a26385476d 100644 --- a/crates/matrix-sdk-indexeddb/src/transaction/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/transaction/mod.rs @@ -18,16 +18,19 @@ // clean up any dead code. #![allow(dead_code)] -use indexed_db_futures::{prelude::IdbTransaction, IdbQuerySource}; +use indexed_db_futures::{ + internals::SystemRepr, query_source::QuerySource, transaction as inner, BuildSerde, +}; use serde::{ de::{DeserializeOwned, Error}, Serialize, }; use thiserror::Error; +use wasm_bindgen::JsValue; use web_sys::IdbCursorDirection; use crate::{ - error::AsyncErrorDeps, + error::{AsyncErrorDeps, GenericError}, serializer::{Indexed, IndexedKey, IndexedKeyRange, IndexedTypeSerializer}, }; @@ -41,6 +44,8 @@ pub enum TransactionError { ItemIsNotUnique, #[error("item not found")] ItemNotFound, + #[error("backend: {0}")] + Backend(Box), } impl From for TransactionError { @@ -55,16 +60,40 @@ impl From for TransactionError { } } +impl From for TransactionError { + fn from(e: indexed_db_futures::error::SerialisationError) -> Self { + Self::Serialization(Box::new(serde_json::Error::custom(e.to_string()))) + } +} + +impl From for TransactionError { + fn from(value: indexed_db_futures::error::JSError) -> Self { + Self::Backend(Box::new(GenericError::from(value.to_string()))) + } +} + +impl From for TransactionError { + fn from(value: indexed_db_futures::error::Error) -> Self { + use indexed_db_futures::error::Error; + match value { + Error::DomException(e) => e.into_sys().into(), + Error::Serialisation(e) => e.into(), + Error::MissingData(e) => Self::Backend(Box::new(e)), + Error::Unknown(e) 
=> e.into(), + } + } +} + /// Represents an IndexedDB transaction, but provides a convenient interface for /// performing operations on types that implement [`Indexed`] and related /// traits. pub struct Transaction<'a> { - transaction: IdbTransaction<'a>, + transaction: inner::Transaction<'a>, serializer: &'a IndexedTypeSerializer, } impl<'a> Transaction<'a> { - pub fn new(transaction: IdbTransaction<'a>, serializer: &'a IndexedTypeSerializer) -> Self { + pub fn new(transaction: inner::Transaction<'a>, serializer: &'a IndexedTypeSerializer) -> Self { Self { transaction, serializer } } @@ -75,13 +104,13 @@ impl<'a> Transaction<'a> { } /// Returns the underlying IndexedDB transaction. - pub fn into_inner(self) -> IdbTransaction<'a> { + pub fn into_inner(self) -> inner::Transaction<'a> { self.transaction } /// Commit all operations tracked in this transaction to IndexedDB. pub async fn commit(self) -> Result<(), TransactionError> { - self.transaction.await.into_result().map_err(Into::into) + self.transaction.commit().await.map_err(Into::into) } /// Query IndexedDB for items that match the given key range @@ -95,18 +124,16 @@ impl<'a> Transaction<'a> { T::Error: AsyncErrorDeps, K: IndexedKey + Serialize, { - let range = self.serializer.encode_key_range::(range)?; + let range = self.serializer.encode_key_range::(range); let object_store = self.transaction.object_store(T::OBJECT_STORE)?; let array = if let Some(index) = K::INDEX { - object_store.index(index)?.get_all_with_key(&range)?.await? + object_store.index(index)?.get_all().with_query(range).serde()?.await? } else { - object_store.get_all_with_key(&range)?.await? + object_store.get_all().with_query(range).serde()?.await? 
}; - let mut items = Vec::with_capacity(array.length() as usize); + let mut items = Vec::with_capacity(array.len()); for value in array { - let item = self - .serializer - .deserialize(value) + let item = T::from_indexed(value?, self.serializer.inner()) .map_err(|e| TransactionError::Serialization(Box::new(e)))?; items.push(item); } @@ -174,12 +201,12 @@ impl<'a> Transaction<'a> { T::Error: AsyncErrorDeps, K: IndexedKey + Serialize, { - let range = self.serializer.encode_key_range::(range)?; + let range = self.serializer.encode_key_range::(range); let object_store = self.transaction.object_store(T::OBJECT_STORE)?; let count = if let Some(index) = K::INDEX { - object_store.index(index)?.count_with_key(&range)?.await? + object_store.index(index)?.count().with_query(range).serde()?.await? } else { - object_store.count_with_key(&range)?.await? + object_store.count().with_query(range).serde()?.await? }; Ok(count as usize) } @@ -211,25 +238,30 @@ impl<'a> Transaction<'a> { T::Error: AsyncErrorDeps, K: IndexedKey + Serialize, { - let range = self.serializer.encode_key_range::(range)?; + let range = self.serializer.encode_key_range::(range); let direction = IdbCursorDirection::Prev; let object_store = self.transaction.object_store(T::OBJECT_STORE)?; if let Some(index) = K::INDEX { - object_store - .index(index)? - .open_cursor_with_range_and_direction(&range, direction)? - .await? - .map(|cursor| self.serializer.deserialize(cursor.value())) - .transpose() - .map_err(|e| TransactionError::Serialization(Box::new(e))) - } else { - object_store - .open_cursor_with_range_and_direction(&range, direction)? - .await? - .map(|cursor| self.serializer.deserialize(cursor.value())) - .transpose() - .map_err(|e| TransactionError::Serialization(Box::new(e))) + let index = object_store.index(index)?; + if let Some(mut cursor) = + index.open_cursor().with_query(range).with_direction(direction).serde()?.await? + { + if let Some(record) = cursor.next_record_ser().await? 
{ + return T::from_indexed(record, self.serializer.inner()) + .map(Some) + .map_err(|e| TransactionError::Serialization(Box::new(e))); + } + } + } else if let Some(mut cursor) = + object_store.open_cursor().with_query(range).with_direction(direction).serde()?.await? + { + if let Some(record) = cursor.next_record_ser().await? { + return T::from_indexed(record, self.serializer.inner()) + .map(Some) + .map_err(|e| TransactionError::Serialization(Box::new(e))); + } } + Ok(None) } /// Adds an item to the corresponding IndexedDB object @@ -243,11 +275,11 @@ impl<'a> Transaction<'a> { { self.transaction .object_store(T::OBJECT_STORE)? - .add_val_owned( + .add( self.serializer .serialize(item) .map_err(|e| TransactionError::Serialization(Box::new(e)))?, - )? + ) .await .map_err(Into::into) } @@ -263,11 +295,11 @@ impl<'a> Transaction<'a> { { self.transaction .object_store(T::OBJECT_STORE)? - .put_val_owned( + .put( self.serializer .serialize(item) .map_err(|e| TransactionError::Serialization(Box::new(e)))?, - )? + ) .await .map_err(Into::into) } @@ -291,7 +323,7 @@ impl<'a> Transaction<'a> { .serialize_if(item, f) .map_err(|e| TransactionError::Serialization(Box::new(e)))?; if let Some(value) = option { - self.transaction.object_store(T::OBJECT_STORE)?.put_val_owned(value)?.await?; + self.transaction.object_store(T::OBJECT_STORE)?.put(value).await?; } Ok(()) } @@ -305,18 +337,20 @@ impl<'a> Transaction<'a> { T: Indexed, K: IndexedKey + Serialize, { - let range = self.serializer.encode_key_range::(range)?; + let range = self.serializer.encode_key_range::(range); let object_store = self.transaction.object_store(T::OBJECT_STORE)?; if let Some(index) = K::INDEX { let index = object_store.index(index)?; - if let Some(cursor) = index.open_cursor_with_range(&range)?.await? { - while cursor.key().is_some() { - cursor.delete()?.await?; - cursor.continue_cursor()?.await?; + if let Some(mut cursor) = index.open_cursor().with_query(range).serde()?.await? 
{ + loop { + cursor.delete()?; + if cursor.next_record::().await?.is_none() { + break; + } } } } else { - object_store.delete_owned(&range)?.await?; + object_store.delete(range).serde()?.await?; } Ok(()) } From 1e446234efb69bac54fe96bdf2ae5b4b2eb743c7 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Fri, 26 Sep 2025 15:21:59 -0400 Subject: [PATCH 4/8] ci(wasm): run all wasm tests against chrome, in addition to node and firefox Signed-off-by: Michael Goldenberg --- xtask/src/ci.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xtask/src/ci.rs b/xtask/src/ci.rs index 727cb94781a..1769fe628bc 100644 --- a/xtask/src/ci.rs +++ b/xtask/src/ci.rs @@ -440,6 +440,10 @@ fn run_wasm_pack_tests(cmd: Option) -> Result<()> { .env(WASM_TIMEOUT_ENV_KEY, WASM_TIMEOUT_VALUE) .run()?; cmd!(sh, "wasm-pack test --firefox --headless --") + .args(arg_set.split_whitespace()) + .env(WASM_TIMEOUT_ENV_KEY, WASM_TIMEOUT_VALUE) + .run()?; + cmd!(sh, "wasm-pack test --chrome --headless --") .args(arg_set.split_whitespace()) .env(WASM_TIMEOUT_ENV_KEY, WASM_TIMEOUT_VALUE) .run() From d166502a3843bb76123d4bd98bce6af82b199545 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Sat, 27 Sep 2025 19:21:27 -0400 Subject: [PATCH 5/8] refactor(indexeddb): remove unused depedency Signed-off-by: Michael Goldenberg --- Cargo.lock | 1 - crates/matrix-sdk-indexeddb/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index be6d46ed8a2..42a00e5c608 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3473,7 +3473,6 @@ dependencies = [ name = "matrix-sdk-indexeddb" version = "0.14.0" dependencies = [ - "anyhow", "assert_matches", "assert_matches2", "async-trait", diff --git a/crates/matrix-sdk-indexeddb/Cargo.toml b/crates/matrix-sdk-indexeddb/Cargo.toml index 6e6b395df73..8c6d396890a 100644 --- a/crates/matrix-sdk-indexeddb/Cargo.toml +++ b/crates/matrix-sdk-indexeddb/Cargo.toml @@ -25,7 +25,6 @@ experimental-encrypted-state-events = [ ] [dependencies] 
-anyhow.workspace = true async-trait.workspace = true base64.workspace = true gloo-utils = { version = "0.2.0", features = ["serde"] } From 54f64542ae83974d9208baba382f27c0e30f1c32 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Wed, 1 Oct 2025 10:57:35 -0400 Subject: [PATCH 6/8] fixup! refactor(indexeddb): upgrade indexed_db_futures dependency --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 42a00e5c608..05cec8289f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2713,7 +2713,7 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexed_db_futures" version = "0.6.4" -source = "git+https://github.com/mgoldenberg/rust-indexed-db?rev=2ac8f0bc1c53f8d8654efcdf9e32b6d359457398#2ac8f0bc1c53f8d8654efcdf9e32b6d359457398" +source = "git+https://github.com/matrix-org/rust-indexed-db?rev=201caac274e38756c6e523ba7cf84ffb625b7c48#201caac274e38756c6e523ba7cf84ffb625b7c48" dependencies = [ "accessory", "cfg-if", @@ -2739,7 +2739,7 @@ dependencies = [ [[package]] name = "indexed_db_futures_macros_internal" version = "1.0.0" -source = "git+https://github.com/mgoldenberg/rust-indexed-db?rev=2ac8f0bc1c53f8d8654efcdf9e32b6d359457398#2ac8f0bc1c53f8d8654efcdf9e32b6d359457398" +source = "git+https://github.com/matrix-org/rust-indexed-db?rev=201caac274e38756c6e523ba7cf84ffb625b7c48#201caac274e38756c6e523ba7cf84ffb625b7c48" dependencies = [ "macroific", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 6c3369d7aa6..63bdac4f9bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -201,7 +201,7 @@ lto = false [patch.crates-io] async-compat = { git = "https://github.com/element-hq/async-compat", rev = "5a27c8b290f1f1dcfc0c4ec22c464e38528aa591" } const_panic = { git = "https://github.com/jplatte/const_panic", rev = "9024a4cb3eac45c1d2d980f17aaee287b17be498" } -indexed_db_futures = { git = "https://github.com/mgoldenberg/rust-indexed-db", rev = 
"2ac8f0bc1c53f8d8654efcdf9e32b6d359457398" } +indexed_db_futures = { git = "https://github.com/matrix-org/rust-indexed-db", rev = "201caac274e38756c6e523ba7cf84ffb625b7c48" } # Needed to fix rotation log issue on Android (https://github.com/tokio-rs/tracing/issues/2937) tracing = { git = "https://github.com/tokio-rs/tracing.git", rev = "20f5b3d8ba057ca9c4ae00ad30dda3dce8a71c05" } tracing-core = { git = "https://github.com/tokio-rs/tracing.git", rev = "20f5b3d8ba057ca9c4ae00ad30dda3dce8a71c05" } From e7c2a655f94a9a830c4081894e8c656858286961 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Wed, 1 Oct 2025 11:19:35 -0400 Subject: [PATCH 7/8] fixup! refactor(indexeddb): upgrade indexed_db_futures dependency --- .deny.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.deny.toml b/.deny.toml index 9426ea3cdee..4fcdc1a29a8 100644 --- a/.deny.toml +++ b/.deny.toml @@ -54,4 +54,6 @@ allow-git = [ # We can release vodozemac whenever we need but let's not block development # on releases. "https://github.com/matrix-org/vodozemac", + # A patch override for the bindings: https://github.com/Alorel/rust-indexed-db/pull/72 + "https://github.com/matrix-org/rust-indexed-db", ] From 9ad64d60c8b90b3fa122f61bdde596c00615beb0 Mon Sep 17 00:00:00 2001 From: Michael Goldenberg Date: Wed, 1 Oct 2025 11:44:30 -0400 Subject: [PATCH 8/8] fixup! refactor(indexeddb): upgrade indexed_db_futures dependency --- .deny.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/.deny.toml b/.deny.toml index 4fcdc1a29a8..cfa3b052e1f 100644 --- a/.deny.toml +++ b/.deny.toml @@ -11,6 +11,7 @@ version = 2 ignore = [ { id = "RUSTSEC-2024-0436", reason = "Unmaintained paste crate, not critical." }, { id = "RUSTSEC-2025-0056", reason = "Unmaintained adler crate, not a direct dependency" }, + { id = "RUSTSEC-2024-0388", reason = "Unmaintained derivative crate, not a direct dependency" }, ] [licenses]