
Commit cf881ab

test(sdk): Add the test_reset_when_dirty test.
This patch adds the new `test_reset_when_dirty` test, which ensures the state is correctly reset when the cross-process lock over the store becomes dirty.
1 parent 6163013 commit cf881ab
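
To make the scenario concrete: two clients share one event cache store, so when one of them takes the cross-process lock and writes, the other finds the lock dirty on its next acquisition and has to throw away (shrink) its in-memory state. Below is a minimal, self-contained sketch of that mechanism only; `SharedLock`, `ProcessView`, and everything else in it are illustrative toy types, not matrix-sdk APIs (the SDK's real lock lives on the event cache store and works across processes).

// Toy illustration of a "dirty" cross-process lock. None of these types exist
// in the SDK; a generation counter stands in for the real lock.
use std::sync::{Arc, Mutex};

/// Stand-in for the cross-process lock: every acquisition bumps a generation,
/// so a holder can tell whether someone else took the lock in between.
#[derive(Default)]
struct SharedLock {
    generation: Mutex<u64>,
}

impl SharedLock {
    fn acquire(&self) -> u64 {
        let mut generation = self.generation.lock().unwrap();
        *generation += 1;
        *generation
    }
}

/// One process's in-memory view of the shared store.
struct ProcessView {
    lock: Arc<SharedLock>,
    last_generation: u64,
    /// Events currently loaded in memory; starts with only the last chunk.
    in_memory: Vec<&'static str>,
}

impl ProcessView {
    fn new(lock: Arc<SharedLock>) -> Self {
        Self { lock, last_generation: 0, in_memory: vec!["$ev_1"] }
    }

    /// Acquire the lock before touching the store. If the lock is "dirty"
    /// (someone else acquired it since we last did), reset the in-memory
    /// state back to the last chunk, which is what `test_reset_when_dirty`
    /// asserts below.
    fn touch_store(&mut self) {
        let generation = self.lock.acquire();
        if generation != self.last_generation + 1 {
            self.in_memory = vec!["$ev_1"]; // shrink to the last chunk
        }
        self.last_generation = generation;
    }

    /// Back-paginate: load one more (older) event from the store.
    fn paginate_backwards(&mut self) {
        self.touch_store();
        self.in_memory.insert(0, "$ev_0");
    }
}

fn main() {
    let lock = Arc::new(SharedLock::default());
    let mut p0 = ProcessView::new(lock.clone());
    let mut p1 = ProcessView::new(lock);

    // p0 paginates and loads the older event.
    p0.paginate_backwards();
    assert!(p0.in_memory.contains(&"$ev_0"));

    // p1 touches the store: the lock is now dirty for p0.
    p1.paginate_backwards();

    // On its next access, p0 detects the dirty lock and shrinks.
    p0.touch_store();
    assert!(!p0.in_memory.contains(&"$ev_0"));
}

The actual test below drives the same cycle through the SDK itself: two clients built with `MockClientBuilder` over a shared `MemoryStore`, with `run_backwards_once` as the pagination.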

File tree

1 file changed: +268 -3 lines changed
  • crates/matrix-sdk/src/event_cache/room


crates/matrix-sdk/src/event_cache/room/mod.rs

Lines changed: 268 additions & 3 deletions
@@ -2565,7 +2565,7 @@ mod tests {
 
 #[cfg(all(test, not(target_family = "wasm")))] // This uses the cross-process lock, so needs time support.
 mod timed_tests {
-    use std::sync::Arc;
+    use std::{ops::Not, sync::Arc};
 
     use assert_matches::assert_matches;
     use assert_matches2::assert_let;
@@ -2585,7 +2585,7 @@ mod timed_tests {
     };
     use matrix_sdk_test::{ALICE, BOB, async_test, event_factory::EventFactory};
     use ruma::{
-        OwnedUserId, event_id,
+        EventId, OwnedUserId, event_id,
         events::{AnySyncMessageLikeEvent, AnySyncTimelineEvent},
         room_id, user_id,
     };
@@ -2594,7 +2594,7 @@ mod timed_tests {
     use super::RoomEventCacheGenericUpdate;
     use crate::{
         assert_let_timeout,
-        event_cache::{RoomEventCacheUpdate, room::LoadMoreEventsBackwardsOutcome},
+        event_cache::{RoomEventCache, RoomEventCacheUpdate, room::LoadMoreEventsBackwardsOutcome},
         test_utils::client::MockClientBuilder,
     };
 
@@ -3757,4 +3757,269 @@ mod timed_tests {
             room_event_cache.rfind_map_event_in_memory_by(|_| None::<()>).await.unwrap().is_none()
         );
     }
+
+    #[async_test]
+    async fn test_reset_when_dirty() {
+        let user_id = user_id!("@mnt_io:matrix.org");
+        let room_id = room_id!("!raclette:patate.ch");
+
+        // The storage shared by the two clients.
+        let event_cache_store = MemoryStore::new();
+
+        // Client for the process 0.
+        let client_p0 = MockClientBuilder::new(None)
+            .on_builder(|builder| {
+                builder.store_config(
+                    StoreConfig::new("process #0".to_owned())
+                        .event_cache_store(event_cache_store.clone()),
+                )
+            })
+            .build()
+            .await;
+
+        // Client for the process 1.
+        let client_p1 = MockClientBuilder::new(None)
+            .on_builder(|builder| {
+                builder.store_config(
+                    StoreConfig::new("process #1".to_owned()).event_cache_store(event_cache_store),
+                )
+            })
+            .build()
+            .await;
+
+        let event_factory = EventFactory::new().room(room_id).sender(user_id);
+
+        let ev_id_0 = event_id!("$ev_0");
+        let ev_id_1 = event_id!("$ev_1");
+
+        let ev_0 = event_factory.text_msg("comté").event_id(ev_id_0).into_event();
+        let ev_1 = event_factory.text_msg("morbier").event_id(ev_id_1).into_event();
+
+        // Add events to the storage (shared by the two clients!).
+        client_p0
+            .event_cache_store()
+            .lock()
+            .await
+            .expect("[p0] Could not acquire the event cache lock")
+            .as_clean()
+            .expect("[p0] Could not acquire a clean event cache lock")
+            .handle_linked_chunk_updates(
+                LinkedChunkId::Room(room_id),
+                vec![
+                    Update::NewItemsChunk {
+                        previous: None,
+                        new: ChunkIdentifier::new(0),
+                        next: None,
+                    },
+                    Update::PushItems {
+                        at: Position::new(ChunkIdentifier::new(0), 0),
+                        items: vec![ev_0],
+                    },
+                    Update::NewItemsChunk {
+                        previous: Some(ChunkIdentifier::new(0)),
+                        new: ChunkIdentifier::new(1),
+                        next: None,
+                    },
+                    Update::PushItems {
+                        at: Position::new(ChunkIdentifier::new(1), 0),
+                        items: vec![ev_1],
+                    },
+                ],
+            )
+            .await
+            .unwrap();
+
+        // Subscribe the event caches, and create the room.
+        let (room_event_cache_p0, room_event_cache_p1) = {
+            let event_cache_p0 = client_p0.event_cache();
+            event_cache_p0.subscribe().unwrap();
+
+            let event_cache_p1 = client_p1.event_cache();
+            event_cache_p1.subscribe().unwrap();
+
+            client_p0.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined);
+            client_p1.base_client().get_or_create_room(room_id, matrix_sdk_base::RoomState::Joined);
+
+            let (room_event_cache_p0, _drop_handles) =
+                client_p0.get_room(room_id).unwrap().event_cache().await.unwrap();
+            let (room_event_cache_p1, _drop_handles) =
+                client_p1.get_room(room_id).unwrap().event_cache().await.unwrap();
+
+            (room_event_cache_p0, room_event_cache_p1)
+        };
+
+        // Okay. We are ready for the test!
+        //
+        // First off, let's check `room_event_cache_p0` has access to the first event
+        // loaded in-memory, then do a pagination, and see more events.
+        {
+            let room_event_cache = &room_event_cache_p0;
+
+            // `ev_id_1` must be loaded in memory.
+            assert!(event_loaded(room_event_cache, ev_id_1).await);
+
+            // `ev_id_0` must NOT be loaded in memory.
+            assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+            // Load one more event with a backpagination.
+            room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+
+            // `ev_id_0` must now be loaded in memory.
+            assert!(event_loaded(room_event_cache, ev_id_0).await);
+        }
+
+        // Second, let's check `room_event_cache_p1` has the same accesses.
+        {
+            let room_event_cache = &room_event_cache_p1;
+
+            // `ev_id_1` must be loaded in memory.
+            assert!(event_loaded(room_event_cache, ev_id_1).await);
+
+            // `ev_id_0` must NOT be loaded in memory.
+            assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+            // Load one more event with a backpagination.
+            room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+
+            // `ev_id_0` must now be loaded in memory.
+            assert!(event_loaded(room_event_cache, ev_id_0).await);
+        }
+
+        // Do this a couple times, for the fun.
+        for _ in 0..3 {
+            // Third, because `room_event_cache_p1` has locked the store, the lock
+            // is dirty for `room_event_cache_p0`, so it will shrink to its last
+            // chunk!
+            {
+                let room_event_cache = &room_event_cache_p0;
+
+                // `ev_id_1` must be loaded in memory, just like before.
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+
+                // However, `ev_id_0` must NOT be loaded in memory. It WAS loaded, but the
+                // state has shrunk to its last chunk.
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                // Load one more event with a backpagination.
+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+
+                // `ev_id_0` must now be loaded in memory.
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+
+            // Fourth, because `room_event_cache_p0` has locked the store again, the lock
+            // is dirty for `room_event_cache_p1` too, so it will shrink to its last
+            // chunk!
+            {
+                let room_event_cache = &room_event_cache_p1;
+
+                // `ev_id_1` must be loaded in memory, just like before.
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+
+                // However, `ev_id_0` must NOT be loaded in memory. It WAS loaded, but the
+                // state has shrunk to its last chunk.
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                // Load one more event with a backpagination.
+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+
+                // `ev_id_0` must now be loaded in memory.
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+        }
+
+        // Repeat that with an explicit read lock (so that we don't rely on
+        // `event_loaded` to trigger the dirty detection).
+        for _ in 0..3 {
+            {
+                let room_event_cache = &room_event_cache_p0;
+
+                let guard = room_event_cache.inner.state.read().await.unwrap();
+
+                // Guard is kept alive, to ensure we can have multiple read guards alive with a
+                // shared access.
+                // See `RoomEventCacheStateLock::read` to learn more.
+
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                // Ensure `guard` is alive up to this point (in case this test is refactored, I
+                // want to make this super explicit).
+                //
+                // We need to drop it before the pagination because the pagination needs to
+                // obtain a write lock.
+                drop(guard);

+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+
+            {
+                let room_event_cache = &room_event_cache_p1;
+
+                let guard = room_event_cache.inner.state.read().await.unwrap();
+
+                // Guard is kept alive, to ensure we can have multiple read guards alive with a
+                // shared access.
+
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                // Ensure `guard` is alive up to this point (in case this test is refactored, I
+                // want to make this super explicit).
+                //
+                // We need to drop it before the pagination because the pagination needs to
+                // obtain a write lock.
+                drop(guard);
+
+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+        }
+
+        // Repeat that with an explicit write lock.
+        for _ in 0..3 {
+            {
+                let room_event_cache = &room_event_cache_p0;
+
+                let guard = room_event_cache.inner.state.write().await.unwrap();
+
+                // Guard isn't kept alive, otherwise `event_loaded` couldn't run because it
+                // needs to obtain a read lock.
+                drop(guard);
+
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+
+            {
+                let room_event_cache = &room_event_cache_p1;
+
+                let guard = room_event_cache.inner.state.write().await.unwrap();
+
+                // Guard isn't kept alive, otherwise `event_loaded` couldn't run because it
+                // needs to obtain a read lock.
+                drop(guard);
+
+                assert!(event_loaded(room_event_cache, ev_id_1).await);
+                assert!(event_loaded(room_event_cache, ev_id_0).await.not());
+
+                room_event_cache.pagination().run_backwards_once(1).await.unwrap();
+                assert!(event_loaded(room_event_cache, ev_id_0).await);
+            }
+        }
+    }
+
+    async fn event_loaded(room_event_cache: &RoomEventCache, event_id: &EventId) -> bool {
+        room_event_cache
+            .rfind_map_event_in_memory_by(|event| {
+                (event.event_id().as_deref() == Some(event_id)).then_some(())
+            })
+            .await
+            .unwrap()
+            .is_some()
+    }
 }
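
The guard handling in the new test follows ordinary reader-writer lock semantics: several read guards may be alive at once (shared access), while the pagination mutates the state and therefore needs the exclusive write lock, so read guards must be dropped first. A minimal sketch of that discipline with `std::sync::RwLock`; this is not the SDK's `RoomEventCacheStateLock`, which is async and additionally performs the dirty detection.

// Generic reader-writer lock discipline, illustrated with std only.
use std::sync::RwLock;

fn main() {
    let state = RwLock::new(vec!["$ev_1"]);

    // Several read guards can be alive at the same time (shared access)...
    let read_a = state.read().unwrap();
    let read_b = state.read().unwrap();
    assert_eq!(read_a.len(), read_b.len());

    // ...but they must be dropped before a writer (for instance, a pagination
    // that mutates the state) can take the exclusive write lock.
    drop(read_a);
    drop(read_b);

    let mut write = state.write().unwrap();
    write.insert(0, "$ev_0");
}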
