@@ -717,9 +717,10 @@ mod private {
717717 /// [`EventLinkedChunk`] relies on a [`LinkedChunk`] to store all
718718 /// events. Only the last chunk will be loaded. It means the
719719 /// events are loaded from the most recent to the oldest. To
720- /// load more events, see [`Self::load_more_events_backwards `].
720+ /// load more events, see [`RoomPagination `].
721721 ///
722722 /// [`LinkedChunk`]: matrix_sdk_common::linked_chunk::LinkedChunk
723+ /// [`RoomPagination`]: super::RoomPagination
723724 pub async fn new (
724725 room_id : OwnedRoomId ,
725726 room_version_rules : RoomVersionRules ,
@@ -729,10 +730,11 @@ mod private {
729730 pagination_status : SharedObservable < RoomPaginationStatus > ,
730731 ) -> Result < Self , EventCacheError > {
731732 let store_guard = match store. lock ( ) . await ? {
732- //
733+ // Lock is clean: all good!
733734 EventCacheStoreLockState :: Clean ( guard) => guard,
734735
735- //
736+ // Lock is dirty, not a problem, it's the first time we are creating this state, no
737+ // need to refresh.
736738 EventCacheStoreLockState :: Dirty ( guard) => {
737739 EventCacheStoreLockGuard :: clear_dirty ( & guard) ;
738740
@@ -812,6 +814,15 @@ mod private {
812814 } )
813815 }
814816
817+ /// Lock this [`RoomEventCacheStateLock`] with per-thread shared access.
818+ ///
819+ /// This method locks the per-thread lock over the state, and then locks
820+ /// the cross-process lock over the store. It returns an RAII guard
821+ /// which will drop the read access to the state and to the store when
822+ /// dropped.
823+ ///
824+ /// If the cross-process lock over the store is dirty (see
825+ /// [`EventCacheStoreLockState`]), the state is reset to the last chunk.
815826 pub async fn read ( & self ) -> Result < RoomEventCacheStateLockReadGuard < ' _ > , EventCacheError > {
816827 // Take a write-lock in case the lock is dirty and we need to reset the state.
817828 let state_guard = self . locked_state . write ( ) . await ;
@@ -843,6 +854,16 @@ mod private {
843854 }
844855 }
845856
857+ /// Lock this [`RoomEventCacheStateLock`] with exclusive per-thread
858+ /// write access.
859+ ///
860+ /// This method locks the per-thread lock over the state, and then locks
861+ /// the cross-process lock over the store. It returns an RAII guard
862+ /// which will drop the write access to the state and to the store when
863+ /// dropped.
864+ ///
865+ /// If the cross-process lock over the store is dirty (see
866+ /// [`EventCacheStoreLockState`]), the state is reset to the last chunk.
846867 pub async fn write (
847868 & self ,
848869 ) -> Result < RoomEventCacheStateLockWriteGuard < ' _ > , EventCacheError > {
@@ -868,13 +889,23 @@ mod private {
868889 }
869890 }
870891
892+ /// The read lock guard returned by [`RoomEventCacheStateLock::read`].
871893 pub struct RoomEventCacheStateLockReadGuard < ' a > {
894+ /// The per-thread read lock guard over the
895+ /// [`RoomEventCacheStateLockInner`].
872896 state : RwLockReadGuard < ' a , RoomEventCacheStateLockInner > ,
897+
898+ /// The cross-process lock guard over the store.
873899 store : EventCacheStoreLockGuard ,
874900 }
875901
902+ /// The write lock guard returned by [`RoomEventCacheStateLock::write`].
876903 pub struct RoomEventCacheStateLockWriteGuard < ' a > {
904+ /// The per-thread write lock guard over the
905+ /// [`RoomEventCacheStateLockInner`].
877906 state : RwLockWriteGuard < ' a , RoomEventCacheStateLockInner > ,
907+
908+ /// The cross-process lock guard over the store.
878909 store : EventCacheStoreLockGuard ,
879910 }
880911
@@ -947,6 +978,7 @@ mod private {
947978 & mut self . state . room_linked_chunk
948979 }
949980
981+ /// Get a reference to the `waited_for_initial_prev_token` atomic bool.
950982 pub fn waited_for_initial_prev_token ( & self ) -> & Arc < AtomicBool > {
951983 & self . state . waited_for_initial_prev_token
952984 }
0 commit comments