@@ -32,32 +32,46 @@ use crate::fs::{FileSystem, Result};
 
 use crate::fs::ext2::Ext2;
 use crate::mem::paging::*;
+use crate::mem::AddressSpace;
 use crate::utils::sync::Mutex;
 
 use super::cache::{Cache, CacheArc, CacheItem, Cacheable};
 use super::devfs::{alloc_device_marker, Device};
 use super::inode::INodeInterface;
 
-type PageCacheKey = (usize, usize); // (block device pointer, offset)
-type PageCacheItem = CacheArc<CacheItem<PageCacheKey, CachedPage>>;
+type PageCacheKey = (usize, usize); // (owner ptr, index)
+pub type PageCacheItem = CacheArc<CacheItem<PageCacheKey, CachedPage>>;
 
-struct CachedPage {
-    device: Weak<dyn CachedAccess>,
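+/// A userspace mapping through which a cached page was dirtied; recorded so the
+/// mapping can be torn down again when the page is written back.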
+struct DirtyMapping {
+    addr_space: AddressSpace,
+    addr: VirtAddr,
+}
+
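+/// A page-sized block of cached data, backed by a physical frame and keyed by its
+/// owner and page index within that owner.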
+pub struct CachedPage {
+    owner: Weak<dyn CachedAccess>,
     offset: usize,
     page: PhysFrame,
     dirty: AtomicBool,
+    dirty_mappings: Mutex<Vec<DirtyMapping>>,
 }
 
 impl CachedPage {
-    fn new(device: Weak<dyn CachedAccess>, offset: usize) -> Self {
-        Self {
-            device,
+    fn new(owner: Weak<dyn CachedAccess>, offset: usize) -> Self {
+        let this = Self {
+            owner,
             offset,
             page: FRAME_ALLOCATOR
                 .allocate_frame()
                 .expect("page_cache: out of memory"),
             dirty: AtomicBool::new(false),
-        }
+            dirty_mappings: Mutex::new(Vec::new()),
+        };
+        // TODO: Temporary hack; is there a cleaner way to do this? The extra reference
+        // is required because when a process's VM unmaps a page that is backed by a
+        // cached page, the unmap would otherwise drop the frame's refcount to zero and
+        // deallocate it out from under the cache.
+        get_vm_frames().unwrap()
+            [this.page.start_address().as_u64() as usize / Size4KiB::SIZE as usize]
+            .inc_ref_count();
+        this
     }
 
     fn data_mut(&self) -> &mut [MaybeUninit<u8>] {
@@ -72,10 +86,14 @@ impl CachedPage {
         unsafe { core::slice::from_raw_parts_mut(data_ptr, Size4KiB::SIZE as usize) }
     }
 
-    fn data_addr(&self) -> PhysAddr {
+    pub fn data_addr(&self) -> PhysAddr {
         self.page.start_address()
     }
 
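+    /// Returns the physical frame that backs this cached page.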
+    pub fn page(&self) -> PhysFrame {
+        self.page
+    }
+
     fn make_key(device: &Weak<dyn CachedAccess>, offset: usize) -> PageCacheKey {
         (device.as_ptr().addr(), offset)
     }
@@ -85,26 +103,35 @@ impl CachedPage {
         self.dirty.load(Ordering::SeqCst)
     }
 
-    fn mark_dirty(&self) {
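+    /// Marks the page as dirty so that its contents are written back to the owner on
+    /// the next sync.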
+    pub fn mark_dirty(&self) {
+        log::trace!("page_cache: marking page dirty (index {})", self.offset);
         self.dirty.store(true, Ordering::SeqCst);
     }
 
     fn device(&self) -> Arc<dyn CachedAccess> {
-        self.device.upgrade().unwrap()
+        self.owner.upgrade().unwrap()
     }
 
     fn sync(&self) {
         if !self.is_dirty() {
             return;
         }
 
-        // Commit the changes made to the cache to the disk.
-        let disk = self.device();
-
+        // Commit the changes made to the cache to the owner.
+        let owner = self.device();
         let offset_bytes = self.offset * Size4KiB::SIZE as usize;
-        let sector = offset_bytes / disk.block_size();
+        owner.write_direct(offset_bytes, self.page);
+
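+        // Tear down every userspace mapping that dirtied this page, so a later write
+        // through such a mapping faults again and can re-mark the page dirty.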
+        for mut mapping in self.dirty_mappings.lock_irq().drain(..) {
+            let mut offset_table = mapping.addr_space.offset_page_table();
+            offset_table
+                .unmap(Page::<Size4KiB>::containing_address(mapping.addr))
+                .unwrap()
+                .1
+                .flush();
+        }
 
-        disk.write_dma(sector, self.data_addr(), Size4KiB::SIZE as usize);
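+        // The page is clean again; clearing the flag lets future syncs short-circuit.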
+        self.dirty.store(false, Ordering::SeqCst);
     }
 }
 
@@ -116,12 +143,12 @@ impl Drop for CachedPage {
 
 impl Cacheable<PageCacheKey> for CachedPage {
     fn cache_key(&self) -> PageCacheKey {
-        Self::make_key(&self.device, self.offset)
+        Self::make_key(&self.owner, self.offset)
     }
 }
 
 lazy_static::lazy_static! {
-    static ref PAGE_CACHE: Arc<Cache<PageCacheKey, CachedPage>> = Cache::new();
+    pub(in crate::fs) static ref PAGE_CACHE: Arc<Cache<PageCacheKey, CachedPage>> = Cache::new();
 }
 
 impl Cache<PageCacheKey, CachedPage> {
@@ -145,16 +172,16 @@ impl Cache<PageCacheKey, CachedPage> {
         let device = device.upgrade().expect("page_cache: device dropped");
 
         let aligned_offset = align_down(offset as u64, Size4KiB::SIZE) as usize;
-        let sector = aligned_offset / device.block_size();
-
         device
-            .read_dma(sector, page.data_addr(), Size4KiB::SIZE as usize)
+            .read_direct(aligned_offset, page.page())
             .expect("page_cache: failed to read block");
 
         PAGE_CACHE.make_item_cached(page)
     }
 }
 
+// TODO: cache hit/miss stats
+
 pub struct DirtyRef<T: Sized> {
     cache: PageCacheItem,
     ptr: *mut T,
@@ -202,9 +229,12 @@ pub trait BlockDeviceInterface: Send + Sync {
     fn write_block(&self, sector: usize, buf: &[u8]) -> Option<usize>;
 }
 
-pub trait CachedAccess: BlockDeviceInterface {
+pub trait CachedAccess: Send + Sync {
     fn sref(&self) -> Weak<dyn CachedAccess>;
 
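+    /// Reads a page from the backing store directly into the frame `dest`, bypassing
+    /// the page cache. `offset` is in bytes and is expected to be page-aligned.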
+    fn read_direct(&self, offset: usize, dest: PhysFrame) -> Option<usize>;
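+    /// Writes the contents of the frame `src` directly to the backing store at the
+    /// byte `offset`, bypassing the page cache.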
+    fn write_direct(&self, offset: usize, src: PhysFrame) -> Option<usize>;
+
     fn read(&self, mut offset: usize, dest: &mut [MaybeUninit<u8>]) -> Option<usize> {
         let mut loc = 0;
 
@@ -236,6 +266,9 @@ pub trait CachedAccess: BlockDeviceInterface {
         let mut loc = 0;
 
         while loc < buffer.len() {
+            // TODO: If the page is not found in the page cache and the write falls
+            // exactly on page-size boundaries, the page need not be read from disk at
+            // all; it can just be allocated and immediately marked dirty.
             let page = PAGE_CACHE.get_page(&self.sref(), offset);
 
             let page_offset = offset % Size4KiB::SIZE as usize;
@@ -318,6 +351,22 @@ impl CachedAccess for BlockDevice {
     fn sref(&self) -> Weak<dyn CachedAccess> {
         self.sref.clone()
     }
+
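+    // The underlying DMA interface is addressed in sectors, so the byte offset is
+    // converted before dispatching.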
+    fn read_direct(&self, offset: usize, dest: PhysFrame) -> Option<usize> {
+        self.dev.read_dma(
+            offset / self.dev.block_size(),
+            dest.start_address(),
+            Size4KiB::SIZE as _,
+        )
+    }
+
+    fn write_direct(&self, offset: usize, src: PhysFrame) -> Option<usize> {
+        self.dev.write_dma(
+            offset / self.dev.block_size(),
+            src.start_address(),
+            Size4KiB::SIZE as _,
+        )
+    }
 }
 
 impl INodeInterface for BlockDevice {}