@@ -6,16 +6,13 @@ use rustc_index::bit_set::DenseBitSet;
 const COMPRESSION_FACTOR: usize = 4;
 
 /// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated
-/// pages (not mixed with Miri's own memory). This is very useful for native-lib mode.
+/// pages (not mixed with Miri's own memory). This is used in native-lib mode.
 #[derive(Debug)]
 pub struct IsolatedAlloc {
     /// Pointers to page-aligned memory that has been claimed by the allocator.
     /// Every pointer here must point to a page-sized allocation claimed via
-    /// the global allocator.
+    /// the global allocator. These pointers are used for "small" allocations.
     page_ptrs: Vec<*mut u8>,
-    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
-    /// with their size stored as the second element of the vector.
-    huge_ptrs: Vec<(*mut u8, usize)>,
     /// Metadata about which bytes have been allocated on each page. The length
     /// of this vector must be the same as that of `page_ptrs`, and the domain
     /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
@@ -25,6 +22,9 @@ pub struct IsolatedAlloc {
     /// indexing into it should be done with a value one-nth of the corresponding
     /// offset on the matching `page_ptrs` element (n = `COMPRESSION_FACTOR`).
     page_infos: Vec<DenseBitSet<usize>>,
+    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
+    /// with their size stored as the second element of the vector.
+    huge_ptrs: Vec<(*mut u8, usize)>,
     /// The host (not emulated) page size.
     page_size: usize,
 }
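
As a worked illustration of the compressed bitmap described above (a standalone sketch, not part of the patch): each bit in a `page_infos` entry tracks one `COMPRESSION_FACTOR`-sized chunk of its page, so a byte range maps to a bit range by dividing both endpoints by the factor. `bit_range` below is a hypothetical helper, not a function in this file.

    // Standalone sketch of the offset -> bit-index mapping used by `page_infos`.
    const COMPRESSION_FACTOR: usize = 4;

    fn bit_range(offset: usize, size: usize) -> std::ops::Range<usize> {
        // `normalized_layout` guarantees both values are multiples of the factor.
        assert!(offset % COMPRESSION_FACTOR == 0 && size % COMPRESSION_FACTOR == 0);
        (offset / COMPRESSION_FACTOR)..((offset + size) / COMPRESSION_FACTOR)
    }

    fn main() {
        // A 16-byte allocation at byte offset 32 is tracked by bits 8..12.
        assert_eq!(bit_range(32, 16), 8..12);
    }
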
@@ -42,31 +42,23 @@ impl IsolatedAlloc {
         }
     }
 
-    /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
-        let page_layout = Layout::from_size_align(self.page_size, self.page_size).unwrap();
-        // SAFETY: The system page size, which is the layout size, cannot be 0
-        let page_ptr = unsafe { alloc::alloc(page_layout) };
-        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
-        assert!(self.page_size % COMPRESSION_FACTOR == 0);
-        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
-        self.page_ptrs.push(page_ptr);
-        (page_ptr, self.page_infos.last_mut().unwrap())
-    }
-
     /// For simplicity, we serve small allocations in multiples of COMPRESSION_FACTOR
     /// bytes with at least that alignment.
     #[inline]
-    fn normalized_layout(layout: Layout) -> (usize, usize) {
+    fn normalized_layout(layout: Layout) -> Layout {
         let align =
             if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() };
         let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
-        (size, align)
+        Layout::from_size_align(size, align).unwrap()
+    }
+
+    /// Returns the layout used to allocate the pages that hold small allocations.
+    #[inline]
+    fn page_layout(&self) -> Layout {
+        Layout::from_size_align(self.page_size, self.page_size).unwrap()
     }
 
     /// If the allocation is greater than a page, then round to the nearest page #.
-    /// Since we pass this into the global allocator, it's more useful to return
-    /// a `Layout` instead of a pair of usizes.
     #[inline]
     fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout {
         // Allocate in page-sized chunks
@@ -76,11 +68,11 @@ impl IsolatedAlloc {
         Layout::from_size_align(size, align).unwrap()
     }
 
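The body of `huge_normalized_layout` is mostly elided by these hunks; judging from the surviving comment and return expression, it rounds the size up to whole pages and raises the alignment to at least one page. A standalone sketch under that assumption, with a 4096-byte page size picked purely for illustration:

    use std::alloc::Layout;

    // Assumed mirror of `huge_normalized_layout`: round the size up to whole
    // pages and align to at least one page.
    fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout {
        // Allocate in page-sized chunks
        let size = layout.size().next_multiple_of(page_size);
        let align = std::cmp::max(layout.align(), page_size);
        Layout::from_size_align(size, align).unwrap()
    }

    fn main() {
        let huge = huge_normalized_layout(Layout::from_size_align(5000, 8).unwrap(), 4096);
        // 5000 bytes rounds up to two pages, page-aligned.
        assert_eq!((huge.size(), huge.align()), (8192, 4096));
    }
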
-    /// Determined whether a given (size, align) should be sent to `alloc_huge` /
-    /// `dealloc_huge`.
+    /// Determines whether a given normalized (size, align) should be sent to
+    /// `alloc_huge` / `dealloc_huge`.
     #[inline]
-    fn is_huge_alloc(size: usize, align: usize, page_size: usize) -> bool {
-        align >= page_size || size >= page_size
+    fn is_huge_alloc(&self, layout: &Layout) -> bool {
+        layout.align() > self.page_size / 2 || layout.size() >= self.page_size / 2
     }
 
     /// Allocates memory as described in `Layout`. This memory should be deallocated
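
To see what the two changed helpers compute, here is a standalone sketch: the function bodies are copied from the patch, while the free-function scaffolding and the 4096-byte page size are assumptions for illustration. Note the behavioral change in `is_huge_alloc`: anything of at least half a page (or needing more than half-page alignment) now takes the huge path, where the old check only triggered at a full page.

    use std::alloc::Layout;

    const COMPRESSION_FACTOR: usize = 4;
    const PAGE_SIZE: usize = 4096; // assumed host page size

    // Body as in the patched `normalized_layout`.
    fn normalized_layout(layout: Layout) -> Layout {
        let align =
            if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() };
        let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
        Layout::from_size_align(size, align).unwrap()
    }

    // Body as in the patched `is_huge_alloc`, with `page_size` passed explicitly.
    fn is_huge_alloc(layout: &Layout, page_size: usize) -> bool {
        layout.align() > page_size / 2 || layout.size() >= page_size / 2
    }

    fn main() {
        // A 5-byte, 1-aligned request becomes 8 bytes with 4-byte alignment.
        let small = normalized_layout(Layout::from_size_align(5, 1).unwrap());
        assert_eq!((small.size(), small.align()), (8, 4));
        assert!(!is_huge_alloc(&small, PAGE_SIZE));

        // Half a page is enough to be "huge" under the new check.
        let big = normalized_layout(Layout::from_size_align(2048, 8).unwrap());
        assert!(is_huge_alloc(&big, PAGE_SIZE));
    }
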
@@ -106,8 +98,8 @@ impl IsolatedAlloc {
     /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
     /// corresponds to the host pagesize.
     unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
-        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+        let layout = IsolatedAlloc::normalized_layout(layout);
+        if self.is_huge_alloc(&layout) {
             // SAFETY: Validity of `layout` upheld by caller; we checked that
             // the size and alignment are appropriate for being a huge alloc
             unsafe { self.alloc_huge(layout, zeroed) }
@@ -116,7 +108,7 @@ impl IsolatedAlloc {
             // SAFETY: The value in `self.page_size` is used to allocate
             // `page`, with page alignment
             if let Some(ptr) =
-                unsafe { Self::alloc_from_page(self.page_size, layout, page, pinfo, zeroed) }
+                unsafe { Self::alloc_small(self.page_size, layout, page, pinfo, zeroed) }
             {
                 return ptr;
             }
@@ -129,44 +121,42 @@ impl IsolatedAlloc {
         let (page, pinfo) = self.add_page();
 
         // SAFETY: See comment on `alloc_from_page` above
-        unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed).unwrap() }
+        unsafe { Self::alloc_small(page_size, layout, page, pinfo, zeroed).unwrap() }
         }
     }
 
     /// Used internally by `allocate` to abstract over some logic.
     ///
     /// SAFETY: `page` must be a page-aligned pointer to an allocated page,
     /// where the allocation is (at least) `page_size` bytes.
-    unsafe fn alloc_from_page(
+    unsafe fn alloc_small(
         page_size: usize,
         layout: Layout,
         page: *mut u8,
         pinfo: &mut DenseBitSet<usize>,
         zeroed: bool,
     ) -> Option<*mut u8> {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
-
         // Check every alignment-sized block and see if there exists a `size`
         // chunk of empty space i.e. forall idx . !pinfo.contains(idx / n)
-        for idx in (0..page_size).step_by(align) {
-            let idx_pinfo = idx / COMPRESSION_FACTOR;
-            let size_pinfo = size / COMPRESSION_FACTOR;
+        for offset in (0..page_size).step_by(layout.align()) {
+            let offset_pinfo = offset / COMPRESSION_FACTOR;
+            let size_pinfo = layout.size() / COMPRESSION_FACTOR;
             // DenseBitSet::contains() panics if the index is out of bounds
-            if pinfo.domain_size() < idx_pinfo + size_pinfo {
+            if pinfo.domain_size() < offset_pinfo + size_pinfo {
                 break;
             }
             // FIXME: is there a more efficient way to check whether the entire range is unset
             // in the bitset?
-            let range_avail = !(idx_pinfo..idx_pinfo + size_pinfo).any(|idx| pinfo.contains(idx));
+            let range_avail = !(offset_pinfo..offset_pinfo + size_pinfo).any(|i| pinfo.contains(i));
             if range_avail {
-                pinfo.insert_range(idx_pinfo..idx_pinfo + size_pinfo);
+                pinfo.insert_range(offset_pinfo..offset_pinfo + size_pinfo);
                 // SAFETY: We checked the available bytes after `idx` in the call
                 // to `domain_size` above and asserted there are at least `idx +
                 // layout.size()` bytes available and unallocated after it.
                 // `page` must point to the start of the page, so adding `idx`
                 // is safe per the above.
                 unsafe {
-                    let ptr = page.add(idx);
+                    let ptr = page.add(offset);
                     if zeroed {
                         // Only write the bytes we were specifically asked to
                         // zero out, even if we allocated more
@@ -179,6 +169,17 @@ impl IsolatedAlloc {
         None
     }
 
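The loop above is a first-fit search over the compressed bitmap: step through the page in `align`-sized strides and claim the first run of clear bits long enough for the request. A self-contained sketch of the same idea, substituting a plain `Vec<bool>` for the rustc-internal `DenseBitSet`:

    const COMPRESSION_FACTOR: usize = 4;

    // First-fit over a page bitmap, mirroring `alloc_small`'s scan.
    // Returns the byte offset into the page on success.
    fn find_and_claim(pinfo: &mut [bool], page_size: usize, size: usize, align: usize) -> Option<usize> {
        for offset in (0..page_size).step_by(align) {
            let start = offset / COMPRESSION_FACTOR;
            let len = size / COMPRESSION_FACTOR;
            if start + len > pinfo.len() {
                break; // the run would extend past the end of the page
            }
            if pinfo[start..start + len].iter().all(|b| !b) {
                pinfo[start..start + len].iter_mut().for_each(|b| *b = true);
                return Some(offset);
            }
        }
        None
    }

    fn main() {
        let page_size = 4096;
        let mut pinfo = vec![false; page_size / COMPRESSION_FACTOR];
        // Two 8-byte, 4-aligned requests land back to back.
        assert_eq!(find_and_claim(&mut pinfo, page_size, 8, 4), Some(0));
        assert_eq!(find_and_claim(&mut pinfo, page_size, 8, 4), Some(8));
    }
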
+    /// Expands the available memory pool by adding one page.
+    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
+        // SAFETY: The system page size, which is the layout size, cannot be 0
+        let page_ptr = unsafe { alloc::alloc(self.page_layout()) };
+        // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
+        assert!(self.page_size % COMPRESSION_FACTOR == 0);
+        self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
+        self.page_ptrs.push(page_ptr);
+        (page_ptr, self.page_infos.last_mut().unwrap())
+    }
+
     /// Allocates in multiples of one page on the host system.
     ///
     /// SAFETY: Same as `alloc()`.
@@ -197,54 +198,60 @@ impl IsolatedAlloc {
     /// `alloc_zeroed()`) with the same layout as the one passed on this same
     /// `IsolatedAlloc`.
     pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
-        let (size, align) = IsolatedAlloc::normalized_layout(layout);
+        let layout = IsolatedAlloc::normalized_layout(layout);
 
-        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+        if self.is_huge_alloc(&layout) {
             // SAFETY: Partly upheld by caller, and we checked that the size
             // and align, meaning this must have been allocated via `alloc_huge`
             unsafe {
                 self.dealloc_huge(ptr, layout);
             }
         } else {
-            // Offset of the pointer in the current page
-            let ptr_idx = ptr.addr() % self.page_size;
-            // And then the page's base address
-            let page_addr = ptr.addr() - ptr_idx;
-
-            // Find the page this allocation belongs to.
-            // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
-            let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
-                .enumerate()
-                .find(|(_, (page, _))| page.addr() == page_addr);
-            let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
-                panic!(
-                    "Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}",
-                    self.page_ptrs
-                )
-            };
-            // Mark this range as available in the page.
-            let ptr_idx_pinfo = ptr_idx / COMPRESSION_FACTOR;
-            let size_pinfo = size / COMPRESSION_FACTOR;
-            for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
-                pinfo.remove(idx);
-            }
+            // SAFETY: It's not a huge allocation, therefore it is a small one.
+            let idx = unsafe { self.dealloc_small(ptr, layout) };
 
             // This may have been the last allocation on this page. If so, free the entire page.
             // FIXME: this can lead to threshold effects, we should probably add some form
             // of hysteresis.
-            if pinfo.is_empty() {
-                let page_layout = Layout::from_size_align(self.page_size, self.page_size).unwrap();
-                self.page_infos.remove(idx_of_pinfo);
+            if self.page_infos[idx].is_empty() {
+                self.page_infos.remove(idx);
+                let page_ptr = self.page_ptrs.remove(idx);
                 // SAFETY: We checked that there are no outstanding allocations
                 // from us pointing to this page, and we know it was allocated
                 // with this layout
                 unsafe {
-                    alloc::dealloc(self.page_ptrs.remove(idx_of_pinfo), page_layout);
+                    alloc::dealloc(page_ptr, self.page_layout());
                 }
             }
         }
     }
 
+    /// Returns the index of the page that this was deallocated from
+    ///
+    /// SAFETY: the pointer must have been allocated with `alloc_small`.
+    unsafe fn dealloc_small(&mut self, ptr: *mut u8, layout: Layout) -> usize {
+        // Offset of the pointer in the current page
+        let offset = ptr.addr() % self.page_size;
+        // And then the page's base address
+        let page_addr = ptr.addr() - offset;
+
+        // Find the page this allocation belongs to.
+        // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
+        let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
+            .enumerate()
+            .find(|(_, (page, _))| page.addr() == page_addr);
+        let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
+            panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
+        };
+        // Mark this range as available in the page.
+        let ptr_idx_pinfo = offset / COMPRESSION_FACTOR;
+        let size_pinfo = layout.size() / COMPRESSION_FACTOR;
+        for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
+            pinfo.remove(idx);
+        }
+        idx_of_pinfo
+    }
+
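The page lookup in `dealloc_small` works because small allocations never cross a page boundary, so reducing the address modulo the page size splits it into a page base plus an in-page offset. A quick check of the arithmetic with made-up numbers (4096-byte pages assumed):

    fn main() {
        let page_size: usize = 4096; // assumed host page size
        let ptr_addr: usize = 0x7f3a_8000 + 200; // hypothetical small allocation
        let offset = ptr_addr % page_size; // position within the page
        let page_addr = ptr_addr - offset; // key used to search `page_ptrs`
        assert_eq!(offset, 200);
        assert_eq!(page_addr, 0x7f3a_8000);
    }
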
     /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
     /// must ask for a size larger than the host pagesize.
     unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
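
Finally, the contract the doc comments describe: every pointer from `alloc`/`alloc_zeroed` must be returned to `dealloc` with the same layout, on the same `IsolatedAlloc`. A sketch of that call pattern; the public `alloc` wrapper around `allocate` is referenced by the docs above but not shown in this diff, so its exact signature is an assumption:

    use std::alloc::Layout;

    // Hypothetical caller, compiled against the `IsolatedAlloc` in this file.
    fn roundtrip(isolated: &mut IsolatedAlloc) {
        let layout = Layout::from_size_align(32, 8).unwrap();
        // SAFETY: non-zero size, and we free with the exact same layout.
        unsafe {
            let ptr = isolated.alloc(layout);
            ptr.write_bytes(0xAA, layout.size());
            isolated.dealloc(ptr, layout);
        }
    }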