@@ -2,6 +2,9 @@ use std::alloc::{self, Layout};
 
 use rustc_index::bit_set::DenseBitSet;
 
+/// How many bytes of memory each bit in the bitset represents.
+const COMPRESSION_FACTOR: usize = 4;
+
 /// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated
 /// pages (not mixed with Miri's own memory). This is very useful for native-lib mode.
 #[derive(Debug)]
@@ -10,19 +13,19 @@ pub struct IsolatedAlloc {
     /// Every pointer here must point to a page-sized allocation claimed via
     /// the global allocator.
     page_ptrs: Vec<*mut u8>,
-    /// Pointers to multi-page-sized allocations. These must also be page-aligned,
+    /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
     /// with their size stored as the second element of the vector.
     huge_ptrs: Vec<(*mut u8, usize)>,
     /// Metadata about which bytes have been allocated on each page. The length
     /// of this vector must be the same as that of `page_ptrs`, and the domain
-    /// size of the bitset must be exactly `page_size / 8`.
+    /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
     ///
     /// Conceptually, each bit of the bitset represents the allocation status of
-    /// one 8-byte chunk on the corresponding element of `page_ptrs`. Thus,
-    /// indexing into it should be done with a value one-eighth of the corresponding
-    /// offset on the matching `page_ptrs` element.
+    /// one n-byte chunk on the corresponding element of `page_ptrs`. Thus,
+    /// indexing into it should be done with a value one-nth of the corresponding
+    /// offset on the matching `page_ptrs` element (n = `COMPRESSION_FACTOR`).
     page_infos: Vec<DenseBitSet<usize>>,
-    /// The host (not emulated) page size, or 0 if it has not yet been set.
+    /// The host (not emulated) page size.
     page_size: usize,
 }
 
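
Note (illustration, not part of the diff): with `COMPRESSION_FACTOR = 4`, each bit of a page's bitset tracks a 4-byte chunk, so a 4096-byte page needs a 1024-bit set, and the minimum allocation granularity drops from the old 8 bytes to 4. A minimal sketch of the offset-to-bit mapping, assuming a hypothetical allocation whose offset and size are already multiples of the factor:

    // Illustration only: a hypothetical allocation covering bytes 16..40 of a page.
    const COMPRESSION_FACTOR: usize = 4;
    let (offset, size) = (16usize, 24usize);
    let first_bit = offset / COMPRESSION_FACTOR; // bit 4
    let num_bits = size / COMPRESSION_FACTOR; // 6 bits marked as allocated
    assert_eq!((first_bit, num_bits), (4, 6));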
@@ -39,38 +42,57 @@ impl IsolatedAlloc {
 
     /// Expands the available memory pool by adding one page.
     fn add_page(&mut self, page_size: usize) -> (*mut u8, &mut DenseBitSet<usize>) {
-        assert_ne!(page_size, 0);
-
         let page_layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
         let page_ptr = unsafe { alloc::alloc(page_layout) };
-        // `page_infos` has to have one-eighth as many bits as a page has bytes
-        // (or one-64th as many bytes)
+        // `page_infos` has to have one bit for every `COMPRESSION_FACTOR` bytes of the page
-        self.page_infos.push(DenseBitSet::new_empty(page_size / 8));
+        self.page_infos.push(DenseBitSet::new_empty(page_size / COMPRESSION_FACTOR));
         self.page_ptrs.push(page_ptr);
         (page_ptr, self.page_infos.last_mut().unwrap())
     }
 
-    /// For simplicity, we allocate in multiples of 8 bytes with at least that
-    /// alignment.
+    /// For simplicity, we serve allocations in multiples of COMPRESSION_FACTOR
+    /// bytes with at least that alignment.
     #[inline]
     fn normalized_layout(layout: Layout) -> (usize, usize) {
-        let align = if layout.align() < 8 { 8 } else { layout.align() };
-        let size = layout.size().next_multiple_of(8);
+        let align =
+            if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() };
+        let size = layout.size().next_multiple_of(COMPRESSION_FACTOR);
         (size, align)
     }
 
+    /// If the allocation is larger than a page, round its size up to a whole number of pages.
+    /// Since we pass this into the global allocator, it's more useful to return
+    /// a `Layout` instead of a pair of usizes.
+    #[inline]
+    fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout {
+        // Allocate in page-sized chunks
+        let size = layout.size().next_multiple_of(page_size);
+        // Align probably *shouldn't* ever be greater than the page size,
+        // but just in case
+        let align = std::cmp::max(layout.align(), page_size);
+        Layout::from_size_align(size, align).unwrap()
+    }
+
+    /// Determines whether a given (size, align) should be sent to `alloc_huge` /
+    /// `dealloc_huge`.
+    #[inline]
+    fn is_huge_alloc(size: usize, align: usize, page_size: usize) -> bool {
+        align >= page_size || size >= page_size
+    }
+
     /// Allocates memory as described in `Layout`. This memory should be deallocated
     /// by calling `dealloc` on this same allocator.
     ///
     /// SAFETY: See `alloc::alloc()`
-    pub fn alloc(&mut self, layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
         unsafe { self.allocate(layout, false) }
     }
 
     /// Same as `alloc`, but zeroes out the memory.
     ///
     /// SAFETY: See `alloc::alloc_zeroed()`
-    pub fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
+    pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 {
         unsafe { self.allocate(layout, true) }
     }
 
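
Note (illustration, not part of the diff, and ignoring that these helpers are private): how normalization and the huge-allocation check behave, assuming a 4096-byte host page:

    // Sketch only: values follow from normalized_layout and is_huge_alloc above.
    let layout = Layout::from_size_align(9, 2).unwrap();
    let (size, align) = IsolatedAlloc::normalized_layout(layout);
    assert_eq!((size, align), (12, 4)); // rounded up to multiples of COMPRESSION_FACTOR
    assert!(!IsolatedAlloc::is_huge_alloc(size, align, 4096)); // served from a shared page
    assert!(IsolatedAlloc::is_huge_alloc(5000, 8, 4096)); // size >= page_size: sent to alloc_huge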
@@ -80,8 +102,9 @@ impl IsolatedAlloc {
     /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size`
     /// corresponds to the host pagesize.
     unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
-        if layout.align() > self.page_size || layout.size() > self.page_size {
-            unsafe { self.alloc_multi_page(layout, zeroed) }
+        let (size, align) = IsolatedAlloc::normalized_layout(layout);
+        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
+            unsafe { self.alloc_huge(layout, zeroed) }
         } else {
             for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) {
                 if let Some(ptr) =
@@ -93,6 +116,8 @@ impl IsolatedAlloc {
 
             // We get here only if there's no space in our existing pages
             let page_size = self.page_size;
+            // Add another page and allocate from it; this cannot fail since the
+            // new page is empty and we already checked that the request fits into a page
             let (page, pinfo) = self.add_page(page_size);
             unsafe { Self::alloc_from_page(page_size, layout, page, pinfo, zeroed).unwrap() }
         }
@@ -112,18 +137,18 @@ impl IsolatedAlloc {
         let (size, align) = IsolatedAlloc::normalized_layout(layout);
 
         // Check every alignment-sized block and see if there exists a `size`
-        // chunk of empty space i.e. forall idx . !pinfo.contains(idx / 8)
+        // chunk of empty space i.e. forall idx . !pinfo.contains(idx / n)
         for idx in (0..page_size).step_by(align) {
-            let idx_pinfo = idx / 8;
-            let size_pinfo = size / 8;
+            let idx_pinfo = idx / COMPRESSION_FACTOR;
+            let size_pinfo = size / COMPRESSION_FACTOR;
             // DenseBitSet::contains() panics if the index is out of bounds
             if pinfo.domain_size() < idx_pinfo + size_pinfo {
                 break;
             }
             // FIXME: is there a more efficient way to check whether the entire range is unset
             // in the bitset?
             let range_avail = !(idx_pinfo..idx_pinfo + size_pinfo).any(|idx| pinfo.contains(idx));
-            if pred {
+            if range_avail {
                 pinfo.insert_range(idx_pinfo..idx_pinfo + size_pinfo);
                 unsafe {
                     let ptr = page.add(idx);
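
Note (illustration, not part of the diff): the loop above is a first-fit scan. A self-contained model of the availability check, substituting a plain `Vec<bool>` for `DenseBitSet`:

    /// Model for illustration only: returns the byte offset of the first free,
    /// aligned `size`-byte range, where `pinfo[i]` marks chunk `i` (of `factor`
    /// bytes each) as in use.
    fn find_first_fit(pinfo: &[bool], align: usize, size: usize, factor: usize) -> Option<usize> {
        let size_pinfo = size / factor;
        // pinfo.len() * factor plays the role of page_size here
        for idx in (0..pinfo.len() * factor).step_by(align) {
            let idx_pinfo = idx / factor;
            if idx_pinfo + size_pinfo > pinfo.len() {
                break;
            }
            if pinfo[idx_pinfo..idx_pinfo + size_pinfo].iter().all(|&used| !used) {
                return Some(idx);
            }
        }
        None
    }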
@@ -142,7 +167,8 @@ impl IsolatedAlloc {
     /// Allocates in multiples of one page on the host system.
     ///
     /// SAFETY: Same as `alloc()`.
-    unsafe fn alloc_multi_page(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
+    unsafe fn alloc_huge(&mut self, layout: Layout, zeroed: bool) -> *mut u8 {
+        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
         let ret =
             unsafe { if zeroed { alloc::alloc_zeroed(layout) } else { alloc::alloc(layout) } };
         self.huge_ptrs.push((ret, layout.size()));
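
Note (illustration, not part of the diff, ignoring method privacy): with a 4096-byte host page, `huge_normalized_layout` rounds a request up to whole, page-aligned pages:

    // Sketch only, assuming page_size = 4096.
    let layout = Layout::from_size_align(5000, 8).unwrap();
    let huge = IsolatedAlloc::huge_normalized_layout(layout, 4096);
    assert_eq!((huge.size(), huge.align()), (8192, 4096)); // two pages, page-aligned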
@@ -157,14 +183,16 @@ impl IsolatedAlloc {
     pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
         let (size, align) = IsolatedAlloc::normalized_layout(layout);
 
-        let ptr_idx = ptr.addr() % self.page_size;
-        let page_addr = ptr.addr() - ptr_idx;
-
-        if align > self.page_size || size > self.page_size {
+        if IsolatedAlloc::is_huge_alloc(size, align, self.page_size) {
             unsafe {
-                self.dealloc_multi_page(ptr, layout);
+                self.dealloc_huge(ptr, layout);
             }
         } else {
+            // Offset of the pointer in the current page
+            let ptr_idx = ptr.addr() % self.page_size;
+            // And then the page's base address
+            let page_addr = ptr.addr() - ptr_idx;
+
             // Find the page this allocation belongs to.
             // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
             let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
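
Note (illustration, not part of the diff): the offset/base computation above is plain modular arithmetic. For a hypothetical pointer address with a 4096-byte page:

    // Hypothetical 64-bit address, for illustration only.
    let addr: usize = 0x7f3a_8000_1234;
    let page_size = 4096;
    let ptr_idx = addr % page_size; // 0x234, byte offset within the page
    let page_addr = addr - ptr_idx; // 0x7f3a_8000_1000, the page's base address
    assert_eq!((ptr_idx, page_addr), (0x234, 0x7f3a_8000_1000));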
@@ -175,49 +203,52 @@ impl IsolatedAlloc {
                     self.page_ptrs
                 )
             };
-            let ptr_idx_pinfo = ptr_idx / 8;
-            let size_pinfo = size / 8;
+            // Since each bit of the bitset represents COMPRESSION_FACTOR bytes,
+            // we need to divide by that when getting the indices
+            let ptr_idx_pinfo = ptr_idx / COMPRESSION_FACTOR;
+            let size_pinfo = size / COMPRESSION_FACTOR;
             for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo {
                 pinfo.remove(idx);
             }
-        }
 
-        let mut free = vec![];
-        let page_layout =
-            unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
-        for (idx, pinfo) in self.page_infos.iter().enumerate() {
-            if pinfo.is_empty() {
-                free.push(idx);
-            }
-        }
-        free.reverse();
-        for idx in free {
-            let _ = self.page_infos.remove(idx);
-            unsafe {
-                alloc::dealloc(self.page_ptrs.remove(idx), page_layout);
+            // We allocated all the pages with this layout
+            let page_layout =
+                unsafe { Layout::from_size_align_unchecked(self.page_size, self.page_size) };
+            // At most one page can have become empty after this dealloc, so if
+            // such a page exists, find it, free it, and adjust both vectors
+            if let Some(free_idx) = self.page_infos.iter().position(|pinfo| pinfo.is_empty()) {
+                self.page_infos.remove(free_idx);
+                unsafe {
+                    alloc::dealloc(self.page_ptrs.remove(free_idx), page_layout);
+                }
             }
         }
     }
 
     /// SAFETY: Same as `dealloc()` with the added requirement that `layout`
     /// must ask for a size larger than the host pagesize.
-    unsafe fn dealloc_multi_page(&mut self, ptr: *mut u8, layout: Layout) {
+    unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) {
+        let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size);
+        // Find the stored huge allocation whose address matches the pointer we were given
         let idx = self
             .huge_ptrs
             .iter()
             .position(|pg| ptr.addr() == pg.0.addr())
             .expect("Freeing unallocated pages");
-        let ptr = self.huge_ptrs.remove(idx).0;
+        // And kick it from the list
+        self.huge_ptrs.remove(idx);
         unsafe {
             alloc::dealloc(ptr, layout);
         }
     }
 }
-/*
+
 #[cfg(test)]
 mod tests {
     use super::*;
 
+    /// Helper function to assert that all bytes from `ptr` to `ptr.add(layout.size())`
+    /// are zeroes.
     fn assert_zeroes(ptr: *mut u8, layout: Layout) {
         unsafe {
             for ofs in 0..layout.size() {
@@ -226,85 +257,101 @@ mod tests {
         }
     }
 
+    /// Check that small (sub-pagesize) allocations are properly zeroed out.
     #[test]
     fn small_zeroes() {
+        let mut alloc = IsolatedAlloc::new();
+        // 256 should be less than the pagesize on *any* system
         let layout = Layout::from_size_align(256, 32).unwrap();
-        // allocate_zeroed
-        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 0) };
+        let ptr = unsafe { alloc.alloc_zeroed(layout) };
         assert_zeroes(ptr, layout);
         unsafe {
-            IsolatedAlloc::dealloc(ptr, layout, 0);
+            alloc.dealloc(ptr, layout);
         }
     }
 
+    /// Check that huge (> 1 page) allocations are also properly zeroed out.
     #[test]
-    fn big_zeroes() {
+    fn huge_zeroes() {
+        let mut alloc = IsolatedAlloc::new();
+        // 16k is about as big as pages get, e.g. on macOS aarch64
         let layout = Layout::from_size_align(16 * 1024, 128).unwrap();
-        let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 1) };
+        let ptr = unsafe { alloc.alloc_zeroed(layout) };
         assert_zeroes(ptr, layout);
         unsafe {
-            IsolatedAlloc::dealloc(ptr, layout, 1);
+            alloc.dealloc(ptr, layout);
         }
     }
 
+    /// Check that repeatedly reallocating the same memory will still zero out
+    /// everything properly.
     #[test]
     fn repeated_allocs() {
+        let mut alloc = IsolatedAlloc::new();
+        // Try both sub-pagesize allocs and those larger than / equal to a page
         for sz in (1..=(16 * 1024)).step_by(128) {
             let layout = Layout::from_size_align(sz, 1).unwrap();
-            let ptr = unsafe { IsolatedAlloc::alloc_zeroed(layout, 2) };
+            let ptr = unsafe { alloc.alloc_zeroed(layout) };
             assert_zeroes(ptr, layout);
             unsafe {
                 ptr.write_bytes(255, sz);
-                IsolatedAlloc::dealloc(ptr, layout, 2);
+                alloc.dealloc(ptr, layout);
             }
         }
     }
 
+    /// Checks that allocations of different sizes do not overlap.
     #[test]
     fn no_overlaps() {
-        no_overlaps_inner(3);
+        let mut alloc = IsolatedAlloc::new();
+        no_overlaps_inner(&mut alloc);
     }
 
-    fn no_overlaps_inner(id: u64) {
+    /// Allows us to reuse this bit for `no_overlaps` and `check_leaks`.
+    fn no_overlaps_inner(alloc: &mut IsolatedAlloc) {
         // Some random sizes and aligns
         let mut sizes = vec![32; 10];
         sizes.append(&mut vec![15; 4]);
         sizes.append(&mut vec![256; 12]);
         // Give it some multi-page ones too
         sizes.append(&mut vec![32 * 1024; 4]);
 
+        // Matching aligns for the sizes
         let mut aligns = vec![16; 12];
         aligns.append(&mut vec![256; 2]);
         aligns.append(&mut vec![64; 12]);
         aligns.append(&mut vec![4096; 4]);
 
+        // Make sure we didn't mess up in the test itself!
         assert_eq!(sizes.len(), aligns.len());
+
+        // Aggregate the sizes and aligns into a vec of layouts, then allocate them
         let layouts: Vec<_> = std::iter::zip(sizes, aligns)
             .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap())
             .collect();
-        let ptrs: Vec<_> = layouts
-            .iter()
-            .map(|layout| unsafe { IsolatedAlloc::alloc_zeroed(*layout, id) })
-            .collect();
+        let ptrs: Vec<_> =
+            layouts.iter().map(|layout| unsafe { alloc.alloc_zeroed(*layout) }).collect();
 
         for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) {
-            // Make sure we don't allocate overlapping ranges
+            // We requested zeroed allocations, so check that they really are zeroed.
+            // Then fill the allocation with nonzero bytes, so that if any allocs
+            // overlap (or the zeroing is wrong), a later `assert_zeroes` will panic
             unsafe {
                 assert_zeroes(ptr, layout);
                 ptr.write_bytes(255, layout.size());
-                IsolatedAlloc::dealloc(ptr, layout, id);
+                alloc.dealloc(ptr, layout);
             }
         }
     }
 
+    /// Check for memory leaks after repeated allocations and deallocations.
     #[test]
     fn check_leaks() {
-        // Generate some noise first
-        no_overlaps_inner(4);
-        let alloc = ALLOCATOR.lock().unwrap();
+        let mut alloc = IsolatedAlloc::new();
 
-        // Should get auto-deleted if the allocations are empty
-        assert!(!alloc.allocators.contains_key(&4));
+        // Generate some noise first so leaks can manifest
+        no_overlaps_inner(&mut alloc);
+        // And then verify that no memory was leaked
+        assert!(alloc.page_ptrs.is_empty() && alloc.huge_ptrs.is_empty());
     }
 }
-*/
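
Note (illustration, not part of the diff): since `alloc` and `alloc_zeroed` are now `unsafe`, a caller-side round trip looks like this minimal sketch (using the `IsolatedAlloc::new()` constructor the tests rely on):

    // Sketch only; callers must uphold the usual `alloc::alloc` preconditions.
    let mut isolated = IsolatedAlloc::new();
    let layout = Layout::from_size_align(64, 8).unwrap();
    // SAFETY: `layout` has nonzero size, and the pointer is freed below
    // with the same allocator and layout.
    let ptr = unsafe { isolated.alloc_zeroed(layout) };
    unsafe { isolated.dealloc(ptr, layout) };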