 use std::alloc::Layout;
+use std::ptr::NonNull;
 
+use nix::sys::mman;
 use rustc_index::bit_set::DenseBitSet;
 
 /// How many bytes of memory each bit in the bitset represents.
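 /// For example, if this is 4 and the host page size is 4096, each page's
 /// bitset has 4096 / 4 = 1024 bits, and each bit records whether its
 /// 4-byte chunk of the page is currently allocated.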
@@ -12,7 +14,7 @@ pub struct IsolatedAlloc {
     /// Pointers to page-aligned memory that has been claimed by the allocator.
     /// Every pointer here must point to a page-sized allocation claimed via
     /// mmap. These pointers are used for "small" allocations.
-    page_ptrs: Vec<*mut u8>,
+    page_ptrs: Vec<NonNull<u8>>,
     /// Metadata about which bytes have been allocated on each page. The length
     /// of this vector must be the same as that of `page_ptrs`, and the domain
     /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`.
@@ -24,7 +26,7 @@ pub struct IsolatedAlloc {
     page_infos: Vec<DenseBitSet<usize>>,
     /// Pointers to multiple-page-sized allocations. These must also be page-aligned,
     /// with their size stored as the second element of the vector.
-    huge_ptrs: Vec<(*mut u8, usize)>,
+    huge_ptrs: Vec<(NonNull<u8>, usize)>,
     /// The host (not emulated) page size.
     page_size: usize,
 }
@@ -137,7 +139,7 @@ impl IsolatedAlloc {
     unsafe fn alloc_small(
         page_size: usize,
         layout: Layout,
-        page: *mut u8,
+        page: NonNull<u8>,
         pinfo: &mut DenseBitSet<usize>,
         zeroed: bool,
     ) -> Option<*mut u8> {
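         // The body (elided between these hunks) presumably scans `pinfo` for
         // a run of clear bits long enough for `layout`, marks that range as
         // allocated, and derives `ptr` from the matching offset into `page`.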
@@ -164,15 +166,15 @@ impl IsolatedAlloc {
                     // zero out, even if we allocated more
                     ptr.write_bytes(0, layout.size());
                 }
-                return Some(ptr);
+                return Some(ptr.as_ptr());
             }
         }
         None
     }
 
     /// Expands the available memory pool by adding one page.
-    fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) {
+    fn add_page(&mut self) -> (NonNull<u8>, &mut DenseBitSet<usize>) {
         // SAFETY: mmap is always safe to call when requesting anonymous memory
         let page_ptr = unsafe {
             libc::mmap(
@@ -189,8 +191,8 @@ impl IsolatedAlloc {
         // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page.
         assert!(self.page_size % COMPRESSION_FACTOR == 0);
         self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR));
-        self.page_ptrs.push(page_ptr);
-        (page_ptr, self.page_infos.last_mut().unwrap())
+        self.page_ptrs.push(NonNull::new(page_ptr).unwrap());
+        (NonNull::new(page_ptr).unwrap(), self.page_infos.last_mut().unwrap())
     }
 
     /// Allocates in multiples of one page on the host system.
@@ -212,7 +214,7 @@ impl IsolatedAlloc {
                 .cast::<u8>()
         };
         assert_ne!(ret.addr(), usize::MAX, "mmap failed");
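         // (libc::MAP_FAILED is -1, so a failed mapping shows up here as a
         // pointer whose address is usize::MAX, caught before NonNull::new runs.)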
-        self.huge_ptrs.push((ret, size));
+        self.huge_ptrs.push((NonNull::new(ret).unwrap(), size));
         // huge_normalized_layout ensures that we've overallocated enough space
         // for this to be valid.
         ret.map_addr(|a| a.next_multiple_of(layout.align()))
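         // The caller receives this aligned pointer, while `huge_ptrs` keeps
         // the original base address so that dealloc can later hand munmap
         // exactly what mmap returned.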
@@ -246,7 +248,7 @@ impl IsolatedAlloc {
             // from us pointing to this page, and we know it was allocated
             // in add_page as exactly a single page.
             unsafe {
-                assert_eq!(libc::munmap(page_ptr.cast(), self.page_size), 0);
+                assert_eq!(libc::munmap(page_ptr.as_ptr().cast(), self.page_size), 0);
             }
         }
     }
@@ -265,7 +267,7 @@ impl IsolatedAlloc {
         // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment.
         let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos)
             .enumerate()
-            .find(|(_, (page, _))| page.addr() == page_addr);
+            .find(|(_, (page, _))| page.addr().get() == page_addr);
         let Some((idx_of_pinfo, (_, pinfo))) = pinfo else {
             panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs)
         };
@@ -287,30 +289,67 @@ impl IsolatedAlloc {
             .huge_ptrs
             .iter()
             .position(|&(pg, size)| {
-                pg.addr() <= ptr.addr() && ptr.addr() < pg.addr().strict_add(size)
+                pg.addr().get() <= ptr.addr() && ptr.addr() < pg.addr().get().strict_add(size)
             })
             .expect("Freeing unallocated pages");
         // And kick it from the list
         let (un_offset_ptr, size2) = self.huge_ptrs.remove(idx);
         assert_eq!(size, size2, "got wrong layout in dealloc");
         // SAFETY: huge_ptrs contains allocations made with mmap with the size recorded there.
         unsafe {
-            let ret = libc::munmap(un_offset_ptr.cast(), size);
+            let ret = libc::munmap(un_offset_ptr.as_ptr().cast(), size);
             assert_eq!(ret, 0);
         }
     }
 
     /// Returns a vector of page addresses managed by the allocator.
     pub fn pages(&self) -> Vec<usize> {
-        let mut pages: Vec<_> =
-            self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance()).collect();
-        for (ptr, size) in &self.huge_ptrs {
+        let mut pages: Vec<usize> =
+            self.page_ptrs.clone().into_iter().map(|p| p.expose_provenance().get()).collect();
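+        // `expose_provenance` (rather than `addr`) marks the pointers'
+        // provenance as exposed, so these addresses can later be turned back
+        // into usable pointers, e.g. after a round-trip through native code.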
+        self.huge_ptrs.iter().for_each(|(ptr, size)| {
             for i in 0..size / self.page_size {
-                pages.push(ptr.expose_provenance().strict_add(i * self.page_size));
+                pages.push(ptr.expose_provenance().get().strict_add(i * self.page_size));
             }
-        }
+        });
         pages
     }
+
+    /// Protects all owned memory as `PROT_NONE`, preventing accesses.
+    ///
+    /// SAFETY: Accessing memory after this point will result in a segfault
+    /// unless it is first unprotected.
+    pub unsafe fn prepare_ffi(&mut self) -> Result<(), nix::errno::Errno> {
+        let prot = mman::ProtFlags::PROT_NONE;
+        unsafe { self.mprotect(prot) }
+    }
+
+    /// Unprotects all owned memory by setting it back to read-write. An error
+    /// here is very likely unrecoverable, so this panics if applying the
+    /// permissions fails.
+    pub fn unprep_ffi(&mut self) {
+        let prot = mman::ProtFlags::PROT_READ | mman::ProtFlags::PROT_WRITE;
+        unsafe {
+            self.mprotect(prot).unwrap();
+        }
+    }
+
+    /// Applies `prot` to every page managed by the allocator.
+    ///
+    /// SAFETY: Accessing memory in violation of the protection flags will
+    /// trigger a segfault.
+    unsafe fn mprotect(&mut self, prot: mman::ProtFlags) -> Result<(), nix::errno::Errno> {
+        for &pg in &self.page_ptrs {
+            unsafe {
+                mman::mprotect(pg.cast(), self.page_size, prot)?;
+            }
+        }
+        for &(hpg, size) in &self.huge_ptrs {
+            unsafe {
+                mman::mprotect(hpg.cast(), size.next_multiple_of(self.page_size), prot)?;
+            }
+        }
+        Ok(())
+    }
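+
+    // A minimal usage sketch (hypothetical caller; `alloc` and the native
+    // call are assumed, not part of this patch): protect everything before
+    // handing control to native code, then restore access afterwards.
+    //
+    //     unsafe { alloc.prepare_ffi().unwrap() }; // everything becomes PROT_NONE
+    //     // ... make the FFI call; any access to these pages now faults ...
+    //     alloc.unprep_ffi();                      // restore PROT_READ | PROT_WRITE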
 }
 
 #[cfg(test)]