@@ -193,75 +193,109 @@ impl<'tcx> AllocExtra<'tcx> {
 /// If `init` is set to this, we consider the primitive initialized.
 pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;

-/// Helper for lazily initialized `alloc_extra.sync` data:
-/// this forces an immediate init.
-pub fn lazy_sync_init<'tcx, T: 'static + Copy>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    primitive: &MPlaceTy<'tcx>,
-    init_offset: Size,
-    data: T,
-) -> InterpResult<'tcx> {
-    let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
-    let (alloc_extra, _machine) = ecx.get_alloc_extra_mut(alloc)?;
-    alloc_extra.sync.insert(offset, Box::new(data));
-    // Mark this as "initialized".
-    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
-    ecx.write_scalar_atomic(
-        Scalar::from_u32(LAZY_INIT_COOKIE),
-        &init_field,
-        AtomicWriteOrd::Relaxed,
-    )?;
-    interp_ok(())
-}
-
-/// Helper for lazily initialized `alloc_extra.sync` data:
-/// Checks if the primitive is initialized, and return its associated data if so.
-/// Otherwise, calls `new_data` to initialize the primitive.
-pub fn lazy_sync_get_data<'tcx, T: 'static + Copy>(
-    ecx: &mut MiriInterpCx<'tcx>,
-    primitive: &MPlaceTy<'tcx>,
-    init_offset: Size,
-    name: &str,
-    new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
-) -> InterpResult<'tcx, T> {
-    // Check if this is already initialized. Needs to be atomic because we can race with another
-    // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
-    // So we just try to replace MUTEX_INIT_COOKIE with itself.
-    let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
-    let init_field = primitive.offset(init_offset, ecx.machine.layouts.u32, ecx)?;
-    let (_init, success) = ecx
-        .atomic_compare_exchange_scalar(
-            &init_field,
-            &ImmTy::from_scalar(init_cookie, ecx.machine.layouts.u32),
-            init_cookie,
-            AtomicRwOrd::Relaxed,
-            AtomicReadOrd::Relaxed,
-            /* can_fail_spuriously */ false,
-        )?
-        .to_scalar_pair();
-
-    if success.to_bool()? {
-        // If it is initialized, it must be found in the "sync primitive" table,
-        // or else it has been moved illegally.
-        let (alloc, offset, _) = ecx.ptr_get_alloc_id(primitive.ptr(), 0)?;
-        let alloc_extra = ecx.get_alloc_extra(alloc)?;
-        let data = alloc_extra
-            .get_sync::<T>(offset)
-            .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?;
-        interp_ok(*data)
-    } else {
-        let data = new_data(ecx)?;
-        lazy_sync_init(ecx, primitive, init_offset, data)?;
-        interp_ok(data)
-    }
-}
-
 // Public interface to synchronization primitives. Please note that in most
 // cases, the function calls are infallible and it is the client's (shim
 // implementation's) responsibility to detect and deal with erroneous
 // situations.
 impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
+    /// Helper for lazily initialized `alloc_extra.sync` data:
+    /// this forces an immediate init.
+    fn lazy_sync_init<T: 'static + Copy>(
+        &mut self,
+        primitive: &MPlaceTy<'tcx>,
+        init_offset: Size,
+        data: T,
+    ) -> InterpResult<'tcx> {
+        let this = self.eval_context_mut();
+
+        let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
+        let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
+        alloc_extra.sync.insert(offset, Box::new(data));
+        // Mark this as "initialized".
+        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
+        this.write_scalar_atomic(
+            Scalar::from_u32(LAZY_INIT_COOKIE),
+            &init_field,
+            AtomicWriteOrd::Relaxed,
+        )?;
+        interp_ok(())
+    }
+
+    /// Helper for lazily initialized `alloc_extra.sync` data:
+    /// Checks if the primitive is initialized:
+    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
+    ///   and stores that in `alloc_extra.sync`.
+    /// - Otherwise, calls `new_data` to initialize the primitive.
+    fn lazy_sync_get_data<T: 'static + Copy>(
+        &mut self,
+        primitive: &MPlaceTy<'tcx>,
+        init_offset: Size,
+        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
+        new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
+    ) -> InterpResult<'tcx, T> {
+        let this = self.eval_context_mut();
+
+        // Check if this is already initialized. Needs to be atomic because we can race with another
+        // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
+        // So we just try to replace LAZY_INIT_COOKIE with itself.
+        let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
+        let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
+        let (_init, success) = this
+            .atomic_compare_exchange_scalar(
+                &init_field,
+                &ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
+                init_cookie,
+                AtomicRwOrd::Relaxed,
+                AtomicReadOrd::Relaxed,
+                /* can_fail_spuriously */ false,
+            )?
+            .to_scalar_pair();
+
+        if success.to_bool()? {
+            // If it is initialized, it must be found in the "sync primitive" table,
+            // or else it has been moved illegally.
+            let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
+            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
+            if let Some(data) = alloc_extra.get_sync::<T>(offset) {
+                interp_ok(*data)
+            } else {
+                let data = missing_data()?;
+                alloc_extra.sync.insert(offset, Box::new(data));
+                interp_ok(data)
+            }
+        } else {
+            let data = new_data(this)?;
+            this.lazy_sync_init(primitive, init_offset, data)?;
+            interp_ok(data)
+        }
+    }
+
+    /// Get the synchronization primitive associated with the given pointer,
+    /// or initialize a new one.
+    fn get_sync_or_init<'a, T: 'static>(
+        &'a mut self,
+        ptr: Pointer,
+        new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> InterpResult<'tcx, T>,
+    ) -> InterpResult<'tcx, &'a T>
+    where
+        'tcx: 'a,
+    {
+        let this = self.eval_context_mut();
+        // Ensure there is memory behind this pointer, so that this allocation
+        // is truly the only place where the data could be stored.
+        this.check_ptr_access(ptr, Size::from_bytes(1), CheckInAllocMsg::InboundsTest)?;
+
+        let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0)?;
+        let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc)?;
+        // Due to borrow checker reasons, we have to do the lookup twice.
+        if alloc_extra.get_sync::<T>(offset).is_none() {
+            let new = new(machine)?;
+            alloc_extra.sync.insert(offset, Box::new(new));
+        }
+        interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
+    }
+
     #[inline]
     /// Get the id of the thread that currently owns this lock.
     fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
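
For orientation, here is a rough sketch of how a shim could drive the new `lazy_sync_get_data` method. Only the call shape comes from the diff above; the `PthreadMutex` payload, the `create_mutex_id` helper, and the caller-supplied `init_offset` are hypothetical stand-ins for whatever a concrete shim defines, and the usual `crate::*` imports of this module are assumed.

// Hypothetical per-mutex payload kept in `alloc_extra.sync`; real shims
// define their own `Copy` types for this.
#[derive(Clone, Copy)]
struct PthreadMutex {
    id: MutexId, // interpreter-side mutex state
    kind: i32,   // e.g. the pthread mutex kind, stored alongside the id
}

fn mutex_get_data<'tcx>(
    ecx: &mut MiriInterpCx<'tcx>,
    mutex_ptr: &OpTy<'tcx>,
    init_offset: Size, // target-specific offset of the "init" field (assumed known)
) -> InterpResult<'tcx, PthreadMutex> {
    let mutex = ecx.deref_pointer(mutex_ptr)?;
    ecx.lazy_sync_get_data(
        &mutex,
        init_offset,
        // Init cookie present but no table entry: the primitive was moved
        // after first use, which the shim reports as UB.
        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
        // First use: create fresh interpreter-side state for this mutex.
        |ecx| {
            let id = create_mutex_id(ecx)?; // hypothetical helper
            interp_ok(PthreadMutex { id, kind: 0 })
        },
    )
}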
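By contrast, `get_sync_or_init` skips the init-cookie protocol and simply keys the interpreter-side state on the address, which suits primitives that have no usable "initialized" field; it also hands back a reference, so `T` does not need to be `Copy`. The `InitOnceState` type and the `init_once_get_data` wrapper below are illustrative names only, a minimal sketch under the same assumptions as above.

// Hypothetical interpreter-side state for a one-time-initialization primitive.
#[derive(Default)]
struct InitOnceState {
    waiters: Vec<ThreadId>, // threads blocked on this primitive (illustrative)
    done: bool,
}

fn init_once_get_data<'tcx, 'a>(
    ecx: &'a mut MiriInterpCx<'tcx>,
    init_once_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, &'a InitOnceState> {
    let ptr = ecx.read_pointer(init_once_ptr)?;
    // The closure runs only if no state is associated with this address yet;
    // afterwards every call returns a reference to the same entry.
    ecx.get_sync_or_init(ptr, |_machine| interp_ok(InitOnceState::default()))
}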