|
47 | 47 |
|
48 | 48 | #[allow(non_camel_case_types)]; |
49 | 49 |
|
| 50 | +use int; |
50 | 51 | use libc::c_void; |
51 | 52 | use sync::atomics; |
52 | 53 |
|
@@ -315,10 +316,156 @@ mod imp { |
315 | 316 | } |
316 | 317 | } |
317 | 318 |
|
| 319 | +/// A type which can be used to run a one-time global initialization. This type |
| 320 | +/// is *unsafe* to use because it is built on top of the `Mutex` in this module. |
| 321 | +/// It does not know whether the currently running task is in a green or native |
| 322 | +/// context, and a blocking mutex should *not* be used under normal |
| 323 | +/// circumstances on a green task. |
| 324 | +/// |
| 325 | +/// Despite its unsafety, it is often useful to have a one-time initialization |
| 326 | +/// routine run for FFI bindings or related external functionality. This type |
| 327 | +/// can only be statically constructed with the `ONCE_INIT` value. |
| 328 | +/// |
| 329 | +/// # Example |
| 330 | +/// |
| 331 | +/// ```rust |
| 332 | +/// use std::unstable::mutex::{Once, ONCE_INIT}; |
| 333 | +/// |
| 334 | +/// static mut START: Once = ONCE_INIT; |
| 335 | +/// unsafe { |
| 336 | +/// START.doit(|| { |
| 337 | +/// // run initialization here |
| 338 | +/// }); |
| 339 | +/// } |
| 340 | +/// ``` |
| 341 | +pub struct Once { |
| 342 | + priv mutex: Mutex, |
| 343 | + priv cnt: atomics::AtomicInt, |
| 344 | + priv lock_cnt: atomics::AtomicInt, |
| 345 | +} |
| 346 | + |
| 347 | +/// Initialization value for static `Once` values. |
| 348 | +pub static ONCE_INIT: Once = Once { |
| 349 | + mutex: MUTEX_INIT, |
| 350 | + cnt: atomics::INIT_ATOMIC_INT, |
| 351 | + lock_cnt: atomics::INIT_ATOMIC_INT, |
| 352 | +}; |
| 353 | + |
| 354 | +impl Once { |
| 355 | + /// Perform an initialization routine once and only once. The given closure |
| 356 | + /// will be executed if this is the first time `doit` has been called, and |
| 357 | + /// otherwise the routine will *not* be invoked. |
| 358 | + /// |
| 359 | + /// This method will block the calling *os thread* if another initialization |
| 360 | + /// routine is currently running. |
| 361 | + /// |
| 362 | + /// When this function returns, it is guaranteed that some initialization |
| 363 | + /// has run and completed (it may not be the closure specified). |
| 364 | + pub fn doit(&mut self, f: ||) { |
| 365 | + // Implementation-wise, this would seem like a fairly trivial primitive. |
| 366 | +        // The tricky part is that our mutexes currently require an |
| 367 | +        // allocation, and usage of a `Once` shouldn't leak this allocation. |
| 368 | + // |
| 369 | + // This means that there must be a deterministic destroyer of the mutex |
| 370 | + // contained within (because it's not needed after the initialization |
| 371 | + // has run). |
| 372 | + // |
| 373 | + // The general scheme here is to gate all future threads once |
| 374 | + // initialization has completed with a "very negative" count, and to |
| 375 | +        // allow threads through to lock the mutex if they see a non-negative |
| 376 | + // count. For all threads grabbing the mutex, exactly one of them should |
| 377 | + // be responsible for unlocking the mutex, and this should only be done |
| 378 | + // once everyone else is done with the mutex. |
| 379 | + // |
| 380 | + // This atomicity is achieved by swapping a very negative value into the |
| 381 | +        // shared count when the initialization routine has completed. The swap |
| 382 | +        // returns the number of threads which will at some point attempt to |
| 383 | + // acquire the mutex. This count is then squirreled away in a separate |
| 384 | + // variable, and the last person on the way out of the mutex is then |
| 385 | + // responsible for destroying the mutex. |
| 386 | + // |
| 387 | + // It is crucial that the negative value is swapped in *after* the |
| 388 | + // initialization routine has completed because otherwise new threads |
| 389 | + // calling `doit` will return immediately before the initialization has |
| 390 | + // completed. |
| 391 | + |
| 392 | + let prev = self.cnt.fetch_add(1, atomics::SeqCst); |
| 393 | + if prev < 0 { |
| 394 | +            // Make sure we never overflow; we'll never have int::min_value |
| 395 | +            // simultaneous calls to `doit`, so the count can't wrap back to 0. |
| 396 | + self.cnt.store(int::min_value, atomics::SeqCst); |
| 397 | + return |
| 398 | + } |
| 399 | + |
| 400 | + // If the count is negative, then someone else finished the job, |
| 401 | + // otherwise we run the job and record how many people will try to grab |
| 402 | + // this lock |
| 403 | + unsafe { self.mutex.lock() } |
| 404 | + if self.cnt.load(atomics::SeqCst) > 0 { |
| 405 | + f(); |
| 406 | + let prev = self.cnt.swap(int::min_value, atomics::SeqCst); |
| 407 | + self.lock_cnt.store(prev, atomics::SeqCst); |
| 408 | + } |
| 409 | + unsafe { self.mutex.unlock() } |
| 410 | + |
| 411 | + // Last one out cleans up after everyone else, no leaks! |
| 412 | + if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 { |
| 413 | + unsafe { self.mutex.destroy() } |
| 414 | + } |
| 415 | + } |
| 416 | +} |
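
A minimal usage sketch of the new primitive, written in the same pre-1.0 dialect as the doc-comment example above; the `ffi_global_init` and `ensure_initialized` names are hypothetical and not part of this patch:

```rust
use std::unstable::mutex::{Once, ONCE_INIT};

// Hypothetical C entry point whose global state must be set up exactly once.
extern { fn ffi_global_init(); }

static mut START: Once = ONCE_INIT;

fn ensure_initialized() {
    unsafe {
        // Only the first caller's closure runs; every later caller either
        // returns immediately or blocks until that initialization completes.
        START.doit(|| ffi_global_init());
    }
}
```
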
| 417 | + |
318 | 418 | #[cfg(test)] |
319 | 419 | mod test { |
320 | | - use super::{Mutex, MUTEX_INIT}; |
| 420 | + use prelude::*; |
| 421 | + |
321 | 422 | use rt::thread::Thread; |
| 423 | + use super::{ONCE_INIT, Once, Mutex, MUTEX_INIT}; |
| 424 | + use task; |
| 425 | + |
| 426 | + #[test] |
| 427 | + fn smoke_once() { |
| 428 | + static mut o: Once = ONCE_INIT; |
| 429 | + let mut a = 0; |
| 430 | + unsafe { o.doit(|| a += 1); } |
| 431 | + assert_eq!(a, 1); |
| 432 | + unsafe { o.doit(|| a += 1); } |
| 433 | + assert_eq!(a, 1); |
| 434 | + } |
| 435 | + |
| 436 | + #[test] |
| 437 | + fn stampede_once() { |
| 438 | + static mut o: Once = ONCE_INIT; |
| 439 | + static mut run: bool = false; |
| 440 | + |
| 441 | + let (p, c) = SharedChan::new(); |
| 442 | + for _ in range(0, 10) { |
| 443 | + let c = c.clone(); |
| 444 | + do spawn { |
| 445 | + for _ in range(0, 4) { task::deschedule() } |
| 446 | + unsafe { |
| 447 | + o.doit(|| { |
| 448 | + assert!(!run); |
| 449 | + run = true; |
| 450 | + }); |
| 451 | + assert!(run); |
| 452 | + } |
| 453 | + c.send(()); |
| 454 | + } |
| 455 | + } |
| 456 | + |
| 457 | + unsafe { |
| 458 | + o.doit(|| { |
| 459 | + assert!(!run); |
| 460 | + run = true; |
| 461 | + }); |
| 462 | + assert!(run); |
| 463 | + } |
| 464 | + |
| 465 | + for _ in range(0, 10) { |
| 466 | + p.recv(); |
| 467 | + } |
| 468 | + } |
322 | 469 |
|
323 | 470 | #[test] |
324 | 471 | fn smoke_lock() { |
|