|
8 | 8 | // option. This file may not be copied, modified, or distributed |
9 | 9 | // except according to those terms. |
10 | 10 |
|
11 | | -use prelude::v1::*; |
12 | | - |
13 | | -use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; |
14 | | -use alloc::{self, heap}; |
15 | | - |
16 | | -use libc::DWORD; |
| 11 | +use marker::Sync; |
| 12 | +use cell::UnsafeCell; |
17 | 13 | use sys::sync as ffi; |
18 | 14 |
|
19 | | -const SPIN_COUNT: DWORD = 4000; |
| 15 | +pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> } |
20 | 16 |
|
21 | | -pub struct Mutex { inner: AtomicUsize } |
22 | | - |
23 | | -pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_USIZE_INIT }; |
| 17 | +pub const MUTEX_INIT: Mutex = Mutex { |
| 18 | + inner: UnsafeCell { value: ffi::SRWLOCK_INIT } |
| 19 | +}; |
24 | 20 |
|
25 | 21 | unsafe impl Sync for Mutex {} |
26 | 22 |
|
27 | 23 | #[inline] |
28 | | -pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION { |
29 | | - m.get() |
| 24 | +pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK { |
| 25 | + m.inner.get() |
30 | 26 | } |
31 | 27 |
|
| 28 | +// So you might be asking: why are we using SRWLock instead of CriticalSection?
| 29 | +// |
| 30 | +// 1. SRWLock is several times faster than CriticalSection according to
| 31 | +//    benchmarks performed on both Windows 8 and Windows 7.
| 32 | +// |
| 33 | +// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix
| 34 | +//    implementation deadlocks as well, so consistency is preferred. See #19962 for more details.
| 35 | +// |
| 36 | +// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy is that
| 37 | +//    there are no guarantees of fairness.
| 38 | + |
32 | 39 | impl Mutex { |
33 | 40 | #[inline] |
34 | | - pub unsafe fn new() -> Mutex { |
35 | | - Mutex { inner: AtomicUsize::new(init_lock() as uint) } |
36 | | - } |
| 41 | + pub unsafe fn new() -> Mutex { MUTEX_INIT } |
37 | 42 | #[inline] |
38 | 43 | pub unsafe fn lock(&self) { |
39 | | - ffi::EnterCriticalSection(self.get()) |
| 44 | + ffi::AcquireSRWLockExclusive(self.inner.get()) |
40 | 45 | } |
41 | 46 | #[inline] |
42 | 47 | pub unsafe fn try_lock(&self) -> bool { |
43 | | - ffi::TryEnterCriticalSection(self.get()) != 0 |
| 48 | + ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0 |
44 | 49 | } |
45 | 50 | #[inline] |
46 | 51 | pub unsafe fn unlock(&self) { |
47 | | - ffi::LeaveCriticalSection(self.get()) |
| 52 | + ffi::ReleaseSRWLockExclusive(self.inner.get()) |
48 | 53 | } |
| 54 | + #[inline] |
49 | 55 | pub unsafe fn destroy(&self) { |
50 | | - let lock = self.inner.swap(0, Ordering::SeqCst); |
51 | | - if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) } |
52 | | - } |
53 | | - |
54 | | - unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION { |
55 | | - match self.inner.load(Ordering::SeqCst) { |
56 | | - 0 => {} |
57 | | - n => return n as ffi::LPCRITICAL_SECTION |
58 | | - } |
59 | | - let lock = init_lock(); |
60 | | - match self.inner.compare_and_swap(0, lock as uint, Ordering::SeqCst) { |
61 | | - 0 => return lock as ffi::LPCRITICAL_SECTION, |
62 | | - _ => {} |
63 | | - } |
64 | | - free_lock(lock); |
65 | | - return self.inner.load(Ordering::SeqCst) as ffi::LPCRITICAL_SECTION; |
| 56 | + // ... (nothing to do: SRW locks require no explicit teardown)
66 | 57 | } |
67 | 58 | } |
68 | | - |
69 | | -unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION { |
70 | | - let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8) |
71 | | - as ffi::LPCRITICAL_SECTION; |
72 | | - if block.is_null() { alloc::oom() } |
73 | | - ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT); |
74 | | - return block; |
75 | | -} |
76 | | - |
77 | | -unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) { |
78 | | - ffi::DeleteCriticalSection(h); |
79 | | - heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8); |
80 | | -} |
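
For context when reading the new calls: the diff assumes `sys::sync` exposes the Win32 SRWLock primitives. The block below is a minimal sketch of what such bindings look like, not the actual module; the module layout and struct field name are assumptions, while the `extern` function names and the zero-initialized `SRWLOCK_INIT` value follow the documented Windows API.

```rust
// Hedged sketch of the SRWLock FFI surface used above; field name and layout
// here are assumptions, the extern function names are real kernel32 exports.
use std::os::raw::c_void;
use std::ptr;

#[repr(C)]
pub struct SRWLOCK {
    ptr: *mut c_void, // RTL_SRWLOCK is a single pointer-sized word
}

pub type PSRWLOCK = *mut SRWLOCK;

// An SRW lock is initialized by being all zeroes, which is what makes the
// `const MUTEX_INIT` in the diff possible without a runtime InitializeSRWLock
// call.
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() };

extern "system" {
    pub fn AcquireSRWLockExclusive(lock: PSRWLOCK);
    pub fn ReleaseSRWLockExclusive(lock: PSRWLOCK);
    // BOOLEAN: nonzero only if the lock was acquired, hence the `!= 0` check.
    pub fn TryAcquireSRWLockExclusive(lock: PSRWLOCK) -> u8;
}
```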
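The user-visible consequence of point 2 in the new comment is that locking is not reentrant: a thread that already holds the mutex must not acquire it again, on Windows just as on Unix. A small runnable illustration of that contract at the `std::sync::Mutex` level, which is layered on top of this sys-level type:

```rust
use std::sync::Mutex;

fn main() {
    let m = Mutex::new(0u32);

    let guard = m.lock().unwrap();
    // The lock is already held by this thread. There is no recursive
    // acquisition: a second attempt simply sees the lock as unavailable
    // (and a blocking lock() here would self-deadlock).
    assert!(m.try_lock().is_err());

    drop(guard);
    // Once the guard is dropped the lock can be taken again.
    assert!(m.try_lock().is_ok());
}
```

Under the old CriticalSection backend a same-thread re-lock would have succeeded on Windows while deadlocking on Unix; with SRWLock both platforms behave the same way, which is the consistency argument point 2 is making.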