 use alloc::collections::VecDeque;
-use core::{cmp, hint::unreachable_unchecked, mem::MaybeUninit, slice};
+use core::cmp;

 pub struct RingBuffer {
-    buf: VecDeque<MaybeUninit<u8>>,
+    buf: VecDeque<u8>,
 }

 impl RingBuffer {
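For context on the change above: with `VecDeque<u8>` every stored slot is always an initialized byte, so reading it back needs no `unsafe`. A minimal standalone sketch of the pattern (not part of this diff; uses `std` for brevity, while the crate itself pulls `VecDeque` from `alloc`):

use std::collections::VecDeque;

fn main() {
    let mut buf: VecDeque<u8> = VecDeque::new();
    buf.extend([1u8, 2, 3]);
    // Safe read-back; no MaybeUninit::assume_init required.
    assert_eq!(buf.get(1).copied(), Some(2));
}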
@@ -24,12 +24,10 @@ impl RingBuffer {
     }

     /// Return the amount of available space (in bytes) of the buffer.
+    #[cfg(test)]
     pub fn free(&self) -> usize {
         let len = self.buf.len();
         let capacity = self.buf.capacity();
-        if len > capacity {
-            unsafe { unreachable_unchecked() }
-        }

         capacity - len
     }
@@ -46,41 +44,23 @@ impl RingBuffer {

     /// Ensure that there's space for `additional` elements in the buffer.
     pub fn reserve(&mut self, additional: usize) {
-        if self.free() < additional {
-            self.reserve_amortized(additional);
-        }
-
-        if self.free() < additional {
-            unsafe { unreachable_unchecked() }
-        }
-    }
-
-    #[inline(never)]
-    #[cold]
-    fn reserve_amortized(&mut self, additional: usize) {
         self.buf.reserve(additional);
     }

     #[allow(dead_code)]
     pub fn push_back(&mut self, byte: u8) {
-        self.reserve(1);
-        self.buf.push_back(MaybeUninit::new(byte));
+        self.buf.push_back(byte);
     }

     /// Fetch the byte stored at the selected index from the buffer, returning it, or
     /// `None` if the index is out of bounds.
     #[allow(dead_code)]
     pub fn get(&self, idx: usize) -> Option<u8> {
-        self.buf
-            .get(idx)
-            .map(|&byte| unsafe { MaybeUninit::assume_init(byte) })
+        self.buf.get(idx).copied()
     }

     /// Append the provided data to the end of `self`.
     pub fn extend(&mut self, data: &[u8]) {
-        let len = data.len();
-        let data = data.as_ptr().cast::<MaybeUninit<u8>>();
-        let data = unsafe { slice::from_raw_parts(data, len) };
         self.buf.extend(data);
     }

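The methods above now delegate straight to `VecDeque`. A hedged usage sketch, assuming the crate's existing constructor (hypothetically `RingBuffer::new()`, defined outside these hunks):

let mut rb = RingBuffer::new(); // constructor assumed, not shown in this diff
rb.reserve(3);
rb.push_back(0x2a);
rb.extend(&[0x2b, 0x2c]);
assert_eq!(rb.get(0), Some(0x2a));
assert_eq!(rb.get(3), None); // out-of-bounds reads yield None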
@@ -93,16 +73,12 @@ impl RingBuffer {

     /// Return references to each part of the ring buffer.
     pub fn as_slices(&self) -> (&[u8], &[u8]) {
-        let (a, b) = self.buf.as_slices();
-
-        (unsafe { slice_assume_init_ref_polyfill(a) }, unsafe {
-            slice_assume_init_ref_polyfill(b)
-        })
+        self.buf.as_slices()
     }

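`as_slices` returns two slices because a `VecDeque` may wrap around the end of its backing allocation; concatenating them always reproduces the logical contents. An illustration against plain `VecDeque` (standalone, assumed values):

use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<u8> = VecDeque::with_capacity(4);
    dq.extend([1u8, 2, 3, 4]);
    dq.pop_front();
    dq.push_back(5); // the storage may now wrap internally
    let (a, b) = dq.as_slices();
    let joined: Vec<u8> = a.iter().chain(b).copied().collect();
    assert_eq!(joined, [2, 3, 4, 5]); // wrapped or not, order is preserved
}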
     /// Copies elements from the provided range to the end of the buffer.
     #[allow(dead_code)]
-    pub fn extend_from_within(&mut self, start: usize, len: usize) {
+    pub fn extend_from_within(&mut self, mut start: usize, len: usize) {
         if start + len > self.len() {
             panic!(
                 "Calls to this function must respect start ({}) + len ({}) <= self.len() ({})!",
@@ -112,43 +88,26 @@ impl RingBuffer {
             );
         }

-        self.reserve(len);
-
-        // SAFETY: Requirements checked:
-        // 2. explicitly checked above, resulting in a panic if it does not hold
-        // 3. explicitly reserved enough memory
-        unsafe { self.extend_from_within_unchecked(start, len) }
-    }
+        // Naive and cheaper implementation (for small lengths)
+        if len <= 12 {
+            self.reserve(len);
+            for i in 0..len {
+                let byte = self.get(start + i).unwrap();
+                self.push_back(byte);
+            }

-    /// Copies data from the provided range to the end of the buffer, without
-    /// first verifying that the unoccupied capacity is available.
-    ///
-    /// SAFETY:
-    /// For this to be safe two requirements need to hold:
-    /// 2. start + len <= self.len() so we do not copy uninitialised memory
-    /// 3. More than len reserved space so we do not write out-of-bounds
-    #[warn(unsafe_op_in_unsafe_fn)]
-    pub unsafe fn extend_from_within_unchecked(&mut self, mut start: usize, len: usize) {
-        debug_assert!(start + len <= self.len());
-        debug_assert!(self.free() >= len);
-
-        if self.free() < len {
-            unsafe { unreachable_unchecked() }
+            return;
         }

         let original_len = self.len();
         let mut intermediate = {
             IntermediateRingBuffer {
                 this: self,
                 original_len,
-                disarmed: false,
             }
         };

-        intermediate
-            .this
-            .buf
-            .extend((0..len).map(|_| MaybeUninit::uninit()));
+        intermediate.this.buf.extend((0..len).map(|_| 0));
         debug_assert_eq!(intermediate.this.buf.len(), original_len + len);

         let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut();
@@ -157,7 +116,7 @@ impl RingBuffer {
         let skip = cmp::min(a.len(), start);
         start -= skip;
         let a = &a[skip..];
-        let b = unsafe { b.get_unchecked(start..) };
+        let b = &b[start..];

         let mut remaining_copy_len = len;

@@ -167,7 +126,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;

         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }

@@ -180,7 +138,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;

         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }

@@ -192,7 +149,6 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;

         if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
             return;
         }

@@ -204,22 +160,17 @@ impl RingBuffer {
         remaining_copy_len -= copy_at_least;

         debug_assert_eq!(remaining_copy_len, 0);
-
-        intermediate.disarmed = true;
     }
 }
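`extend_from_within` is the LZ77-style back-reference copy used when executing zstd sequences: it re-appends `len` bytes of history starting at `start`. A hedged usage sketch (again assuming a `RingBuffer::new()` constructor):

let mut rb = RingBuffer::new(); // constructor assumed, not shown in this diff
rb.extend(b"abcabc");
// Re-append the first three bytes; contents become "abcabcabc".
// len == 3 is <= 12, so this takes the naive per-byte path above.
rb.extend_from_within(0, 3);
assert_eq!(rb.len(), 9);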

 struct IntermediateRingBuffer<'a> {
     this: &'a mut RingBuffer,
     original_len: usize,
-    disarmed: bool,
 }

 impl<'a> IntermediateRingBuffer<'a> {
     // inspired by `Vec::split_at_spare_mut`
-    fn as_slices_spare_mut(
-        &mut self,
-    ) -> (&[u8], &[u8], &mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) {
+    fn as_slices_spare_mut(&mut self) -> (&[u8], &[u8], &mut [u8], &mut [u8]) {
         let (a, b) = self.this.buf.as_mut_slices();
         debug_assert!(a.len() + b.len() >= self.original_len);

@@ -229,26 +180,11 @@ impl<'a> IntermediateRingBuffer<'a> {
         let b_mid = remaining_init_len;
         debug_assert!(b.len() >= b_mid);

-        let (a, a_spare) = unsafe { a.split_at_mut_unchecked(a_mid) };
-        let (b, b_spare) = unsafe { b.split_at_mut_unchecked(b_mid) };
+        let (a, a_spare) = a.split_at_mut(a_mid);
+        let (b, b_spare) = b.split_at_mut(b_mid);
         debug_assert!(a_spare.is_empty() || b.is_empty());

-        (
-            unsafe { slice_assume_init_ref_polyfill(a) },
-            unsafe { slice_assume_init_ref_polyfill(b) },
-            a_spare,
-            b_spare,
-        )
-    }
-}
-
-impl<'a> Drop for IntermediateRingBuffer<'a> {
-    fn drop(&mut self) {
-        if self.disarmed {
-            return;
-        }
-
-        self.this.buf.truncate(self.original_len);
+        (a, b, a_spare, b_spare)
     }
 }

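`as_slices_spare_mut` splits each half of the deque at the boundary between bytes that were already present and the zero-filled tail appended by `extend_from_within`. The same safe `split_at_mut` pattern in isolation (toy values, not the crate's API):

fn main() {
    let mut half = [1u8, 2, 3, 0, 0]; // three "initialized" bytes + two spare slots
    let (init, spare) = half.split_at_mut(3);
    assert_eq!(init, &[1, 2, 3]);
    spare.copy_from_slice(&[4, 5]); // writes land only in the spare region
    assert_eq!(half, [1, 2, 3, 4, 5]);
}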
@@ -265,48 +201,11 @@ impl<'a> Drop for IntermediateRingBuffer<'a> {
 /// The chunk size is not part of the contract and may change depending on the target platform.
 ///
 /// If that isn't possible we just fall back to ptr::copy_nonoverlapping
-fn copy_bytes_overshooting(src: &[u8], dst: &mut [MaybeUninit<u8>], copy_at_least: usize) {
-    // this assert is required for this function to be safe
-    // the optimizer should be able to remove it given how the caller
-    // has somehow to figure out `copy_at_least <= src.len() && copy_at_least <= dst.len()`
-    assert!(src.len() >= copy_at_least && dst.len() >= copy_at_least);
-
-    type CopyType = usize;
-
-    const COPY_AT_ONCE_SIZE: usize = core::mem::size_of::<CopyType>();
-    let min_buffer_size = usize::min(src.len(), dst.len());
-
-    // this check should be removed by the optimizer thanks to the above assert
-    // if `src.len() >= copy_at_least && dst.len() >= copy_at_least` then `min_buffer_size >= copy_at_least`
-    assert!(min_buffer_size >= copy_at_least);
-
-    // these bounds checks are removed because this is guaranteed:
-    // `min_buffer_size <= src.len() && min_buffer_size <= dst.len()`
-    let src = &src[..min_buffer_size];
-    let dst = &mut dst[..min_buffer_size];
-
-    // Can copy in just one read+write, very common case
-    if min_buffer_size >= COPY_AT_ONCE_SIZE && copy_at_least <= COPY_AT_ONCE_SIZE {
-        let chunk = unsafe { src.as_ptr().cast::<CopyType>().read_unaligned() };
-        unsafe { dst.as_mut_ptr().cast::<CopyType>().write_unaligned(chunk) };
-    } else {
-        unsafe {
-            dst.as_mut_ptr()
-                .cast::<u8>()
-                .copy_from_nonoverlapping(src.as_ptr(), copy_at_least)
-        };
-    }
-
-    debug_assert_eq!(&src[..copy_at_least], unsafe {
-        slice_assume_init_ref_polyfill(&dst[..copy_at_least])
-    });
-}
+fn copy_bytes_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
+    let src = &src[..copy_at_least];
+    let dst = &mut dst[..copy_at_least];

-#[inline(always)]
-unsafe fn slice_assume_init_ref_polyfill(slice: &[MaybeUninit<u8>]) -> &[u8] {
-    let len = slice.len();
-    let data = slice.as_ptr().cast::<u8>();
-    slice::from_raw_parts(data, len)
+    dst.copy_from_slice(src);
 }
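The doc comment above still describes the strategy of the removed body: copy whole `usize` chunks even when that overshoots `copy_at_least` by a few bytes (never past either slice). A condensed safe-Rust sketch of that removed technique, for reference only:

fn copy_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
    const CHUNK: usize = core::mem::size_of::<usize>();
    let n = src.len().min(dst.len());
    assert!(n >= copy_at_least);
    if n >= CHUNK && copy_at_least <= CHUNK {
        // One word-sized copy covers the request, possibly writing extra bytes.
        dst[..CHUNK].copy_from_slice(&src[..CHUNK]);
    } else {
        dst[..copy_at_least].copy_from_slice(&src[..copy_at_least]);
    }
}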

 #[cfg(test)]