@@ -1462,12 +1462,12 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_add_bytes`](Self::fetch_add_bytes) method instead.
+    /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
     ///
-    /// `fetch_add` takes an [`Ordering`] argument which describes the memory
-    /// ordering of this operation. All ordering modes are possible. Note that
-    /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
-    /// and using [`Release`] makes the load part [`Relaxed`].
+    /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
+    /// memory ordering of this operation. All ordering modes are possible. Note
+    /// that using [`Acquire`] makes the store part of this operation
+    /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
     ///
     /// **Note**: This method is only available on platforms that support atomic
     /// operations on [`AtomicPtr`].
@@ -1481,15 +1481,15 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: units of `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_add_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_add(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by subtracting `val` (in units of `T`),
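A minimal sketch of the renamed `fetch_ptr_add` in use, assuming the nightly `strict_provenance_atomic_ptr` and `strict_provenance` features (`addr()` is also nightly-gated at this point): the count is in elements of `T`, so the address moves by `val * size_of::<T>()` bytes.

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());

    // +2 elements of i64 moves the address by 2 * size_of::<i64>() = 16 bytes.
    let old = atom.fetch_ptr_add(2, Ordering::Relaxed);
    assert_eq!(old.addr(), 0); // the previous pointer is returned
    assert_eq!(atom.load(Ordering::Relaxed).addr(), 16);
}
```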
@@ -1502,9 +1502,9 @@ impl<T> AtomicPtr<T> {
     /// to offset the pointer by an amount which is not a multiple of
     /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
     /// work with a deliberately misaligned pointer. In such cases, you may use
-    /// the [`fetch_sub_bytes`](Self::fetch_sub_bytes) method instead.
+    /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
     ///
-    /// `fetch_sub` takes an [`Ordering`] argument which describes the memory
+    /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
     /// ordering of this operation. All ordering modes are possible. Note that
     /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
     /// and using [`Release`] makes the load part [`Relaxed`].
@@ -1524,16 +1524,16 @@ impl<T> AtomicPtr<T> {
     /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
     ///
     /// assert!(core::ptr::eq(
-    ///     atom.fetch_sub(1, Ordering::Relaxed),
+    ///     atom.fetch_ptr_sub(1, Ordering::Relaxed),
     ///     &array[1],
     /// ));
     /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
-        self.fetch_sub_bytes(val.wrapping_mul(core::mem::size_of::<T>()), order)
+    pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
+        self.fetch_byte_sub(val.wrapping_mul(core::mem::size_of::<T>()), order)
     }
 
     /// Offsets the pointer's address by adding `val` *bytes*, returning the
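Correspondingly for `fetch_ptr_sub`, a sketch under the same nightly-feature assumption, stepping a pointer back one element as in the doc example above:

```rust
#![feature(strict_provenance_atomic_ptr)]
use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let array = [1i32, 2i32];
    // Start one element past the front of the array.
    let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut i32);

    // Subtracting one element (4 bytes for i32) returns the old pointer...
    assert!(core::ptr::eq(atom.fetch_ptr_sub(1, Ordering::Relaxed), &array[1]));
    // ...and leaves the atomic pointing at the front of the array.
    assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
}
```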
@@ -1542,7 +1542,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1560,14 +1560,14 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
-    /// assert_eq!(atom.fetch_add_bytes(1, Ordering::Relaxed).addr(), 0);
+    /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
     /// // Note: in units of bytes, not `size_of::<i64>()`.
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_add_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
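A short sketch of the deliberately-misaligned-pointer use case that the docs name as the motivation for the byte variants (same nightly-feature assumptions as above):

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());

    // fetch_ptr_add can only move in size_of::<i64>() steps; fetch_byte_add
    // can deliberately leave the pointer's address misaligned for i64:
    atom.fetch_byte_add(3, Ordering::Relaxed);
    let addr = atom.load(Ordering::Relaxed).addr();
    assert_eq!(addr, 3);
    assert_ne!(addr % core::mem::align_of::<i64>(), 0); // misaligned on purpose
}
```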
@@ -1586,7 +1586,7 @@ impl<T> AtomicPtr<T> {
     /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
     /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
     ///
-    /// `fetch_add_bytes` takes an [`Ordering`] argument which describes the
+    /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
     /// memory ordering of this operation. All ordering modes are possible. Note
     /// that using [`Acquire`] makes the store part of this operation
     /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
@@ -1604,13 +1604,13 @@ impl<T> AtomicPtr<T> {
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
     /// let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
-    /// assert_eq!(atom.fetch_sub_bytes(1, Ordering::Relaxed).addr(), 1);
+    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
     /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
     #[unstable(feature = "strict_provenance_atomic_ptr", issue = "95228")]
-    pub fn fetch_sub_bytes(&self, val: usize, order: Ordering) -> *mut T {
+    pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
         #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
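Finally, a sketch of `fetch_byte_sub` and the `wrapping_sub` equivalence described above, using `core::ptr::invalid_mut` as in the doc example (nightly features assumed as before):

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use core::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    // An address-only pointer with address 10 (no provenance).
    let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(10));

    // Equivalent to ptr = ptr.cast::<u8>().wrapping_sub(3).cast::<i64>():
    let old = atom.fetch_byte_sub(3, Ordering::Relaxed);
    assert_eq!(old.addr(), 10); // returns the previous pointer
    assert_eq!(atom.load(Ordering::Relaxed).addr(), 7);
}
```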