remove element_unordered_atomic intrinsics #789

Merged
merged 1 commit on Mar 18, 2025
src/mem/mod.rs: 0 additions & 132 deletions
@@ -8,10 +8,6 @@ type c_int = i16;
#[cfg(not(target_pointer_width = "16"))]
type c_int = i32;

use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
use core::mem;
use core::ops::{BitOr, Shl};

// memcpy/memmove/memset have optimized implementations on some architectures
#[cfg_attr(
    all(not(feature = "no-asm"), target_arch = "x86_64"),
@@ -60,131 +56,3 @@ intrinsics! {
        impls::c_string_length(s)
    }
}

// `bytes` must be a multiple of `mem::size_of::<T>()`
#[cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
fn memcpy_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
    unsafe {
        let n = exact_div(bytes, mem::size_of::<T>());
        let mut i = 0;
        while i < n {
            atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
            i += 1;
        }
    }
}
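For reference, a hypothetical in-file caller of this helper could look like the sketch below (illustrative only, not part of the original source); `bytes` must be an exact multiple of the element size, since `exact_div` makes anything else undefined behavior.

// Hypothetical usage sketch; `memcpy_element_unordered_atomic` was private to this module.
fn copy_words_example() {
    let src = [1u32, 2, 3, 4];
    let mut dst = [0u32; 4];
    // 16 bytes = 4 elements of 4 bytes each.
    memcpy_element_unordered_atomic(dst.as_mut_ptr(), src.as_ptr(), 16);
    assert_eq!(dst, src);
}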

// `bytes` must be a multiple of `mem::size_of::<T>()`
#[cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
fn memmove_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
    unsafe {
        let n = exact_div(bytes, mem::size_of::<T>());
        if src < dest as *const T {
            // Destination starts above the source, so copy from the end to
            // avoid overwriting source elements before they are read.
            let mut i = n;
            while i != 0 {
                i -= 1;
                atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
            }
        } else {
            // Safe to copy forward from the beginning.
            let mut i = 0;
            while i < n {
                atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
                i += 1;
            }
        }
    }
}
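The direction check is the classic memmove trick: when the buffers overlap and `dest` lies above `src`, a forward copy would overwrite source elements before they are read. A safe-Rust analogue of the same idea, purely as an illustration (this helper is not in the crate):

// Shift a slice one slot to the right in place. A forward loop would
// read slots it had already clobbered, so it walks backwards, mirroring
// the `src < dest` branch above.
fn shift_right_example(buf: &mut [u32]) {
    let mut i = buf.len();
    while i > 1 {
        i -= 1;
        buf[i] = buf[i - 1];
    }
}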

// `T` must be a primitive integer type, and `bytes` must be a multiple of `mem::size_of::<T>()`
#[cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
fn memset_element_unordered_atomic<T>(s: *mut T, c: u8, bytes: usize)
where
    T: Copy + From<u8> + Shl<u32, Output = T> + BitOr<T, Output = T>,
{
    unsafe {
        let n = exact_div(bytes, mem::size_of::<T>());

        // Construct a value of type `T` consisting of repeated `c`
        // bytes, to let us ensure we write each `T` atomically.
        let mut x = T::from(c);
        let mut i = 1;
        while i < mem::size_of::<T>() {
            x = (x << 8) | T::from(c);
            i += 1;
        }

        // Write it to `s`
        let mut i = 0;
        while i < n {
            atomic_store_unordered(s.add(i), x);
            i += 1;
        }
    }
}
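The shift-and-or loop splats `c` across every byte lane of `T`. A quick standalone check of what it computes for `T = u32`, written out as a hypothetical example:

// Hypothetical check, not part of the original file.
fn splat_example() {
    let c = 0xABu8;
    let mut x = u32::from(c);
    let mut i = 1;
    while i < core::mem::size_of::<u32>() {
        x = (x << 8) | u32::from(c);
        i += 1;
    }
    // One copy of 0xAB per byte of u32.
    assert_eq!(x, 0xABAB_ABAB);
}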

intrinsics! {
    #[cfg(target_has_atomic_load_store = "8")]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "16")]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "32")]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "64")]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "128")]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "8")]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "16")]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "32")]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "64")]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg(target_has_atomic_load_store = "128")]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg(target_has_atomic_load_store = "8")]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "16")]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_2(s: *mut u16, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "32")]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_4(s: *mut u32, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "64")]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg(target_has_atomic_load_store = "128")]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
}
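The numeric suffix on each symbol is the element size in bytes, matching the names LLVM produces when it lowers the `llvm.memcpy.element.unordered.atomic` family of intrinsics to library calls. From the outside, the removed functions were ordinary C-ABI symbols; a hypothetical sketch of how a foreign caller would have declared one:

// Illustrative declaration only; in practice LLVM emitted these calls itself.
extern "C" {
    fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize);
}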