
Commit 035fdfc

Committed Mar 31, 2019
Add element-wise atomic memory operations
WIP
1 parent cee58fd commit 035fdfc

File tree: 8 files changed, +245 -17 lines changed

src/libcore/intrinsics.rs

+27
@@ -962,6 +962,33 @@ extern "rust-intrinsic" {
     /// value is not necessarily valid to be used to actually access memory.
     pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;

+    /// Equivalent to the appropriate `llvm.memcpy.element.unordered.atomic.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()`, an alignment of
+    /// `min_align_of::<T>()`, and an element size of `size_of::<T>()`.
+    ///
+    /// `size_of::<T>()` must be an integer power of two no larger than the
+    /// target-specific atomic access size limit.
+    #[cfg(not(stage0))]
+    pub fn atomic_element_copy_nonoverlapping_memory_unordered<T>(dst: *mut T, src: *const T, count: usize);
+
+    /// Equivalent to the appropriate `llvm.memmove.element.unordered.atomic.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()`, an alignment of
+    /// `min_align_of::<T>()`, and an element size of `size_of::<T>()`.
+    ///
+    /// `size_of::<T>()` must be an integer power of two no larger than the
+    /// target-specific atomic access size limit.
+    #[cfg(not(stage0))]
+    pub fn atomic_element_copy_memory_unordered<T>(dst: *mut T, src: *const T, count: usize);
+
+    /// Equivalent to the appropriate `llvm.memset.element.unordered.atomic.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()`, an alignment of
+    /// `min_align_of::<T>()`, and an element size of `size_of::<T>()`.
+    ///
+    /// `size_of::<T>()` must be an integer power of two no larger than the
+    /// target-specific atomic access size limit.
+    #[cfg(not(stage0))]
+    pub fn atomic_element_set_memory_unordered<T>(dst: *mut T, val: u8, count: usize);
+
     /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
     /// a size of `count` * `size_of::<T>()` and an alignment of
     /// `min_align_of::<T>()`
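
For orientation, here is a minimal sketch of how the copy intrinsic above might be called from user code, assuming a compiler built with this change and that the intrinsic is reachable through `core::intrinsics` behind the `core_intrinsics` feature gate; the wrapper function and the `u32` element type are illustrative assumptions, not part of this commit.

    #![feature(core_intrinsics)]

    use core::intrinsics;

    // Hypothetical wrapper: copy `count` elements from `src` to `dst`, each element
    // transferred with a single unordered atomic access, so a concurrent reader may
    // observe a mix of old and new elements but never a torn element. The two
    // ranges must not overlap, and size_of::<u32>() == 4 is a power of two within
    // any common target's atomic access size limit, as the doc comments require.
    unsafe fn copy_u32s_atomically(dst: *mut u32, src: *const u32, count: usize) {
        intrinsics::atomic_element_copy_nonoverlapping_memory_unordered(dst, src, count);
    }

    fn main() {
        let src = [1u32, 2, 3, 4];
        let mut dst = [0u32; 4];
        unsafe { copy_u32s_atomically(dst.as_mut_ptr(), src.as_ptr(), src.len()) };
        assert_eq!(dst, src);
    }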

src/librustc_codegen_llvm/builder.rs

+42
@@ -966,6 +966,48 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
     }

+    fn atomic_element_unordered_memcpy(&mut self, dst: &'ll Value, dst_align: Align,
+                                       src: &'ll Value, src_align: Align,
+                                       size: &'ll Value, element_size: u32) {
+        let size = self.intcast(size, self.type_isize(), false);
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
+        unsafe {
+            llvm::LLVMRustBuildElementUnorderedAtomicMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
+                                                            src, src_align.bytes() as c_uint, size, element_size);
+        }
+    }
+
+    fn atomic_element_unordered_memmove(&mut self, dst: &'ll Value, dst_align: Align,
+                                        src: &'ll Value, src_align: Align,
+                                        size: &'ll Value, element_size: u32) {
+        let size = self.intcast(size, self.type_isize(), false);
+        let dst = self.pointercast(dst, self.type_i8p());
+        let src = self.pointercast(src, self.type_i8p());
+        let ret_ref = unsafe {
+            llvm::LLVMRustBuildElementUnorderedAtomicMemMove(self.llbuilder,
+                                                             dst, dst_align.bytes() as c_uint,
+                                                             src, src_align.bytes() as c_uint,
+                                                             size, element_size)
+        };
+        if ret_ref.is_none() {
+            bug!("llvm.memmove.element.unordered.atomic.* is not supported with LLVM prior to 7.0");
+        }
+    }
+
+    fn atomic_element_unordered_memset(&mut self, ptr: &'ll Value, fill_byte: &'ll Value,
+                                       size: &'ll Value, align: Align, element_size: u32) {
+        let size = self.intcast(size, self.type_isize(), false);
+        let ptr = self.pointercast(ptr, self.type_i8p());
+        let ret_ref = unsafe {
+            llvm::LLVMRustBuildElementUnorderedAtomicMemSet(self.llbuilder, ptr, fill_byte,
+                                                            size, align.bytes() as c_uint, element_size)
+        };
+        if ret_ref.is_none() {
+            bug!("llvm.memset.element.unordered.atomic.* is not supported with LLVM prior to 7.0");
+        }
+    }
+
     fn select(
         &mut self, cond: &'ll Value,
         then_val: &'ll Value,
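
The `llvm.*.element.unordered.atomic.*` calls emitted by these builder methods differ from the plain memcpy/memmove/memset builders only in their memory model: every element-sized access is a single atomic operation, but no ordering is imposed between elements. As a rough model only (Rust exposes no `Unordered` ordering, so `Relaxed`, which is strictly stronger, stands in for it here), the memcpy case behaves like the hand-written loop below; the function name is made up for illustration.

    use std::sync::atomic::{AtomicU32, Ordering};

    // Approximate model of an element-wise unordered atomic copy over u32 elements:
    // each element moves via one atomic load and one atomic store, so no element is
    // ever torn, but a concurrent observer may see the elements change in any order.
    fn element_wise_atomic_copy(dst: &[AtomicU32], src: &[AtomicU32]) {
        assert_eq!(dst.len(), src.len());
        for (d, s) in dst.iter().zip(src) {
            d.store(s.load(Ordering::Relaxed), Ordering::Relaxed);
        }
    }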

src/librustc_codegen_llvm/intrinsic.rs

+72 -14
@@ -102,6 +102,13 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         let llret_ty = self.layout_of(ret_ty).llvm_type(self);
         let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);

+        let invalid_integer_monomorphization = |ty| {
+            span_invalid_monomorphization_error(tcx.sess, span,
+                &format!("invalid monomorphization of `{}` intrinsic: \
+                          expected basic integer type, found `{}`", name, ty));
+        };
+
+
         let simple = get_simple_intrinsic(self, name);
         let llval = match name {
             _ if simple.is_some() => {
@@ -503,10 +510,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     _ => bug!(),
                 },
                 None => {
-                    span_invalid_monomorphization_error(
-                        tcx.sess, span,
-                        &format!("invalid monomorphization of `{}` intrinsic: \
-                                  expected basic integer type, found `{}`", name, ty));
+                    invalid_integer_monomorphization(ty);
                     return;
                 }
             }
@@ -548,6 +552,17 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     Err(()) => return
                 }
             }
+            name if name.starts_with("atomic_element_") => {
+                let ty = substs.type_at(0);
+                if int_type_width_signed(ty, self).is_some() {
+                    atomic_element_intrinsic(self, name,
+                                             substs.type_at(0),
+                                             args);
+                    return;
+                } else {
+                    return invalid_integer_monomorphization(ty);
+                }
+            }
             // This requires that atomic intrinsics follow a specific naming pattern:
             // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
             name if name.starts_with("atomic_") => {
@@ -582,12 +597,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     _ => self.sess().fatal("Atomic intrinsic not in correct format"),
                };

-                let invalid_monomorphization = |ty| {
-                    span_invalid_monomorphization_error(tcx.sess, span,
-                        &format!("invalid monomorphization of `{}` intrinsic: \
-                                  expected basic integer type, found `{}`", name, ty));
-                };
-
                 match split[1] {
                     "cxchg" | "cxchgweak" => {
                         let ty = substs.type_at(0);
@@ -610,7 +619,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             self.store(success, dest.llval, dest.align);
                             return;
                         } else {
-                            return invalid_monomorphization(ty);
+                            return invalid_integer_monomorphization(ty);
                         }
                     }

@@ -620,7 +629,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             let size = self.size_of(ty);
                             self.atomic_load(args[0].immediate(), order, size)
                         } else {
-                            return invalid_monomorphization(ty);
+                            return invalid_integer_monomorphization(ty);
                         }
                     }

@@ -636,7 +645,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                             );
                             return;
                         } else {
-                            return invalid_monomorphization(ty);
+                            return invalid_integer_monomorphization(ty);
                         }
                     }

@@ -676,7 +685,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                                 order
                             )
                         } else {
-                            return invalid_monomorphization(ty);
+                            return invalid_integer_monomorphization(ty);
                         }
                     }
                 }
@@ -754,6 +763,54 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
     }
 }

+fn atomic_element_intrinsic(
+    bx: &mut Builder<'a, 'll, 'tcx>,
+    name: &str,
+    ty: Ty<'tcx>,
+    args: &[OperandRef<'tcx, &'ll Value>],
+) {
+    let (element_size, align) = bx.size_and_align_of(ty);
+    let element_size = element_size.bytes();
+    assert!(element_size <= u32::max_value() as u64);
+
+    let size = bx.mul(bx.const_usize(element_size), args[2].immediate());
+
+    match name {
+        "atomic_element_copy_nonoverlapping_memory_unordered" => {
+            bx.atomic_element_unordered_memcpy(
+                args[0].immediate(),
+                align,
+                args[1].immediate(),
+                align,
+                size,
+                element_size as u32
+            );
+        }
+        "atomic_element_copy_memory_unordered" => {
+            bx.atomic_element_unordered_memmove(
+                args[0].immediate(),
+                align,
+                args[1].immediate(),
+                align,
+                size,
+                element_size as u32
+            );
+        }
+        "atomic_element_set_memory_unordered" => {
+            bx.atomic_element_unordered_memset(
+                args[0].immediate(),
+                args[1].immediate(),
+                size,
+                align,
+                element_size as u32
+            );
+        }
+        _ => {
+            bug!("unknown intrinsic '{}'", name);
+        }
+    }
+}
+
 fn copy_intrinsic(
     bx: &mut Builder<'a, 'll, 'tcx>,
     allow_overlap: bool,
@@ -777,6 +834,7 @@ fn copy_intrinsic(
     }
 }

+
 fn memset_intrinsic(
     bx: &mut Builder<'a, 'll, 'tcx>,
     volatile: bool,
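
To make the arithmetic in `atomic_element_intrinsic` concrete: the element size and alignment come from the monomorphized `T`, the element size must fit in a `u32`, and the byte size handed to the builder is `size_of::<T>() * count`. A standalone sketch of the same bookkeeping follows; the helper name and the `u64` example are made up, and the power-of-two assertion restates the doc-comment requirement rather than anything this codegen path enforces.

    use std::mem;

    // Mirrors the computation above: (total byte size, alignment, element size).
    fn element_copy_params<T>(count: u64) -> (u64, u64, u32) {
        let element_size = mem::size_of::<T>() as u64;
        let align = mem::align_of::<T>() as u64;
        assert!(element_size <= u32::max_value() as u64);
        assert!(element_size.is_power_of_two());
        (element_size * count, align, element_size as u32)
    }

    fn main() {
        // Copying 1024 u64 elements: 8 bytes per element, 8192 bytes total, align 8.
        assert_eq!(element_copy_params::<u64>(1024), (8192, 8, 8));
    }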

src/librustc_codegen_llvm/llvm/ffi.rs

+22
@@ -1154,6 +1154,28 @@ extern "C" {
                                Size: &'a Value,
                                IsVolatile: bool)
                                -> &'a Value;
+    pub fn LLVMRustBuildElementUnorderedAtomicMemCpy(B: &Builder<'a>,
+                                                     Dst: &'a Value,
+                                                     DstAlign: c_uint,
+                                                     Src: &'a Value,
+                                                     SrcAlign: c_uint,
+                                                     Size: &'a Value,
+                                                     ElementSize: u32)
+                                                     -> &'a Value;
+    pub fn LLVMRustBuildElementUnorderedAtomicMemMove(B: &Builder<'a>,
+                                                      Dst: &'a Value,
+                                                      DstAlign: c_uint,
+                                                      Src: &'a Value,
+                                                      SrcAlign: c_uint,
+                                                      Size: &'a Value,
+                                                      ElementSize: u32) -> Option<&'a Value>;
+    pub fn LLVMRustBuildElementUnorderedAtomicMemSet(B: &Builder<'a>,
+                                                     Ptr: &'a Value,
+                                                     Val: &'a Value,
+                                                     Size: &'a Value,
+                                                     Align: c_uint,
+                                                     ElementSize: u32) -> Option<&'a Value>;
+
     pub fn LLVMRustBuildMemMove(B: &Builder<'a>,
                                 Dst: &'a Value,
                                 DstAlign: c_uint,

src/librustc_codegen_ssa/traits/builder.rs

+27
@@ -190,6 +190,33 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
         flags: MemFlags,
     );

+    fn atomic_element_unordered_memcpy(
+        &mut self,
+        dst: Self::Value,
+        dst_align: Align,
+        src: Self::Value,
+        src_align: Align,
+        size: Self::Value,
+        element_size: u32,
+    );
+    fn atomic_element_unordered_memmove(
+        &mut self,
+        dst: Self::Value,
+        dst_align: Align,
+        src: Self::Value,
+        src_align: Align,
+        size: Self::Value,
+        element_size: u32,
+    );
+    fn atomic_element_unordered_memset(
+        &mut self,
+        ptr: Self::Value,
+        fill_byte: Self::Value,
+        size: Self::Value,
+        align: Align,
+        element_size: u32,
+    );
+
     fn select(
         &mut self,
         cond: Self::Value,

src/librustc_typeck/check/intrinsic.rs

+4 -3
@@ -92,7 +92,7 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         })
     };

-    let (n_tps, inputs, output, unsafety) = if name.starts_with("atomic_") {
+    let (n_tps, inputs, output, unsafety) = if name.starts_with("atomic_") && !name.starts_with("atomic_element_") {
         let split : Vec<&str> = name.split('_').collect();
         assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format");

@@ -197,7 +197,8 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
             ],
             tcx.mk_unit())
         }
-        "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => {
+        "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" |
+        "atomic_element_copy_memory_unordered" | "atomic_element_copy_nonoverlapping_memory_unordered" => {
            (1,
             vec![
                tcx.mk_ptr(ty::TypeAndMut {
@@ -212,7 +213,7 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
             ],
             tcx.mk_unit())
         }
-        "write_bytes" | "volatile_set_memory" => {
+        "write_bytes" | "volatile_set_memory" | "atomic_element_set_memory_unordered" => {
            (1,
             vec![
                tcx.mk_ptr(ty::TypeAndMut {

src/rustllvm/RustWrapper.cpp

+36
@@ -1268,6 +1268,42 @@ extern "C" LLVMValueRef LLVMRustBuildMemMove(LLVMBuilderRef B,
 #endif
 }

+extern "C" LLVMValueRef LLVMRustBuildElementUnorderedAtomicMemCpy(LLVMBuilderRef B,
+                                                                  LLVMValueRef Dst, unsigned DstAlign,
+                                                                  LLVMValueRef Src, unsigned SrcAlign,
+                                                                  LLVMValueRef Size, uint32_t ElementSize) {
+  return wrap(unwrap(B)->CreateElementUnorderedAtomicMemCpy(
+      unwrap(Dst), DstAlign,
+      unwrap(Src), SrcAlign,
+      unwrap(Size), ElementSize));
+}
+
+extern "C" LLVMValueRef LLVMRustBuildElementUnorderedAtomicMemMove(LLVMBuilderRef B,
+                                                                   LLVMValueRef Dst, unsigned DstAlign,
+                                                                   LLVMValueRef Src, unsigned SrcAlign,
+                                                                   LLVMValueRef Size, uint32_t ElementSize) {
+#if LLVM_VERSION_GE(7, 0)
+  return wrap(unwrap(B)->CreateElementUnorderedAtomicMemMove(
+      unwrap(Dst), DstAlign,
+      unwrap(Src), SrcAlign,
+      unwrap(Size), ElementSize));
+#else
+  return nullptr;
+#endif
+}
+
+extern "C" LLVMValueRef LLVMRustBuildElementUnorderedAtomicMemSet(LLVMBuilderRef B,
+                                                                  LLVMValueRef Ptr, LLVMValueRef Val,
+                                                                  LLVMValueRef Size, unsigned Align, uint32_t ElementSize) {
+#if LLVM_VERSION_GE(7, 0)
+  return wrap(unwrap(B)->CreateElementUnorderedAtomicMemSet(
+      unwrap(Ptr), unwrap(Val),
+      unwrap(Size), Align, ElementSize));
+#else
+  return nullptr;
+#endif
+}
+
 extern "C" LLVMValueRef
 LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args,
                     unsigned NumArgs, LLVMBasicBlockRef Then,
There was a problem loading the remainder of the diff.

0 commit comments