diff --git a/example/mini_core.rs b/example/mini_core.rs
index 2e165cc3c12..fae7793fbf3 100644
--- a/example/mini_core.rs
+++ b/example/mini_core.rs
@@ -662,6 +662,8 @@ pub mod intrinsics {
     #[rustc_intrinsic]
     pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize);
     #[rustc_intrinsic]
+    pub unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+    #[rustc_intrinsic]
     pub unsafe fn transmute<T, U>(e: T) -> U;
     #[rustc_intrinsic]
     pub unsafe fn ctlz_nonzero<T>(x: T) -> u32;
diff --git a/src/builder.rs b/src/builder.rs
index 6add7f05c2a..fcee15b290e 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -63,6 +63,30 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.value_counter.get()
     }
 
+    fn with_non_zero_size(
+        &mut self,
+        size: RValue<'gcc>,
+        builtin_block_name: &str,
+        emit_builtin: impl FnOnce(&mut Self),
+    ) {
+        let size_is_zero = self.context.new_comparison(
+            self.location,
+            ComparisonOp::Equals,
+            size,
+            self.const_usize(0),
+        );
+        let func = self.current_func();
+        let builtin_block = func.new_block(builtin_block_name);
+        let after_block = func.new_block("after_memory_builtin");
+        self.llbb().end_with_conditional(self.location, size_is_zero, after_block, builtin_block);
+
+        self.switch_to_block(builtin_block);
+        emit_builtin(self);
+        self.llbb().end_with_jump(self.location, after_block);
+
+        self.switch_to_block(after_block);
+    }
+
     fn atomic_extremum(
         &mut self,
         operation: ExtremumOperation,
@@ -1404,12 +1428,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
         let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
-        let memcpy = self.context.get_builtin_function("memcpy");
-        // FIXME(antoyo): handle aligns and is_volatile.
-        self.block.add_eval(
-            self.location,
-            self.context.new_call(self.location, memcpy, &[dst, src, size]),
-        );
+        self.with_non_zero_size(size, "memcpy", |bx| {
+            let memcpy = bx.context.get_builtin_function("memcpy");
+            // FIXME(antoyo): handle aligns and is_volatile.
+            bx.block
+                .add_eval(bx.location, bx.context.new_call(bx.location, memcpy, &[dst, src, size]));
+        });
     }
 
     fn memmove(
@@ -1427,12 +1451,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let dst = self.pointercast(dst, self.type_i8p());
         let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
 
-        let memmove = self.context.get_builtin_function("memmove");
-        // FIXME(antoyo): handle is_volatile.
-        self.block.add_eval(
-            self.location,
-            self.context.new_call(self.location, memmove, &[dst, src, size]),
-        );
+        self.with_non_zero_size(size, "memmove", |bx| {
+            let memmove = bx.context.get_builtin_function("memmove");
+            // FIXME(antoyo): handle is_volatile.
+            bx.block.add_eval(
+                bx.location,
+                bx.context.new_call(bx.location, memmove, &[dst, src, size]),
+            );
+        });
     }
 
     fn memset(
@@ -1446,14 +1472,16 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memset not supported");
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
         let ptr = self.pointercast(ptr, self.type_i8p());
-        let memset = self.context.get_builtin_function("memset");
         // FIXME(antoyo): handle align and is_volatile.
         let fill_byte = self.context.new_cast(self.location, fill_byte, self.i32_type);
         let size = self.intcast(size, self.type_size_t(), false);
-        self.block.add_eval(
-            self.location,
-            self.context.new_call(self.location, memset, &[ptr, fill_byte, size]),
-        );
+        self.with_non_zero_size(size, "memset", |bx| {
+            let memset = bx.context.get_builtin_function("memset");
+            bx.block.add_eval(
+                bx.location,
+                bx.context.new_call(bx.location, memset, &[ptr, fill_byte, size]),
+            );
+        });
     }
 
     fn select(
diff --git a/tests/run/zero_sized_mem.rs b/tests/run/zero_sized_mem.rs
new file mode 100644
index 00000000000..71f7772ac6b
--- /dev/null
+++ b/tests/run/zero_sized_mem.rs
@@ -0,0 +1,28 @@
+// Compiler:
+//
+// Run-time:
+//   status: 0
+
+#![feature(no_core)]
+#![no_std]
+#![no_core]
+#![no_main]
+
+extern crate mini_core;
+use mini_core::*;
+
+#[inline(never)]
+unsafe fn zero_sized_mem_ops(count: usize) {
+    let src = 0usize as *const ();
+    let dst = 0usize as *mut ();
+
+    intrinsics::copy_nonoverlapping(src, dst, count);
+    intrinsics::copy(src, dst, count);
+    intrinsics::write_bytes(dst, 0xab, count);
+}
+
+#[no_mangle]
+extern "C" fn main(_argc: i32, _argv: *const *const u8) -> i32 {
+    unsafe { zero_sized_mem_ops(1) };
+    0
+}