diff --git a/tfhe/src/high_level_api/integers/signed/overflowing_ops.rs b/tfhe/src/high_level_api/integers/signed/overflowing_ops.rs index 840d1c4434..6be774fd57 100644 --- a/tfhe/src/high_level_api/integers/signed/overflowing_ops.rs +++ b/tfhe/src/high_level_api/integers/signed/overflowing_ops.rs @@ -5,7 +5,7 @@ use crate::high_level_api::global_state::with_thread_local_cuda_streams; use crate::high_level_api::integers::FheIntId; use crate::high_level_api::keys::InternalServerKey; use crate::integer::block_decomposition::DecomposableInto; -use crate::prelude::{OverflowingAdd, OverflowingMul, OverflowingSub}; +use crate::prelude::{OverflowingAdd, OverflowingMul, OverflowingNeg, OverflowingSub}; use crate::{FheBool, FheInt}; impl OverflowingAdd for &FheInt @@ -515,3 +515,68 @@ where <&Self as OverflowingMul<&Self>>::overflowing_mul(&self, other) } } + +impl OverflowingNeg for &FheInt +where + Id: FheIntId, +{ + type Output = FheInt; + + /// Negates self, overflowing if this is equal to the minimum value. + /// + /// * The operation is modular, i.e. on overflow the result wraps around. + /// * On overflow the [FheBool] is true (if self encrypts the minimum value), otherwise false + /// + /// # Example + /// + /// ```rust + /// use tfhe::prelude::*; + /// use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheInt16}; + /// + /// let (client_key, server_key) = generate_keys(ConfigBuilder::default()); + /// set_server_key(server_key); + /// + /// let a = FheInt16::encrypt(i16::MIN, &client_key); + /// + /// let (result, overflowed) = a.overflowing_neg(); + /// let (expected_result, expected_overflowed) = i16::MIN.overflowing_neg(); + /// let result: i16 = result.decrypt(&client_key); + /// assert_eq!(result, expected_result); + /// assert_eq!(overflowed.decrypt(&client_key), expected_overflowed); + /// assert!(overflowed.decrypt(&client_key)); + /// ``` + fn overflowing_neg(self) -> (Self::Output, FheBool) { + global_state::with_internal_keys(|key| match key { + InternalServerKey::Cpu(cpu_key) => { + let (result, overflow) = cpu_key + .pbs_key() + .overflowing_neg_parallelized(&*self.ciphertext.on_cpu()); + ( + FheInt::new(result, cpu_key.tag.clone()), + FheBool::new(overflow, cpu_key.tag.clone()), + ) + } + #[cfg(feature = "gpu")] + InternalServerKey::Cuda(cuda_key) => with_thread_local_cuda_streams(|streams| { + let (result, overflow) = cuda_key + .pbs_key() + .overflowing_neg(&*self.ciphertext.on_gpu(streams), streams); + ( + FheInt::new(result, cuda_key.tag.clone()), + FheBool::new(overflow, cuda_key.tag.clone()), + ) + }), + }) + } +} + +impl OverflowingNeg for FheInt +where + Id: FheIntId, +{ + type Output = Self; + + fn overflowing_neg(self) -> (Self::Output, FheBool) { + <&Self as OverflowingNeg>::overflowing_neg(&self) + } +} diff --git a/tfhe/src/high_level_api/integers/unsigned/overflowing_ops.rs b/tfhe/src/high_level_api/integers/unsigned/overflowing_ops.rs index 9b2e274d4f..ff28b8c3ce 100644 --- a/tfhe/src/high_level_api/integers/unsigned/overflowing_ops.rs +++ b/tfhe/src/high_level_api/integers/unsigned/overflowing_ops.rs @@ -5,7 +5,7 @@ use crate::high_level_api::global_state::with_thread_local_cuda_streams; use crate::high_level_api::integers::FheUintId; use crate::high_level_api::keys::InternalServerKey; use crate::integer::block_decomposition::DecomposableInto; -use crate::prelude::{CastInto, OverflowingAdd, OverflowingMul, OverflowingSub}; +use crate::prelude::{CastInto, OverflowingAdd, OverflowingMul, OverflowingNeg, OverflowingSub}; use crate::{FheBool, FheUint}; impl 
OverflowingAdd for &FheUint @@ -509,3 +509,45 @@ where <&Self as OverflowingMul<&Self>>::overflowing_mul(&self, other) } } + +impl OverflowingNeg for &FheUint +where + Id: FheUintId, +{ + type Output = FheUint; + + fn overflowing_neg(self) -> (Self::Output, FheBool) { + global_state::with_internal_keys(|key| match key { + InternalServerKey::Cpu(cpu_key) => { + let (result, overflow) = cpu_key + .pbs_key() + .overflowing_neg_parallelized(&*self.ciphertext.on_cpu()); + ( + FheUint::new(result, cpu_key.tag.clone()), + FheBool::new(overflow, cpu_key.tag.clone()), + ) + } + #[cfg(feature = "gpu")] + InternalServerKey::Cuda(cuda_key) => with_thread_local_cuda_streams(|streams| { + let (result, overflow) = cuda_key + .pbs_key() + .overflowing_neg(&*self.ciphertext.on_gpu(streams), streams); + ( + FheUint::new(result, cuda_key.tag.clone()), + FheBool::new(overflow, cuda_key.tag.clone()), + ) + }), + }) + } +} + +impl OverflowingNeg for FheUint +where + Id: FheUintId, +{ + type Output = Self; + + fn overflowing_neg(self) -> (Self::Output, FheBool) { + <&Self as OverflowingNeg>::overflowing_neg(&self) + } +} diff --git a/tfhe/src/high_level_api/prelude.rs b/tfhe/src/high_level_api/prelude.rs index 61609fc4c1..967f993eb3 100644 --- a/tfhe/src/high_level_api/prelude.rs +++ b/tfhe/src/high_level_api/prelude.rs @@ -9,8 +9,8 @@ pub use crate::high_level_api::traits::{ BitSlice, CiphertextList, DivRem, FheDecrypt, FheEncrypt, FheEq, FheKeyswitch, FheMax, FheMin, FheOrd, FheTrivialEncrypt, FheTryEncrypt, FheTryTrivialEncrypt, IfThenElse, OverflowingAdd, - OverflowingMul, OverflowingSub, RotateLeft, RotateLeftAssign, RotateRight, RotateRightAssign, - ScalarIfThenElse, SquashNoise, Tagged, + OverflowingMul, OverflowingNeg, OverflowingSub, RotateLeft, RotateLeftAssign, RotateRight, + RotateRightAssign, ScalarIfThenElse, SquashNoise, Tagged, }; pub use crate::conformance::ParameterSetConformant; diff --git a/tfhe/src/high_level_api/traits.rs b/tfhe/src/high_level_api/traits.rs index 4c06699f66..418e0febd4 100644 --- a/tfhe/src/high_level_api/traits.rs +++ b/tfhe/src/high_level_api/traits.rs @@ -180,6 +180,12 @@ pub trait OverflowingMul { fn overflowing_mul(self, rhs: Rhs) -> (Self::Output, FheBool); } +pub trait OverflowingNeg { + type Output; + + fn overflowing_neg(self) -> (Self::Output, FheBool); +} + pub trait BitSlice { type Output; diff --git a/tfhe/src/integer/gpu/server_key/radix/bitwise_op.rs b/tfhe/src/integer/gpu/server_key/radix/bitwise_op.rs index 5452ae2374..80dba4b102 100644 --- a/tfhe/src/integer/gpu/server_key/radix/bitwise_op.rs +++ b/tfhe/src/integer/gpu/server_key/radix/bitwise_op.rs @@ -5,6 +5,7 @@ use crate::core_crypto::gpu::algorithms::{ use crate::core_crypto::gpu::vec::CudaVec; use crate::core_crypto::gpu::CudaStreams; use crate::core_crypto::prelude::LweBskGroupingFactor; +use crate::integer::gpu::ciphertext::boolean_value::CudaBooleanBlock; use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext; use crate::integer::gpu::server_key::CudaBootstrappingKey; use crate::integer::gpu::{ @@ -95,6 +96,31 @@ impl CudaServerKey { ct.as_mut().info = ct.as_ref().info.after_bitnot(); } + pub(crate) unsafe fn unchecked_boolean_bitnot_assign_async( + &self, + ct: &mut CudaBooleanBlock, + streams: &CudaStreams, + ) { + // We do (-ciphertext) + (msg_mod -1) as it allows to avoid an allocation + cuda_lwe_ciphertext_negate_assign(&mut ct.0.as_mut().d_blocks, streams); + + let ct_blocks = ct.0.as_ref().d_blocks.lwe_ciphertext_count().0; + + let shift_plaintext = 
self.encoding().encode(Cleartext(1u64)).0; + + let scalar_vector = vec![shift_plaintext; ct_blocks]; + let mut d_decomposed_scalar = + CudaVec::::new_async(ct.0.as_ref().d_blocks.lwe_ciphertext_count().0, streams, 0); + d_decomposed_scalar.copy_from_cpu_async(scalar_vector.as_slice(), streams, 0); + + cuda_lwe_ciphertext_plaintext_add_assign( + &mut ct.0.as_mut().d_blocks, + &d_decomposed_scalar, + streams, + ); + // Neither noise level nor the degree changes + } + pub fn unchecked_bitnot_assign( &self, ct: &mut T, @@ -164,7 +190,7 @@ impl CudaServerKey { /// # Safety /// /// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must - /// not be dropped until streams is synchronised + /// not be dropped until streams is synchronized pub unsafe fn unchecked_bitop_assign_async( &self, ct_left: &mut T, @@ -447,7 +473,7 @@ impl CudaServerKey { /// # Safety /// /// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must - /// not be dropped until streams is synchronised + /// not be dropped until streams is synchronized pub unsafe fn bitand_assign_async( &self, ct_left: &mut T, @@ -553,7 +579,7 @@ impl CudaServerKey { /// # Safety /// /// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must - /// not be dropped until streams is synchronised + /// not be dropped until streams is synchronized pub unsafe fn bitor_assign_async( &self, ct_left: &mut T, @@ -658,7 +684,7 @@ impl CudaServerKey { /// # Safety /// /// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must - /// not be dropped until streams is synchronised + /// not be dropped until streams is synchronized pub unsafe fn bitxor_assign_async( &self, ct_left: &mut T, @@ -756,7 +782,7 @@ impl CudaServerKey { /// # Safety /// /// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must - /// not be dropped until streams is synchronised + /// not be dropped until streams is synchronized pub unsafe fn bitnot_assign_async( &self, ct: &mut T, diff --git a/tfhe/src/integer/gpu/server_key/radix/neg.rs b/tfhe/src/integer/gpu/server_key/radix/neg.rs index a10eb85d19..3ee5127b31 100644 --- a/tfhe/src/integer/gpu/server_key/radix/neg.rs +++ b/tfhe/src/integer/gpu/server_key/radix/neg.rs @@ -1,5 +1,8 @@ use crate::core_crypto::gpu::CudaStreams; -use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext; +use crate::integer::gpu::ciphertext::boolean_value::CudaBooleanBlock; +use crate::integer::gpu::ciphertext::{ + CudaIntegerRadixCiphertext, CudaSignedRadixCiphertext, CudaUnsignedRadixCiphertext, +}; use crate::integer::gpu::server_key::CudaServerKey; use crate::integer::gpu::unchecked_negate_integer_radix_async; use crate::integer::server_key::radix_parallel::OutputFlag; @@ -126,7 +129,7 @@ impl CudaServerKey { /// # Safety /// /// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must - /// not be dropped until streams is synchronised + /// not be dropped until streams is synchronized pub unsafe fn neg_async( &self, ctxt: &T, @@ -147,4 +150,53 @@ impl CudaServerKey { self.propagate_single_carry_assign_async(&mut res, streams, None, OutputFlag::None); res } + + /// # Safety + /// + /// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must + /// not be dropped until streams is synchronized + pub unsafe fn overflowing_neg_async( + &self, + ctxt: &T, + streams: &CudaStreams, + ) -> (T, 
CudaBooleanBlock) + where + T: CudaIntegerRadixCiphertext, + { + let mut ct = if ctxt.block_carries_are_empty() { + ctxt.duplicate_async(streams) + } else { + let mut ct = ctxt.duplicate_async(streams); + self.full_propagate_assign_async(&mut ct, streams); + ct + }; + + self.bitnot_assign_async(&mut ct, streams); + + if T::IS_SIGNED { + let tmp = CudaSignedRadixCiphertext { + ciphertext: ct.into_inner(), + }; + let (result, overflowed) = self.signed_overflowing_scalar_add(&tmp, 1, streams); + let result = T::from(result.into_inner()); + (result, overflowed) + } else { + let mut tmp = CudaUnsignedRadixCiphertext { + ciphertext: ct.into_inner(), + }; + let mut overflowed = self.unsigned_overflowing_scalar_add_assign(&mut tmp, 1, streams); + self.unchecked_boolean_bitnot_assign_async(&mut overflowed, streams); + let result = T::from(tmp.into_inner()); + (result, overflowed) + } + } + + pub fn overflowing_neg(&self, ctxt: &T, streams: &CudaStreams) -> (T, CudaBooleanBlock) + where + T: CudaIntegerRadixCiphertext, + { + let result = unsafe { self.overflowing_neg_async(ctxt, streams) }; + streams.synchronize(); + result + } } diff --git a/tfhe/src/integer/gpu/server_key/radix/tests_signed/mod.rs b/tfhe/src/integer/gpu/server_key/radix/tests_signed/mod.rs index 57eecc5d96..da55efb137 100644 --- a/tfhe/src/integer/gpu/server_key/radix/tests_signed/mod.rs +++ b/tfhe/src/integer/gpu/server_key/radix/tests_signed/mod.rs @@ -79,6 +79,40 @@ where } } +impl<'a, F> FunctionExecutor<&'a SignedRadixCiphertext, (SignedRadixCiphertext, BooleanBlock)> + for GpuFunctionExecutor +where + F: Fn( + &CudaServerKey, + &CudaSignedRadixCiphertext, + &CudaStreams, + ) -> (CudaSignedRadixCiphertext, CudaBooleanBlock), +{ + fn setup(&mut self, cks: &RadixClientKey, sks: Arc) { + self.setup_from_keys(cks, &sks); + } + + fn execute( + &mut self, + input: &'a SignedRadixCiphertext, + ) -> (SignedRadixCiphertext, BooleanBlock) { + let context = self + .context + .as_ref() + .expect("setup was not properly called"); + + let d_ctxt = + CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input, &context.streams); + + let (gpu_result_0, gpu_result_1) = (self.func)(&context.sks, &d_ctxt, &context.streams); + + ( + gpu_result_0.to_signed_radix_ciphertext(&context.streams), + gpu_result_1.to_boolean_block(&context.streams), + ) + } +} + impl<'a, F> FunctionExecutor<&'a SignedRadixCiphertext, (RadixCiphertext, BooleanBlock)> for GpuFunctionExecutor where diff --git a/tfhe/src/integer/gpu/server_key/radix/tests_signed/test_neg.rs b/tfhe/src/integer/gpu/server_key/radix/tests_signed/test_neg.rs index 5cbf5f1049..336fc738b2 100644 --- a/tfhe/src/integer/gpu/server_key/radix/tests_signed/test_neg.rs +++ b/tfhe/src/integer/gpu/server_key/radix/tests_signed/test_neg.rs @@ -3,13 +3,14 @@ use crate::integer::gpu::server_key::radix::tests_unsigned::{ }; use crate::integer::gpu::CudaServerKey; use crate::integer::server_key::radix_parallel::tests_signed::test_neg::{ - signed_default_neg_test, signed_unchecked_neg_test, + default_overflowing_neg_test, signed_default_neg_test, signed_unchecked_neg_test, }; use crate::shortint::parameters::test_params::*; use crate::shortint::parameters::*; create_gpu_parameterized_test!(integer_unchecked_neg); create_gpu_parameterized_test!(integer_neg); +create_gpu_parameterized_test!(integer_overflowing_neg); fn integer_unchecked_neg
<P>
(param: P) where @@ -26,3 +27,8 @@ where let executor = GpuFunctionExecutor::new(&CudaServerKey::neg); signed_default_neg_test(param, executor); } + +fn integer_overflowing_neg(param: impl Into) { + let executor = GpuFunctionExecutor::new(&CudaServerKey::overflowing_neg); + default_overflowing_neg_test(param, executor); +} diff --git a/tfhe/src/integer/gpu/server_key/radix/tests_unsigned/test_neg.rs b/tfhe/src/integer/gpu/server_key/radix/tests_unsigned/test_neg.rs index c31f5c7958..c011c794ce 100644 --- a/tfhe/src/integer/gpu/server_key/radix/tests_unsigned/test_neg.rs +++ b/tfhe/src/integer/gpu/server_key/radix/tests_unsigned/test_neg.rs @@ -5,11 +5,13 @@ use crate::integer::gpu::CudaServerKey; use crate::integer::server_key::radix_parallel::tests_cases_unsigned::{ default_neg_test, unchecked_neg_test, }; +use crate::integer::server_key::radix_parallel::tests_unsigned::test_neg::default_overflowing_neg_test; use crate::shortint::parameters::test_params::*; use crate::shortint::parameters::*; create_gpu_parameterized_test!(integer_unchecked_neg); create_gpu_parameterized_test!(integer_neg); +create_gpu_parameterized_test!(integer_overflowing_neg); fn integer_unchecked_neg
<P>
(param: P) where @@ -26,3 +28,8 @@ where let executor = GpuFunctionExecutor::new(&CudaServerKey::neg); default_neg_test(param, executor); } + +fn integer_overflowing_neg(param: impl Into) { + let executor = GpuFunctionExecutor::new(&CudaServerKey::overflowing_neg); + default_overflowing_neg_test(param, executor); +} diff --git a/tfhe/src/integer/server_key/radix_parallel/add.rs b/tfhe/src/integer/server_key/radix_parallel/add.rs index 4776825f3d..db7d2b6c83 100644 --- a/tfhe/src/integer/server_key/radix_parallel/add.rs +++ b/tfhe/src/integer/server_key/radix_parallel/add.rs @@ -486,7 +486,7 @@ impl ServerKey { /// Computes the result of `lhs += rhs + input_carry` /// - /// This will selects what seems to be best algorithm to propagate carries + /// This will select what seems to be the best algorithm to propagate carries /// (fully parallel vs sequential) by looking at the number of blocks and /// number of threads. /// diff --git a/tfhe/src/integer/server_key/radix_parallel/neg.rs b/tfhe/src/integer/server_key/radix_parallel/neg.rs index 3d1c64897c..f5fdbbf02c 100644 --- a/tfhe/src/integer/server_key/radix_parallel/neg.rs +++ b/tfhe/src/integer/server_key/radix_parallel/neg.rs @@ -1,5 +1,5 @@ use crate::integer::ciphertext::IntegerRadixCiphertext; -use crate::integer::ServerKey; +use crate::integer::{BooleanBlock, ServerKey}; impl ServerKey { /// Homomorphically computes the opposite of a ciphertext encrypting an integer message. @@ -75,11 +75,32 @@ impl ServerKey { /// assert_eq!(255, dec); /// ``` pub fn neg_parallelized(&self, ctxt: &T) -> T + where + T: IntegerRadixCiphertext, + { + if ctxt.block_carries_are_empty() { + let mut result = self.bitnot(ctxt); + self.scalar_add_assign_parallelized(&mut result, 1); + result + } else if self.is_neg_possible(ctxt).is_ok() { + let mut result = self.unchecked_neg(ctxt); + self.full_propagate_parallelized(&mut result); + result + } else { + let mut cleaned_ctxt = ctxt.clone(); + self.full_propagate_parallelized(&mut cleaned_ctxt); + self.neg_parallelized(&cleaned_ctxt) + } + } + + pub fn overflowing_neg_parallelized(&self, ctxt: &T) -> (T, BooleanBlock) where T: IntegerRadixCiphertext, { let mut tmp_ctxt; + // As we want to compute the overflow we need a truly clean state + // And so we cannot avoid the full_propagate like we may in non overflowing_block let ct = if ctxt.block_carries_are_empty() { ctxt } else { @@ -88,8 +109,19 @@ impl ServerKey { &tmp_ctxt }; - let mut ct = self.unchecked_neg(ct); - self.full_propagate_parallelized(&mut ct); - ct + let mut result = self.bitnot(ct); + let mut overflowed = self.overflowing_scalar_add_assign_parallelized(&mut result, 1); + + if !T::IS_SIGNED { + // Computing overflow of !input + 1 only really works for signed integers + // However for unsigned integers we can still get the correct result as the only + // case where `!input + 1` overflows, is when `!input` == MAX (0b111..111) => + // `input == 0`. 
+ // And in unsigned integers, the only case that is not an overflow is -0, + // so we can just invert the result + self.boolean_bitnot_assign(&mut overflowed); + } + + (result, overflowed) } } diff --git a/tfhe/src/integer/server_key/radix_parallel/tests_signed/test_neg.rs b/tfhe/src/integer/server_key/radix_parallel/tests_signed/test_neg.rs index 7beeb06a5e..360274eda9 100644 --- a/tfhe/src/integer/server_key/radix_parallel/tests_signed/test_neg.rs +++ b/tfhe/src/integer/server_key/radix_parallel/tests_signed/test_neg.rs @@ -4,10 +4,12 @@ use crate::integer::server_key::radix_parallel::tests_signed::{ create_iterator_of_signed_random_pairs, signed_neg_under_modulus, NB_CTXT, }; use crate::integer::server_key::radix_parallel::tests_unsigned::{ - nb_tests_smaller_for_params, nb_unchecked_tests_for_params, CpuFunctionExecutor, + nb_tests_smaller_for_params, nb_unchecked_tests_for_params, CpuFunctionExecutor, MAX_NB_CTXT, }; use crate::integer::tests::create_parameterized_test; -use crate::integer::{IntegerKeyKind, RadixClientKey, ServerKey, SignedRadixCiphertext}; +use crate::integer::{ + BooleanBlock, IntegerKeyKind, RadixClientKey, ServerKey, SignedRadixCiphertext, +}; #[cfg(tarpaulin)] use crate::shortint::parameters::coverage_parameters::*; use crate::shortint::parameters::test_params::*; @@ -18,6 +20,7 @@ use std::sync::Arc; create_parameterized_test!(integer_signed_unchecked_neg); create_parameterized_test!(integer_signed_smart_neg); create_parameterized_test!(integer_signed_default_neg); +create_parameterized_test!(integer_signed_default_overflowing_neg); fn integer_signed_unchecked_neg
<P>
(param: P) where @@ -43,6 +46,14 @@ where signed_default_neg_test(param, executor); } +fn integer_signed_default_overflowing_neg
<P>
(param: P) +where + P: Into, +{ + let executor = CpuFunctionExecutor::new(&ServerKey::overflowing_neg_parallelized); + default_overflowing_neg_test(param, executor); +} + pub(crate) fn signed_unchecked_neg_test(param: P, mut executor: T) where P: Into, @@ -186,3 +197,98 @@ where assert_eq!(clear_result, dec); } } + +pub(crate) fn default_overflowing_neg_test(param: P, mut overflowing_neg: T) +where + P: Into, + T: for<'a> FunctionExecutor<&'a SignedRadixCiphertext, (SignedRadixCiphertext, BooleanBlock)>, +{ + let param = param.into(); + let nb_tests_smaller = nb_tests_smaller_for_params(param); + let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix); + let cks = RadixClientKey::from(( + cks, + crate::integer::server_key::radix_parallel::tests_cases_unsigned::NB_CTXT, + )); + + sks.set_deterministic_pbs_execution(true); + let sks = Arc::new(sks); + + let mut rng = rand::thread_rng(); + + overflowing_neg.setup(&cks, sks); + + let cks: crate::integer::ClientKey = cks.into(); + + for num_blocks in 1..MAX_NB_CTXT { + let modulus = (cks.parameters().message_modulus().0.pow(num_blocks as u32) / 2) as i64; + + if modulus <= 1 { + continue; + } + + for _ in 0..nb_tests_smaller { + let clear = rng.gen_range(-modulus + 1..modulus); + let ctxt = cks.encrypt_signed_radix(clear, num_blocks); + + let (ct_res, flag) = overflowing_neg.execute(&ctxt); + + assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL); + assert_eq!(flag.0.degree.get(), 1); + + let dec_flag = cks.decrypt_bool(&flag); + assert!( + !dec_flag, + "Invalid flag result for overflowing_neg({clear}),\n\ + Expected false, got true\n\ + num_blocks: {num_blocks}, modulus: {:?}", + -modulus..modulus + ); + + let dec_ct: i64 = cks.decrypt_signed_radix(&ct_res); + let expected = clear.wrapping_neg() % modulus; + assert_eq!( + dec_ct, + expected, + "Invalid result for overflowing_neg({clear}),\n\ + Expected {expected}, got {dec_ct}\n\ + num_blocks: {num_blocks}, modulus: {:?}", + -modulus..modulus + ); + + let (ct_res2, flag2) = overflowing_neg.execute(&ctxt); + assert_eq!(ct_res, ct_res2, "Failed determinism check"); + assert_eq!(flag, flag2, "Failed determinism check"); + } + + // The only case where signed neg does overflows + let ctxt = cks.encrypt_signed_radix(-modulus, num_blocks); + + let (ct_res, flag) = overflowing_neg.execute(&ctxt); + + assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL); + assert_eq!(flag.0.degree.get(), 1); + + let dec_flag = cks.decrypt_bool(&flag); + assert!( + dec_flag, + "Invalid flag result for overflowing_neg({}),\n\ + Expected true, got false\n\ + num_blocks: {num_blocks}, modulus: {:?}", + -modulus, + -modulus..modulus + ); + + let dec_ct: i64 = cks.decrypt_signed_radix(&ct_res); + assert_eq!( + dec_ct, + -modulus, + "Invalid result for overflowing_neg({}),\n\ + Expected {}, got {dec_ct}\n\ + num_blocks: {num_blocks}, modulus: {:?}", + -modulus, + -modulus, + -modulus..modulus + ); + } +} diff --git a/tfhe/src/integer/server_key/radix_parallel/tests_unsigned/test_neg.rs b/tfhe/src/integer/server_key/radix_parallel/tests_unsigned/test_neg.rs index aa66cd4b24..e0cd5d8500 100644 --- a/tfhe/src/integer/server_key/radix_parallel/tests_unsigned/test_neg.rs +++ b/tfhe/src/integer/server_key/radix_parallel/tests_unsigned/test_neg.rs @@ -4,11 +4,11 @@ use crate::integer::server_key::radix_parallel::tests_cases_unsigned::{FunctionE use crate::integer::server_key::radix_parallel::tests_unsigned::{ nb_tests_for_params, nb_tests_smaller_for_params, panic_if_any_block_info_exceeds_max_degree_or_noise, 
panic_if_any_block_is_not_clean, - panic_if_any_block_values_exceeds_its_degree, unsigned_modulus, CpuFunctionExecutor, - ExpectedDegrees, ExpectedNoiseLevels, + panic_if_any_block_values_exceeds_its_degree, random_non_zero_value, unsigned_modulus, + CpuFunctionExecutor, ExpectedDegrees, ExpectedNoiseLevels, MAX_NB_CTXT, }; use crate::integer::tests::create_parameterized_test; -use crate::integer::{IntegerKeyKind, RadixCiphertext, RadixClientKey, ServerKey}; +use crate::integer::{BooleanBlock, IntegerKeyKind, RadixCiphertext, RadixClientKey, ServerKey}; #[cfg(tarpaulin)] use crate::shortint::parameters::coverage_parameters::*; use crate::shortint::parameters::test_params::*; @@ -18,6 +18,7 @@ use std::sync::Arc; create_parameterized_test!(integer_smart_neg); create_parameterized_test!(integer_default_neg); +create_parameterized_test!(integer_default_overflowing_neg); fn integer_smart_neg
<P>
(param: P) where @@ -35,6 +36,14 @@ where default_neg_test(param, executor); } +fn integer_default_overflowing_neg
<P>
(param: P) +where + P: Into, +{ + let executor = CpuFunctionExecutor::new(&ServerKey::overflowing_neg_parallelized); + default_overflowing_neg_test(param, executor); +} + impl ExpectedDegrees { fn after_unchecked_neg(&mut self, lhs: &RadixCiphertext) -> &Self { self.set_with(NegatedDegreeIter::new( @@ -165,7 +174,7 @@ where // Default Tests //============================================================================= -pub(crate) fn default_neg_test(param: P, mut executor: T) +pub(crate) fn default_neg_test(param: P, mut neg: T) where P: Into, T: for<'a> FunctionExecutor<&'a RadixCiphertext, RadixCiphertext>, @@ -175,28 +184,132 @@ where let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix); let cks = RadixClientKey::from((cks, NB_CTXT)); + sks.set_deterministic_pbs_execution(true); + let sks = Arc::new(sks.clone()); + + let mut rng = rand::thread_rng(); + + neg.setup(&cks, sks.clone()); + + let cks: crate::integer::ClientKey = cks.into(); + + for num_blocks in 1..MAX_NB_CTXT { + let modulus = unsigned_modulus(cks.parameters().message_modulus(), num_blocks as u32); + + for _ in 0..nb_tests_smaller { + let mut clear = rng.gen_range(0..modulus); + let mut ctxt = cks.encrypt_radix(clear, num_blocks); + + let ct_res = neg.execute(&ctxt); + panic_if_any_block_is_not_clean(&ct_res, &cks); + + let dec_ct: u64 = cks.decrypt_radix(&ct_res); + let expected = clear.wrapping_neg() % modulus; + assert_eq!( + dec_ct, expected, + "Invalid result for neg({clear}),\n\ + Expected {expected}, got {dec_ct}\n\ + num_blocks: {num_blocks}, modulus: {modulus}" + ); + + let ct_res2 = neg.execute(&ctxt); + assert_eq!(ct_res, ct_res2, "Failed determinism check"); + + // Test with non clean carries + let random_non_zero = random_non_zero_value(&mut rng, modulus); + sks.unchecked_scalar_add_assign(&mut ctxt, random_non_zero); + clear = clear.wrapping_add(random_non_zero) % modulus; + + let ct_res = neg.execute(&ctxt); + panic_if_any_block_is_not_clean(&ct_res, &cks); + + let dec_ct: u64 = cks.decrypt_radix(&ct_res); + let expected = clear.wrapping_neg() % modulus; + assert_eq!( + dec_ct, expected, + "Invalid result for neg({clear}),\n\ + Expected {expected}, got {dec_ct}\n\ + num_blocks: {num_blocks}, modulus: {modulus}" + ); + let ct_res2 = neg.execute(&ctxt); + assert_eq!(ct_res, ct_res2, "Failed determinism check"); + } + } +} + +pub(crate) fn default_overflowing_neg_test(param: P, mut overflowing_neg: T) +where + P: Into, + T: for<'a> FunctionExecutor<&'a RadixCiphertext, (RadixCiphertext, BooleanBlock)>, +{ + let param = param.into(); + let nb_tests_smaller = nb_tests_smaller_for_params(param); + let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix); + let cks = RadixClientKey::from((cks, NB_CTXT)); + sks.set_deterministic_pbs_execution(true); let sks = Arc::new(sks); let mut rng = rand::thread_rng(); - let modulus = unsigned_modulus(cks.parameters().message_modulus(), NB_CTXT as u32); + overflowing_neg.setup(&cks, sks); - executor.setup(&cks, sks); + let cks: crate::integer::ClientKey = cks.into(); - for _ in 0..nb_tests_smaller { - let clear = rng.gen::() % modulus; + for num_blocks in 1..MAX_NB_CTXT { + let modulus = unsigned_modulus(cks.parameters().message_modulus(), num_blocks as u32); - let ctxt = cks.encrypt(clear); - panic_if_any_block_is_not_clean(&ctxt, &cks); + for _ in 0..nb_tests_smaller { + let clear = rng.gen_range(1..modulus); + let ctxt = cks.encrypt_radix(clear, num_blocks); - let ct_res = executor.execute(&ctxt); - let tmp = 
executor.execute(&ctxt); - assert!(ct_res.block_carries_are_empty()); - assert_eq!(ct_res, tmp); + let (ct_res, flag) = overflowing_neg.execute(&ctxt); - let dec: u64 = cks.decrypt(&ct_res); - let clear_result = clear.wrapping_neg() % modulus; - assert_eq!(clear_result, dec); + panic_if_any_block_is_not_clean(&ct_res, &cks); + assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL); + assert_eq!(flag.0.degree.get(), 1); + + let dec_flag = cks.decrypt_bool(&flag); + assert!( + dec_flag, + "Invalid value for overflowing_neg flag, expected true, got false" + ); + + let dec_ct: u64 = cks.decrypt_radix(&ct_res); + let expected = clear.wrapping_neg() % modulus; + assert_eq!( + dec_ct, expected, + "Invalid result for overflowing_neg({clear}),\n\ + Expected {expected}, got {dec_ct}\n\ + num_blocks: {num_blocks}, modulus: {modulus}" + ); + + let (ct_res2, flag2) = overflowing_neg.execute(&ctxt); + assert_eq!(ct_res, ct_res2, "Failed determinism check"); + assert_eq!(flag, flag2, "Failed determinism check"); + } + + // The only case where unsigned neg does not overflows + let ctxt = cks.encrypt_radix(0u32, num_blocks); + + let (ct_res, flag) = overflowing_neg.execute(&ctxt); + + panic_if_any_block_is_not_clean(&ct_res, &cks); + assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL); + assert_eq!(flag.0.degree.get(), 1); + + let dec_flag = cks.decrypt_bool(&flag); + assert!( + !dec_flag, + "Invalid value for overflowing_neg flag, expected false, got true" + ); + + let dec_ct: u64 = cks.decrypt_radix(&ct_res); + assert_eq!( + dec_ct, 0, + "Invalid result for overflowing_neg(0),\n\ + Expected 0, got {dec_ct}\n\ + num_blocks: {num_blocks}, modulus: {modulus}" + ); } }
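
The unsigned `OverflowingNeg` impl added above carries no rustdoc example, unlike its signed counterpart. A usage sketch in the same style as the signed doc-test (assuming `FheUint16` and the default config, exactly as that example does) could look like:

```rust
use tfhe::prelude::*;
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint16};

let (client_key, server_key) = generate_keys(ConfigBuilder::default());
set_server_key(server_key);

// For unsigned integers, negation overflows for every non-zero input.
let a = FheUint16::encrypt(3u16, &client_key);

let (result, overflowed) = a.overflowing_neg();
let (expected_result, expected_overflowed) = 3u16.overflowing_neg();
let result: u16 = result.decrypt(&client_key);
assert_eq!(result, expected_result);
assert_eq!(overflowed.decrypt(&client_key), expected_overflowed);
assert!(overflowed.decrypt(&client_key));
```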
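
The flag inversion done for unsigned ciphertexts in `overflowing_neg_parallelized` (and in the CUDA `overflowing_neg_async`) can be checked on clear integers. The following is an illustrative plain-Rust sketch of the reasoning in the comment, not tfhe code: negation is computed as `!x + 1`, and the carry of that addition fires only for `x == 0`, the one unsigned input whose negation does not overflow.

```rust
// Clear-integer model of the unsigned path: result = !x + 1, flag = !(carry of the +1).
fn overflowing_neg_via_bitnot(x: u8) -> (u8, bool) {
    let (result, add_overflowed) = (!x).overflowing_add(1);
    // `add_overflowed` is true only when !x == u8::MAX, i.e. x == 0,
    // so the negation-overflow flag is its inverse.
    (result, !add_overflowed)
}

fn main() {
    for x in [0u8, 1, 5, 255] {
        assert_eq!(overflowing_neg_via_bitnot(x), x.overflowing_neg());
    }
    println!("bitnot-based overflowing_neg matches std unsigned semantics");
}
```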