cross: step: test using f64 so we can enforce more precise limits

we're foremost testing the *math*, rather than the precision under which
it's carried out
This commit is contained in:
2022-08-27 02:41:16 -07:00
parent 8a76b79e17
commit 1be2dc2419

View File

@@ -222,11 +222,10 @@ mod test {
use float_eq::assert_float_eq;
use std::cell::Cell;
// TODO: use f64 in tests for higher precision?
fn assert_vec_eq(a: Vec3<f32>, b: Vec3<f32>) {
assert_float_eq!(a.x(), b.x(), r2nd <= 1e-5, "{:?} != {:?}", a, b);
assert_float_eq!(a.y(), b.y(), r2nd <= 1e-5, "{:?} != {:?}", a, b);
assert_float_eq!(a.z(), b.z(), r2nd <= 1e-5, "{:?} != {:?}", a, b);
/// Asserts component-wise approximate equality of two vectors.
///
/// Each of x, y, z is compared with `assert_float_eq!` using an `r2nd`
/// tolerance (relative to the second operand, per the float_eq crate)
/// of 1e-9 — tight enough for f64, far tighter than the 1e-5 the old
/// f32 version could sustain. On failure the message shows both full
/// vectors, not just the mismatching component.
fn assert_vec_eq(a: Vec3<f64>, b: Vec3<f64>) {
assert_float_eq!(a.x(), b.x(), r2nd <= 1e-9, "{:?} != {:?}", a, b);
assert_float_eq!(a.y(), b.y(), r2nd <= 1e-9, "{:?} != {:?}", a, b);
assert_float_eq!(a.z(), b.z(), r2nd <= 1e-9, "{:?} != {:?}", a, b);
}
#[test]
@@ -287,7 +286,7 @@ mod test {
// this gives $dD/dt$, and then we scale by $\epsilon_0^{-1}$ to get $dE/dt$.
//```
let delta_d = Vec3::new(-2.0, 4.0, -2.0);
let delta_e = delta_d * f32::eps0_inv();
let delta_e = delta_d * f64::eps0_inv();
let new_e = ctx.step_e();
// when the h field has zero spatial derivative, E has zero time derivative.
@@ -318,7 +317,7 @@ mod test {
let new_e = ctx.step_e();
let mid_e = (in_e + new_e)*0.5;
let delta_e = new_e - in_e;
assert_vec_eq(delta_e, cond.elem_mul(mid_e) * -f32::eps0_inv());
assert_vec_eq(delta_e, cond.elem_mul(mid_e) * -f64::eps0_inv());
}
#[test]
fn step_e_understands_time_scale_feature_size() {
@@ -351,7 +350,7 @@ mod test {
// we pre-scaled the in_h values by feature_size in order to get the same nabla_h as before
let nabla_h_component = Vec3::new(-2.0, 4.0, -2.0);
let current_component = cond.elem_mul(mid_e);
assert_vec_eq(delta_e, (nabla_h_component - current_component) * f32::eps0_inv() * 1e-10);
assert_vec_eq(delta_e, (nabla_h_component - current_component) * f64::eps0_inv() * 1e-10);
}
#[test]
@@ -420,7 +419,7 @@ mod test {
let delta_b = -Vec3::new(-2.0, 4.0, -2.0);
let (new_h, new_m) = ctx.step_h();
assert_vec_eq(new_h, delta_b * f32::mu0_inv());
assert_vec_eq(new_h, delta_b * f64::mu0_inv());
assert_vec_eq(new_m, Vec3::zero());
}
#[test]
@@ -447,16 +446,16 @@ mod test {
let delta_b = -Vec3::new(-2.0, 4.0, -2.0) * 1e-3 / 0.2;
let (new_h, new_m) = ctx.step_h();
assert_vec_eq(new_h, delta_b * f32::mu0_inv());
assert_vec_eq(new_h, delta_b * f64::mu0_inv());
assert_vec_eq(new_m, Vec3::zero());
}
struct MockHMaterial {
response: Vec3<f32>,
called_with: Cell<(Vec3<f32>, Vec3<f32>)>,
response: Vec3<f64>,
called_with: Cell<(Vec3<f64>, Vec3<f64>)>,
}
impl Material<f32> for MockHMaterial {
fn move_b_vec(&self, m: Vec3<f32>, target_b: Vec3<f32>) -> Vec3<f32> {
impl Material<f64> for MockHMaterial {
fn move_b_vec(&self, m: Vec3<f64>, target_b: Vec3<f64>) -> Vec3<f64> {
self.called_with.set((m, target_b));
self.response
}
@@ -488,7 +487,7 @@ mod test {
assert_vec_eq(mock_mat.called_with.get().0, Vec3::zero()); // not magnetized
assert_vec_eq(mock_mat.called_with.get().1, delta_b);
assert_vec_eq(new_m, mock_mat.response);
assert_vec_eq(new_h, delta_b * f32::mu0_inv() - new_m);
assert_vec_eq(new_h, delta_b * f64::mu0_inv() - new_m);
}
#[test]
fn step_h_understands_previous_m_h() {
@@ -514,13 +513,13 @@ mod test {
};
// see step_h_understands_nabla_e
let delta_b = -Vec3::new(-2.0, 4.0, -2.0);
let prev_b = (in_h + in_m) * f32::mu0();
let prev_b = (in_h + in_m) * f64::mu0();
let new_b = prev_b + delta_b;
let (new_h, new_m) = ctx.step_h();
assert_vec_eq(mock_mat.called_with.get().0, in_m);
assert_vec_eq(mock_mat.called_with.get().1, new_b);
assert_vec_eq(new_m, mock_mat.response);
assert_vec_eq(new_h, new_b * f32::mu0_inv() - new_m);
assert_vec_eq(new_h, new_b * f64::mu0_inv() - new_m);
}
}