447 Commits

Author SHA1 Message Date
1387506511 try printing out gpu timesteps
this seems to just print all 0 on my laptop.
maybe it'll work better on a newer GPU.
following the example in Embark's rust-gpu:
runners/wgpu/src/compute.rs
2022-09-26 21:33:38 -07:00
6a7a6bc170 app: stacked_cores: move stimulus_per_step to where it takes effect again 2022-09-26 20:16:03 -07:00
638a97c106 app: stacked-cores: short-circuit sims that have already completed
this takes the per-sim time down from about 30s to 1s for completed
runs. allows much easier stopping + restarting of batched sims.
2022-09-26 16:08:15 -07:00
858c8f4db5 app: stacked-cores: more runs 2022-09-26 15:49:48 -07:00
6d1fb5d614 stim: fix Sinusoid tests 2022-09-25 18:05:48 -07:00
5f289bf07b update rust-toolchain: 2022-04-11 -> 2022-08-29, and update cargo deps 2022-09-25 18:05:17 -07:00
cbd741c6df app: stacked_cores: more asymmetric-winding parameterizations 2022-09-25 16:20:04 -07:00
ebbaf4c697 explore more of 40: multi-wrapped cores 2022-09-24 03:25:32 -07:00
c7ac19dcc9 app: stacked_cores: document more of the 24 (multi-wrapping) runs 2022-09-22 21:12:12 -07:00
6e32f9791e app: stacked_cores: re-analyze 17-xx and 18-xx fork-and-join experiments
neither of these produce good amplification.
- we expect bad amplification for 18-xx because it doesn't do any loops
  with > 2 cores
- 17-xx might just need reduced drive current
2022-09-22 17:25:14 -07:00
26a4a6ea86 app: stacked_cores: explore a few more current parameterizations of the fork-then-join single-clock approach 2022-09-22 17:03:23 -07:00
cd8d374648 app: stacked_cores: try a fork -> join inverter
seems i need to increase the drive current
2022-09-22 02:25:31 -07:00
a0531e5866 app: stacked cores: expand 38 to more initial conditions 2022-09-20 17:37:17 -07:00
79c45359ea app: stacked cores: complete more simulations 2022-09-20 03:02:21 -07:00
92131f95f1 app: stacked cores: add missing (1, 0) sim 2022-09-19 21:31:45 -07:00
58704e4498 app: stacked cores: more experiments around folding multi-cores together 2022-09-19 21:28:16 -07:00
57e9759cab add 24 (multi-winding inverter) to inverter_characteristics.py 2022-09-19 18:24:27 -07:00
e6af37bef4 move crates/{post -> applications/stacked_cores}/scripts 2022-09-19 17:59:05 -07:00
bb737db3f7 plot some datapoints about the stacked_cores inverters 2022-09-19 17:58:26 -07:00
990e71b5c9 app: stacked_cores: more multi-core experiments 2022-09-19 16:02:28 -07:00
5641dc12f1 app: stacked_cores: more experiments around multi-core setups with bias 2022-09-18 17:39:47 -07:00
e8fc3c355f scripts: document the conditions which lead to "stable" logic levels 2022-09-18 17:28:32 -07:00
584fcac841 app: stacked_cores: round the step target to avoid duplicate work across runs 2022-09-16 17:49:01 -07:00
a6e5cb7583 app: stacked_cores: add some more simulations 2022-09-16 17:15:51 -07:00
8917c4a562 app: stacked_cores: define a bunch more sims, especially ones with asymmetric wrappings
this complicates the implementation quite a bit...
i imagine this whole file will be axed someday and treated as a
temporary prototype... we'll see :^)
2022-09-13 00:49:48 -07:00
d7364fe682 region: add a WedgeZ primitive
later i use this to Intersect a torus,
thereby creating a torus which only sweeps out one particular arc angle.
2022-09-13 00:48:32 -07:00
fdbe91b281 Vec2: re-enable the arg method 2022-09-13 00:48:13 -07:00
3e78c1e407 app: stacked_cores: more sims where we use a different loop count to couple S0/S1/S2 2022-09-12 00:32:00 -07:00
265706371d app: stacked_cores: vary the drive strength in the stacked-outside-with-direct-output-locked setup 2022-09-09 15:38:16 -07:00
d42eb32e07 app: stacked_cores: mix Outside coupling with Direct multi-coupling 2022-09-09 01:53:16 -07:00
1a242f05c1 app: stacked_cores: vary the drive current for these Outside Coupling runs 2022-09-08 16:21:26 -07:00
841bc864e9 gitignore: pycache directories 2022-09-08 15:38:56 -07:00
b79c25d2ca app: stacked_cores: define more experiments for the "Outside" flavor of coupling 2022-09-08 15:37:59 -07:00
518ef7df6d app: stacked_cores: new experiment for coupling cores "outside"
that is, direct couple non-adjacent cores, by routing the wires
_outside_ the core stack
2022-09-08 03:21:40 -07:00
6b154367c9 app: stacked_cores: more experiments 2022-09-07 15:53:04 -07:00
6ca6e015d2 app: stacked_cores: DriveStrength: Fold {Hold,Release}{Low,High} into just {Fold,Release} 2022-09-04 23:50:29 -07:00
692bfb2571 app: stacked_cores: convert ClockState states to use constructors 2022-09-04 23:12:38 -07:00
41029d5ad3 app: stacked_cores: add ability to set different drive pulses to different (relative) strengths 2022-09-04 23:00:52 -07:00
86c37a19c8 app: stacked_cores: new experiment where we couple S0,S1,S2 "directly" 2022-09-04 02:33:14 -07:00
5cdedfee41 app: stacked_cores: new experiment where we write S1 -> {S0,S2} and then write *back* to S1 from these 2022-09-04 01:27:54 -07:00
b8878bde1d split stacked_cores_8xx.py 2022-09-04 01:15:41 -07:00
e076480791 post: add script to extract info from the stacked_core 8xx experiments 2022-09-04 00:52:13 -07:00
197591712d app: stacked_cores: try driving 4 cores off of one core
it's actually pretty successful!
2022-09-03 01:06:01 -07:00
ff5882797a app: stacked_cores: add a 3rd core to the mix 2022-09-02 04:35:41 -07:00
a6bfdeb689 app: stacked_cores: vary the windings
we're able to get 90% transfer transmission and 30% noop transmission
when setting this right: a 3x disambiguation with 90% amplitude.
if we solve the amplification problem, we should be in the clear.
2022-09-02 02:48:42 -07:00
f38f06098f app: stacked_cores: implement multiple coupling loops per core 2022-09-01 22:00:42 -07:00
ea3ea63488 ColorTermRenderer: print the measurement name, not just the value 2022-09-01 21:44:38 -07:00
e868f493fb region: add a Rotate operation 2022-09-01 21:39:11 -07:00
216a5d8d76 region: ElongatedTorus: fix accidental doubling of length 2022-09-01 21:38:44 -07:00
491b5d3591 cross: Vec3: implement rotate_{xy,xz,yz} operations 2022-09-01 21:38:06 -07:00
2044397047 app: stacked_cores: prototype
the long term goal of this demo is to see if:
(a) we can get stronger coupling between two cores than with multi-core-inverter.
(b) we can get amplification by using a charge-pump like concept.
(c) we can construct a *working* multi-core-inverter from this.
2022-09-01 18:41:49 -07:00
f737a4c916 region: add an ElongatedTorus type
this is needed for when we want to couple some cores,
but require a narrower space to do so in.
2022-09-01 18:39:32 -07:00
19ff08ada8 app: buffer_proto5: increase fps by setting steps_per_stim
the bottleneck seems mostly to be transferring data CPU <-> GPU.
boosting to 200 steps per stim gets us like 50 fps, but causes serious
CPU-side lag.
2022-09-01 17:41:13 -07:00
482f07520c app: buffer_proto5: fix so DiskCache::get_or_insert flushes 2022-08-31 03:03:13 -07:00
0a1452cb5c buffer_proto5: parallelize a bit more 2022-08-31 02:55:40 -07:00
184997a701 app: buffer_proto5: parallelize the geometry searches 2022-08-31 02:40:47 -07:00
a6fe05a993 app: multi-core-inverter: add more sims 2022-08-31 01:40:29 -07:00
ea07a8d5a6 sim: spirv: tests: cleanup unused imports 2022-08-30 15:43:30 -07:00
ff1342ff8a sim: spirv: get the ray_propagation test working 2022-08-30 15:25:57 -07:00
c050b0406f app: multi-core-inverter: add some more simulations 2022-08-30 01:10:39 -07:00
a45d27a2e9 spirv: test: prototype a ray_propagation test
it needs some more work to be useful
2022-08-28 02:02:06 -07:00
a811561f14 app: multi-core-inverter: more simulations 2022-08-28 02:01:51 -07:00
538db399a9 Stimulus: Sinusoid: remove the amp component 2022-08-28 02:01:33 -07:00
ec77584311 cross: notes about optimization 2022-08-28 02:00:49 -07:00
5dc619ebc9 cross: add Real::c_inv() 2022-08-28 02:00:18 -07:00
19f08fce9f app: multi-core-inverter: tune drive current and conductivity 2022-08-27 03:14:41 -07:00
1be2dc2419 cross: step: test using f64 so we can enforce more precise limits
we're foremost testing the *math*, less than the precision under which
it's carried out
2022-08-27 02:41:16 -07:00
8a76b79e17 cross: step: backfill step_e tests 2022-08-27 02:38:50 -07:00
deaaefc3e7 add Real::eps0_inv() convenience method 2022-08-27 02:38:32 -07:00
529ad943ac app: multi-core-inverter: explore a few more simulations 2022-08-27 01:37:23 -07:00
769c90cf9e stim: fix issue where Exp would cause NaNs for very negative t 2022-08-27 01:31:06 -07:00
532055e045 app: multi-core-inverter: define a few more sims
trying to strike the right balance between coupling conductivity and
decay time.
2022-08-26 18:13:50 -07:00
5b66c2bc26 app: multi-core-inverter: parameterize a few more cases
exploring how low control conductivity can go
2022-08-26 04:17:01 -07:00
c90a73d395 cross: step: finish backfilling step_h tests 2022-08-26 02:03:27 -07:00
a7a9a9ea84 cross: step: backfill step_h material-free test 2022-08-26 01:43:10 -07:00
267c9fd36e cross: step: backfill a trivial step_h test 2022-08-26 01:23:52 -07:00
a4008dcc1d cross: step: document the step_h math 2022-08-26 00:41:41 -07:00
64410da4fe cross: step: reconcile the difference between our two \Delta t's 2022-08-25 22:49:50 -07:00
bbd31cc7be cross: step: annotate step_e with a maths derivation
this derivation comes from the legacy cpu code.
it looks like the spirv implementation actually
pursued a conceptually simpler approach.
i'll try to reconcile these shortly.
2022-08-25 22:20:03 -07:00
7f40b3ccd5 app: multi-core-inverter: more experiments with varied conductivities 2022-08-25 19:38:12 -07:00
bf3f79dd50 app: multi-core-inverter: implement some sims which specify conductivities separately 2022-08-25 15:43:13 -07:00
f80b2e22a4 app: multi-core-inverter: allow setting coupling and drive conductivities separately 2022-08-25 15:37:06 -07:00
6afa65c3e4 app: multi-core-inverter: design a few new experiments which vary the conductivity 2022-08-25 15:32:37 -07:00
cc1bdba280 Stimulus: leave a TODO for what to do about Exp & NaN/Inf 2022-08-25 15:32:13 -07:00
10fefdc9a3 step: backfill some documentation 2022-08-24 17:52:46 -07:00
9e49538ba7 cross: split supporting types out of step.rs
this focuses the core "business logic" into a more narrow slice.
good for organization, and also to highlight where the complexity lies
(i.e. most valuable places to test).
2022-08-24 17:45:28 -07:00
1cfef7cac6 stimulus: remove unused Stimulus impl for RegionGated 2022-08-24 15:49:46 -07:00
2c68a53668 rename StimExt to TimeVaryingExt 2022-08-24 15:44:12 -07:00
a2ee85d03f stim: remove the unused Stimulus impl for Shifted 2022-08-24 15:42:37 -07:00
7f089bad45 stim: break apart into smaller modules 2022-08-24 15:27:40 -07:00
9301734fcf app: multi-core-inverter: parameterize the wire conductivity 2022-08-24 15:06:41 -07:00
6d717fdda4 app: multi-core-inverter: explore multiple parameterizations in one run 2022-08-24 02:08:06 -07:00
488d0fe62a sim: spirv: remove apply_stimulus test-only function 2022-08-24 01:51:32 -07:00
dd6f598e44 spirv tests: reorganize 2022-08-24 01:42:10 -07:00
c008fb963e app: multi-core-inverter: explore more of the space 2022-08-24 01:24:16 -07:00
4168b9e4c6 spirv: uncomment some old tests
one of them fails, but it fails 600+ steps into the test.
not sure how problematic this really is, yet.
2022-08-24 00:16:26 -07:00
e94dc49d0f spirv test: mb_ferromagnet tests both e and h fields
we have to lessen the bounds a little bit.
no surprise: *not testing h* caused them to differ substantially.
i'd like to test more strenuously, but that would likely require
enabling R64 on the gpu side.
2022-08-24 00:04:20 -07:00
cdb0c3eaaa leave a TODO: enumerated: make use of List abstractions 2022-08-23 23:51:03 -07:00
ab6496d5f6 backfill Vec2::rotate tests 2022-08-23 23:49:43 -07:00
b16316b75b Driver: remove the unnecessary Boxing of RenderedStimulus
no obvious perf diff one way or the other, yet
2022-08-23 23:33:22 -07:00
4525bbde56 remove unused lazy_static dependency 2022-08-23 23:29:47 -07:00
a51c3a1d14 sim: remove the unused StaticSim type alias 2022-08-23 23:25:08 -07:00
8e48414d68 SimMeta: make the fields private 2022-08-23 23:23:49 -07:00
17446cdc6b sim: remove unused AbstractSim::to_static method 2022-08-23 23:13:15 -07:00
2af754bf29 sim/legacy: remove
that crazy tangle of legacy code evolved over 2+
years into the beast it is today.
but it has no relevance in the GPU-enabled world of today,
particularly one with more rigid Material abstractions.

good things come to an end. i'll try not to be too sentimental.
2022-08-23 23:01:29 -07:00
1891a72df3 spirv tests: no longer test against the legacy simulation:
test the CPU impl against the GPU impl.

it's a different class of test. but it provides some value yet and most
importantly, allows us to strip out the legacy simulation code without
losing *too much* test coverage.
2022-08-23 20:05:30 -07:00
e7ed46bb89 spirv tests: clean up the code layout a bit 2022-08-23 19:23:02 -07:00
c8735ce164 legacy: mark port status of remaining tests 2022-08-23 19:15:44 -07:00
397eaa5a24 sim: port conductor-related legacy tests to spirv 2022-08-23 19:09:32 -07:00
4024ee3316 sim: port legacy test to spirv: sane_boundary_conditions 2022-08-23 16:56:35 -07:00
5c7ce8437a sim: port legacy energy_conservation_over_time test to spirv
the original had a suspect dimension (it was apparently applying a
stimulus *outside* the simulation). i've rectified that, but left a note
to ensure this doesn't happen again...
2022-08-23 16:38:24 -07:00
dcd7079c5f meas: Energy: allow non-meas code to query a simulation's energy
this is especially useful in test
2022-08-23 16:37:31 -07:00
f47c713e0e stimulus: add a structure to sum together two stimuli of different types
this may resemble the original List stuff. only it's more specialized,
for only summing two of a thing.
2022-08-23 16:35:29 -07:00
3326acd125 Driver: update TODOs 2022-08-23 01:27:03 -07:00
3c30ac33aa Driver: replace the other ThreadPool with a JobPool
as a bonus we can remove the threadpool dep :-)
2022-08-23 00:03:58 -07:00
7586bc8ff2 JobPool: implement buffering
this does give a little perf bump
2022-08-22 23:38:23 -07:00
a15af4d3a3 JobPool: remove outdated TODO 2022-08-22 20:13:35 -07:00
31726abb99 Driver: stimulus: use our own JobPool abstraction
this one leverages crossbeam.
it does appear to schedule jobs faster than the rust-threadpool.
curiously, the `recv` impl seems to be slower.
maybe that's because of the (inadvertent, unnecessary) switch from mpsc
to mppc. worth trying to just insert a buffer.
2022-08-22 20:09:59 -07:00
eb95367fa5 JobPool: don't parameterize the type over its Worker
the Worker is often hard to specify concretely at the use site.
2022-08-22 20:05:41 -07:00
284b7368ef add a JobPool type to better abstract over repeat asynchronous work 2022-08-22 19:30:07 -07:00
5fff872890 app: multi-core-inverter: implement 2-core inverter
this is a simpler test-bed to explore things like clock duration
2022-08-22 18:14:28 -07:00
b160823a81 fix broken cargo build --all 2022-08-22 14:17:51 -07:00
7bce17f56b driver: more precisely measure stimuli "prep"
i'm beginning to think the `threadpool` library is actually just kinda
slow.
2022-08-22 02:32:58 -07:00
4df6e19abe Driver: slightly better instrumentation 2022-08-22 02:17:59 -07:00
532dd26e22 driver: optimization: don't memcopy the RenderedStimulus across threads 2022-08-22 01:49:35 -07:00
8268215441 app: multi-core-inverter: fix non-terminating drive signal 2022-08-22 01:41:20 -07:00
232e0fdafb Stimulus: replace Gated with a type alias 2022-08-22 01:36:47 -07:00
b0bedd1efa Driver: fix a bug where we might step more than the user wanted 2022-08-22 01:23:41 -07:00
82af4b100d driver: optimize the step_multiple step count calculation
this was using a stupid amount of compute.
we still have about 7% time unaccounted for. down from 12-15%.
2022-08-22 01:07:27 -07:00
e5c8bcff95 Driver: remove dead add_classical_boundary_explicit function 2022-08-22 00:51:53 -07:00
ff13a1e96c driver: address a TODO 2022-08-22 00:43:07 -07:00
24b82037b4 Stimulus: parameterize over R.
this saves us from a `mem::transmute` in the sim code to get
`Fields<R>`.
2022-08-22 00:37:34 -07:00
e32d500f8c real: add sin, cos and ln2 functions 2022-08-22 00:37:19 -07:00
f0fc324188 Stimulus: remove unused eval_into trait method 2022-08-21 21:41:27 -07:00
c02e5427d4 spirv tests: port to R32
this gives better debug info
2022-08-21 20:46:57 -07:00
29e78c74fe real: implement std::iter::Sum 2022-08-21 20:46:43 -07:00
527e2746ed driver: TODO about optimization 2022-08-21 19:25:20 -07:00
75a5041ed6 diagnostics: nicer formatting 2022-08-21 19:18:42 -07:00
98d6a5b34f spirv: instrument the device read/write operations 2022-08-21 18:51:51 -07:00
6c9a6e1ffa driver: allow the user to configure the number of steps to go between stimulus application 2022-08-21 18:22:11 -07:00
a414bd77d4 diagnostics: break out a variable to make this code cleaner 2022-08-21 18:12:31 -07:00
850a7e773f diagnostics: rename time_spent_{foo} -> time_{foo} 2022-08-21 18:11:03 -07:00
a38734a1ed diagnostics: instrument the stimulus and stimulus blocked time 2022-08-21 18:10:09 -07:00
18dd66530a driver: evaluate stimulus in a background thread
this boosts fps from 920 to roughly 1150
2022-08-21 16:20:46 -07:00
7b848bcd16 driver: hide more behind the StimAccess type 2022-08-20 19:08:15 -07:00
c5cede6c6e driver: hide the stimulus stuff behind a wrapper
this will make prefetch cleaner to implement
2022-08-20 18:58:31 -07:00
053943df01 add Stimulus::render() and use it inside the driver and SpirvSim 2022-08-20 17:36:23 -07:00
d662ef24d3 SimMeta: implement PartialEq 2022-08-20 17:35:41 -07:00
4f229a51b1 rename RenderedVectorField -> RenderedStimulus 2022-08-20 17:08:26 -07:00
cd2917c8a5 driver, sim: use RenderedVectorField to simplify/optimize sim-internal rendering 2022-08-20 17:07:25 -07:00
69a603920f add a RenderedVectorField. maybe more accurately called a rendered stimulus?
used to represent a stimulus which has been rendered for a specific time with specific simulation parameters.
2022-08-20 17:05:58 -07:00
69ee2070c8 DimSlice: impl Default 2022-08-20 17:04:56 -07:00
ff4209ce78 SpirvSim: remove a Vec copy from the stimulus evaluation
this boosts perf from 562 fps -> 900-ish for the multi-core-inverter
```
t=2.73e-9 frame 141700 fps: 901.93 (sim: 154.0s, stim: 30.6s, [render: 92.7s], blocked: 0.0s, render_prep: 0.7s, other: 2.5s)
```

we're now spending more CPU time rendering the measurements
than computing the stimulus
2022-08-19 04:55:50 -07:00
87c24c739c spirv: call Stimulus::at instead of Stimulus::eval_into
this *lowers* perf from 595 fps -> 562 fps
2022-08-19 04:49:18 -07:00
570917cae5 stim: Gated: no longer a Stimulus 2022-08-19 04:34:20 -07:00
8df001773f eval_into: remove the scale parameter
this actually seems to drop perf from 637 -> 595 ish?

i suppose the compiler was able to fold the time multiplier in with the
scale multiplier? idk, somewhat surprised.
2022-08-19 04:26:58 -07:00
ad5f064584 stim: Simplify the Exp implementation. it's no longer a Stimulus 2022-08-19 04:14:33 -07:00
77124fcdaf driver: implement an optimized stimulus adapter over ModulatedVectorField
this boosts perf from 520fps -> 632fps.

it does some unnecessary clones.
but it looks like the bulk of the inefficiency resides inside
the sim/spirv/ code though.
it might be that this is nearly memory-bottlenecked.
if so, backgrounding it might be sensible.
2022-08-19 03:54:43 -07:00
9f97f474d7 cross: DimSlice: allow the underlying data to be a Vec 2022-08-19 03:54:01 -07:00
35dbdffda7 driver: lay some scaffolding to allow us to optimize the stimulus in future 2022-08-18 22:19:50 -07:00
ffda00b796 stim: convert CurlStimulus to a CurlVectorField and use ModulatedVectorField
this opens the door to caching the vector field stuff.
2022-08-18 20:47:36 -07:00
478db86b75 multi-core-inverter: remove the List shenanigans 2022-08-18 20:02:09 -07:00
9461cc7781 stim: introduce a VectorField trait which we'll use to build a more structured approach to Stimulus 2022-08-18 17:08:44 -07:00
cf2d21f975 Stimulus: change at method to accept feat_size: f32, loc: Index 2022-08-18 16:21:21 -07:00
0fa8cb0d20 cross: DimSlice: add into_inner method 2022-08-18 16:04:15 -07:00
6750feef8d stim: remove TimeVarying3
`TimeVarying`(1) is enough for what we want.
2022-08-18 15:51:54 -07:00
570f058ee1 rename AbstractStimulus -> Stimulus 2022-08-18 15:27:18 -07:00
60e44d6d4d rename Stimulus -> RegionGated 2022-08-18 15:22:28 -07:00
eb406ea46f UniformStimulus: use Fields internally 2022-08-18 15:19:22 -07:00
6e7ae48d86 stim: remove the extra norm call in CurlStimulus application
we call `with_mag` after, making it redundant.
2022-08-18 14:28:33 -07:00
454307325b stim: add a scale parameter to AbstractStimulus::eval_into
this boosts perf from 571fps -> 620-ish.
2022-08-18 04:33:00 -07:00
e72c0ec11d cross: DimSlice: add dim/offset accessors 2022-08-18 04:32:43 -07:00
fb9d982545 multi-core-inverter: test an alternate Vec-based stimulus
it's about 0.5% slower. not much.
2022-08-18 04:12:57 -07:00
b9581b79b2 sim: add AbstractStimulus::eval_into for bulk evaluation 2022-08-18 04:11:04 -07:00
07fa4042a3 cross: list: add IntoVec trait 2022-08-18 04:00:41 -07:00
a3b15bd7e7 cross: DimSlice: add as_ref, as_mut methods to re-borrow the data with a different lifetime
we can't use the actual AsRef, AsMut trait because we aren't returning a
reference but a new type with an inner reference.
2022-08-18 03:25:52 -07:00
300c11f5ca stim: use a Visitor instead of a FoldOp for eval
boosts perf from 420 -> 530 fps
2022-08-18 02:52:57 -07:00
2a9c065cb0 cross: list: allow visit to be mutable 2022-08-18 02:45:15 -07:00
f2b23ace17 cross: list: implement a Visit trait.
it can't do much yet because the immutable-ness, but i can fix that.
2022-08-18 02:32:52 -07:00
5cc1c310b5 AbstractStimulus: add bulk eval_into operation. 2022-08-18 01:45:09 -07:00
a0d4a39a66 cross: OffsetDimSlice: implement enumeration 2022-08-17 21:41:26 -07:00
a34363122b cross: OffsetDimSlice: impl indices() enumeration 2022-08-17 21:37:40 -07:00
129aaadeac OffsetDimSlice: impl IntoIter 2022-08-17 21:20:24 -07:00
a247b861e1 cross: hide the iteration features behind a flag
they don't compile on spirv due to the inherent use of Options,
but they'll be useful in the CPU-side code.
2022-08-17 21:14:21 -07:00
198cc16f3f cross: compile tests with the fmt feature so we can use assert_eq 2022-08-17 21:06:23 -07:00
3ed4cd5059 rename DimensionedSlice -> DimSlice 2022-08-17 20:45:36 -07:00
694906aa32 add OffsetDimensionedSlice, with basic indexing operations 2022-08-17 20:43:38 -07:00
ae5bfcf311 rename dim.rs -> dim_slice.rs 2022-08-17 20:34:47 -07:00
7a1e5815a8 rename Vec3uIter -> DimIter 2022-08-17 20:32:31 -07:00
b82c127957 add an enumerated method to DimensionedSlice 2022-08-17 18:12:32 -07:00
2311cf1dd3 cross: DimensionedSlice: add an indices method 2022-08-17 18:04:21 -07:00
782ac4d2c1 cross: impl IntoIterator for DimensionedSlice 2022-08-17 17:41:39 -07:00
1688707aed cross: backfill test for DimensionedSlice::IndexMut 2022-08-17 17:31:11 -07:00
92d4f464e1 cross: DimensionedSlice: backfill Index test 2022-08-17 17:28:31 -07:00
ee98e1a060 stim: re-express the AbstractStimulus list op as a fold
this gives a big perf boost: 10.5 fps -> 446 fps.

still far lower than the 720 fps we got on an ordinary Vec<Box<dyn
AbstractRegion>>. i think we had achieved 730 using the old
ListVisitor.

it's probably not worth list-ifying the stimuli; at least not at this
level. at the least, we probably want only 5 stimuli: one per core.
if we did that, the stimuli could even have all the same typename,
and be put into a plain old array; no boxing.
2022-08-17 03:28:52 -07:00
ffa13ccd79 app: multi-core-inverter: clean this up by using map/flatten operations 2022-08-17 02:53:14 -07:00
2e667a02dd cross: list: add some conveniences to query list length and access the first element 2022-08-17 02:45:44 -07:00
107a28e7fd app: multi_core_inverter: replace these into_lists with a map operation 2022-08-16 16:53:22 -07:00
7bb3740ce2 app: multi_core_inverter: convert to List primitives
goal will be to replace this with enumerate/map/flatten ops
2022-08-16 16:40:39 -07:00
d45b2042e1 cross: list: add more convenient indexing 2022-08-16 16:40:15 -07:00
4b04a48cc4 cross: list: allow enumerating by u32 instead of Tagged 2022-08-16 16:18:09 -07:00
07b5c855e8 cross: list: implement a Flatten operation 2022-08-16 15:52:09 -07:00
9d4853333d cross: list: implement an Extend operation 2022-08-16 15:28:10 -07:00
c353ce411f cross: list: remove the old (unused) Visitor infrastructure
replaced by Fold/Map/etc
2022-08-16 15:07:47 -07:00
4c5c978053 whitespace nits 2022-08-16 01:29:35 -07:00
fad70f45c1 stim: use Map + Sum for evaluating stimuli lists 2022-08-16 01:11:46 -07:00
e2728e0303 stim: impl Add for Fields to simplify some of this code 2022-08-16 00:17:23 -07:00
a68b3c7a49 cross: list: backfill more extensive Sum tests 2022-08-16 00:08:01 -07:00
57b90faa69 cross: list: implement a Sum operation 2022-08-16 00:03:15 -07:00
a8acf6cbb8 cross: list: extend fold to work by reference. 2022-08-15 23:51:19 -07:00
7704eb623a cross: list: remove special access to Node::{head,tail} 2022-08-15 23:00:49 -07:00
a2939a7807 cross: list: implement Enumerate operation 2022-08-15 21:25:41 -07:00
1cff95877e cross: list: implement a Map operation 2022-08-15 21:00:16 -07:00
96c690990f cross: list: make the fold impl more consistent with reverse 2022-08-15 19:18:05 -07:00
35ceefca42 cross: list: implement a Reverse operation
this was stupidly hard. it HAS to be a trait: it cannot be a
free-standing function else rustc gets stuck in recursive trait
expansion.
2022-08-15 19:14:44 -07:00
7c1824d58c cross: list: document why we use the flat impl 2022-08-15 18:46:44 -07:00
dcbef02765 cross: backfill test for list Appendable::append 2022-08-15 16:52:03 -07:00
a3f2b0b33f cross: list: more tests for fold operation 2022-08-15 03:39:22 -07:00
663d657969 cross: list: implement a Fold operation 2022-08-15 03:33:11 -07:00
1a86fb5ca3 cross: list: fold MaybeMeta and Meta into one trait 2022-08-15 02:32:47 -07:00
22051a39f8 cross: list: s/decl_/impl_/: these are implementations, not declarations 2022-08-14 21:04:52 -07:00
35d0e6a96d cross: list: add an Appendable trait.
this isn't tested... hope it works!
2022-08-14 21:03:49 -07:00
6e9b4465cb impl IntoList for all 100 list types
didn't end up using the Chomp stuff.
2022-08-14 21:01:49 -07:00
2225f98ec8 cross: list: implement IntoList on top of ChompTuple
this should be easier for macro generation of the impl.
2022-08-14 19:52:37 -07:00
19893157fa port: legacy sim accessors test to spirv 2022-08-14 19:16:09 -07:00
f61c0aeb00 spirv_backend: document why we don't support f64 2022-08-14 19:15:58 -07:00
1e994a4feb app: multi_core_inverter: more experiments (clock decay length, drive current, clock length) 2022-08-14 16:24:58 -07:00
f1143ec365 app: multi_core_inverter: don't save meas.csv
this boosts perf by like 5x
2022-08-13 16:20:24 -07:00
ee93c22f4a app: multi_core_inverter: perf: move the stimulus Gating to outside the CurlStimulus
the region.contains() logic is much more expensive than the time bounds
check.
this gets an easy 50% perf boost to the ENTIRE simulation
2022-08-13 15:00:56 -07:00
434dc2cbd5 app: multi_core_inverter: de-virtualize the stimuli
this gets like a 5% perf gain.
there are obviously cleaner ways to do it (e.g. use a ListVisitor),
but this is only a proof of concept.

given the low perf difference, i'll likely revert this or explore other
options.
2022-08-13 03:51:52 -07:00
bbb8b2b9ae driver: better APIs around list-based stimuli 2022-08-13 03:51:01 -07:00
df2ccac1d7 cross: list: implement Indexable up to P99
was previously P4
2022-08-13 03:50:11 -07:00
858e787c19 driver: allow preserving the Stimuli as a concrete List 2022-08-12 18:03:10 -07:00
40449c4165 stim: implement AbstractStimulus for any List of stimuli
note that this doesn't handle zero-length lists (yet).
2022-08-12 17:08:53 -07:00
c2f8acaf24 cross: add a List visitor/walker type 2022-08-12 16:17:26 -07:00
468f5c5225 cross: rename 'VariantHandler' -> 'Visitor' 2022-08-12 15:20:36 -07:00
a74e7fa9a0 Driver: parameterize over the Stimulus type 2022-08-12 14:47:45 -07:00
e141047bec Driver: simplify the StimuliAdapter
it was basically dead code.
2022-08-12 14:42:56 -07:00
31fd83eb34 apps: multi_core_inverter: setup for 4ns clock phases
double what they were before. gives more time for things to settle.
2022-08-12 14:42:17 -07:00
658b8616d7 Torus: correct the cross_section_normal method -- again
i should add integration tests
2022-08-12 02:23:53 -07:00
084c5bc342 Region: remove Clone from the trait, and also parameterize everything
i didn't necessarily *want* to parameterize it all,
but it turned out to be easier to do that than to force all users to
workaround the lack of Clone.
2022-08-12 01:42:19 -07:00
d5fbb4e9b2 Region: remove the Serialization requirement 2022-08-12 00:57:01 -07:00
090b1ca09a BUGFIX: Torus: don't normalize the cross section normal
this would have led to incorrectly scaled current measurements
(but not incorrect current generation). we were likely severely
over-estimating the current.
2022-08-12 00:41:25 -07:00
ae1eb861be instrument the stimulus evaluation in our sim
... stimulus evaluation accounts for like 80% of the execution time 🤦
2022-08-11 22:57:43 -07:00
09bc7492ed expose diagnostics into the Sim, and capture stimuli evaluation
this isn't publicly exposed yet.
2022-08-11 22:43:07 -07:00
e7cc78a947 diagnostics: split into their own file 2022-08-11 22:31:05 -07:00
d379a7b0ee app: multi_core_inverter: try a related experiment where S0 is initialized to logic low 2022-08-11 22:24:19 -07:00
aa8f474f52 driver: Diagnostics: clean up the impl a bit 2022-08-11 19:04:12 -07:00
4a33912164 driver: abstract the render time measurements behind a Diagnostics api 2022-08-11 18:58:22 -07:00
f7b72a72be driver: abstract the step diagnostics measurements 2022-08-11 18:41:41 -07:00
a413a4d391 driver: move last_diag_time out of the Diagnostics object 2022-08-11 18:38:36 -07:00
0c9f04981a driver: relegate more diagnostics formatting to the Diagnostics impl 2022-08-11 18:36:35 -07:00
6f1e1557b3 driver: diagnostics: track the actual number of frames stepped
this allows fps-related diagnostics to be meaningful after
serialization / restarts.
2022-08-11 18:29:58 -07:00
e85d38d415 driver: split the Diagnostics out into their own object
more diagnostic refactoring to come
2022-08-11 18:27:30 -07:00
831cbfa76c app: multi_core_inverter: tune the state serializations
less frequent (for less disk space), and also save state
in a recoverable manner
2022-08-11 18:22:55 -07:00
1928ad71cd serializer renderer: gracefully handle the case where we run out of disk space
we might still run out of space when writing CSVs and other outputs...
but that's at least less likely,
as long as we serialize the rendering.
2022-08-11 18:21:46 -07:00
c83a44299f app: multi-core-inverter: fix S4 drive signal specification
there was a spurious high -> low transition
2022-08-11 15:24:26 -07:00
e23ab9efd7 app: multi_core_inverter: try a 5-stage inverter (each stage inverts)
we're diverging from the blog pretty far now.
but it turns out that, because of the inversion in Maxwell's
$\nabla \times E = -dB/dt$ equation, the trivial wiring actually leads to
natural inverters.
2022-08-11 03:01:08 -07:00
652621e47a app: multi_core_inverter: more precise clock management
try to control the edges when the clock is released to prevent ringing.
2022-08-10 16:39:56 -07:00
59a4419130 app: multi_core_inverter: more detailed drive cycle 2022-08-10 15:47:28 -07:00
2f91418095 post: add doc-comments for these tools 2022-08-10 14:28:20 -07:00
46a53a4dde app: multi_core_inverter: fix up the drive sequence
see the code comment for explanation.
2022-08-10 01:43:36 -07:00
3998d72d02 app: multi_core_inverter: drive all four cores for four clock cycles 2022-08-10 01:35:42 -07:00
4fe8be8951 when writing Measurements to a CSV, format them in a machine-readable manner
i haven't tested the ones which contain commas -- hopefully the CSV
encoder deals with these :-)
2022-08-10 01:34:37 -07:00
8a3a64face meas: correctly render SI prefixes for negative numbers
the previous implementation treated negative numbers as effectively
having unknown magnitude, rendering them without any adjustment.
2022-08-10 01:17:49 -07:00
e08c6dbaa3 stim: backfill tests for CurlStimulus 2022-08-09 22:54:21 -07:00
520e9d9f68 CurlStimulus: re-use the HasCrossSection trait code
i believe this inverts the sign, but it also looks more correct this way
so i'm not immediately correcting that in this patch.
will backfill tests to verify.
2022-08-09 22:14:38 -07:00
1771973c6d CurlStimulus: take axis and center directly from the Region
by taking more from the region, we'll be able to reuse common code
and also make this more testable
2022-08-09 22:10:16 -07:00
7d1ee0ad50 meas: backfill tests for CurrentLoop 2022-08-05 17:35:55 -07:00
06379ffd30 CurrentLoop: use a better justified measurement algorithm
'course the best way to justify it is with tests: hopefully those will
come shortly.
2022-08-01 06:12:16 -07:00
527814e38a convert HasTangent -> HasCrossSection
i believe the current loop algorithm (which i'm just preserving here) is
actually not correct. i'll work through it more.
2022-08-01 05:17:35 -07:00
cc876d72d6 CurrentLoop: factor out the tangent calculation 2022-08-01 00:50:02 -07:00
723fed4786 rename meas::{eval_multiple_kv -> eval_multiple} 2022-07-31 23:27:37 -07:00
0e0945f744 measurement: remove the eval method 2022-07-31 23:26:53 -07:00
d5d8402c3d gitignore: don't ignore vim swap files
have the dev put them somewhere else
2022-07-31 17:15:42 -07:00
5362dacf3a Measurement: don't use SI prefix if there's no unit 2022-07-30 21:21:46 -07:00
b5c58c03ce meas: add a missing unit to the Energy measurement 2022-07-30 21:21:02 -07:00
530ab890e6 meas: render the SI prefix 2022-07-30 21:15:51 -07:00
542d700f69 meas: finish porting to a concrete type.
this will in future let me more easily test each individual measurement
type
2022-07-30 20:56:19 -07:00
60840aec36 WIP: make the measurement type concrete 2022-07-30 20:33:03 -07:00
4361167f99 stim: strongly-type the return type of AbstractSim::at with a Fields struct
this will help me not mix up the E and H fields.
2022-07-30 17:17:17 -07:00
6a511943f7 note a few suspect areas of code 2022-07-30 17:02:10 -07:00
a14625b493 meas: add SI units for some things
this is an uncommon code path, apparently: only visible when rendering
BEFORE serialization. may want to implement a richer meas format.
2022-07-29 23:54:02 -07:00
6f0e35ea35 multi_core_inverter: add some stimuli and measurements 2022-07-29 23:53:44 -07:00
7f3c2a9395 render: transform inaccurate float-based indexing into integer indexing 2022-07-29 21:53:49 -07:00
349e01ba16 fix Vec3::with_mag to return an Option
and thereby simplify it into just one method.
2022-07-29 21:45:25 -07:00
ba6ef3c5c2 viewer: add a render mode to display just the Material 2022-07-29 16:28:58 -07:00
c5e2713b51 remove unused enum_dispatch 2022-07-29 16:07:07 -07:00
9c1fc65068 convert AbstractSim::sample to include a reference to the material -- not just its conductivity 2022-07-29 16:02:16 -07:00
895c87869b rename CellStateWithM => Fields; parameterize Sample over R 2022-07-29 14:55:12 -07:00
7e452f508f AbstractSim: convert the {get,put}_material functions to use only Index 2022-07-29 14:43:59 -07:00
f4ac5de099 viewer: add docs 2022-07-29 13:57:17 -07:00
e2c156e790 meas: Evaluated: fix eval to return both key and value 2022-07-29 13:39:33 -07:00
604f368f0d SerializeRenderer: render to GenericSim, not StaticSim 2022-07-29 13:27:05 -07:00
95213e61be real: add serialization bounds to the Real trait 2022-07-29 13:22:03 -07:00
95ffb73fe3 add a to_generic method to the AbstractSim trait 2022-07-29 13:11:16 -07:00
02920f9bd3 mat: add some missing conversion traits 2022-07-29 13:11:00 -07:00
56f74e6b4a AbstractSim: remove the Send bound 2022-07-28 23:49:45 -07:00
4f2345f608 rename GenericSim -> AbstractSim 2022-07-28 23:41:42 -07:00
3104c06d95 fold MaterialSim into GenericSim trait 2022-07-28 22:31:47 -07:00
71ab89c4c9 de-virtualize GenericSim
this should let us fold the GenericSim and MaterialSim traits together.
2022-07-28 22:22:07 -07:00
2d1a15eabc AbstractMeasurement: remove the DynClone requirement 2022-07-28 21:49:28 -07:00
3722512554 AbstractMeasurement: remove the serde typetag stuff 2022-07-28 21:46:01 -07:00
5c4b8d86f2 measurements: store to disk *after* evaluating them
i'm hoping to simplify a lot of serialization code with this
2022-07-28 21:43:48 -07:00
32d1a70d15 driver: remove dyn_state 2022-07-28 19:19:57 -07:00
6206569f4a Fold SampleableSim and MaterialSim into one 2022-07-28 16:41:32 -07:00
a49d9cd7a4 sim: fold most accessors behind the meta method 2022-07-28 16:25:39 -07:00
0465e3d7f5 sim: remove impulse_e methods 2022-07-28 16:17:02 -07:00
afc1f874d2 sim: remove unused impulse_b, impulse_h methods 2022-07-28 15:55:50 -07:00
1898542372 sim: address a TODO: get_material returns a reference 2022-07-28 15:45:51 -07:00
45d2de29c6 rename 'coremem_types' -> 'coremem_cross' to better reflect its purpose 2022-07-28 15:40:23 -07:00
9e35b29087 move util/ out of coremem and into its only user: buffer_proto5 2022-07-28 15:30:50 -07:00
80d3765bcf fix NaN in CurlStimulus code 2022-07-28 13:54:50 -07:00
62afc91879 .gitignore vim swap files 2022-07-28 13:41:30 -07:00
26efc12c21 multi_core_inverter: abstractions to allow swapping out float impl and backend 2022-07-28 13:20:26 -07:00
de0f3d9654 spirv: document a TODO 2022-07-28 13:18:36 -07:00
07dfb9d852 spirv: add R32 support to the GPU code 2022-07-28 13:18:14 -07:00
15fc7b91dc driver: TODO: split diagnostics into their own struct 2022-07-28 02:06:34 -07:00
c82aab50a2 driver: simplify add_state_file implementation 2022-07-28 02:04:49 -07:00
33cb395584 driver: don't let the state be public 2022-07-28 02:01:46 -07:00
917a3d3c9d buffer_proto5: silence some warnings 2022-07-28 02:00:51 -07:00
fe47eb09f8 driver: rename new_with_state -> new 2022-07-28 01:59:11 -07:00
a6fb21d892 driver: remove SpirvDriver alias 2022-07-28 01:55:22 -07:00
7a6bbf06a5 driver: remove new_spirv method 2022-07-28 01:52:09 -07:00
50af5927df Optional::unwrap: switch this to a debug assert 2022-07-28 01:51:39 -07:00
c36d70044a fix benches/driver.rs
apparently `cargo build --all` doesn't include this :|
2022-07-27 17:26:22 -07:00
5a0766451d spirv: relax some : 'static bounds 2022-07-27 17:11:10 -07:00
9e07189b12 SpirvSim: don't always require the backend during construction 2022-07-27 17:04:43 -07:00
920a0b3c9a spirv backend: remove parameters from WgpuBackend struct 2022-07-27 16:50:51 -07:00
48e8f5d1b4 bench: explicitly specify spirv backend 2022-07-27 16:37:56 -07:00
d5c4e13b84 driver: remove legacy uses 2022-07-27 16:34:50 -07:00
1dd6a068ba replace 'StaticSim' with the SpirvSim type, material being the Vacuum 2022-07-27 16:22:32 -07:00
dc38457a8b don't re-export StaticSim from sim/mod.rs
this way we can clearly spot the legacy users.
2022-07-27 15:42:18 -07:00
93967485f0 spirv: remove the set_meta method on the SimBackend
backend is responsible for procuring its own resources on the first run.
2022-07-27 14:05:17 -07:00
932bb163c3 SpirvSim: explicitly pass the backend in when initialized 2022-07-27 13:53:32 -07:00
7698e0e5ba spirv: re-order the SimBackend parameters to be more consistent 2022-07-27 12:47:51 -07:00
6b51fcea02 spirv: cpu: inline most of this step logic 2022-07-27 12:42:44 -07:00
d0afca7e3f spirv: gpu: simplify some of this entry_point passing 2022-07-27 12:39:15 -07:00
b134fd2373 types: Optional: remove the Into/From<Option> impls
they're no longer used
2022-07-27 12:34:32 -07:00
568d61c598 spirv: remove the Optionality around entry points: compute them statically with traits 2022-07-27 12:32:43 -07:00
baaeeb9463 spirv_backend: no need to re-export glam 2022-07-27 12:13:01 -07:00
4bb0bc09ad spirv/cpu.rs: remove unused import 2022-07-27 12:08:31 -07:00
c85bee20f5 replace some assert's with debug_assert's; slightly more optimal Optional impls 2022-07-27 12:07:30 -07:00
f6a585852e move the dimensioned operations out of the sim adapters and into step.rs 2022-07-26 18:43:41 -07:00
7d16e87b6e spirv: port all backends to use R for the stimulus
particularly, this patches over a difference where the gpu backend
expected the stimulus to be R, while the CPU thought it should be f32.
that would likely have revealed a crash if we had tested it with f64
(TODO).
2022-07-26 18:16:10 -07:00
00dcfb170a spirv_backend/support.rs: remove the re-export of DimensionedSlice
also add some docs
2022-07-26 18:08:03 -07:00
dbd666d272 move the dimensioned indexing out of spirv_backend and into coremem_types
this allows us to use it from the CPU implementation.
2022-07-26 18:03:21 -07:00
d93d14d260 spirv_backend: use RuntimeArray to remove all this UnsizedArray stuff 2022-07-26 15:58:23 -07:00
09f7c8acb9 spirv_backend: support: remove unused helpers 2022-07-26 13:36:42 -07:00
6e4133db4d spirv backend: simplify the adapt.rs indexing by using the constructors previously created 2022-07-26 13:29:39 -07:00
68d8cdde42 move some of the VolumeSample instantiation into step.rs, out of cpu.rs
we can go further: the IndexDim type itself can be moved into step.rs -- maybe?
if it were to wrap a generic flat-indexable thing -- either a slice, or
an array ref.
2022-07-26 01:15:53 -07:00
92ab220110 spirv: test: remove legacy cpu-only tests
these tests are all covered by the backend-agnostic tests
2022-07-26 00:54:33 -07:00
972e0ba4fb spirv: test: add TODO for moving the cpu tests to be backend-agnostic 2022-07-25 22:38:10 -07:00
d68c1b20be spirv: test: port the last rgpu test to be backend-agnostic 2022-07-25 22:37:05 -07:00
a969969449 spirv: test: port mh_ferromagnet tests to backend-agnostic 2022-07-25 22:36:46 -07:00
04c6d05ab0 spirv: test: port mb_ferromagnet tests to be backend-agnostic 2022-07-25 22:28:43 -07:00
dc49cddc97 spirv: test: port conductor tests to backend_agnostic 2022-07-25 22:23:56 -07:00
3fa2c22438 spirv: test: port RngStimulus tests to both backends 2022-07-25 22:19:44 -07:00
a8be7279b3 spirv sim: port rgpu smoke tests to test both Gpu and Cpu backend generically 2022-07-25 21:52:08 -07:00
fee9a1c216 implement a CpuBackend for running the "spirv" simulations
UNTESTED
2022-07-25 17:58:22 -07:00
47e11474d2 parameterize SpirvSim over R: Real 2022-07-25 14:49:32 -07:00
a1784da1cf spirv: parameterize over the SimBackend 2022-07-25 14:27:09 -07:00
b4ee42cfdf spirv: rename WgpuData -> WgpuBackend 2022-07-25 14:11:58 -07:00
cf42ec2dd1 spirv: SimBackend: remove the Array3 use 2022-07-25 14:11:01 -07:00
567f088f98 spirv: hide the gpu ops behind a SimBackend trait 2022-07-25 13:59:28 -07:00
ff1d9867ab parameterize WgpuData over the M type 2022-07-25 13:15:41 -07:00
0801a0dca3 spirv: remove bindings.rs
the one function which was in here previously is just inlined into
gpu.rs
2022-07-25 13:07:35 -07:00
7cf8ed9a7b spirv: gpu.rs no longer references the super SpirvSim type 2022-07-25 13:06:00 -07:00
8c8e707407 spirv: move the stimulus application out of gpu.rs 2022-07-25 12:52:35 -07:00
5b8978f0ec spirv: instantiate the backend in mod.rs, not gpu.rs 2022-07-25 12:47:39 -07:00
bd066331de spirv: fix indentation 2022-07-25 12:45:07 -07:00
cfbd5547cb spirv: move indexable check into the gpu.rs backend 2022-07-25 12:44:02 -07:00
d1765554fc spirv/gpu.rs: don't hard-code Vec3<f32> size
in the future this may become parameterized
2022-07-25 12:36:50 -07:00
38a47a0054 split most of the GPU spirv sim stuff into its own file 2022-07-25 12:29:45 -07:00
2032e90688 spirv_bindings: remove the IntoFfi/FromFfi stuff 2022-07-25 00:55:43 -07:00
8a16a5ce30 lift SimMeta from spirv_backend -> coremem_types 2022-07-25 00:52:11 -07:00
15aaa3e893 move spirv_backend/sim.rs -> coremem_types/step.rs 2022-07-25 00:40:27 -07:00
5fec965549 Optional: derive fmt and serde traits based on feature flag 2022-07-25 00:35:57 -07:00
5490634fe7 move Optional out of spirv_backend and into coremem_types 2022-07-25 00:35:04 -07:00
9c7ef7ec88 spirv_backend: split the array operations out of sim.rs -> adapt.rs 2022-07-25 00:28:03 -07:00
8ab89640f2 spirv_backend: split out some of the spirv entry point adapters into adapt.rs 2022-07-25 00:21:20 -07:00
ebd2762d7a spirv: sim: adjust so Step{E,H}Context does not use ArrayHandle
the specific way to accomplish this is touchy.
see <https://github.com/EmbarkStudios/rust-gpu/issues/312#issuecomment-738824131>:

> So I'd say placing any of the spirv_std::storage_class types into an aggregate (including capturing it in a closure) is unsupported for now

in our specific case, we can't return a tuple where one element is a `&`
to a spirv Input, and another element is a `&mut` to a spirv Output.

when we have a struct, it can enclose either ONLY inputs,
or ONLY outputs -- not a mix.

i'm not 100% on how the Serialized stuff works, since it appears to
violate that. i guess that's exactly what this ArrayHandle stuff
achieves though.
2022-07-24 22:57:41 -07:00
05f5f75dd3 spirv: remove the ArrayHandleMut artifacts in Step{H,E}Context
this will make it easier to reuse these blocks on the CPU side.
2022-07-24 22:17:44 -07:00
b70cafa205 spirv support: fix an overly-constrained lifetime parameter in the array index fn 2022-07-24 21:51:20 -07:00
7286d272b9 move coremem/mat -> coremem/sim/legacy/mat 2022-07-24 18:31:11 -07:00
d0fcd9b657 hide legacy.rs behind a legacy/ dir 2022-07-24 18:20:58 -07:00
2f0e52a09b split SimState out of sim/mod.rs -> sim/legacy.rs 2022-07-24 18:19:26 -07:00
c8a082d2a1 wavefront: port to spirv driver 2022-07-24 17:45:13 -07:00
e62dc495f1 spirv: remove most of the IntoFfi/IntoLib stuff 2022-07-24 01:04:31 -07:00
193df5415f spirv: remove the last nontrivial Material IntoFfi/FromLib 2022-07-24 00:35:47 -07:00
4bd081ca7a spirv bindings: remove From<Static> conversion 2022-07-24 00:14:38 -07:00
940d86d86e remove unnecessary Ffi impls for AdaptStateless 2022-07-24 00:02:00 -07:00
ce00281c09 fix typo: 'electic' -> 'electric' 2022-07-23 23:58:24 -07:00
048eb7dbef geom: don't re-export coremem_types::vec 2022-07-23 18:57:17 -07:00
d813405cb1 spirv bindings: make compatible with more than just strictly f32 2022-07-23 18:24:09 -07:00
3f5160a8ea replace the CPU MBFerromagnet with a generic wrapper around any stateless coremem_types type 2022-07-23 18:10:49 -07:00
d246b97b5e coremem: remove unused SpirvMBFerromagnet 2022-07-23 17:12:30 -07:00
67872de16f clean up some unused code/imports 2022-07-23 16:27:43 -07:00
98773a350c remove custom cpu-specific MBPgram type 2022-07-23 16:26:50 -07:00
35a0c52f67 coremem tests: comment out dead code 2022-07-22 16:25:15 -07:00
9b149bae65 spirv bindings: split out common materials vs cpu materials 2022-07-22 16:24:59 -07:00
4a6a43fb31 plumb the R type parameter through spirv code 2022-07-22 16:21:03 -07:00
ee2cf47b8d types: remove unused code or mark it as intentionally dead 2022-07-22 16:20:22 -07:00
66ccbd1ada spirv: remove mat.rs and reuse coremem_types::mat everywhere 2022-07-22 15:22:09 -07:00
847b95f036 replace the FullyGenericMaterial in spirv with an analog type from coremem_types
this represents the last spirv-specific material.
next steps are to start removing the materials from `coremem` itself
(i.e. re-exporting more from `coremem_types::mat`).
2022-07-22 02:56:00 -07:00
4cbcc46d50 list: flat: remove unused import 2022-07-22 01:54:18 -07:00
727b7b43a3 types: mat: move DiscrMat into its own file 2022-07-22 01:53:42 -07:00
50ae6d4c34 types: mat: hack in a way to implement a 3-variant material 2022-07-22 01:46:07 -07:00
a8b6000104 types: list: lift the generic traits up to the top-level module
this lets me more easily swap in/out different list implementations when
experimenting.
2022-07-22 01:19:43 -07:00
fffe917c5c mat: remove some unused stuff related to DiscrMat2 2022-07-22 01:02:54 -07:00
97ac46fd8a mat: DiscrMat2 uses our Enum type internally 2022-07-22 00:44:57 -07:00
cdcc1fbbdd types: list: remove unused imports 2022-07-22 00:08:53 -07:00
72a66dbff4 types: enum: buff up tests 2022-07-21 23:50:57 -07:00
27c1523b0c types: Enum: make the DiscriminantCodable type a bit more usable
previously we couldn't *create* a discriminant, only edit it.
doing things this way is useful for the material code.
2022-07-21 23:02:33 -07:00
491f863aea types: enumerated: fix compile errors 2022-07-21 22:51:26 -07:00
fcc735765c types: Enum: simplify the internally_discriminanted constructor
we don't actually need to enforce the discriminant codability in the
constructor.
if the conditions aren't met, the user just won't be able to operate on
the enum.
2022-07-21 22:21:35 -07:00
55e58f630c coremem_types: Enum: fix a typo in the DiscriminantCodable docs 2022-07-21 22:15:52 -07:00
f2bb16eb5b coremem_types: Enum: add constructors 2022-07-21 22:11:48 -07:00
19b1df4919 coremem_types: Enum: add tests for the internal discriminant case 2022-07-21 21:58:41 -07:00
72b18d378f coremem_types: Enum: add a method to set the enum to a specific variant 2022-07-21 21:46:33 -07:00
65f90d0654 coremem_types: Enum: implement mutable dispatch 2022-07-21 21:35:15 -07:00
e96f0db11a coremem_types: enum: verify that setting the discriminant works as expected 2022-07-21 21:16:43 -07:00
c889ec6d09 coremem_types: enum: simplify the discriminant trait impls 2022-07-21 21:08:41 -07:00
3541ab14c1 coremem_types: Enum: add a basic test 2022-07-21 20:35:30 -07:00
b8a36c87a6 coremem_types: enum: add helpers to encode a new discriminant
not used yet: i'll have to add a mutator-based dispatcher in order
to set the new variant's value when the discriminant is updated.
2022-07-21 18:40:09 -07:00
fba85c5ae3 coremem_types: list: add a set method 2022-07-21 18:36:11 -07:00
960804598a coremem_types: enums don't require their variant to be Copy
this is safe because the variant is necessarily not ZST.
if it was ZST it could just be stored explicitly instead of folding it
into the first element, and that case should still be OK.
2022-07-21 18:31:31 -07:00
9153dfbb7a coremem_types: enumerated: allow folding the enum into the first element 2022-07-21 18:28:20 -07:00
b15bad434e coremem_types: remove unused ElementAt type alias from list 2022-07-21 17:05:20 -07:00
f448f9200e implement an Enum type
right now the user has to define the method to extract the discriminant
from the enum.
i'll work to integrate the discriminant into the enum itself in future
patches.
2022-07-21 17:02:57 -07:00
d6ebf968b2 coremem_types: list: simplify the IntoList trait 2022-07-21 14:11:42 -07:00
90127e3f02 coremem_types: list: rename tuple_direct -> flat 2022-07-21 14:05:55 -07:00
8ce64ecc73 coremem_types: implement a List type which uses direct indexing, without consuming the whole list in the process
it's ugly, but it works and avoids spirv's ZST bugs.
i can clean it up a bit later (and rename it, since it's not actually
using tuples, directly).
2022-07-21 02:32:21 -07:00
86fb7f018d coremem_types: implement a linked-list based List type
it doesn't quite get us where we want to be, as it requires padding to
handle ZSTs in spirv. i think we HAVE to use a flat list (combined with
a copy method, instead of reference-based) to handle ZSTs without
forced padding.
2022-07-21 02:32:08 -07:00
2e3021b875 coremem_types: move list into a submodule so i can toy with alternate implementations 2022-07-20 23:51:58 -07:00
b555ee93f0 coremem_types: DiscrMat2: implement in terms of compound::list::List
i'm not happy AT ALL with this implementation,
but it's the first way i've found which works around ZST limitations.
2022-07-20 16:20:09 -07:00
d034453970 coremem_types: list: implement core::ops::Index 2022-07-20 14:48:52 -07:00
5b5085829d coremem_types: list: remove the dynamic indexing ops
trying to restructure this stuff specifically to aid the material needs.
2022-07-20 14:39:33 -07:00
7dd4b09faf coremem_types: list: remove unused test helpers 2022-07-20 14:37:54 -07:00
24b0b3d680 coremem_types: list: minor test refactoring 2022-07-20 14:37:08 -07:00
d0ae25e28b coremem_types: list: switch to parameter-based indexing; add apply method 2022-07-20 14:31:20 -07:00
e029c8b3d9 coremem_types: list: make the types public
in the future we can hopefully expose only a subset of the types.
2022-07-20 14:30:43 -07:00
5ddd6fef74 coremem_types: list: rename LLNode -> Node 2022-07-20 13:47:43 -07:00
0ce862b614 coremem_types: list: implement List building via a Chomp trait
it's simpler, and lets the user always do `(args...).into_list()`
on ANY `List<(Args...)>` alias type.
2022-07-20 13:46:17 -07:00
415ffb9c4d coremem_types: list: switch to 'head'/'tail' terminology 2022-07-20 13:10:14 -07:00
dfe27c9b56 coremem_types: list: add a way to construct the full list in one-go 2022-07-20 12:47:20 -07:00
00ae71a6eb coremem_types: add List::set, List::get_mut methods 2022-07-20 12:15:09 -07:00
113 changed files with 15839 additions and 7776 deletions

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
out/ out/
target/ target/
__pycache__/

561
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -5,12 +5,13 @@ members = [
"crates/spirv_backend", "crates/spirv_backend",
"crates/spirv_backend_builder", "crates/spirv_backend_builder",
"crates/spirv_backend_runner", "crates/spirv_backend_runner",
"crates/types", "crates/cross",
"crates/post", "crates/post",
"crates/applications/buffer_proto5", "crates/applications/buffer_proto5",
"crates/applications/multi_core_inverter", "crates/applications/multi_core_inverter",
"crates/applications/sr_latch", "crates/applications/sr_latch",
"crates/applications/stacked_cores",
"crates/applications/wavefront", "crates/applications/wavefront",
] ]

View File

@@ -1,7 +1,7 @@
use coremem::{Driver, mat, meas, SpirvDriver}; use coremem::{Driver, mat, meas, SpirvDriver};
use coremem::geom::{Meters, Torus}; use coremem::geom::{Meters, Torus};
use coremem::sim::units::Seconds; use coremem::sim::units::Seconds;
use coremem::stim::{CurlStimulus, Sinusoid1, TimeVarying as _}; use coremem::stim::{CurlStimulus, Sinusoid, TimeVarying as _};
fn main() { fn main() {
coremem::init_logging(); coremem::init_logging();
@@ -60,12 +60,12 @@ fn main() {
driver.set_steps_per_stim(1000); driver.set_steps_per_stim(1000);
//driver.fill_region(&ferro1_region, mat::db::linear_iron()); //driver.fill_region(&ferro1_region, mat::db::linear_iron());
// Original, 3R1-LIKE ferromagnet (only a vague likeness), sr-latch-8: // Original, 3R1-LIKE ferromagnet (only a vague likeness), sr-latch-8:
// driver.fill_region(&ferro1_region, mat::MBFerromagnet::new(-0.3899, 0.3900, 310_000.0)); // driver.fill_region(&ferro1_region, mat::MBPgram::new(-0.3899, 0.3900, 310_000.0));
// driver.fill_region(&ferro2_region, mat::MBFerromagnet::new(-0.3899, 0.3900, 310_000.0)); // driver.fill_region(&ferro2_region, mat::MBPgram::new(-0.3899, 0.3900, 310_000.0));
// sr-latch-9; dead spot from B=[-0.03, 0.03]. This will help us see if the math is H-triggered // sr-latch-9; dead spot from B=[-0.03, 0.03]. This will help us see if the math is H-triggered
// or B-triggered // or B-triggered
// driver.fill_region(&ferro1_region, mat::MBFerromagnet::new(-0.3300, 0.3900, 310_000.0)); // driver.fill_region(&ferro1_region, mat::MBPgram::new(-0.3300, 0.3900, 310_000.0));
// driver.fill_region(&ferro2_region, mat::MBFerromagnet::new(-0.3300, 0.3900, 310_000.0)); // driver.fill_region(&ferro2_region, mat::MBPgram::new(-0.3300, 0.3900, 310_000.0));
// mu_r=881.33, starting at H=25 to H=75. // mu_r=881.33, starting at H=25 to H=75.
driver.fill_region(&ferro1_region, mat::MHPgram::new(25.0, 881.33, 44000.0)); driver.fill_region(&ferro1_region, mat::MHPgram::new(25.0, 881.33, 44000.0));
driver.fill_region(&ferro2_region, mat::MHPgram::new(25.0, 881.33, 44000.0)); driver.fill_region(&ferro2_region, mat::MHPgram::new(25.0, 881.33, 44000.0));
@@ -89,7 +89,7 @@ fn main() {
assert!(driver.test_region_filled(&sense_region, mat::IsomorphicConductor::new(sense_conductivity))); assert!(driver.test_region_filled(&sense_region, mat::IsomorphicConductor::new(sense_conductivity)));
let mut add_drive_pulse = |region: &Torus, start, duration, amp| { let mut add_drive_pulse = |region: &Torus, start, duration, amp| {
let wave = Sinusoid1::from_wavelength(amp, duration * 2.0) let wave = Sinusoid::from_wavelength(amp, duration * 2.0)
.half_cycle() .half_cycle()
.shifted(start); .shifted(start);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(CurlStimulus::new(

View File

@@ -1,7 +1,7 @@
use coremem::{Driver, mat, meas, SpirvDriver}; use coremem::{Driver, mat, meas, SpirvDriver};
use coremem::geom::{Meters, Torus}; use coremem::geom::{Meters, Torus};
use coremem::sim::units::Seconds; use coremem::sim::units::Seconds;
use coremem::stim::{CurlStimulus, Sinusoid1, TimeVarying as _}; use coremem::stim::{CurlStimulus, Sinusoid, TimeVarying as _};
fn main() { fn main() {
coremem::init_logging(); coremem::init_logging();
@@ -81,7 +81,7 @@ fn main() {
} }
let mut add_drive_pulse = |region: &Torus, start, duration, amp| { let mut add_drive_pulse = |region: &Torus, start, duration, amp| {
let wave = Sinusoid1::from_wavelength(amp, duration * 2.0) let wave = Sinusoid::from_wavelength(amp, duration * 2.0)
.half_cycle() .half_cycle()
.shifted(start); .shifted(start);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(CurlStimulus::new(

View File

@@ -5,7 +5,7 @@
use coremem::{Driver, mat, meas, SpirvDriver}; use coremem::{Driver, mat, meas, SpirvDriver};
use coremem::geom::{Meters, Torus}; use coremem::geom::{Meters, Torus};
use coremem::sim::units::Seconds; use coremem::sim::units::Seconds;
use coremem::stim::{CurlStimulus, Sinusoid1, TimeVarying as _}; use coremem::stim::{CurlStimulus, Sinusoid, TimeVarying as _};
fn main() { fn main() {
coremem::init_logging(); coremem::init_logging();
@@ -106,7 +106,7 @@ fn main() {
} }
let mut add_drive_pulse = |region: &Torus, start, duration, amp| { let mut add_drive_pulse = |region: &Torus, start, duration, amp| {
let wave = Sinusoid1::from_wavelength(amp, duration * 2.0) let wave = Sinusoid::from_wavelength(amp, duration * 2.0)
.half_cycle() .half_cycle()
.shifted(start); .shifted(start);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(CurlStimulus::new(

View File

@@ -12,7 +12,7 @@
use coremem::{Driver, mat, meas, SpirvDriver}; use coremem::{Driver, mat, meas, SpirvDriver};
use coremem::geom::{region, Cube, Meters, Spiral, SwapYZ, Torus, Translate, Wrap}; use coremem::geom::{region, Cube, Meters, Spiral, SwapYZ, Torus, Translate, Wrap};
use coremem::sim::units::Seconds; use coremem::sim::units::Seconds;
use coremem::stim::{CurlStimulus, Sinusoid1, TimeVarying as _}; use coremem::stim::{CurlStimulus, Sinusoid, TimeVarying as _};
fn main() { fn main() {
@@ -132,7 +132,7 @@ fn main() {
assert!(driver.test_region_filled(&coupling_region, wire_mat)); assert!(driver.test_region_filled(&coupling_region, wire_mat));
let mut add_drive_pulse = |region: &Torus, start, duration, amp| { let mut add_drive_pulse = |region: &Torus, start, duration, amp| {
let wave = Sinusoid1::from_wavelength(amp, duration * 2.0) let wave = Sinusoid::from_wavelength(amp, duration * 2.0)
.half_cycle() .half_cycle()
.shifted(start); .shifted(start);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(CurlStimulus::new(

View File

@@ -1,7 +1,7 @@
use coremem::{Driver, mat, meas, SimState}; use coremem::{Driver, mat, meas, SimState};
use coremem::geom::{Cube, Index, InvertedRegion, Meters, Torus, Union}; use coremem::geom::{Cube, Index, InvertedRegion, Meters, Torus, Union};
use coremem::real::R64 as Real; use coremem::real::R64 as Real;
use coremem::stim::{CurlStimulus, Sinusoid1, TimeVarying as _}; use coremem::stim::{CurlStimulus, Sinusoid, TimeVarying as _};
use coremem::units::Seconds; use coremem::units::Seconds;
fn main() { fn main() {
@@ -63,10 +63,10 @@ fn main() {
// if I = k*sin(w t) then dE/dt = k*w sin(w t) / (A*\sigma) // if I = k*sin(w t) then dE/dt = k*w sin(w t) / (A*\sigma)
// i.e. dE/dt is proportional to I/(A*\sigma), multiplied by w (or, divided by wavelength) // i.e. dE/dt is proportional to I/(A*\sigma), multiplied by w (or, divided by wavelength)
let peak_stim = peak_current/current_duration / (drive_region.cross_section() * conductivity); let peak_stim = peak_current/current_duration / (drive_region.cross_section() * conductivity);
let pos_wave = Sinusoid1::from_wavelength(peak_stim as _, current_duration * 2.0) let pos_wave = Sinusoid::from_wavelength(peak_stim as _, current_duration * 2.0)
.half_cycle(); .half_cycle();
let neg_wave = Sinusoid1::from_wavelength(-peak_stim as _, current_duration * 2.0) let neg_wave = Sinusoid::from_wavelength(-peak_stim as _, current_duration * 2.0)
.half_cycle() .half_cycle()
.shifted(current_duration + current_break); .shifted(current_duration + current_break);

View File

@@ -1,6 +1,6 @@
use coremem::{Driver, mat, meas, SimState, SpirvDriver}; use coremem::{Driver, mat, meas, SimState, SpirvDriver};
use coremem::geom::{Index, Meters, Torus}; use coremem::geom::{Index, Meters, Torus};
use coremem::stim::{CurlStimulus, Sinusoid1, TimeVarying as _}; use coremem::stim::{CurlStimulus, Sinusoid, TimeVarying as _};
use coremem::units::Seconds; use coremem::units::Seconds;
fn main() { fn main() {
@@ -45,7 +45,7 @@ fn main() {
let sense1_region = Torus::new_xz(Meters::new(ferro1_center + ferro_major, half_height, half_depth), wire_major, wire_minor); let sense1_region = Torus::new_xz(Meters::new(ferro1_center + ferro_major, half_height, half_depth), wire_major, wire_minor);
//driver.fill_region(&ferro1_region, mat::db::linear_iron()); //driver.fill_region(&ferro1_region, mat::db::linear_iron());
driver.fill_region(&ferro1_region, mat::MBFerromagnet::new(-0.3899, 0.3900, 310_000.0)); driver.fill_region(&ferro1_region, mat::MBPgram::new(-0.3899, 0.3900, 310_000.0));
driver.fill_region(&drive1_region, mat::IsomorphicConductor::new(drive_conductivity)); driver.fill_region(&drive1_region, mat::IsomorphicConductor::new(drive_conductivity));
driver.fill_region(&sense1_region, mat::IsomorphicConductor::new(sense_conductivity)); driver.fill_region(&sense1_region, mat::IsomorphicConductor::new(sense_conductivity));
@@ -54,7 +54,7 @@ fn main() {
let drive2_region = Torus::new_xz(Meters::new(ferro2_center - ferro_major, half_height, half_depth), wire_major, wire_minor); let drive2_region = Torus::new_xz(Meters::new(ferro2_center - ferro_major, half_height, half_depth), wire_major, wire_minor);
let sense2_region = Torus::new_xz(Meters::new(ferro2_center + ferro_major, half_height, half_depth), wire_major, wire_minor); let sense2_region = Torus::new_xz(Meters::new(ferro2_center + ferro_major, half_height, half_depth), wire_major, wire_minor);
driver.fill_region(&ferro2_region, mat::MBFerromagnet::new(-0.3899, 0.3900, 310_000.0)); driver.fill_region(&ferro2_region, mat::MBPgram::new(-0.3899, 0.3900, 310_000.0));
driver.fill_region(&drive2_region, mat::IsomorphicConductor::new(drive_conductivity)); driver.fill_region(&drive2_region, mat::IsomorphicConductor::new(drive_conductivity));
driver.fill_region(&sense2_region, mat::IsomorphicConductor::new(sense_conductivity)); driver.fill_region(&sense2_region, mat::IsomorphicConductor::new(sense_conductivity));
@@ -64,7 +64,7 @@ fn main() {
driver.add_classical_boundary(Meters::new(boundary_xy, boundary_xy, boundary_z)); driver.add_classical_boundary(Meters::new(boundary_xy, boundary_xy, boundary_z));
let mut add_drive_pulse = |region: &Torus, start, duration, amp| { let mut add_drive_pulse = |region: &Torus, start, duration, amp| {
let wave = Sinusoid1::from_wavelength(amp, duration * 2.0) let wave = Sinusoid::from_wavelength(amp, duration * 2.0)
.half_cycle() .half_cycle()
.shifted(start); .shifted(start);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(CurlStimulus::new(

View File

@@ -5,6 +5,8 @@ authors = ["Colin <colin@uninsane.org>"]
edition = "2021" edition = "2021"
[dependencies] [dependencies]
bincode = "1.3" # MIT
coremem = { path = "../../coremem" } coremem = { path = "../../coremem" }
log = "0.4" log = "0.4"
rayon = "1.5" # MIT or Apache 2.0
serde = "1.0" serde = "1.0"

View File

@@ -0,0 +1,232 @@
use serde::{de::DeserializeOwned, Serialize};
use std::sync::RwLock;
/// Marker type used when a cache is constructed without a value supplier.
pub struct NoSupplier;
/// Single-threaded disk-backed cache: insertion requires `&mut self`.
pub type DiskCache<K, V, S=NoSupplier> = DiskCacheImpl<Entries<K, V>, S>;
/// Thread-safe disk-backed cache: insertion works through `&self` (the entry
/// collection carries an internal `RwLock`).
pub type SyncDiskCache<K, V, S=NoSupplier> = DiskCacheImpl<SyncEntries<K, V>, S>;
/// Key/value cache persisted as a bincode-encoded entry list at `path`.
pub struct DiskCacheImpl<E, S=NoSupplier> {
    // file the entries are (de)serialized to/from
    path: String,
    // in-memory entry collection (sync or non-sync)
    entries: E,
    // optional value factory used by `get_or_insert_from_supplier`
    supplier: S,
}
impl<E: EntriesCap> DiskCacheImpl<E, NoSupplier>
where
    E::Key: DeserializeOwned,
    E::Value: DeserializeOwned,
{
    #[allow(dead_code)]
    /// Open (or create) a supplier-less cache at `path`; existing entries are
    /// loaded from disk if the file exists and decodes cleanly.
    pub fn new(path: &str) -> Self {
        Self::new_with_supplier(path, NoSupplier)
    }
}
impl<E: EntriesCap, S> DiskCacheImpl<E, S>
where
    E::Key: DeserializeOwned,
    E::Value: DeserializeOwned,
{
    /// Open (or create) a cache at `path`, with `supplier` available to
    /// compute missing values on demand. A missing or undecodable cache file
    /// silently starts the cache empty.
    pub fn new_with_supplier(path: &str, supplier: S) -> Self {
        let entries = Self::load_from_disk(path).unwrap_or_default();
        Self {
            path: path.into(),
            entries: E::from_vec(entries),
            supplier,
        }
    }
    /// Deserialize the bincode entry list from `path`; `None` if the file is
    /// absent, unreadable, or fails to decode.
    fn load_from_disk(path: &str) -> Option<Vec<(E::Key, E::Value)>> {
        let reader = std::io::BufReader::new(std::fs::File::open(path).ok()?);
        bincode::deserialize_from(reader).ok()
    }
}
impl<E: EntriesCap, S> DiskCacheImpl<E, S>
where
    E::Key: Serialize + Clone,
    E::Value: Serialize + Clone,
{
    /// Serialize the complete entry list to `self.path`, replacing any
    /// previous file contents. Panics on I/O or serialization failure.
    fn flush(&self) {
        let writer = std::io::BufWriter::new(std::fs::File::create(&self.path).unwrap());
        bincode::serialize_into(writer, &self.entries.to_vec()).unwrap();
    }
    /// Unwrap a get-or-insert outcome, persisting the cache to disk only when
    /// a new entry was actually inserted.
    fn flush_if_inserted(&self, v: GetOrInsert<E::Value>) -> E::Value {
        match v {
            GetOrInsert::Get(v) => v,
            GetOrInsert::Inserted(v) => {
                self.flush();
                v
            },
        }
    }
}
impl<E: EntriesCap, S> DiskCacheImpl<E, S>
where
    E::Key: PartialEq,
    E::Value: Clone,
{
    #[allow(dead_code)]
    /// Look up `k`, returning a clone of the cached value if present.
    pub fn get(&self, k: &E::Key) -> Option<E::Value> {
        self.entries.get(k)
    }
}
// the non-sync cache requires `&mut self` to insert, while the sync cache
// inserts through `&self` (its RwLock provides the interior mutability)
impl<K: Serialize + Clone + PartialEq, V: Serialize + Clone, S> DiskCache<K, V, S> {
    #[allow(dead_code)]
    /// insert this k/v ONLY IF NOT PRESENT
    /// (note: the whole cache is rewritten to disk even if the key existed)
    pub fn insert(&mut self, k: K, v: V) {
        self.entries.insert(k, v);
        self.flush();
    }
}
impl<K: Serialize + Clone + PartialEq, V: Serialize + Clone, S> SyncDiskCache<K, V, S> {
    #[allow(dead_code)]
    /// insert this k/v ONLY IF NOT PRESENT
    /// (note: the whole cache is rewritten to disk even if the key existed)
    pub fn insert(&self, k: K, v: V) {
        self.entries.insert(k, v);
        self.flush();
    }
}
// non-sync get-or-insert is mut, while the sync variant works through `&self`
impl<K: Serialize + Clone + PartialEq, V: Serialize + Clone, S> DiskCache<K, V, S> {
    #[allow(dead_code)]
    /// Return the cached value for `k`, or compute it with `f`, cache it,
    /// flush to disk, and return it.
    pub fn get_or_insert_with<F: FnOnce() -> V>(&mut self, k: K, f: F) -> V {
        let v = self.entries.get_or_insert_with(k, |_| f());
        self.flush_if_inserted(v)
    }
}
impl<K: Serialize + Clone + PartialEq, V: Serialize + Clone, S> SyncDiskCache<K, V, S> {
    #[allow(dead_code)]
    /// Return the cached value for `k`, or compute it with `f`, cache it,
    /// flush to disk, and return it.
    pub fn get_or_insert_with<F: FnOnce() -> V>(&self, k: K, f: F) -> V {
        let v = self.entries.get_or_insert_with(k, |_| f());
        self.flush_if_inserted(v)
    }
}
// the non-sync supplier may be `FnMut`; the sync one must be `Fn` since it is
// invoked through `&self`
impl<K: Serialize + Clone + PartialEq, V: Serialize + Clone, S: FnMut(&K) -> V> DiskCache<K, V, S> {
    #[allow(dead_code)]
    /// Return the cached value for `k`, computing it via the cache's supplier
    /// (and flushing to disk) on a miss.
    pub fn get_or_insert_from_supplier(&mut self, k: K) -> V {
        let v = self.entries.get_or_insert_with(k, |k| (self.supplier)(k));
        self.flush_if_inserted(v)
    }
}
impl<K: Serialize + Clone + PartialEq, V: Serialize + Clone, S: Fn(&K) -> V> SyncDiskCache<K, V, S> {
    #[allow(dead_code)]
    /// Return the cached value for `k`, computing it via the cache's supplier
    /// (and flushing to disk) on a miss.
    pub fn get_or_insert_from_supplier(&self, k: K) -> V {
        let v = self.entries.get_or_insert_with(k, |k| (self.supplier)(k));
        self.flush_if_inserted(v)
    }
}
/// Outcome of a get-or-insert: distinguishes a cache hit from a fresh
/// insertion so callers know whether a disk flush is needed.
enum GetOrInsert<V> {
    Get(V),
    Inserted(V),
}
//---------- disk cache entries ----------
// we have the non-sync and the sync K/V collections,
// which the DiskCacheImpl wraps.
/// Plain append-style key/value list; lookups are linear scans.
pub struct Entries<K, V>(Vec<(K, V)>);
/// `Entries` behind an `RwLock`, enabling mutation through `&self`.
pub struct SyncEntries<K, V>(RwLock<Entries<K, V>>);
/// Common interface over the sync and non-sync entry collections.
pub trait EntriesCap {
    type Key;
    type Value;
    /// Build the collection from a deserialized entry list.
    fn from_vec(v: Vec<(Self::Key, Self::Value)>) -> Self;
    /// Snapshot the entries as an owned list (used when flushing to disk).
    fn to_vec(&self) -> Vec<(Self::Key, Self::Value)>
    where
        Self::Key: Clone,
        Self::Value: Clone;
    /// Clone out the value stored for `k`, if any.
    fn get(&self, k: &Self::Key) -> Option<Self::Value>
    where
        Self::Key: PartialEq,
        Self::Value: Clone;
}
impl<K, V> EntriesCap for Entries<K, V> {
type Key = K;
type Value = V;
fn from_vec(v: Vec<(K, V)>) -> Self {
Self(v)
}
fn to_vec(&self) -> Vec<(K, V)>
where
K: Clone,
V: Clone,
{
self.0.clone()
}
fn get(&self, k: &K) -> Option<V>
where
K: PartialEq,
V: Clone,
{
self.0.iter().find(|(comp_k, _v): &&(K, V)| comp_k == k).map(|(_k, v)| v.clone())
}
}
impl<K, V> EntriesCap for SyncEntries<K, V> {
    type Key = K;
    type Value = V;
    fn from_vec(v: Vec<(K, V)>) -> Self {
        Self(RwLock::new(Entries::from_vec(v)))
    }
    /// Snapshot under a read lock.
    fn to_vec(&self) -> Vec<(K, V)>
    where
        K: Clone,
        V: Clone,
    {
        self.0.read().unwrap().to_vec()
    }
    /// Lookup under a read lock.
    fn get(&self, k: &K) -> Option<V>
    where
        K: PartialEq,
        V: Clone,
    {
        self.0.read().unwrap().get(k)
    }
}
impl<K: PartialEq, V> Entries<K, V> {
    /// Append `(k, v)` only when no entry with an equal key already exists.
    fn insert(&mut self, k: K, v: V) {
        let already_present = self.0.iter().any(|(existing_k, _): &(K, V)| existing_k == &k);
        if !already_present {
            self.0.push((k, v));
        }
    }
}
impl<K: Clone + PartialEq, V: Clone> Entries<K, V> {
    /// Fetch the value for `k`, or compute it with `f`, store it, and report
    /// whether an insertion happened (so callers know to flush to disk).
    fn get_or_insert_with<F: FnOnce(&K) -> V>(&mut self, k: K, f: F) -> GetOrInsert<V> {
        if let Some(v) = self.get(&k) {
            // `get` already returns an owned clone; the previous `v.clone()`
            // here performed a redundant second clone.
            return GetOrInsert::Get(v);
        }
        let v = f(&k);
        self.insert(k, v.clone());
        GetOrInsert::Inserted(v)
    }
}
impl<K: PartialEq, V> SyncEntries<K, V> {
    /// Insert-if-absent under a write lock.
    fn insert(&self, k: K, v: V) {
        self.0.write().unwrap().insert(k, v)
    }
}
impl<K: Clone + PartialEq, V: Clone> SyncEntries<K, V> {
fn get_or_insert_with<F: FnOnce(&K) -> V>(&self, k: K, f: F) -> GetOrInsert<V> {
if let Some(v) = self.get(&k) {
return GetOrInsert::Get(v.clone());
}
let v = f(&k);
self.insert(k, v.clone());
GetOrInsert::Inserted(v)
}
}

View File

@@ -2,23 +2,40 @@
//! to couple them. i parameterize the entire setup over a bunch of different factors in order to //! to couple them. i parameterize the entire setup over a bunch of different factors in order to
//! search for the conditions which maximize energy transfer from the one core to the other. //! search for the conditions which maximize energy transfer from the one core to the other.
use coremem::{Driver, mat, meas, SpirvDriver}; use coremem::{Driver, mat, meas};
use coremem::geom::{region, Cube, Dilate, Memoize, Meters, Region, Spiral, SwapYZ, Torus, Translate, Wrap}; use coremem::geom::Meters;
use coremem::geom::region::{
self,
Cube,
Dilate,
Intersection,
InvertedRegion,
Memoize,
Spiral,
SwapYZ,
Torus,
Translate,
Wrap
};
use coremem::mat::{Ferroxcube3R1MH, IsoConductorOr}; use coremem::mat::{Ferroxcube3R1MH, IsoConductorOr};
use coremem::real::{R32, Real as _}; use coremem::real::{R32, Real as _};
use coremem::render::CsvRenderer; use coremem::render::CsvRenderer;
use coremem::stim::{CurlStimulus, Exp1, Gated, Sinusoid1, TimeVarying as _}; use coremem::sim::spirv::{SpirvSim, WgpuBackend};
use coremem::sim::units::{Seconds, Frame, Time as _}; use coremem::sim::units::{Seconds, Time as _};
use coremem::sim::spirv; use coremem::stim::{CurlVectorField, Exp, ModulatedVectorField, Sinusoid, TimeVaryingExt as _};
use coremem::util::cache::DiskCache;
use log::{error, info, warn}; use log::{error, info, warn};
use rayon::prelude::*;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
mod cache;
use cache::SyncDiskCache;
type Mat = IsoConductorOr<f32, Ferroxcube3R1MH>; type Mat = IsoConductorOr<f32, Ferroxcube3R1MH>;
#[allow(unused)] #[allow(unused)]
use coremem::geom::{Coord as _, Region as _}; use coremem::geom::{Coord as _, Region as _};
#[allow(unused)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum PulseType { enum PulseType {
Square, Square,
@@ -27,6 +44,7 @@ enum PulseType {
ExpDecay2x, ExpDecay2x,
} }
#[allow(unused)]
/// Return just the extrema of some collection /// Return just the extrema of some collection
fn extrema(mut meas: Vec<f32>) -> Vec<f32> { fn extrema(mut meas: Vec<f32>) -> Vec<f32> {
let mut i = 0; let mut i = 0;
@@ -144,31 +162,54 @@ struct Geometries {
ferro2_region: Torus, ferro2_region: Torus,
set1_region: Torus, set1_region: Torus,
set2_region: Torus, set2_region: Torus,
coupling_region: region::Union, coupling_region: region::Union3<
Memoize<Dilate<Wrap<Translate<SwapYZ<Intersection<Spiral, Cube>>>>>>,
Memoize<Dilate<Wrap<Translate<SwapYZ<Intersection<Spiral, InvertedRegion<Cube>>>>>>>,
region::Union3<Cube, Cube, region::Union4<Cube, Cube, Cube, Cube>>
>,
coupling_wire_top: Cube, coupling_wire_top: Cube,
coupling_wire_bot: Cube, coupling_wire_bot: Cube,
wrap1_len: f32, wrap1_len: f32,
wrap2_len: f32, wrap2_len: f32,
} }
/// computed measurements which get written to disk for later, manual (or grep-based) analysis.
/// because we only write these (except for the Debug impl reading them to write to disk),
/// rustc thinks all the fields are dead.
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug, Default)]
struct Results { struct Results {
#[allow(dead_code)]
m1_peak: f32, m1_peak: f32,
#[allow(dead_code)]
m2_peak: f32, m2_peak: f32,
#[allow(dead_code)]
m1_stable: f32, m1_stable: f32,
#[allow(dead_code)]
m2_stable: f32, m2_stable: f32,
#[allow(dead_code)]
h1_peak: f32, h1_peak: f32,
#[allow(dead_code)]
h2_max: f32, h2_max: f32,
#[allow(dead_code)]
h2_min: f32, h2_min: f32,
#[allow(dead_code)]
h1_stable: f32, h1_stable: f32,
#[allow(dead_code)]
h2_stable: f32, h2_stable: f32,
#[allow(dead_code)]
iset_min: f32, iset_min: f32,
#[allow(dead_code)]
iset_max: f32, iset_max: f32,
#[allow(dead_code)]
icoupling_peak: f32, icoupling_peak: f32,
#[allow(dead_code)]
peak_m_ratio: f32, peak_m_ratio: f32,
#[allow(dead_code)]
stable_m_ratio: f32, stable_m_ratio: f32,
/// m2_stable divided by m1_peak. i.e. "amplification" /// m2_stable divided by m1_peak. i.e. "amplification"
#[allow(dead_code)]
m2_stable_m1_peak: f32, m2_stable_m1_peak: f32,
#[allow(dead_code)]
t: f32, t: f32,
} }
@@ -261,29 +302,29 @@ fn derive_geometries(p: GeomParams) -> Option<Geometries> {
wrap2_bot - feat_sizes*2.0, wrap2_bot - feat_sizes*2.0,
wrap2_bot.with_y(coupling_wire_bot.top()) + feat_sizes*2.0, wrap2_bot.with_y(coupling_wire_bot.top()) + feat_sizes*2.0,
); );
let coupling_stubs = region::Union::new() let coupling_stubs = region::Union::new4(
.with(coupling_stub_top_left.clone()) coupling_stub_top_left.clone(),
.with(coupling_stub_top_right.clone()) coupling_stub_top_right.clone(),
.with(coupling_stub_bot_left.clone()) coupling_stub_bot_left.clone(),
.with(coupling_stub_bot_right.clone()) coupling_stub_bot_right.clone(),
; );
let coupling_wires = region::Union::new() let coupling_wires = region::Union::new3(
.with(coupling_wire_top.clone()) coupling_wire_top.clone(),
.with(coupling_wire_bot.clone()) coupling_wire_bot.clone(),
.with(coupling_stubs.clone()) coupling_stubs.clone(),
; );
let coupling_region = region::Union::new() let coupling_region = region::Union::new3(
.with(coupling_region1.clone()) coupling_region1.clone(),
.with(coupling_region2.clone()) coupling_region2.clone(),
.with(coupling_wires.clone()) coupling_wires.clone(),
; );
let wrap1_with_coupling = region::union( let wrap1_with_coupling = region::Union::new2(
coupling_region1.clone(), coupling_wires.clone() coupling_region1.clone(), coupling_wires.clone()
); );
let wrap2_with_coupling = region::union( let wrap2_with_coupling = region::Union::new2(
coupling_region2.clone(), coupling_wires.clone() coupling_region2.clone(), coupling_wires.clone()
); );
@@ -371,9 +412,10 @@ fn run_sim(id: u32, p: Params, g: Geometries) -> Results {
p.clock_type, p.clock_type,
); );
let mut driver: SpirvDriver<Mat> = Driver::new_spirv(g.dim, p.geom.feat_size); let mut driver = Driver::new(SpirvSim::<f32, Mat, WgpuBackend>::new(
driver.set_steps_per_stim(1000); g.dim.to_index(p.geom.feat_size), p.geom.feat_size
if !driver.add_state_file(&*format!("{}/state.bc", prefix), 16000) { ));
if !driver.add_state_file(&*format!("{}/state.bc", prefix), 4000) {
// mu_r=881.33, starting at H=25 to H=75. // mu_r=881.33, starting at H=25 to H=75.
let ferro_mat = mat::Ferroxcube3R1MH::new(); let ferro_mat = mat::Ferroxcube3R1MH::new();
// let ferro_mat = mat::db::conductor(wire_conductivity); // let ferro_mat = mat::db::conductor(wire_conductivity);
@@ -396,43 +438,39 @@ fn run_sim(id: u32, p: Params, g: Geometries) -> Results {
info!("loaded state file: skipping geometry calculations"); info!("loaded state file: skipping geometry calculations");
} }
let add_drive_sine_pulse = |driver: &mut SpirvDriver<Mat>, region: &Torus, start: f32, duration: f32, amp: f32| { let add_drive_sine_pulse = |driver: &mut Driver<f32, _, _>, region: &Torus, start: f32, duration: f32, amp: f32| {
let wave = Sinusoid1::from_wavelength(amp, duration * 2.0) let wave = Sinusoid::from_wavelength(duration * 2.0)
.half_cycle() .half_cycle()
.scaled(amp)
.shifted(start); .shifted(start);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(ModulatedVectorField::new(
region.clone(), CurlVectorField::new(region.clone()),
wave.clone(), wave,
region.center(),
region.axis()
)); ));
}; };
let add_drive_square_pulse = |driver: &mut SpirvDriver<Mat>, region: &Torus, start: f32, duration: f32, amp: f32| { let add_drive_square_pulse = |driver: &mut Driver<f32, _, _>, region: &Torus, start: f32, duration: f32, amp: f32| {
let wave = Gated::new(amp, start, start+duration); let wave = amp.gated(start, start+duration);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(ModulatedVectorField::new(
region.clone(), CurlVectorField::new(region.clone()),
wave.clone(), wave,
region.center(),
region.axis()
)); ));
}; };
let add_drive_exp_pulse = |driver: &mut SpirvDriver<Mat>, region: &Torus, start: f32, duration: f32, amp: f32| { let add_drive_exp_pulse = |driver: &mut Driver<f32, _, _>, region: &Torus, start: f32, duration: f32, amp: f32| {
let wave = Exp1::new_at(amp, start, 0.5*duration); let wave = Exp::new_at(amp, start, 0.5*duration);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(ModulatedVectorField::new(
region.clone(), CurlVectorField::new(region.clone()),
wave.clone(), wave,
region.center(),
region.axis()
)); ));
}; };
let add_drive_step = |driver: &mut SpirvDriver<Mat>, region: &Torus, start: f32, amp: f32| { // step function: "permanently" increase the current by `amp`.
add_drive_square_pulse(driver, region, start, 1.0, amp); let _add_drive_step = |driver: &mut Driver<f32, _, _>, region: &Torus, start: f32, amp: f32| {
add_drive_square_pulse(driver, region, start, 1.0 /* effectively infinite duration */, amp);
}; };
let add_drive_pulse = |ty: PulseType, driver: &mut SpirvDriver<Mat>, region: &Torus, start: f32, duration: f32, amp: f32| { let add_drive_pulse = |ty: PulseType, driver: &mut Driver<f32, _, _>, region: &Torus, start: f32, duration: f32, amp: f32| {
match ty { match ty {
PulseType::Square => add_drive_square_pulse(driver, region, start, duration, amp), PulseType::Square => add_drive_square_pulse(driver, region, start, duration, amp),
PulseType::Sine => add_drive_sine_pulse(driver, region, start, duration, amp), PulseType::Sine => add_drive_sine_pulse(driver, region, start, duration, amp),
@@ -490,8 +528,9 @@ fn run_sim(id: u32, p: Params, g: Geometries) -> Results {
driver.add_csv_renderer(&*meas_csv, 400, None); driver.add_csv_renderer(&*meas_csv, 400, None);
driver.add_csv_renderer(&*meas_sparse_csv, 8000, None); driver.add_csv_renderer(&*meas_sparse_csv, 8000, None);
driver.set_steps_per_stimulus(20);
driver.step_until(duration); driver.step_until(duration);
let (m1_peak, m1_stable) = significa(CsvRenderer::new(&*meas_sparse_csv).read_column_as_f32("M(mem1)")); let (m1_peak, m1_stable) = significa(CsvRenderer::new(&*meas_sparse_csv).read_column_as_f32("M(mem1)"));
let (m2_peak, m2_stable) = significa(CsvRenderer::new(&*meas_sparse_csv).read_column_as_f32("M(mem2)")); let (m2_peak, m2_stable) = significa(CsvRenderer::new(&*meas_sparse_csv).read_column_as_f32("M(mem2)"));
let (h1_peak, h1_stable) = significa(CsvRenderer::new(&*meas_sparse_csv).read_column_as_f32("H(mem1)")); let (h1_peak, h1_stable) = significa(CsvRenderer::new(&*meas_sparse_csv).read_column_as_f32("H(mem1)"));
@@ -636,7 +675,7 @@ fn main() {
variants.len() / post_times.len(), variants.len() / post_times.len(),
); );
let mut geom_cache = DiskCache::new_with_supplier( let geom_cache = SyncDiskCache::new_with_supplier(
&format!("{}/.geom_cache", ensure_out_dir(i)), &format!("{}/.geom_cache", ensure_out_dir(i)),
|geom: &GeomParams| derive_geometries(geom.clone()) |geom: &GeomParams| derive_geometries(geom.clone())
); );
@@ -682,7 +721,7 @@ fn main() {
}; };
let wraps1_choices: Vec<_> = (-120..120) let wraps1_choices: Vec<_> = (-120..120)
.into_iter() .into_par_iter()
.filter_map(|wraps1| { .filter_map(|wraps1| {
let params = GeomParams { let params = GeomParams {
wraps1: (wraps1 * 4) as f32, wraps1: (wraps1 * 4) as f32,
@@ -707,7 +746,7 @@ fn main() {
.0.wraps1; .0.wraps1;
let wraps2_choices: Vec<_> = (-120..120) let wraps2_choices: Vec<_> = (-120..120)
.into_iter() .into_par_iter()
.filter_map(|wraps2| { .filter_map(|wraps2| {
let params = GeomParams { let params = GeomParams {
wraps2: (wraps2 * 4) as f32, wraps2: (wraps2 * 4) as f32,

File diff suppressed because it is too large Load Diff

View File

@@ -3,11 +3,11 @@
/// the SR latch in this example is wired to a downstream latch, mostly to show that it's /// the SR latch in this example is wired to a downstream latch, mostly to show that it's
/// possible to transfer the state (with some limitation) from one latch to another. /// possible to transfer the state (with some limitation) from one latch to another.
use coremem::{Driver, mat, meas, SpirvDriver}; use coremem::{Driver, mat, meas};
use coremem::geom::{Meters, Torus}; use coremem::geom::{Coord as _, Meters, Torus};
use coremem::sim::spirv; use coremem::sim::spirv::{SpirvSim, WgpuBackend};
use coremem::sim::units::Seconds; use coremem::sim::units::Seconds;
use coremem::stim::{CurlStimulus, Sinusoid1, TimeVarying as _}; use coremem::stim::{CurlVectorField, ModulatedVectorField, Sinusoid, TimeVaryingExt as _};
fn main() { fn main() {
@@ -59,7 +59,9 @@ fn main() {
let coupling_region = Torus::new_xz(Meters::new(0.5*(ferro1_center + ferro2_center), ferro_center_y, half_depth), wire_coupling_major, wire_minor); let coupling_region = Torus::new_xz(Meters::new(0.5*(ferro1_center + ferro2_center), ferro_center_y, half_depth), wire_coupling_major, wire_minor);
let sense_region = Torus::new_xz(Meters::new(ferro2_center + ferro_major, ferro_center_y, half_depth), wire_major, wire_minor); let sense_region = Torus::new_xz(Meters::new(ferro2_center + ferro_major, ferro_center_y, half_depth), wire_major, wire_minor);
let mut driver: SpirvDriver<spirv::FullyGenericMaterial> = Driver::new_spirv(Meters::new(width, height, depth), feat_size); let mut driver = Driver::new(SpirvSim::<f32, mat::FullyGenericMaterial<f32>, WgpuBackend>::new(
Meters::new(width, height, depth).to_index(feat_size), feat_size
));
// mu_r=881.33, starting at H=25 to H=75. // mu_r=881.33, starting at H=25 to H=75.
driver.fill_region(&ferro1_region, mat::MHPgram::new(25.0, 881.33, 44000.0)); driver.fill_region(&ferro1_region, mat::MHPgram::new(25.0, 881.33, 44000.0));
@@ -76,14 +78,13 @@ fn main() {
// helper to schedule a stimulus at the provided start time/duration. // helper to schedule a stimulus at the provided start time/duration.
let mut add_drive_pulse = |region: &Torus, start, duration, amp| { let mut add_drive_pulse = |region: &Torus, start, duration, amp| {
let wave = Sinusoid1::from_wavelength(amp, duration * 2.0) let wave = Sinusoid::from_wavelength(duration * 2.0)
.half_cycle() .half_cycle()
.scaled(amp)
.shifted(start); .shifted(start);
driver.add_stimulus(CurlStimulus::new( driver.add_stimulus(ModulatedVectorField::new(
region.clone(), CurlVectorField::new(region.clone()),
wave.clone(), wave,
region.center(),
region.axis()
)); ));
}; };
@@ -143,8 +144,6 @@ fn main() {
// render a couple CSV files: one very detailed and the other more sparsely detailed // render a couple CSV files: one very detailed and the other more sparsely detailed
driver.add_csv_renderer(&*format!("{}meas.csv", prefix), 200, None); driver.add_csv_renderer(&*format!("{}meas.csv", prefix), 200, None);
driver.add_csv_renderer(&*format!("{}meas-sparse.csv", prefix), 1600, None); driver.add_csv_renderer(&*format!("{}meas-sparse.csv", prefix), 1600, None);
// how frequently to re-evaluate the stimulus (Sample & Hold interpolation between evaluations)
driver.set_steps_per_stim(1000);
driver.step_until(Seconds(duration)); driver.step_until(Seconds(duration));
} }

View File

@@ -0,0 +1,8 @@
[package]
name = "stacked_cores"
version = "0.1.0"
authors = ["Colin <colin@uninsane.org>"]
edition = "2021"
[dependencies]
coremem = { path = "../../coremem" }

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,242 @@
#!/usr/bin/env python3
"""
try to understand which transfer characteristics can be used to create stable logic.
"""
def fwd(offset: float, amp: float):
    """Build a forward transfer function: offset + amp*x, clamped to at most 1.0."""
    def transfer(x):
        return min(1.0, offset + amp*x)
    return transfer
def inv(offset: float, amp: float):
    """Build an inverting transfer function: 1 - offset - amp*x, clamped to at least 0.0."""
    def transfer(x):
        return max(0.0, 1.0 - offset - amp*x)
    return transfer
def inv_from_fwd(inv):
    """Turn a forward transfer function into an inverter by complementing its output.

    NOTE(review): the parameter is named `inv` but is used as a *forward*
    transfer function whose output gets complemented.
    """
    def inverted(x):
        return max(0.0, 1 - inv(x))
    return inverted
def test_stability(fwd, inv):
low = 0.0
high = 1.0
mid = 0.5
for i in range(8):
low, high, mid = inv(fwd(high)), inv(fwd(low)), inv(fwd(mid))
print(f"low {low:.2f} high {high:.2f} bistable {mid:.2f}")
def map_stability(inv):
    # Map every starting value 0.00..1.00 through 32 iterations of the
    # inverter so each input settles toward a fixed point, then report the
    # resulting logic levels and the input cutoff between "low" and "high".
    s = []
    for i in range(101):
        v = i/100.0
        for _ in range(32):
            v = inv(v)
        s.append(v)
    # where the extreme inputs (0.0 and 1.0) settle after many inversions
    logic_low = s[0]
    logic_high = s[100]
    logic_mean = 0.5*(logic_low + logic_high)
    print(f"low: {logic_low:.2f}, high: {logic_high:.2f}")
    # NOTE(review): assumes the settled values rise with the input, so the
    # first value >= the midpoint marks the cutoff; if no value reaches the
    # midpoint, no cutoff line is printed — confirm for unusual inverters.
    for i, v in enumerate(s):
        if v >= logic_mean:
            print("logic cutoff: {:.2f}".format(i/100))
            break
def print_to_stable(inv, f):
    """Repeatedly apply `inv` to `f` (8 iterations), printing each transition,
    to show where the value settles."""
    for _ in range(8):
        # renamed from `next`, which shadowed the `next` builtin
        following = inv(f)
        print(f"{f} -> {following}")
        f = following
def print_to_stable_noise(inv, f, noise=0.01):
    # Like print_to_stable, but perturbs the input by +/- `noise` on
    # alternating iterations to check that the settling point is robust to
    # small input errors.
    for i in range(8):
        if i%2: x = f - noise
        else: x = f + noise
        next = inv(x)
        print(f"{f:.3} ({x:.3}) -> {next:.3}")
        f = next
# Sweep several (offset, amp) parameterizations of the fwd/inv pair to see
# which ones settle into two distinct, stable logic levels.
print("stability: 0.2 + 2.0*x")
test_stability(fwd(0.2, 2.0), inv(0.2, 2.0))
print("stability: 0.2 + 1.5*x")
test_stability(fwd(0.2, 1.5), inv(0.2, 1.5))
print("stability: 0.2 + 1.1*x")
test_stability(fwd(0.2, 1.1), inv(0.2, 1.1))
print("stability: 0.4 + 1.1*x")
test_stability(fwd(0.4, 1.1), inv(0.4, 1.1))
print("stability: 0.5 + 2.0*x")
test_stability(fwd(0.5, 2.0), inv(0.5, 2.0))
print("stability: 0.9*x")
test_stability(fwd(0.0, 0.9), inv(0.0, 0.9))
# mismatched fwd/inv parameterizations
print("stability: 0.2 + 1.5*x; 0.9 - 0.7*x")
test_stability(fwd(0.2, 1.5), inv(0.1, 0.7))
print("stability: 0.1 + 1.3*x; 0.9 - 0.7*x")
test_stability(fwd(0.1, 1.3), inv(0.1, 0.7))
print("""\
offset isn't a deal-breaker until it approaches 50%.
for any offset < 0.5, amplification > 1.0, there is *some* stable pair of levels.
as offset increases, the stable pairs become closer together
""")
def test_stability_inv(inv, mid=0.5):
low = 0.0
high = 1.0
for i in range(32):
low, high, mid = inv(high), inv(low), inv(mid)
print(f"low {low:.2f} high {high:.2f} bistable {mid:.2f}")
# Sweep inverter-only loops over several (offset, amp) parameterizations.
print("stability_inv: 0.2 + 2.0*x")
test_stability_inv(inv(0.2, 2.0))
print("stability_inv: 0.2 + 1.1*x")
test_stability_inv(inv(0.2, 1.1))
# large offsets need a non-default starting midpoint to find the second level
print("stability_inv: 0.5 + 1.1*x")
test_stability_inv(inv(0.5, 1.1), 0.2)
print("stability_inv: 0.7 + 1.1*x")
test_stability_inv(inv(0.7, 1.1), 0.1)
print("""\
inverter-only circuits can be stable, even if they ordinarily bias to just one direction...
the same amp > 1.0 condition holds.
importantly, offset > 0.5 becomes *fine*
""")
def piecewise(points: list, scale=20000.0):
    """
    each element in points is a two-tuple (input, output), sorted by input value.
    the return value is a function which:
    - accepts f: [0..1]
    - maps that to [-scale..scale]
    - maps that through `points` with linear interpolation
    - scales back to [0..1] and return that
    """
    def apply(f):
        # map f from [0..1] onto the points' native [-scale..scale] domain
        x_in_scale = -scale + f * scale * 2.0
        # locate the last point at-or-below the input. using `<=` (not `<`)
        # means an input landing exactly on an interior knot yields that
        # knot's own output instead of interpolating between its neighbors
        # and skipping it (the previous behavior).
        for first_lower in points[:-1][::-1]:
            if first_lower[0] <= x_in_scale: break
        # locate the first point strictly above the input
        for first_higher in points[1:]:
            if first_higher[0] > x_in_scale: break
        # linear interpolation between the bracketing points (extrapolates
        # when x falls outside the table; final result clamped to [0..1])
        tween = (x_in_scale - first_lower[0]) / (first_higher[0] - first_lower[0])
        y_in_scale = tween * first_higher[1] + (1-tween) * first_lower[1]
        r = (y_in_scale + scale) / (2.0 * scale)
        return max(0.0, min(1.0, r))
    return apply
# ---- measured transfer curves ----
# each table maps a drive level to the resulting response, as measured from
# the named simulation runs; entries averaged over several cores are computed
# inline so the raw per-core measurements stay visible.
fwd_26 = piecewise(
    [
        [ -14687, -7326 ],
        [ -13049, -6503 ],
        [ -11785, -5833 ],
        [ -4649, -1447 ],
        [ 4961, 7059 ],
        [ 11283, 11147 ],
    ],
    17000
)
print("stability 26 7:1 windings (SUITABLY DIFFERENTIATED)")
test_stability(fwd_26, lambda x: 1-x)
map_stability(inv_from_fwd(fwd_26))
fwd_36 = piecewise(
    [
        [ (-13430 + -14112 + -13935)/3, -8297],
        [ (-12796 + -13454 + -13293)/3, -7684],
        [ (-4872 + -5106 + -5091)/3, -282],
        [ (2322 + 2343 + 3705)/3, 7411],
        [ (4854 + 4840 + 7273)/3, 9318],
        [ (7324 + 7138 + 10608)/3, 10151],
        [ (10552 + 10509 + 14412)/3, 11398],
        [ (13418 + 13482 + 14760)/3, 13081],
        [ (14196 + 14528 + 14533)/3, 13580],
    ], 15000)
print("stability 36 (3:1) cores (not suitably differentiated)")
test_stability(fwd_36, lambda x: 1-x)
fwd_38_2_0 = piecewise(
    [
        [ (-13745 + -13012)/2, -6222 ],
        [ (-4969 + -4744)/2, 2373 ],
        [ (1772 + 2070)/2, 10467 ],
        [ (4472 + 4114)/2, 12921 ],
        [ (7221 + 6291)/2, 14530 ],
        [ (11159 + 10397)/2, 15865 ],
        [ (12430 + 15653)/2, 16202 ],
    ], 17000
)
print("stability 38 2:0 cores (SUITABLY DIFFERENTIATED)")
test_stability(fwd_38_2_0, lambda x: 1-x)
map_stability(inv_from_fwd(fwd_38_2_0))
# print_to_stable(inv_from_fwd(fwd_38_2_0), 0.0)
# print_to_stable_noise(inv_from_fwd(fwd_38_2_0), 0.32, 0.01)
fwd_38_3_0 = piecewise(
    [
        [ (-13956 + -13890 + -13077)/3, -5203],
        [ (-4979 + -4885 + -4717)/3, 5051],
        [ (1531 + 503 + 1006)/3, 12509],
        [ (4180 + 1821 + 2239)/3, 14386],
        [ (6986 + 3436 + 3701)/3, 15451],
        [ (10482 + 6644 + 7735)/3, 16081],
        [ (11436 + 13343 + 14411)/3, 16380],
    ], 17000
)
print("stability 38 3:0 cores (SUITABLY DIFFERENTIATED)")
test_stability(fwd_38_3_0, lambda x: 1-x)
map_stability(inv_from_fwd(fwd_38_3_0))
# print_to_stable(inv_from_fwd(fwd_38_3_0), 0.0)
# print_to_stable_noise(inv_from_fwd(fwd_38_3_0), 0.29, 0.01)
# fwd_38_2_0 minus 8000
# de-biasing like this makes for a WORSE inverter
# fwd_38_2_0_offset = piecewise(
#     [
#         [ (-13745 + -13012)/2, -14222 ],
#         [ (-4969 + -4744)/2, -5627 ],
#         [ (1772 + 2070)/2, 2467 ],
#         [ (4472 + 4114)/2, 4921 ],
#         [ (7221 + 6291)/2, 6530 ],
#         [ (11159 + 10397)/2, 7865 ],
#         [ (12430 + 15653)/2, 8202 ],
#     ], 17000
# )
# print("stability 38 2:0 cores (offset -8000)")
# print("pw(0) = ", fwd_38_2_0_offset(0))
# test_stability(fwd_38_2_0_offset, lambda x: 1-x)
# map_stability(inv_from_fwd(fwd_38_2_0_offset))
# print_to_stable(inv_from_fwd(fwd_38_2_0_offset), 0.0)
fwd_38_4_2 = piecewise([
    [ (-14049 + -14191 + -14218 + -14161)/4, -10675],
    [ (-4993 + -4944 + -4985 + -5123)/4, -4640],
    [ (1948 + 1854 + 2633 + 2602)/4, 73],
    [ (4823 + 3914 + 5799 + 6177)/4, 1651],
    [ (7933 + 6731 + 10058 + 9404)/4, 2625],
    [ (11420 + 11947 + 15968 + 14039)/4, 6413],
    [ (13786 + 16465 + 16667 + 15144)/4, 8180],
], 17000)
print("stability 38 4:2 cores (not suitably differentiated)")
test_stability(fwd_38_4_2, lambda x: 1-x)
fwd_38_5_2 = piecewise([
    [ (-14056 + -14195 + -14234 + -14224 + -14162)/5, -8395],
    [ (-4990 + -4931 + -4935 + -4968 + -5080)/5, -1468],
    [ (1804 + 966 + 1564 + 1808 + 2237)/5, 3661],
    [ (4632 + 2427 + 3521 + 4500 + 5563)/5, 5238],
    [ (7629 + 4568 + 6648 + 8011 + 8527)/5, 6710],
    [ (10758 + 9552 + 12404 + 13748 + 12927)/5, 10892],
    [ (12335 + 15650 + 16603 + 16403 + 14635)/5, 13151],
], 17000)
print("stability 38 5:2 cores (not suitably differentiated)")
test_stability(fwd_38_5_2, lambda x: 1-x)
# TODO: code 24, 26, 27, 28, 30 (asymmetric windings)

View File

@@ -0,0 +1,46 @@
def load_csv(path: str):
    """
    returns (header: list[str], rows: list[list[T]])
    """
    import ast
    header = []
    rows = []
    for i, line in enumerate(open(path).read().strip().split('\n')):
        if i == 0:
            header = line.split(',')
        else:
            # literal_eval parses the comma-separated numeric row into a tuple
            # without executing arbitrary code the way `eval` would if the
            # file were tampered with.
            rows.append(ast.literal_eval(line))
    return header, rows
def labeled_rows(header: list, rows: list):
    """
    return a list of dicts,
    transforming each row into a kv map
    """
    labeled = []
    for row in rows:
        mapping = {}
        for i, elem in enumerate(row):
            mapping[header[i]] = elem
        labeled.append(mapping)
    return labeled
def last_row_before_t(rows: list, t: float):
    """
    return the last row for which row[time] < t
    (rows are scanned in order; the scan stops at the first row at/after t)
    """
    latest = None
    for candidate in rows:
        if candidate["time"] < t:
            latest = candidate
        else:
            break
    return latest
def extract_m(row: dict) -> list:
    """
    return [M(state0), M(state1), ...]
    pulled from the 'M(stateN)' columns of a labeled row
    """
    prefix = 'M(state'
    values = []
    for key, value in row.items():
        if key.startswith(prefix) and key.endswith(')'):
            index = int(key[len(prefix):-1])
            # columns are expected to appear in state order
            assert index == len(values)
            values.append(value)
    return values

View File

@@ -0,0 +1,50 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 12-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_12xx(path: str):
    """
    summarize one 12-xx run from its meas.csv.

    two fixed windows of the sim are compared:
      - "tx" (transfer): 2ns .. 3ns
      - "noop":          5ns .. 6ns
    core 1's swing is the reference (presumably the driven core -- its
    deltas are the denominators of the ratios below); cores 0 and 2 are
    the coupled pair whose combined movement is reported.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    # sample the sim state at the edges of each window
    tx_start = last_row_before_t(rows, 2e-9)
    tx_end = last_row_before_t(rows, 3e-9)
    noop_start = last_row_before_t(rows, 5e-9)
    noop_end = last_row_before_t(rows, 6e-9)
    m_tx_start = extract_m(tx_start)
    m_tx_end = extract_m(tx_end)
    m_noop_start = extract_m(noop_start)
    m_noop_end = extract_m(noop_end)
    # absolute swing of core 1 during each window
    m1_tx = abs(m_tx_end[1] - m_tx_start[1])
    m1_noop = abs(m_noop_end[1] - m_noop_start[1])
    # combined absolute swing of cores 0 and 2 (rounded per-core)
    m_tx_arr = [round(abs(m_tx_end[i] - m_tx_start[i])) for i in [0, 2]]
    m_tx = sum(m_tx_arr)
    m_noop_arr = [round(abs(m_noop_end[i] - m_noop_start[i])) for i in [0, 2]]
    m_noop = sum(m_noop_arr)
    # figures of merit: real transfer vs. noop leakage
    ratio_tx_noop = m_tx / m_noop
    ratio_tx_m1 = m_tx / m1_tx
    ratio_tx_noop_m1 = (m_tx - m_noop) / m1_tx
    print(f'm1 tx: {m1_tx} ({m_tx_start[1]} -> {m_tx_end[1]})')
    print(f'm1 noop: {m1_noop} ({m_noop_start[1]} -> {m_noop_end[1]})')
    print('')
    print(f'm(tx): {m_tx_start}')
    print(f' -> {m_tx_end}')
    print('')
    print(f'm(noop): {m_noop_start}')
    print(f' -> {m_noop_end}')
    print('')
    print(f'tx/noop: {ratio_tx_noop:.3}')
    print(f'tx/m1: {ratio_tx_m1:.3}')
    print(f'(tx-noop)/m1: {ratio_tx_noop_m1:.3}')

if __name__ == '__main__':
    extract_12xx(sys.argv[1])

View File

@@ -0,0 +1,36 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 17-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_17xx(path: str):
    """
    summarize one 17-xx (fork-and-join) run from its meas.csv.

    three instants are sampled: start of the first transfer (2ns), start
    of the second transfer (4ns), and the end (5ns). reports the swing of
    the first core, the middle cores, and the last core over those
    intervals.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    tx_start_0 = last_row_before_t(rows, 2e-9)
    tx_start_1 = last_row_before_t(rows, 4e-9)
    tx_end = last_row_before_t(rows, 5e-9)
    m_tx_start_0 = extract_m(tx_start_0)
    m_tx_start_1 = extract_m(tx_start_1)
    m_tx_end = extract_m(tx_end)
    # first core's swing during the first interval
    m0_switch = abs(m_tx_start_1[0] - m_tx_start_0[0])
    # total movement of the middle cores during the first interval...
    m_middle_switch = sum(abs(e - s) for (e, s) in zip(m_tx_start_1[1:-1], m_tx_start_0[1:-1]))
    # ...and during the second interval
    m_middle_clear = sum(abs(e - s) for (e, s) in zip(m_tx_end[1:-1], m_tx_start_1[1:-1]))
    # last core's swing during the second interval
    m_last_switch = abs(m_tx_end[-1] - m_tx_start_1[-1])
    print(f'm0: {m0_switch} ({m_tx_start_0[0]} -> {m_tx_start_1[0]})')
    print(f'm_middle: {m_middle_switch}')
    print(f'm_middle_clear: {m_middle_clear}')
    print(f'm_last: {m_last_switch} ({m_tx_start_1[-1]} -> {m_tx_end[-1]})')
    print('')
    print(f'm: {m_tx_start_0}')
    print(f' -> {m_tx_start_1}')
    print(f' -> {m_tx_end}')

if __name__ == '__main__':
    extract_17xx(sys.argv[1])

View File

@@ -0,0 +1,36 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 18-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_18xx(path: str):
    """
    summarize one 18-xx run from its meas.csv.

    three instants are sampled: 2ns (initial), 4ns (after the forward
    transfer), 6ns (after the reverse transfer). reports core 0's swing
    in each direction and the net movement of the remaining cores.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    tx_init = last_row_before_t(rows, 2e-9)
    tx_fwd = last_row_before_t(rows, 4e-9)
    tx_rev = last_row_before_t(rows, 6e-9)
    m_init = extract_m(tx_init)
    m_fwd = extract_m(tx_fwd)
    m_rev = extract_m(tx_rev)
    m0_fwd = abs(m_init[0] - m_fwd[0])
    m0_rev = abs(m_rev[0] - m_fwd[0])
    # note: these sums are signed (unlike the abs() used for m0), so
    # opposing movements among the non-zero cores cancel out
    m_rest_fwd = sum(e - s for (e, s) in zip(m_fwd[1:], m_init[1:]))
    m_rest_rev = sum(s - e for (e, s) in zip(m_rev[1:], m_fwd[1:]))
    print(f'\t- m0: {m_init[0]} -> {m_fwd[0]} -> {m_rev[0]}')
    print(f'\t- m0 fwd: {m0_fwd}')
    print(f'\t- m0 rev: {m0_rev}')
    print(f'\t- m_middle fwd: {m_rest_fwd}')
    print(f'\t- m_middle rev: {m_rest_rev}')
    print(f'\t- m: {m_init}')
    print(f'\t -> {m_fwd}')
    print(f'\t -> {m_rev}')

if __name__ == '__main__':
    extract_18xx(sys.argv[1])

View File

@@ -0,0 +1,27 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 24-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_24xx(path: str):
    """
    summarize one 24-xx run: per-core magnetization swing over the
    2ns..3ns transfer window, plus the core1/core0 amplification factor.
    """
    header, raw_rows = load_csv(path)
    labeled = labeled_rows(header, raw_rows)
    m_before = extract_m(last_row_before_t(labeled, 2e-9))
    m_after = extract_m(last_row_before_t(labeled, 3e-9))
    # core 0's delta is sign-flipped (it swings opposite to core 1,
    # presumably due to winding direction)
    m0 = -(m_after[0] - m_before[0])
    m1 = m_after[1] - m_before[1]
    print(f'\t- m0: {m0} ({m_before[0]} -> {m_after[0]})')
    print(f'\t- m1: {m1} ({m_before[1]} -> {m_after[1]})')
    print(f'\t- amp: {m1/m0}')

if __name__ == '__main__':
    extract_24xx(sys.argv[1])

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 28-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_28xx(path: str):
    """
    summarize one 28-xx run: per-core magnetization swing over the
    2ns..3ns transfer window, plus the core2/core0 amplification factor.
    """
    header, raw_rows = load_csv(path)
    labeled = labeled_rows(header, raw_rows)
    m_before = extract_m(last_row_before_t(labeled, 2e-9))
    m_after = extract_m(last_row_before_t(labeled, 3e-9))
    # cores 0 and 2 have their deltas sign-flipped (presumably opposite
    # winding direction); core 1 is taken as-is
    m0 = -(m_after[0] - m_before[0])
    m1 = m_after[1] - m_before[1]
    m2 = -(m_after[2] - m_before[2])
    print(f'\t- m0: {m0} ({m_before[0]} -> {m_after[0]})')
    print(f'\t- m1: {m1} ({m_before[1]} -> {m_after[1]})')
    print(f'\t- m2: {m2} ({m_before[2]} -> {m_after[2]})')
    print(f'\t- amp: {m2/m0}')

if __name__ == '__main__':
    extract_28xx(sys.argv[1])

View File

@@ -0,0 +1,38 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 29-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_29xx(path: str):
    """
    summarize one 29-xx run from its meas.csv.

    three instants are sampled: 2ns (initial), 4ns (after the first
    transfer), 5ns (after the second). the first interval charges core 1
    (and core 2) from core 0; the second interval presumably returns
    charge from core 2 into core 1 -- m1's total is split accordingly.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    tx_init = last_row_before_t(rows, 2e-9)
    tx_mid = last_row_before_t(rows, 4e-9)
    tx_fini = last_row_before_t(rows, 5e-9)
    m_init = extract_m(tx_init)
    m_mid = extract_m(tx_mid)
    m_fini = extract_m(tx_fini)
    # cores 0 and 2 swing opposite to core 1; their deltas are sign-flipped
    m0 = -(m_mid[0] - m_init[0])
    m1_first = m_mid[1] - m_init[1]
    m2_first = m_mid[2] - m_init[2]  # computed for symmetry; not printed
    m1_second = m_fini[1] - m_mid[1]
    m2_second = -(m_fini[2] - m_mid[2])
    m1 = m1_first + m1_second
    print(f'\t- m0: {m0} ({m_init[0]} -> {m_mid[0]})')
    print(f'\t- m1: {m1}')
    print(f'\t\t- from m0: {m1_first}')
    print(f'\t\t- from m2: {m1_second}')
    print(f'\t {m_init[1]} -> {m_mid[1]} -> {m_fini[1]}')
    print(f'\t- m2: {m2_second}')
    # bugfix: dropped the stray ')' that previously terminated this line
    print(f'\t {m_init[2]} -> {m_mid[2]} -> {m_fini[2]}')
    print(f'\t- amp: {m1/m0}')

if __name__ == '__main__':
    extract_29xx(sys.argv[1])

View File

@@ -0,0 +1,32 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 33-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_33xx(path: str):
    """
    summarize one 33-xx run over the 2ns..3ns transfer window: the
    adjusted input swing (madj) across cores 0, 2 and 3, and the output
    core 1's swing.
    """
    header, raw_rows = load_csv(path)
    labeled = labeled_rows(header, raw_rows)
    m_before = extract_m(last_row_before_t(labeled, 2e-9))
    m_after = extract_m(last_row_before_t(labeled, 3e-9))
    # per-core deltas; cores 0 and 2 are sign-flipped (presumably by
    # winding direction), core 3 enters madj with negative weight
    m0 = -(m_after[0] - m_before[0])
    m1 = m_after[1] - m_before[1]
    m2 = -(m_after[2] - m_before[2])
    m3 = m_after[3] - m_before[3]
    print(f'\t- madj: {m0 + m2 - m3}')
    print(f'\t\t- m0: {m_before[0]} -> {m_after[0]}')
    print(f'\t\t- m2: {m_before[2]} -> {m_after[2]}')
    print(f'\t\t- m3: {m_before[3]} -> {m_after[3]}')
    print(f'\t- m1: {m1}')
    print(f'\t\t- {m_before[1]} -> {m_after[1]}')

if __name__ == '__main__':
    extract_33xx(sys.argv[1])

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 34-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_34xx(path: str):
    """
    summarize one 34-xx run over the 2ns..3ns transfer window: the
    adjusted input swing (m0 minus m2) and the output core 1's swing.
    """
    header, raw_rows = load_csv(path)
    labeled = labeled_rows(header, raw_rows)
    m_before = extract_m(last_row_before_t(labeled, 2e-9))
    m_after = extract_m(last_row_before_t(labeled, 3e-9))
    # cores 0 and 2 deltas sign-flipped (presumably winding direction)
    m0 = -(m_after[0] - m_before[0])
    m1 = m_after[1] - m_before[1]
    m2 = -(m_after[2] - m_before[2])
    print(f'\t- madj: {m0 - m2}')
    print(f'\t\t- m0: {m_before[0]} -> {m_after[0]}')
    print(f'\t\t- m2: {m_before[2]} -> {m_after[2]}')
    print(f'\t- m1: {m1}')
    print(f'\t\t- {m_before[1]} -> {m_after[1]}')

if __name__ == '__main__':
    extract_34xx(sys.argv[1])

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 36-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_36xx(path: str):
    """
    summarize one 36-xx (5-core) run over the 2ns..3ns transfer window.
    madj aggregates the input-side swings (cores 0, 2, 3 sign-flipped;
    core 4 subtracted); core 1 is the output.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    tx_init = last_row_before_t(rows, 2e-9)
    tx_fini = last_row_before_t(rows, 3e-9)
    m_init = extract_m(tx_init)
    m_fini = extract_m(tx_fini)
    # sign conventions presumably reflect each core's winding direction
    # -- TODO confirm against the run geometry
    m0 = -(m_fini[0] - m_init[0])
    m1 = m_fini[1] - m_init[1]
    m2 = -(m_fini[2] - m_init[2])
    m3 = -(m_fini[3] - m_init[3])
    m4 = m_fini[4] - m_init[4]
    print(f'\t- madj: {m0 + m2 + m3 - m4}')
    print(f'\t\t- m0: {m_init[0]} -> {m_fini[0]}')
    print(f'\t\t- m2: {m_init[2]} -> {m_fini[2]}')
    print(f'\t\t- m3: {m_init[3]} -> {m_fini[3]}')
    print(f'\t\t- m4: {m_init[4]} -> {m_fini[4]}')
    print(f'\t- m1: {m1}')
    print(f'\t\t- {m_init[1]} -> {m_fini[1]}')

if __name__ == '__main__':
    extract_36xx(sys.argv[1])

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 37-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_37xx(path: str):
    """
    summarize one 37-xx (7-core) run over the 2ns..3ns transfer window.
    cores 0-2 (sign-flipped) and 4-6 (subtracted) form the input side of
    madj; core 3 is the output.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    tx_init = last_row_before_t(rows, 2e-9)
    tx_fini = last_row_before_t(rows, 3e-9)
    m_init = extract_m(tx_init)
    m_fini = extract_m(tx_fini)
    # sign conventions presumably reflect each core's winding direction
    # -- TODO confirm against the run geometry
    m0 = -(m_fini[0] - m_init[0])
    m1 = -(m_fini[1] - m_init[1])
    m2 = -(m_fini[2] - m_init[2])
    m3 = m_fini[3] - m_init[3]
    m4 = m_fini[4] - m_init[4]
    m5 = m_fini[5] - m_init[5]
    m6 = m_fini[6] - m_init[6]
    print(f'\t- madj: {m0 + m1 + m2 - m4 - m5 - m6}')
    print(f'\t\t- m0: {m_init[0]} -> {m_fini[0]}')
    print(f'\t\t- m1: {m_init[1]} -> {m_fini[1]}')
    print(f'\t\t- m2: {m_init[2]} -> {m_fini[2]}')
    print(f'\t\t- m4: {m_init[4]} -> {m_fini[4]}')
    print(f'\t\t- m5: {m_init[5]} -> {m_fini[5]}')
    print(f'\t\t- m6: {m_init[6]} -> {m_fini[6]}')
    print(f'\t- m3: {m3}')
    print(f'\t\t- {m_init[3]} -> {m_fini[3]}')

if __name__ == '__main__':
    extract_37xx(sys.argv[1])

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 38-xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_38xx(path: str):
    """
    summarize one 38-xx run over the 2ns..3ns transfer window: the
    aggregate swing of the non-zero cores (madj) and core 0's swing.
    """
    header, raw_rows = load_csv(path)
    labeled = labeled_rows(header, raw_rows)
    m_before = extract_m(last_row_before_t(labeled, 2e-9))
    m_after = extract_m(last_row_before_t(labeled, 3e-9))
    # pair up before/after values for every core except core 0
    pairs = list(zip(m_before[1:], m_after[1:]))
    madj = sum(before - after for (before, after) in pairs)
    print(f'\t- madj: {madj}')
    for i, (before, after) in enumerate(pairs):
        print(f'\t\t- m{i+1}: {before} -> {after}')
    # core 0's delta is sign-flipped relative to the rest
    m0 = -(m_after[0] - m_before[0])
    print(f'\t- m0: {m0}')
    print(f'\t\t- {m_before[0]} -> {m_after[0]}')

if __name__ == '__main__':
    extract_38xx(sys.argv[1])

View File

@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 39-xx demos
to extract higher-level info from them.
"""
import os
import sys
import re
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_one(path: str, t_first: float, t_last: float):
    """
    read one meas.csv and return (input, output): the first core's M
    sampled just before t_first, and the last core's M sampled just
    before t_last.
    """
    header, raw_rows = load_csv(path)
    labeled = labeled_rows(header, raw_rows)
    first_sample = extract_m(last_row_before_t(labeled, t_first))
    last_sample = extract_m(last_row_before_t(labeled, t_last))
    return first_sample[0], last_sample[-1]
def extract_polarity(stem: str) -> float:
    """
    parse the drive polarity out of a sim directory name.

    names embed the polarity as `-pNNN` (positive) or `-nNNN` (negative),
    in hundredths: e.g. `-p050` -> 0.5, `-n025` -> -0.25. `-000` denotes
    zero polarity. returns None when no marker is present.
    """
    # raw strings avoid the invalid `\d` escape the original had, and each
    # pattern is now searched only once. `-nNNN` takes precedence over
    # `-pNNN` when both appear, matching the original lookup order.
    match = re.search(r"-n(\d\d\d)", stem) or re.search(r"-p(\d\d\d)", stem)
    if match:
        sign = {'n': -1, 'p': 1}[match.group(0)[1]]
        return sign * int(match.group(1)) * 0.01
    if "-000" in stem:
        return 0.00
    return None
def extract_39xx(base_path: str, t_first: str = "2e-9", t_last: str = "3e-9"):
    """
    aggregate a family of 39-xx runs into a transfer curve.

    `base_path` is a directory plus a run-name prefix; every sim dir under
    that directory whose name starts with the prefix contributes one
    (input M, output M) point, sampled from its meas.csv at t_first and
    t_last. the times arrive as strings so they can be passed straight
    from argv. prints the points as a ready-to-paste `Piecewise(...)`
    literal, each annotated with the drive polarity parsed from the dir
    name.
    """
    base_dir, prefix = os.path.split(base_path)
    mappings = {}
    for entry in os.listdir(base_dir):
        if entry.startswith(prefix):
            (input_, output) = extract_one(os.path.join(base_dir, entry, "meas.csv"), float(t_first), float(t_last))
            polarity = extract_polarity(entry)
            # key by rounded input so repeated runs collapse to one point
            mappings[int(round(input_))] = (int(round(output)), polarity)
    print("Piecewise(")
    print(" [")
    for i, (o, polarity) in sorted(mappings.items()):
        # extract_polarity returns None when the dir name has no marker
        comment = f" # {polarity:.2}" if polarity is not None else ""
        print(f" [ {i:6}, {o:6} ],{comment}")
    print(" ]")
    print(")")

if __name__ == '__main__':
    extract_39xx(*sys.argv[1:])

View File

@@ -0,0 +1,57 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 8xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_8xx(path: str):
    """
    summarize one 8xx run from its meas.csv.

    two fixed windows are compared:
      - "tx" (transfer): 2ns .. 3ns
      - "noop":          5ns .. 6ns
    core 0 is the reference; the combined swing of every other core is
    reported against it.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    tx_start = last_row_before_t(rows, 2e-9)
    tx_end = last_row_before_t(rows, 3e-9)
    noop_start = last_row_before_t(rows, 5e-9)
    noop_end = last_row_before_t(rows, 6e-9)
    m_tx_start = extract_m(tx_start)
    m_tx_end = extract_m(tx_end)
    m_noop_start = extract_m(noop_start)
    m_noop_end = extract_m(noop_end)
    num_m = len(m_tx_start)
    # reference core's absolute swing in each window
    m0_switch = abs(m_tx_end[0] - m_tx_start[0])
    m0_noop_switch = abs(m_noop_end[0] - m_noop_start[0])
    # combined swing of all other cores (rounded per-core)
    m_tx_switch_arr = [round(abs(m_tx_end[i] - m_tx_start[i])) for i in range(1, num_m)]
    m_tx_switch = sum(m_tx_switch_arr)
    m_noop_switch_arr = [round(abs(m_noop_end[i] - m_noop_start[i])) for i in range(1, num_m)]
    m_noop_switch = sum(m_noop_switch_arr)
    # figures of merit: transfer vs. noop leakage, each relative to m0
    ratio_tx_noop = m_tx_switch / m_noop_switch
    ratio_tx_switch = m_tx_switch / m0_switch
    ratio_noop_switch = m_noop_switch / m0_switch
    ratio_tx_noop_switch = (m_tx_switch - m_noop_switch) / m0_switch
    print(f'm0 tx: {m0_switch} ({m_tx_start[0]} -> {m_tx_end[0]})')
    print(f'm0 noop: {m0_noop_switch} ({m_noop_start[0]} -> {m_noop_end[0]})')
    print('')
    print(f'm(tx): {m_tx_start}')
    print(f' -> {m_tx_end}')
    print('')
    print(f'm(noop): {m_noop_start}')
    print(f' -> {m_noop_end}')
    print('')
    print(f'switched(tx): {m_tx_switch_arr}')
    print(f'switched(noop): {m_noop_switch_arr}')
    print('')
    print(f'tx/noop: {ratio_tx_noop:.3}')
    print(f'tx/m0: {ratio_tx_switch:.3}')
    print(f'noop/m0: {ratio_noop_switch:.3}')
    print(f'(tx-noop)/m0: {ratio_tx_noop_switch:.3}')

if __name__ == '__main__':
    extract_8xx(sys.argv[1])

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env python3
"""
invoke with the path to a meas.csv file for the stacked_core 9xx demos
to extract higher-level info from them.
"""
import sys
from stacked_cores import load_csv, labeled_rows, last_row_before_t, extract_m
def extract_9xx(path: str):
    """
    summarize one 9xx run from its meas.csv.

    two windows are compared: "tx" (2ns .. 4ns) and "rev" (4ns .. 6ns).
    note tx_end and rev_start are the same sample (both at 4ns). core 1
    is the reference; cores 0 and 2 are the coupled pair.
    """
    header, raw_rows = load_csv(path)
    rows = labeled_rows(header, raw_rows)
    tx_start = last_row_before_t(rows, 2e-9)
    tx_end = last_row_before_t(rows, 4e-9)
    rev_start = last_row_before_t(rows, 4e-9)
    rev_end = last_row_before_t(rows, 6e-9)
    m_tx_start = extract_m(tx_start)
    m_tx_end = extract_m(tx_end)
    m_rev_start = extract_m(rev_start)
    m_rev_end = extract_m(rev_end)
    # reference core's absolute swing in each window
    m1_switch = abs(m_tx_end[1] - m_tx_start[1])
    m1_rev_switch = abs(m_rev_end[1] - m_rev_start[1])
    # combined swing of cores 0 and 2 (rounded per-core)
    m_tx_switch_arr = [round(abs(m_tx_end[i] - m_tx_start[i])) for i in [0, 2]]
    m_tx_switch = sum(m_tx_switch_arr)
    m_rev_switch_arr = [round(abs(m_rev_end[i] - m_rev_start[i])) for i in [0, 2]]
    m_rev_switch = sum(m_rev_switch_arr)
    # figures of merit: forward transfer and round-trip recovery vs. m1
    ratio_tx_switch = m_tx_switch / m1_switch
    ratio_roundtrip = m1_rev_switch / m1_switch
    print(f'm1 tx: {m1_switch} ({m_tx_start[1]} -> {m_tx_end[1]})')
    print(f'm1 rev: {m1_rev_switch} ({m_rev_start[1]} -> {m_rev_end[1]})')
    print('')
    print(f'm(tx): {m_tx_start}')
    print(f' -> {m_tx_end}')
    print('')
    print(f'm(rev): {m_rev_start}')
    print(f' -> {m_rev_end}')
    print('')
    print(f'tx/m1: {ratio_tx_switch:.3}')
    print(f'rev/m1: {ratio_roundtrip:.3}')

if __name__ == '__main__':
    extract_9xx(sys.argv[1])

File diff suppressed because it is too large Load Diff

View File

@@ -7,10 +7,14 @@
//! with something that absorbs energy. since this example doesn't, it lets you see what //! with something that absorbs energy. since this example doesn't, it lets you see what
//! happens when you just use the default boundary conditions. //! happens when you just use the default boundary conditions.
use coremem::{mat, driver}; use coremem::{mat, Driver};
use coremem::geom::{Coord as _, Cube, Index, Vec3}; use coremem::geom::{Coord as _, Cube, Index};
use coremem::units::Seconds; use coremem::units::Seconds;
use coremem::stim::{Stimulus, TimeVarying as _, UniformStimulus}; use coremem::sim::spirv::{self, SpirvSim};
use coremem::stim::{Fields, ModulatedVectorField, Pulse, RegionGated};
use coremem::cross::vec::Vec3;
type Mat = mat::FullyGenericMaterial<f32>;
fn main() { fn main() {
coremem::init_logging(); coremem::init_logging();
@@ -21,14 +25,13 @@ fn main() {
// each cell represents 1um x 1um x 1um volume // each cell represents 1um x 1um x 1um volume
let feature_size = 1e-6; let feature_size = 1e-6;
// Create the simulation "driver" which uses the CPU as backend. // create the simulation "driver".
// by default all the computations are done with R32: a f32 which panics on NaN/Inf // the first parameter is the float type to use: f32 for unchecked math, coremem::real::R32
// you can parameterize it to use R64, or unchecked f32 -- see src/driver.rs for the definition // to guard against NaN/Inf (useful for debugging).
let mut driver: driver::CpuDriver = driver::Driver::new(size, feature_size); // to run this on the gpu instead of the gpu, replace `CpuBackend` with `WgpuBackend`.
let mut driver = Driver::new(SpirvSim::<f32, Mat, spirv::CpuBackend>::new(
// uncomment to use the Spirv/GPU driver. this one is restricted to unchecked f32. size, feature_size
// note: this won't have better perf unless you reduce the y4m/term renderer framerate below. ));
// let mut driver: driver::SpirvDriver = driver::Driver::new_spirv(size, feature_size);
// create a conductor on the left side. // create a conductor on the left side.
let conductor = Cube::new( let conductor = Cube::new(
@@ -43,12 +46,12 @@ fn main() {
Index::new(201, height*3/4, 1).to_meters(feature_size), Index::new(201, height*3/4, 1).to_meters(feature_size),
); );
// emit a constant E/H delta over this region for 100 femtoseconds // emit a constant E/H delta over this region for 100 femtoseconds
let stim = Stimulus::new( let stim = ModulatedVectorField::new(
center_region, RegionGated::new(center_region, Fields::new_eh(
UniformStimulus::new( Vec3::new(2e19, 0.0, 0.0),
Vec3::new(2e19, 0.0, 0.0), // E field (per second) Vec3::new(0.0, 0.0, 2e19/376.730),
Vec3::new(0.0, 0.0, 2e19/376.730) // H field (per second) )),
).gated(0.0, 100e-15), Pulse::new(0.0, 100e-15),
); );
driver.add_stimulus(stim); driver.add_stimulus(stim);

View File

@@ -12,19 +12,16 @@ crate-type = ["lib"]
[dependencies] [dependencies]
bincode = "1.3" # MIT bincode = "1.3" # MIT
common_macros = "0.1" # MIT or Apache 2.0 common_macros = "0.1" # MIT or Apache 2.0
crossbeam = "0.8" # MIT or Apache 2.0
crossterm = "0.24" # MIT crossterm = "0.24" # MIT
csv = "1.1" # MIT or Unlicense csv = "1.1" # MIT or Unlicense
dashmap = "5.3" # MIT dashmap = "5.3" # MIT
dyn-clone = "1.0" # MIT or Apache 2.0
enum_dispatch = "0.3" # MIT or Apache 2.0
env_logger = "0.9" # MIT or Apache 2.0 env_logger = "0.9" # MIT or Apache 2.0
float_eq = "1.0" # MIT or Apache 2.0
font8x8 = "0.3" # MIT font8x8 = "0.3" # MIT
futures = "0.3" # MIT or Apache 2.0 futures = "0.3" # MIT or Apache 2.0
image = "0.24" # MIT image = "0.24" # MIT
imageproc = "0.23" # MIT imageproc = "0.23" # MIT
indexmap = "1.9" # MIT or Apache 2.0 indexmap = "1.9" # MIT or Apache 2.0
lazy_static = "1.4" # MIT or Apache 2.0
log = "0.4" # MIT or Apache 2.0 log = "0.4" # MIT or Apache 2.0
more-asserts = "0.3" # CC0-1.0 more-asserts = "0.3" # CC0-1.0
ndarray = { version = "0.15", features = ["rayon", "serde"] } # MIT or Apache 2.0 ndarray = { version = "0.15", features = ["rayon", "serde"] } # MIT or Apache 2.0
@@ -33,8 +30,6 @@ num = "0.4" # MIT or Apache 2.0
rand = "0.8" # MIT or Apache 2.0 rand = "0.8" # MIT or Apache 2.0
rayon = "1.5" # MIT or Apache 2.0 rayon = "1.5" # MIT or Apache 2.0
serde = "1.0" # MIT or Apache 2.0 serde = "1.0" # MIT or Apache 2.0
threadpool = "1.8" # MIT or Apache 2.0
typetag = "0.2" # MIT or Apache 2.0
y4m = "0.7" # MIT y4m = "0.7" # MIT
wgpu = "0.12" wgpu = "0.12"
@@ -47,11 +42,12 @@ spirv-std = { git = "https://github.com/EmbarkStudios/rust-gpu" }
spirv-std-macros = { git = "https://github.com/EmbarkStudios/rust-gpu" } spirv-std-macros = { git = "https://github.com/EmbarkStudios/rust-gpu" }
spirv_backend = { path = "../spirv_backend" } spirv_backend = { path = "../spirv_backend" }
spirv_backend_runner = { path = "../spirv_backend_runner" } spirv_backend_runner = { path = "../spirv_backend_runner" }
coremem_types = { path = "../types", features = ["fmt", "serde"] } coremem_cross = { path = "../cross", features = ["iter", "fmt", "serde", "std"] }
[dev-dependencies] [dev-dependencies]
criterion = "0.3" criterion = "0.3"
float_eq = "1.0" # MIT or Apache 2.0
[[bench]] [[bench]]
name = "driver" name = "driver"

View File

@@ -1,27 +1,16 @@
use coremem::{Driver, SimState, SpirvDriver}; use coremem::Driver;
use coremem::geom::Index; use coremem::geom::Index;
use coremem::mat::{Ferroxcube3R1MH, IsoConductorOr, GenericMaterial, GenericMaterialNoPml, GenericMaterialOneField}; use coremem::mat::{Ferroxcube3R1MH, IsoConductorOr, FullyGenericMaterial};
use coremem::real::R32; use coremem::sim::spirv::{SpirvSim, WgpuBackend};
use coremem::sim::spirv::{self, SpirvSim};
use criterion::{BenchmarkId, criterion_group, criterion_main, Criterion}; use criterion::{BenchmarkId, criterion_group, criterion_main, Criterion};
type DefaultDriver = Driver::<SimState<R32, GenericMaterial<R32>>>;
pub fn bench_step(c: &mut Criterion) {
for size in &[10, 20, 40, 80, 160] {
let sim = SimState::<R32, GenericMaterial<R32>>::new(Index::new(*size, *size, *size), 1e-5);
c.bench_with_input(BenchmarkId::new("Driver::step", size), &sim, |b, sim| {
let mut driver = Driver::new_with_state(sim.clone());
b.iter(|| driver.step())
});
}
}
pub fn bench_step_spirv(c: &mut Criterion) { pub fn bench_step_spirv(c: &mut Criterion) {
type Mat = FullyGenericMaterial<f32>;
for size in &[10, 20, 40, 80, 160] { for size in &[10, 20, 40, 80, 160] {
let sim: SpirvSim = SpirvSim::new(Index::new(*size, *size, *size), 1e-5); let sim = SpirvSim::<f32, Mat, WgpuBackend>::new(Index::new(*size, *size, *size), 1e-5);
c.bench_with_input(BenchmarkId::new("Driver::step_spirv", size), &sim, |b, sim| { c.bench_with_input(BenchmarkId::new("Driver::step_spirv", size), &sim, |b, sim| {
let mut driver = Driver::new_with_state(sim.clone()); let mut driver = Driver::new(sim.clone());
b.iter(|| driver.step()) b.iter(|| driver.step())
}); });
} }
@@ -30,42 +19,13 @@ pub fn bench_step_spirv(c: &mut Criterion) {
pub fn bench_step_spirv_iso_3r1(c: &mut Criterion) { pub fn bench_step_spirv_iso_3r1(c: &mut Criterion) {
type Mat = IsoConductorOr<f32, Ferroxcube3R1MH>; type Mat = IsoConductorOr<f32, Ferroxcube3R1MH>;
for size in &[10, 20, 40, 80, 160] { for size in &[10, 20, 40, 80, 160] {
let sim: SpirvSim<Mat> = SpirvSim::new(Index::new(*size, *size, *size), 1e-5); let sim = SpirvSim::<f32, Mat, WgpuBackend>::new(Index::new(*size, *size, *size), 1e-5);
c.bench_with_input(BenchmarkId::new("Driver::spirv_ISO3R1", size), &sim, |b, sim| { c.bench_with_input(BenchmarkId::new("Driver::spirv_ISO3R1", size), &sim, |b, sim| {
let mut driver: SpirvDriver<Mat> = Driver::new_with_state(sim.clone()); let mut driver = Driver::new(sim.clone());
b.iter(|| driver.step()) b.iter(|| driver.step())
}); });
} }
} }
// pub fn bench_step_no_pml(c: &mut Criterion) { criterion_group!(benches, bench_step_spirv, bench_step_spirv_iso_3r1);
// for size in &[10, 20, 40, 80, 160] {
// c.bench_with_input(BenchmarkId::new("Driver::step_no_pml", size), size, |b, &size| {
// let mut driver = DefaultDriver::new(Index::new(size, size, size), 1e-5);
// b.iter(|| driver.step())
// });
// }
// }
//
// pub fn bench_step_one_vec(c: &mut Criterion) {
// for size in &[10, 20, 40, 80, 160] {
// c.bench_with_input(BenchmarkId::new("Driver::step_one_vec", size), size, |b, &size| {
// let mut driver = DefaultDriver::new(Index::new(size, size, size), 1e-5);
// b.iter(|| driver.step())
// });
// }
// }
//
// pub fn bench_step_with_pml(c: &mut Criterion) {
// let size = 40;
// for thickness in &[0, 1, 2, 4, 8, 16] {
// c.bench_with_input(BenchmarkId::new("Driver::step_with_pml", thickness), thickness, |b, &thickness| {
// let mut driver = DefaultDriver::new(Index::new(size, size, size), 1e-5);
// driver.add_pml_boundary(Index::new(thickness, thickness, thickness));
// b.iter(|| driver.step())
// });
// }
// }
criterion_group!(benches, /*bench_step,*/ bench_step_spirv, bench_step_spirv_iso_3r1);
criterion_main!(benches); criterion_main!(benches);

View File

@@ -103,3 +103,15 @@ Driver::spirv_ISO3R1/20 time: [548.70 us 555.85 us 563.28 us]
Driver::spirv_ISO3R1/40 time: [1.5333 ms 1.5405 ms 1.5489 ms] Driver::spirv_ISO3R1/40 time: [1.5333 ms 1.5405 ms 1.5489 ms]
Driver::spirv_ISO3R1/80 time: [13.299 ms 13.335 ms 13.376 ms] Driver::spirv_ISO3R1/80 time: [13.299 ms 13.335 ms 13.376 ms]
Driver::spirv_ISO3R1/160time: [164.57 ms 164.74 ms 164.93 ms] Driver::spirv_ISO3R1/160time: [164.57 ms 164.74 ms 164.93 ms]
5a0766451d96835061a674ab94f00341adb2b187:
Driver::step_spirv/10 time: [590.26 us 600.42 us 613.28 us]
Driver::step_spirv/20 time: [870.49 us 884.81 us 902.21 us]
Driver::step_spirv/40 time: [3.4094 ms 3.4285 ms 3.4498 ms]
Driver::step_spirv/80 time: [35.488 ms 35.673 ms 35.922 ms]
Driver::step_spirv/160 time: [270.98 ms 271.19 ms 271.43 ms]
Driver::spirv_ISO3R1/10 time: [585.57 us 596.11 us 608.79 us]
Driver::spirv_ISO3R1/20 time: [826.63 us 841.79 us 860.86 us]
Driver::spirv_ISO3R1/40 time: [2.8808 ms 2.9004 ms 2.9237 ms]
Driver::spirv_ISO3R1/80 time: [28.955 ms 29.027 ms 29.115 ms]
Driver::spirv_ISO3R1/160time: [216.03 ms 216.22 ms 216.45 ms]

View File

@@ -1,5 +1,7 @@
use coremem::{self, Driver, GenericSim, SimState}; use coremem::{self, Driver, AbstractSim};
use coremem::sim::spirv::SpirvSim; use coremem::sim::spirv::{SpirvSim, WgpuBackend};
use coremem::sim::units::Frame;
use coremem::cross::mat::FullyGenericMaterial;
use coremem::geom::Index; use coremem::geom::Index;
use std::time::{Instant, Duration}; use std::time::{Instant, Duration};
@@ -16,19 +18,23 @@ fn measure<F: FnMut()>(name: &str, n_times: u32, mut f: F) -> f32 {
avg avg
} }
fn measure_steps<S: GenericSim + Clone + Default + Send + Sync + 'static>(name: &str, steps_per_call: u32, mut d: Driver<S>) { fn measure_steps<S: AbstractSim + Clone + Default + Send + 'static>(name: &str, steps_per_call: u32, mut d: Driver<S::Real, S>) {
measure(name, 100/steps_per_call, || d.step_multiple(steps_per_call)); measure(name, 100/steps_per_call, || d.step_until(Frame(steps_per_call.into())));
} }
fn main() { fn main() {
coremem::init_logging(); coremem::init_logging();
measure_steps("spirv/80", 1, Driver::<SpirvSim>::new_spirv(Index::new(80, 80, 80), 1e-3)); measure_steps("spirv/80", 1, Driver::new(
measure_steps("sim/80", 1, Driver::<SimState>::new(Index::new(80, 80, 80), 1e-3)); SpirvSim::<f32, FullyGenericMaterial<f32>, WgpuBackend>::new(Index::new(80, 80, 80), 1e-3)
measure_steps("spirv/80 step(2)", 2, Driver::<SpirvSim>::new_spirv(Index::new(80, 80, 80), 1e-3)); ));
measure_steps("sim/80 step(2)", 2, Driver::<SimState>::new(Index::new(80, 80, 80), 1e-3)); measure_steps("spirv/80 step(2)", 2, Driver::new(
measure_steps("spirv/80 step(10)", 10, Driver::<SpirvSim>::new_spirv(Index::new(80, 80, 80), 1e-3)); SpirvSim::<f32, FullyGenericMaterial<f32>, WgpuBackend>::new(Index::new(80, 80, 80), 1e-3)
measure_steps("sim/80 step(10)", 10, Driver::<SimState>::new(Index::new(80, 80, 80), 1e-3)); ));
measure_steps("spirv/80 step(100)", 100, Driver::<SpirvSim>::new_spirv(Index::new(80, 80, 80), 1e-3)); measure_steps("spirv/80 step(10)", 10, Driver::new(
measure_steps("sim/80 step(100)", 100, Driver::<SimState>::new(Index::new(80, 80, 80), 1e-3)); SpirvSim::<f32, FullyGenericMaterial<f32>, WgpuBackend>::new(Index::new(80, 80, 80), 1e-3)
));
measure_steps("spirv/80 step(100)", 100, Driver::new(
SpirvSim::<f32, FullyGenericMaterial<f32>, WgpuBackend>::new(Index::new(80, 80, 80), 1e-3)
));
} }

View File

@@ -1,496 +0,0 @@
use coremem::*;
use coremem::geom::*;
use coremem::real::{Real as _, ToFloat as _};
use coremem::stim::AbstractStimulus;
use rand::rngs::StdRng;
use rand::{Rng as _, SeedableRng as _};
/// Half the sum of |E|^2 over every cell in `reg`.
/// NOTE(review): this is only proportional to the electric field energy --
/// no eps0 factor, and the H-field contribution is ignored. Fine for
/// before/after ratios, which is how it's used here.
fn energy<R: Region>(s: &dyn SampleableSim, reg: &R) -> f32 {
    // accumulate in f64 for precision, then narrow back to f32
    let e = f64::half() * s.map_sum_over_enumerated(reg, |_pos: Index, cell| {
        cell.e().mag_sq().to_f64()
    });
    e.cast()
}
/// Measure the energy in `reg`, advance the sim by `frames` steps, and
/// measure again. Returns (energy_before, energy_after).
fn energy_now_and_then<R: Region>(state: &mut StaticSim, reg: &R, frames: u32) -> (f32, f32) {
    let energy_0 = energy(state, reg);
    for _ in 0..frames {
        state.step();
    }
    let energy_1 = energy(state, reg);
    (energy_0, energy_1)
}
/// E-field stimulus whose direction and frequency vary per cell, gated by a
/// raised-cosine window over [0, t_end].
struct PmlStim<F> {
    /// Maps index -> (stim vector, stim frequency)
    f: F,
    /// total stimulus duration in seconds; sets the gate window
    t_end: f32,
    /// cell edge length in meters, for the Meters -> Index conversion
    feat_size: f32,
}

impl<F: Fn(Index) -> (Vec3<f32>, f32) + Sync> AbstractStimulus for PmlStim<F> {
    fn at(&self, t_sec: f32, pos: Meters) -> (Vec3<f32>, Vec3<f32>) {
        // progress through the stimulus window, expressed as [0, 2pi]
        let angle = t_sec/self.t_end*f32::two_pi();
        // raised-cosine (Hann) gate: 0 at both window edges, 1 mid-window,
        // so the stimulus ramps on and off smoothly
        let gate = 0.5*(1.0 - angle.cos());
        let (e, hz) = (self.f)(pos.to_index(self.feat_size));
        // `hz` counts whole cycles over the window (see the caller's note
        // about non-integer values creating harmonics when gated)
        let sig_angle = angle*hz;
        let sig = sig_angle.sin();
        // only E is driven; the H component is always zero
        (e*gate*sig, Vec3::zero())
    }
}
/// Apply the per-cell stimulus `f` over the whole volume for `frames` steps.
/// NOTE(review): despite the original comment, the decay measurement happens
/// in `chaotic_pml_test`, not here -- this function only drives and steps.
fn apply_stim_full_interior<F>(state: &mut StaticSim, frames: u32, f: F)
where F: Fn(Index) -> (Vec3<f32>, f32) + Sync // returns (E vector, omega)
{
    let stim = PmlStim {
        f,
        // the stimulus window spans exactly `frames` timesteps
        t_end: (frames as f32) * state.timestep(),
        feat_size: state.feature_size(),
    };
    for _t in 0..frames {
        // re-evaluate the time-varying stimulus each frame, then advance
        state.apply_stimulus(&stim);
        state.step();
    }
}
/// Like `apply_stim_full_interior`, but only cells inside `reg` receive a
/// nonzero stimulus; every other cell gets a zero vector at zero frequency.
fn apply_stim_over_region<R, F>(state: &mut StaticSim, frames: u32, reg: R, f: F)
where
    R: Region,
    F: Fn(Index) -> (Vec3<f32>, f32) + Sync,
{
    let feat = state.feature_size();
    apply_stim_full_interior(state, frames, |idx| {
        // region membership is tested in meters, so convert the cell index
        if reg.contains(idx.to_meters(feat)) {
            f(idx)
        } else {
            (Vec3::zero(), 0.0)
        }
    });
}
/// Stimulate each point in the region with a pseudorandom (but predictable) e wave
fn apply_chaotic_stim_over_region<R: Region>(state: &mut StaticSim, frames: u32, interior: R) {
    apply_stim_over_region(state, frames, interior, |idx| {
        // derive a per-cell RNG seed from the cell coordinates so the
        // "chaos" is deterministic run-to-run
        let seed = (idx.x() as u64) ^ ((idx.y() as u64) << 16) ^ ((idx.z() as u64) << 32);
        let mut rng = StdRng::seed_from_u64(seed);
        // random E direction in the [-1, 1]^3 cube (not normalized)
        let dir = Vec3::new(
            rng.gen_range(-1.0..1.0),
            rng.gen_range(-1.0..1.0),
            rng.gen_range(-1.0..1.0),
        );
        // XXX only works if it's a whole number. I suppose this makes sense though, as
        // other numbers would create higher harmonics when gated.
        let hz = rng.gen_range(0..=2);
        (dir, hz as _)
    })
}
/// Drive the interior (inset `padding` cells from the edges) with a chaotic
/// stimulus for `frames` steps, then let the fields evolve for another
/// `frames` steps and return energy_after/energy_before, measured over the
/// volume inset `boundary` cells. A good absorbing boundary drives this
/// ratio toward zero.
fn chaotic_pml_test(state: &mut StaticSim, boundary: u32, padding: u32, frames: u32) -> f32 {
    let feat = state.feature_size();
    {
        // stimulate only the deep interior, well away from the boundary layer
        let upper_left_idx = Index::unit()*padding;
        let lower_right_idx = state.size() - Index::unit() - upper_left_idx;
        let interior = Cube::new(upper_left_idx.to_meters(feat), lower_right_idx.to_meters(feat));
        apply_chaotic_stim_over_region(state, frames, interior);
    }
    // measure energy over everything except the boundary cells themselves
    let upper_left_idx = Index::unit()*boundary;
    let lower_right_idx = state.size() - Index::unit() - upper_left_idx;
    let sim_region = Cube::new(upper_left_idx.to_meters(feat), lower_right_idx.to_meters(feat));
    let (energy_0, energy_1) = energy_now_and_then(state, &sim_region, frames);
    // println!("Energy: {}/{}", energy_1, energy_0);
    energy_1/energy_0
}
#[allow(unused)]
// Build a sim whose outer `boundary` cells form an absorbing layer.
// `sc_coeff` scales the PML coordinate-stretch term, `cond_coeff` scales plain
// conductivity, and `pow` controls how sharply the absorber ramps on
// (boundary_ness^pow, per the slide referenced in `main`).
// NOTE(review): the material hookup is intentionally left `unimplemented!()`
// below -- the commented-out `Static { .. }` shows the intended wiring.
fn state_for_pml(size: Index, boundary: Index, feat_size: f32, sc_coeff: f32, cond_coeff: f32, pow: f32) -> StaticSim {
    let mut state = StaticSim::new(size, feat_size);
    let timestep = state.timestep();
    state.fill_boundary_using(boundary, |boundary_ness| {
        // boundary_ness is 0 at the interior edge of the layer, 1 at the outer wall
        let b = boundary_ness.elem_pow(pow);
        // dividing by timestep makes absorption independent of feature size (see notes in main)
        let coord_stretch = b * sc_coeff / timestep;
        let conductivity = Vec3::unit() * (b.mag() * cond_coeff / timestep);
        unimplemented!();
        Static::default()
        // Static {
        //     // TODO PML coord_stretch,
        //     conductivity,
        //     pml: Some((PmlState::new(), PmlParameters::new(coord_stretch))),
        //     ..Default::default()
        // }
    });
    state
}
/// Sweep PML/conductor boundary parameterizations and print, for each combination,
/// the fraction of energy remaining after the chaotic-stimulus decay test.
/// Conclusions from previous sweeps are recorded in the comments at the bottom.
fn main() {
    // Explanation here (slide 63): https://empossible.net/wp-content/uploads/2020/01/Lecture-The-Perfectly-Matched-Layer.pdf
    // Claims that eps0/delta_t * n^3 is a good way to activate the PML layer
    // NOTE: plain array literals instead of `vec![..]` -- no need to heap-allocate
    // a Vec just to iterate a fixed parameter list (clippy::useless_vec).
    for pow in [3.0] {
        for sc_coeff in [
            // 0.1*consts::EPS0,
            // 0.5*consts::EPS0,
            // 1e0*consts::EPS0,
            // 1e1*consts::EPS0,
            // 1e2*consts::EPS0,
            // 1e3*consts::EPS0,
            // 1e6*consts::EPS0,
            // 1e9*consts::EPS0,
            // 1e12*consts::EPS0,
            // 1e15*consts::EPS0,
            // 1e18*consts::EPS0,
            // 1e21*consts::EPS0,
            // 0.01,
            // 0.03,
            // 0.05,
            // 0.07,
            // 0.08,
            // 0.09,
            // 0.1,
            // 0.15,
            // 0.2,
            // 0.25,
            // 0.3,
            // 0.4,
            // 0.5,
            // 0.6,
            // 0.7,
            // 0.8,
            // 0.9,
            //0.95,
            // 1.0,
            // 1.5,
            // 2.0,
            // 3.0,
            // 5.0,
            // 10.0,
            // 100.0,
            // 1000.0,
            //0.0,
            0.5,
        ] {
            //for cond_coeff in [0.0, 1.0, 1e3, 1e6, 1e9] {
            for cond_coeff in [0.0, 0.5*f32::eps0()] {
                for frames in [400, 1200] {
                    for pml_width in [1, 2, 4, 8, 16] {
                        for feat_size in [1e-6] {
                            let size = Index::unit()*121;
                            let sim_inset = 40;
                            let boundary = Index::unit()*pml_width;
                            let mut state = state_for_pml(size, boundary, feat_size, sc_coeff, cond_coeff, pow);
                            let ratio = chaotic_pml_test(&mut state, pml_width, sim_inset, frames);
                            println!("{},{} (pow={}, f={}, width={}, frames={}): {}", sc_coeff, cond_coeff, pow, feat_size, pml_width, frames, ratio);
                        }
                    }
                }
            }
        }
    }
    // Conclusions:
    // * if coordinate stretching is divided by time_step, then the absorption is independent of
    //   feature size.
    // For 240 frames, PML width of 20, size=121, sim_inset = 40, cubic onset:
    // * coefficients between [0.4, 1.0] are all within 30% of each other
    // * coefficients between [0.5, 1.0] are all within 20% of each other
    // * coefficients > 1 show instability (i.e. instable if coord_stretch > 1 / timestep);
    // * absorption generally increases as the coefficient approaches 1.0 from the left.
    //   This begins to reverse at least by 0.98
    // For 2400 frames, instability starts somewhere between 0.7 and 0.8
    // 0.5 (f=0.000001, width=20, frames=2400): 0.0006807010986857421
    // 0.6 (f=0.000001, width=20, frames=2400): 0.0005800974138738364
    // 0.7 (f=0.000001, width=20, frames=2400): 0.0005092274390086559
    // 0.8 (f=0.000001, width=20, frames=2400): 7326609898580109000000
    // These numbers use hz = rng.gen_range(0..=5), with 20 frames of excitation and 20 steps
    // between source and boundary
    //
    // Reducing hz to 0..=2 achieves much better perf (pow=3.0):
    // 0.5 (f=0.000001, width=20, frames=2400): 0.0000007135657380376141
    // 0.6 (f=0.000001, width=20, frames=2400): 0.0000006561934691721446
    // 0.7 (f=0.000001, width=20, frames=2400): 0.0000006099334150762627
    // 0.8 (f=0.000001, width=20, frames=2400): 2180868728529433400000
    // Quadratic (pow=2.0):
    // 0.4 (pow=2, f=0.000001, width=20, frames=2400): 0.0000006839243836328471
    // 0.5 (pow=2, f=0.000001, width=20, frames=2400): 0.0000006189051555210391
    // 0.6 (pow=2, f=0.000001, width=20, frames=2400): 0.9331062414894028
    // 0.7 (pow=2, f=0.000001, width=20, frames=2400): 222818414359827930000000000000000000000000000000000000000000000000000000000000000000000
    // Possibly better efficacy, but also less stable
    // Linear (pow=1.0) is 100% unstable for at least coeff >= 0.5
    // Subcubic (pow=2.5):
    // 0.4 (pow=2.5, f=0.000001, width=20, frames=2400): 0.0000007333781495449756
    // 0.5 (pow=2.5, f=0.000001, width=20, frames=2400): 0.0000006632381361190291
    // 0.6 (pow=2.5, f=0.000001, width=20, frames=2400): 0.0000006089982278171636
    // 0.7 (pow=2.5, f=0.000001, width=20, frames=2400): 7853952559.189004
    // Superlinear (pow=1.5):
    // 0.4 (pow=1.5, f=0.000001, width=20, frames=2400): 0.0000006502259131444893
    // 0.5 (pow=1.5, f=0.000001, width=20, frames=2400): 0.0000005956833662247809
    // 0.6 (pow=1.5, f=0.000001, width=20, frames=2400): 175615904583581400000000000000000000000000000000000000000000000000000000000000000000
    // pow=4.0:
    // 0.4 (pow=4, f=0.000001, width=20, frames=2400): 0.0000009105207758718423
    // 0.5 (pow=4, f=0.000001, width=20, frames=2400): 0.0000008169331995553951
    // 0.6 (pow=4, f=0.000001, width=20, frames=2400): 0.0000007525813705681166
    // 0.7 (pow=4, f=0.000001, width=20, frames=2400): 0.0000007019565296008103
    // 0.8 (pow=4, f=0.000001, width=20, frames=2400): 0.0000006600546205898854
    // 0.9 (pow=4, f=0.000001, width=20, frames=2400): 0.0000006246047577819345
    // 1 (pow=4, f=0.000001, width=20, frames=2400): 5437370254206590000000000000000000000000000000000
    // pow=3.5:
    // 0.4 (pow=3.5, f=0.000001, width=20, frames=2400): 0.0000008485707012112478
    // 0.5 (pow=3.5, f=0.000001, width=20, frames=2400): 0.0000007653501316913873
    // 0.6 (pow=3.5, f=0.000001, width=20, frames=2400): 0.0000007047188963430957
    // 0.7 (pow=3.5, f=0.000001, width=20, frames=2400): 0.0000006561795324449849
    // 0.8 (pow=3.5, f=0.000001, width=20, frames=2400): 0.0000006159621438553542
    // 0.9 (pow=3.5, f=0.000001, width=20, frames=2400): 18913062838424785000000000000000000
    // So generally lower powers = more absorbtion (makes sense: higher average coordinate
    // stretching), but is more sensitive to error. All powers > 1.5 are stable at coeff=0.5.
    // We don't know that much about reflection from just this data.
    // Running over a smaller timeframe should give some suggestion about reflection
    // 0.4 (pow=2, f=0.000001, width=20, frames=120): 0.2870021971493659
    // 0.5 (pow=2, f=0.000001, width=20, frames=120): 0.2647114916517679 **
    // 0.6 (pow=2, f=0.000001, width=20, frames=120): 0.24807341265362437
    // 0.7 (pow=2, f=0.000001, width=20, frames=120): 0.2350081813369852
    // 0.8 (pow=2, f=0.000001, width=20, frames=120): 0.22438565227661014
    // 0.9 (pow=2, f=0.000001, width=20, frames=120): 0.21552516261496907
    // 1.0 (pow=2, f=0.000001, width=20, frames=120): 0.20798702383197107
    // 1.5 (pow=2, f=0.000001, width=20, frames=120): 0.39444643124947426
    // 0.4 (pow=3, f=0.000001, width=20, frames=120): 0.3596522778473902
    // 0.5 (pow=3, f=0.000001, width=20, frames=120): 0.33718409856439474
    // 0.6 (pow=3, f=0.000001, width=20, frames=120): 0.32025414395335816
    // 0.7 (pow=3, f=0.000001, width=20, frames=120): 0.3067653188201421 **
    // 0.8 (pow=3, f=0.000001, width=20, frames=120): 0.2956250635204507
    // 0.9 (pow=3, f=0.000001, width=20, frames=120): 0.2861895432992242
    // 1.0 (pow=3, f=0.000001, width=20, frames=120): 0.27804573152917056
    // 1.5 (pow=3, f=0.000001, width=20, frames=120): 0.24950413937515897
    // 0.4 (pow=4, f=0.000001, width=20, frames=120): 0.41043160705678194
    // 0.5 (pow=4, f=0.000001, width=20, frames=120): 0.38844802581052806
    // 0.6 (pow=4, f=0.000001, width=20, frames=120): 0.3721037027266112
    // 0.7 (pow=4, f=0.000001, width=20, frames=120): 0.35912262947037576
    // 0.8 (pow=4, f=0.000001, width=20, frames=120): 0.34837556188585944
    // 0.9 (pow=4, f=0.000001, width=20, frames=120): 0.33922665697709947 **
    // 1 (pow=4, f=0.000001, width=20, frames=120): 0.33128127577349487
    // 1.5 (pow=4, f=0.000001, width=20, frames=120): 0.30260541581006584
    // Even smaller timeframe:
    // 0.5 (pow=2, f=0.000001, width=20, frames=5): 1.0109467232603757
    // 0.5 (pow=2, f=0.000001, width=20, frames=10): 1.0117049984675424
    // 0.5 (pow=2, f=0.000001, width=20, frames=20): 1.015054295037722
    // 0.5 (pow=2, f=0.000001, width=20, frames=30): 1.016127315217072
    // 0.5 (pow=2, f=0.000001, width=20, frames=40): 1.0125450428623481
    // 0.5 (pow=2, f=0.000001, width=20, frames=60): 0.9520114341681021
    // 0.5 (pow=2, f=0.000001, width=20, frames=80): 0.7617289087518861
    // 0.5 (pow=2, f=0.000001, width=20, frames=120): 0.2647114916517679
    // 0.5 (pow=2, f=0.000001, width=20, frames=160): 0.018698432021826004
    // 0.5 (pow=2, f=0.000001, width=20, frames=200): 0.001053437888276037
    // 0.5 (pow=3, f=0.000001, width=20, frames=5): 1.0109467232603757
    // 0.5 (pow=3, f=0.000001, width=20, frames=10): 1.0117049984674478
    // 0.5 (pow=3, f=0.000001, width=20, frames=20): 1.0150542142468075
    // 0.5 (pow=3, f=0.000001, width=20, frames=30): 1.0161755433699602
    // 0.5 (pow=3, f=0.000001, width=20, frames=40): 1.0140118995944478
    // 0.5 (pow=3, f=0.000001, width=20, frames=60): 0.9828724159361404
    // 0.5 (pow=3, f=0.000001, width=20, frames=80): 0.8304056107875255
    // 0.5 (pow=3, f=0.000001, width=20, frames=120): 0.33718409856439474
    // 0.5 (pow=3, f=0.000001, width=20, frames=160): 0.033906877503334515
    // 0.5 (pow=3, f=0.000001, width=20, frames=200): 0.0016075121270576818
    // 0.5 (pow=4, f=0.000001, width=20, frames=5): 1.0109467232603757
    // 0.5 (pow=4, f=0.000001, width=20, frames=10): 1.0117049984674424
    // 0.5 (pow=4, f=0.000001, width=20, frames=20): 1.015054203569572
    // 0.5 (pow=4, f=0.000001, width=20, frames=30): 1.0161827617273314
    // 0.5 (pow=4, f=0.000001, width=20, frames=40): 1.014355067000562
    // 0.5 (pow=4, f=0.000001, width=20, frames=60): 0.9971421502814346
    // 0.5 (pow=4, f=0.000001, width=20, frames=80): 0.8718457443603458
    // 0.5 (pow=4, f=0.000001, width=20, frames=120): 0.38844802581052806
    // 0.5 (pow=4, f=0.000001, width=20, frames=160): 0.04879003869310861
    // 0.5 (pow=4, f=0.000001, width=20, frames=200): 0.0025039595751290534
    // That the numbers are all the same for t < 20 is not encouraging.
    // Could be because of the courant number 0.577? No energy has reached the border yet?
    // Why don't we query just the energy in the sim region -- not the boundary?
    // After filtering to measure energy only in the sim region (note that some energy outside the
    // sim region COULD be reflected back into the sim region later):
    // 0.5 (pow=2, f=0.000001, width=20, frames=5): 1.0109467232603757
    // 0.5 (pow=2, f=0.000001, width=20, frames=10): 1.0117049984565956
    // 0.5 (pow=2, f=0.000001, width=20, frames=20): 1.0150051760172862
    // 0.5 (pow=2, f=0.000001, width=20, frames=30): 1.0103422463150569
    // 0.5 (pow=2, f=0.000001, width=20, frames=40): 0.9534926741875023
    // 0.5 (pow=2, f=0.000001, width=20, frames=60): 0.7052236436305864
    // 0.5 (pow=2, f=0.000001, width=20, frames=80): 0.42366103692252166
    // 0.5 (pow=2, f=0.000001, width=20, frames=120): 0.047117482623349645
    // 0.5 (pow=2, f=0.000001, width=20, frames=160): 0.0012788501490854916
    // 0.5 (pow=2, f=0.000001, width=20, frames=200): 0.00025052555946608536
    // 0.5 (pow=2, f=0.000001, width=20, frames=300): 0.000026163786700826282
    // 0.5 (pow=2, f=0.000001, width=20, frames=400): 0.000009482696335385068
    // 0.5 (pow=2, f=0.000001, width=20, frames=600): 0.0000034339203626681873
    // 0.5 (pow=2, f=0.000001, width=20, frames=800): 0.0000016501593549063537
    // 0.5 (pow=2, f=0.000001, width=20, frames=1200): 0.0000007097133927819365
    // 0.5 (pow=2, f=0.000001, width=20, frames=1600): 0.0000004990355277480898
    // 0.5 (pow=2, f=0.000001, width=20, frames=2400): 0.00000026541631012862064
    // 0.5 (pow=3, f=0.000001, width=20, frames=5): 1.0109467232603757
    // 0.5 (pow=3, f=0.000001, width=20, frames=10): 1.0117049984564943
    // 0.5 (pow=3, f=0.000001, width=20, frames=20): 1.015005103609574
    // 0.5 (pow=3, f=0.000001, width=20, frames=30): 1.010347173332254
    // 0.5 (pow=3, f=0.000001, width=20, frames=40): 0.9534945122673749
    // 0.5 (pow=3, f=0.000001, width=20, frames=60): 0.7052084303225945
    // 0.5 (pow=3, f=0.000001, width=20, frames=80): 0.42365590263359737
    // 0.5 (pow=3, f=0.000001, width=20, frames=120): 0.04711930602404218
    // 0.5 (pow=3, f=0.000001, width=20, frames=160): 0.0013058690825149086
    // 0.5 (pow=3, f=0.000001, width=20, frames=200): 0.00027124952640186995
    // 0.5 (pow=3, f=0.000001, width=20, frames=300): 0.00003363606115185493
    // 0.5 (pow=3, f=0.000001, width=20, frames=400): 0.000012979216745112146
    // 0.5 (pow=3, f=0.000001, width=20, frames=600): 0.000004790890253991059
    // 0.5 (pow=3, f=0.000001, width=20, frames=800): 0.0000018967555372300478
    // 0.5 (pow=3, f=0.000001, width=20, frames=1200): 0.000000785458387440694
    // 0.5 (pow=3, f=0.000001, width=20, frames=1600): 0.0000005657588895614973
    // 0.5 (pow=3, f=0.000001, width=20, frames=2400): 0.0000002931443936827707
    // 0.5 (pow=4, f=0.000001, width=20, frames=5): 1.0109467232603757
    // 0.5 (pow=4, f=0.000001, width=20, frames=10): 1.011704998456489
    // 0.5 (pow=4, f=0.000001, width=20, frames=20): 1.0150050988039427
    // 0.5 (pow=4, f=0.000001, width=20, frames=30): 1.0103477261980267
    // 0.5 (pow=4, f=0.000001, width=20, frames=40): 0.9534933477754826
    // 0.5 (pow=4, f=0.000001, width=20, frames=60): 0.7052093991486525
    // 0.5 (pow=4, f=0.000001, width=20, frames=80): 0.4236543609610083
    // 0.5 (pow=4, f=0.000001, width=20, frames=120): 0.047143352151935734
    // 0.5 (pow=4, f=0.000001, width=20, frames=160): 0.0014253338264054373
    // 0.5 (pow=4, f=0.000001, width=20, frames=200): 0.0003719236701949888
    // 0.5 (pow=4, f=0.000001, width=20, frames=300): 0.0000655709065994179
    // 0.5 (pow=4, f=0.000001, width=20, frames=400): 0.000019428436023699366
    // 0.5 (pow=4, f=0.000001, width=20, frames=600): 0.000006787101433521501
    // 0.5 (pow=4, f=0.000001, width=20, frames=800): 0.000002352253311002263
    // 0.5 (pow=4, f=0.000001, width=20, frames=1200): 0.0000008945009475225972
    // 0.5 (pow=4, f=0.000001, width=20, frames=1600): 0.0000006298015155592009
    // 0.5 (pow=4, f=0.000001, width=20, frames=2400): 0.0000003110877898243663
    // pow=2 and pow=3 look nearly interchangeable in perf, with pow=4 slightly worse.
    // Given higher stability for pow=3, it pushes me in that direction
    // What about ordinary conductivity?
    // Uniaxial conductors do nothing
    // non-axial conductors (measured over full volume -- not just sim volume)
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=20): 1.0150542017357447
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=40): 1.0144968836020802
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=80): 1.001249693707569
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=120): 0.9703627391410313
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=240): 0.9839347439326915
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=400): 0.9710971333858839
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=20): 1.015054152058065
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=40): 1.0139965206685178
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=80): 0.8611234324272511
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=120): 0.38724876605299674
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=240): 0.010452374686360136
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=400): 0.0006325928742141608
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=20): 1.0150333781336882
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=40): 0.9834360538048873
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=80): 0.5072413415421974
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=120): 0.10345998021237998
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=240): 0.007519161562708659
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=400): 0.0006070229440826888
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=20): 1.0150056763216164
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=40): 1.0123907067781637
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=80): 1.0095377475794036
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=120): 1.0027226600640793
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=240): 1.002880617201309
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=400): 0.9817776859704778
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=20): 1.0150056260987463
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=40): 1.0127202910529511
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=80): 1.0141889645709163
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=120): 1.012712629869813
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=240): 1.0225762750160987
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=400): 1.0158169138074233
    // 0,1 (pow=2, f=0.000001, width=20, frames=20): 1.0150056260488272
    // 0,1 (pow=2, f=0.000001, width=20, frames=40): 1.0127206223077903
    // 0,1 (pow=2, f=0.000001, width=20, frames=80): 1.01419363999915
    // 0,1 (pow=2, f=0.000001, width=20, frames=120): 1.0127226953239867
    // 0,1 (pow=2, f=0.000001, width=20, frames=240): 1.0225963209694455
    // 0,1 (pow=2, f=0.000001, width=20, frames=400): 1.0158520285873147
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=20): 1.0150542017810857
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=40): 1.014497284230248
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=80): 1.0013349322815948
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=120): 0.9707653838346726
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=240): 0.98542720202683
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=400): 0.9736668760490382
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=20): 1.0150541972999592
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=40): 1.0143834319294165
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=80): 0.9087702844329181
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=120): 0.4641021861485614
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=240): 0.025542565246607578
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=400): 0.0024678463432975324
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=20): 1.0150507306076173
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=40): 0.9973368290451762
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=80): 0.5359561255382069
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=120): 0.10107229161449789
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=240): 0.0018726822600725027
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=400): 0.0001395247365920271
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=20): 1.015006719517991
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=40): 1.0067208901320701
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=80): 0.9295573543881088
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=120): 0.8372562924822498
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=240): 0.7109091146305759
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=400): 0.5405933200528408
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=20): 1.015005627048278
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=40): 1.0127139915480314
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=80): 1.0141000517606538
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=120): 1.0125212238134818
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=240): 1.0221951620616994
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=400): 1.0151495300965783
    // 0,1 (pow=3, f=0.000001, width=20, frames=20): 1.0150056260497768
    // 0,1 (pow=3, f=0.000001, width=20, frames=40): 1.012720616007617
    // 0,1 (pow=3, f=0.000001, width=20, frames=80): 1.0141935510766382
    // 0,1 (pow=3, f=0.000001, width=20, frames=120): 1.0127225038875007
    // 0,1 (pow=3, f=0.000001, width=20, frames=240): 1.0225959397081388
    // 0,1 (pow=3, f=0.000001, width=20, frames=400): 1.0158513607157937
    // It appears here that the cubic roll-off is best, and very low conductivities are required.
    //
    // Isotropic conductor, energy measured only within the sim area:
    // The optimized case (0.5*EPS0/timestep * x^3) is almost IDENTICAL to the
    // optimized stretched-coordinate version.
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=120): 0.07433102240945513
    // 0,0.000000000000001 (pow=2, f=0.000001, width=20, frames=400): 0.20431918328907037
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=120): 0.04851013262210713
    // 0,0.000000000001 (pow=2, f=0.000001, width=20, frames=400): 0.00018366232272528987
    // 0,0.0000000000044270939064065 (pow=2, f=0.000001, width=20, frames=120): 0.047101055930619994
    // 0,0.0000000000044270939064065 (pow=2, f=0.000001, width=20, frames=400): 0.000007399479477711689
    // 0,0.000000000008854187812813 (pow=2, f=0.000001, width=20, frames=120): 0.047132676278627536
    // 0,0.000000000008854187812813 (pow=2, f=0.000001, width=20, frames=400): 0.000008105989741898135
    // 0,0.00000000001 (pow=2, f=0.000001, width=20, frames=120): 0.047145523858393434
    // 0,0.00000000001 (pow=2, f=0.000001, width=20, frames=400): 0.000008327266269591764
    // 0,0.0000000001 (pow=2, f=0.000001, width=20, frames=120): 0.05005311352870215
    // 0,0.0000000001 (pow=2, f=0.000001, width=20, frames=400): 0.00003665711687010239
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=120): 0.0808038989604745
    // 0,0.000000001 (pow=2, f=0.000001, width=20, frames=400): 0.0005636469832522345
    // 0,0.00000001 (pow=2, f=0.000001, width=20, frames=120): 0.42090062694288544
    // 0,0.00000001 (pow=2, f=0.000001, width=20, frames=400): 0.07558396270665715
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=120): 0.9456301791158588
    // 0,0.000001 (pow=2, f=0.000001, width=20, frames=400): 0.9456217562930543
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=120): 0.9549600788853474
    // 0,0.001 (pow=2, f=0.000001, width=20, frames=400): 0.9782784298162882
    // 0,1 (pow=2, f=0.000001, width=20, frames=120): 0.9549694776242036
    // 0,1 (pow=2, f=0.000001, width=20, frames=400): 0.9783121245194134
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=120): 0.07435108171392021
    // 0,0.000000000000001 (pow=3, f=0.000001, width=20, frames=400): 0.20479144240774633
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=120): 0.050005777221700895
    // 0,0.000000000001 (pow=3, f=0.000001, width=20, frames=400): 0.0006548443359188922
    // 0,0.0000000000044270939064065 (pow=3, f=0.000001, width=20, frames=120): 0.0471097820852083
    // 0,0.0000000000044270939064065 (pow=3, f=0.000001, width=20, frames=400): 0.00000726683693079002
    // 0,0.000000000008854187812813 (pow=3, f=0.000001, width=20, frames=120): 0.04711795151894901
    // 0,0.000000000008854187812813 (pow=3, f=0.000001, width=20, frames=400): 0.000007527865420649209
    // 0,0.00000000001 (pow=3, f=0.000001, width=20, frames=120): 0.0471214615521427
    // 0,0.00000000001 (pow=3, f=0.000001, width=20, frames=400): 0.000007561968840324101
    // 0,0.0000000001 (pow=3, f=0.000001, width=20, frames=120): 0.04775494018209187
    // 0,0.0000000001 (pow=3, f=0.000001, width=20, frames=400): 0.00001655968038405813
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=120): 0.05548225210925576
    // 0,0.000000001 (pow=3, f=0.000001, width=20, frames=400): 0.00011973516700797696
    // 0,0.00000001 (pow=3, f=0.000001, width=20, frames=120): 0.09859866928724702
    // 0,0.00000001 (pow=3, f=0.000001, width=20, frames=400): 0.0010788775694009376
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=120): 0.7905518369602526
    // 0,0.000001 (pow=3, f=0.000001, width=20, frames=400): 0.5217962235754597
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=120): 0.9547813505519078
    // 0,0.001 (pow=3, f=0.000001, width=20, frames=400): 0.9776380394998598
    // 0,1 (pow=3, f=0.000001, width=20, frames=120): 0.9549692988681137
    // 0,1 (pow=3, f=0.000001, width=20, frames=400): 0.978311483657099
    // With both PML and conductor boundary:
    // 0.5,0.0000000000044270939064065 (pow=2, f=0.000001, width=20, frames=120): 0.04709454626708049
    // 0.5,0.0000000000044270939064065 (pow=2, f=0.000001, width=20, frames=400): 0.000007626399231684735
    // 0.5,0.0000000000044270939064065 (pow=2, f=0.000001, width=20, frames=1200): 0.0000008116632120088743
    // 0.5,0.0000000000044270939064065 (pow=2, f=0.000001, width=20, frames=2400): 0.00000032497678753300923
    // 0.5,0.0000000000044270939064065 (pow=3, f=0.000001, width=20, frames=120): 0.04709779630205149
    // 0.5,0.0000000000044270939064065 (pow=3, f=0.000001, width=20, frames=400): 0.000007132448872463483
    // 0.5,0.0000000000044270939064065 (pow=3, f=0.000001, width=20, frames=1200): 0.000000710451109532798
    // 0.5,0.0000000000044270939064065 (pow=3, f=0.000001, width=20, frames=2400): 0.0000002515070714149805
    // This is basically no change from JUST PML or JUST conductors
    // Maybe I should be trying to vary the width: maybe PML works more effectively for narrower
    // boundaries than do conductors?
}

View File

@@ -0,0 +1,181 @@
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
/// this is just a big dumb bag of perf-related metrics,
/// gathered with the intention of identifying areas for optimization
pub struct Diagnostics {
    /// number of simulation frames stepped so far (drives the fps metric in `format`)
    frames_completed: u64,
    /// all known time spent in driver code, as measured from the toplevel
    time_in_driver: Duration,
    /// time spent stepping the simulation itself (accumulated by `instrument_step`)
    time_sim_step: Duration,
    /// time the driver spent preparing stimuli before handing them off
    time_prepping_stim: Duration,
    /// time spent actually evaluating stimuli (accumulated by `instrument_stimuli`)
    time_on_stimuli: Duration,
    /// time during which driver was preparing to render (e.g. cloning state)
    time_prepping_render: Duration,
    /// time spent doing CPU-side render work (accumulated by `instrument_render_cpu_side`)
    time_rendering: Duration,
    /// time during which driver was waiting for an async stimulus job
    time_blocked_on_stim: Duration,
    /// time during which driver was waiting for an async render job
    time_blocked_on_render: Duration,
    /// time during which CPU was waiting for GPU data
    time_reading_device: Duration,
    /// time during which CPU was transferring data to GPU
    time_writing_device: Duration,
    /// instant this Diagnostics was created; overall elapsed time is measured from here
    start_time: Instant,
}
/// Cloneable, thread-safe handle to a shared `Diagnostics` instance.
#[derive(Clone, Default)]
pub struct SyncDiagnostics(Arc<Mutex<Diagnostics>>);
impl Default for Diagnostics {
fn default() -> Self {
Self::new()
}
}
impl Diagnostics {
pub fn new() -> Self {
Self {
frames_completed: 0,
time_in_driver: Default::default(),
time_sim_step: Default::default(),
time_prepping_stim: Default::default(),
time_on_stimuli: Default::default(),
time_prepping_render: Default::default(),
time_rendering: Default::default(),
time_blocked_on_stim: Default::default(),
time_blocked_on_render: Default::default(),
time_reading_device: Default::default(),
time_writing_device: Default::default(),
start_time: Instant::now(),
}
}
pub fn format(&self) -> String {
let overall_time = self.start_time.elapsed().as_secs_f64();
let driver_time = self.time_in_driver.as_secs_f64();
let fps = (self.frames_completed as f64) / overall_time;
let fps_line = format!("fps: {:6.2}", fps);
let step_time = self.time_sim_step.as_secs_f64();
let render_prep_time = self.time_prepping_render.as_secs_f64();
let stim_block_time = self.time_blocked_on_stim.as_secs_f64();
let render_block_time = self.time_blocked_on_render.as_secs_f64();
let stim_prep_time = self.time_prepping_stim.as_secs_f64();
let other_driver_time = driver_time - (
step_time + stim_block_time + stim_prep_time + render_block_time + render_prep_time
);
let other_time = overall_time - driver_time;
let toplevel_line = format!("toplevel\tstep: {:.1}s, stim_blocked: {:.1}s, render_blocked: {:.1}s, stim_prep: {:.1}s, render_prep: {:.1}s, driver_other: {:.1}s, unknown: {:.1}s",
step_time,
stim_block_time,
render_block_time,
stim_prep_time,
render_prep_time,
other_driver_time,
other_time,
);
let device_write_time = self.time_writing_device.as_secs_f64();
let device_read_time = self.time_reading_device.as_secs_f64();
let device_line = format!("> gpu\twrite: {:.1}s, read: {:.1}s",
device_write_time,
device_read_time,
);
let stim_bg_time = self.time_on_stimuli.as_secs_f64();
let render_bg_time = self.time_rendering.as_secs_f64();
let bg_line = format!("> async\tstim: {:.1}s, render: {:.1}s",
stim_bg_time,
render_bg_time,
);
format!("{}\n {}\n {}\n {}", fps_line, toplevel_line, device_line, bg_line)
}
}
impl SyncDiagnostics {
pub fn new() -> Self {
Self(Arc::new(Mutex::new(Diagnostics::new())))
}
pub fn format(&self) -> String {
self.0.lock().unwrap().format()
}
/// measure the duration of some arbitrary chunk of code.
/// used internally.
pub fn measure<R, F: FnOnce() -> R>(f: F) -> (Duration, R) {
let start = Instant::now();
let r = f();
(start.elapsed(), r)
}
pub fn instrument_driver<R, F: FnOnce() -> R>(&self, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_in_driver += elapsed;
ret
}
/// record the duration of the sim step operation.
pub fn instrument_step<R, F: FnOnce() -> R>(&self, frames: u64, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
let mut me = self.0.lock().unwrap();
me.time_sim_step += elapsed;
me.frames_completed += frames as u64;
ret
}
/// record the duration spent preparing for render (i.e. cloning stuff and moving it into a
/// render pool).
pub fn instrument_render_prep<R, F: FnOnce() -> R>(&self, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_prepping_render += elapsed;
ret
}
/// record the duration actually spent doing CPU render work
pub fn instrument_render_cpu_side<R, F: FnOnce() -> R>(&self, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_rendering += elapsed;
ret
}
pub fn instrument_stimuli_prep<R, F: FnOnce() -> R>(&self, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_prepping_stim += elapsed;
ret
}
/// record the duration spent blocking the simulation because the stimulus queue is full.
pub fn instrument_stimuli_blocked<R, F: FnOnce() -> R>(&self, f: F) -> R{
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_blocked_on_stim += elapsed;
ret
}
/// record the duration spent blocking the simulation because the render queue is full.
pub fn instrument_render_blocked<R, F: FnOnce() -> R>(&self, f: F) -> R{
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_blocked_on_render += elapsed;
ret
}
pub fn instrument_stimuli<R, F: FnOnce() -> R>(&self, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_on_stimuli += elapsed;
ret
}
pub fn instrument_read_device<R, F: FnOnce() -> R>(&self, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_reading_device += elapsed;
ret
}
pub fn instrument_write_device<R, F: FnOnce() -> R>(&self, f: F) -> R {
let (elapsed, ret) = Self::measure(f);
self.0.lock().unwrap().time_writing_device += elapsed;
ret
}
}

View File

@@ -1,107 +1,149 @@
use crate::geom::{Coord, Index, Meters, Region, Vec3}; use crate::diagnostics::SyncDiagnostics;
use crate::mat::{self, Pml}; use crate::geom::{Coord, Index, Region};
use crate::mat;
use crate::meas::{self, AbstractMeasurement}; use crate::meas::{self, AbstractMeasurement};
use crate::real::{self, Real}; use crate::real::Real;
use crate::render::{self, MultiRenderer, Renderer}; use crate::render::{self, MultiRenderer, Renderer};
use crate::sim::{GenericSim, MaterialSim, SampleableSim, SimState}; use crate::sim::AbstractSim;
use crate::sim::units::{Frame, Time}; use crate::sim::units::{Frame, Time};
use crate::sim::spirv::{self, SpirvSim}; use crate::stim::{
use crate::stim::AbstractStimulus; DynStimuli,
Fields,
FieldMags,
ModulatedVectorField,
RenderedStimulus,
Stimulus,
StimuliVec,
TimeVarying,
VectorField,
};
use crate::worker::JobPool;
use coremem_cross::compound::list;
use coremem_cross::dim::DimSlice;
use coremem_cross::step::SimMeta;
use log::{info, trace}; use log::{info, trace};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::cell::Cell;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::sync::mpsc::{sync_channel, SyncSender, Receiver}; use std::time::Instant;
use std::time::{Duration, Instant};
use threadpool::ThreadPool;
pub struct Driver<S=SimState> { pub struct Driver<R, S, Stim=DriverStimulusDynVec<R>> {
pub state: S, state: S,
renderer: Arc<MultiRenderer<S>>, renderer: Arc<MultiRenderer<S>>,
// TODO: use Rayon's thread pool? render_pool: JobPool<S, ()>,
render_pool: ThreadPool, measurements: Vec<Arc<dyn AbstractMeasurement<S>>>,
render_channel: (SyncSender<()>, Receiver<()>), stimuli: StimAccess<R, Stim>,
time_spent_stepping: Duration,
time_spent_on_stimuli: Duration,
time_spent_prepping_render: Duration,
time_spent_blocked_on_render: Duration,
time_spent_rendering: Arc<Mutex<Duration>>,
measurements: Vec<Box<dyn AbstractMeasurement>>,
stimuli: StimuliAdapter,
start_time: Instant,
last_diag_time: Instant,
/// simulation end time /// simulation end time
sim_end_time: Option<Frame>, sim_end_time: Option<Frame>,
diag: SyncDiagnostics,
last_diag_time: Instant,
} }
pub type CpuDriver<R=real::R32, M=mat::GenericMaterial<R>> = Driver<SimState<R, M>>; impl<S: AbstractSim, Stim> Driver<S::Real, S, Stim> {
pub type SpirvDriver<M=spirv::FullyGenericMaterial> = Driver<SpirvSim<M>>; pub fn new_with_stim(mut state: S, stimuli: Stim) -> Self {
let diag = SyncDiagnostics::new();
impl<R: Real, M: Default> Driver<SimState<R, M>> { state.use_diagnostics(diag.clone());
pub fn new<C: Coord>(size: C, feature_size: f32) -> Self {
Self::new_with_state(SimState::new(size.to_index(feature_size), feature_size))
}
}
impl<M: spirv::IntoFfi> SpirvDriver<M>
where M::Ffi: Default + 'static
{
pub fn new_spirv<C: Coord>(size: C, feature_size: f32) -> Self {
Self::new_with_state(SpirvSim::new(size.to_index(feature_size), feature_size))
}
}
impl<S> Driver<S> {
pub fn new_with_state(state: S) -> Self {
Self { Self {
state, state,
renderer: Arc::new(MultiRenderer::new()), renderer: Arc::new(MultiRenderer::new()),
render_pool: ThreadPool::new(3), render_pool: JobPool::new(1),
render_channel: sync_channel(0),
time_spent_stepping: Default::default(),
time_spent_on_stimuli: Default::default(),
time_spent_prepping_render: Default::default(),
time_spent_blocked_on_render: Default::default(),
time_spent_rendering: Default::default(),
measurements: vec![ measurements: vec![
Box::new(meas::Time), Arc::new(meas::Time),
Box::new(meas::Meta), Arc::new(meas::Meta),
Box::new(meas::Energy::world()), Arc::new(meas::Energy::world()),
Box::new(meas::Power::world()), Arc::new(meas::Power::world()),
], ],
stimuli: StimuliAdapter::new(), stimuli: StimAccess::new(diag.clone(), stimuli),
start_time: Instant::now(),
last_diag_time: Instant::now(),
sim_end_time: None, sim_end_time: None,
diag,
last_diag_time: Instant::now(),
} }
} }
}
impl<S> Driver<S> { pub fn add_measurement<Meas: AbstractMeasurement<S> + 'static>(&mut self, m: Meas) {
pub fn add_stimulus<Stim: AbstractStimulus + 'static>(&mut self, s: Stim) { self.measurements.push(Arc::new(m));
self.stimuli.push(Box::new(s))
} }
pub fn add_measurement<Meas: AbstractMeasurement + 'static>(&mut self, m: Meas) { pub fn add_stimulus<SNew>(&mut self, s: SNew)
self.measurements.push(Box::new(m)); where Stim: Pushable<SNew>
} {
self.stimuli.push(self.state.meta(), s)
pub fn set_steps_per_stim(&mut self, steps_per_stim: u64) {
self.stimuli.frame_interval = steps_per_stim;
} }
} }
impl<S: MaterialSim> Driver<S> { impl<S: AbstractSim> Driver<S::Real, S, DriverStimulusDynVec<S::Real>> {
pub fn new(state: S) -> Self {
Self::new_with_stim(state, DriverStimulusDynVec::default())
}
}
impl<S: AbstractSim> Driver<S::Real, S, list::Empty> {
pub fn new_list_stim(state: S) -> Self {
Self::new_with_stim(state, list::Empty::default())
}
}
impl<S: AbstractSim, Stim> Driver<S::Real, S, Stim> {
/// add a stimulus onto a list of non-monomorphized stimuli.
/// this necessarily must return a new Self.
/// (well, with enough tuning we could actually Box just the first reference...
pub fn with_add_stimulus<E>(self, s: E) -> Driver<S::Real, S, list::Appended<Stim, E>>
where Stim: list::Appendable<E>
{
Driver {
state: self.state,
renderer: self.renderer,
render_pool: self.render_pool,
measurements: self.measurements,
stimuli: StimAccess::new(self.diag.clone(), self.stimuli.into_inner().append(s)),
sim_end_time: self.sim_end_time,
diag: self.diag,
last_diag_time: self.last_diag_time,
}
}
pub fn with_stimulus<NewStim>(self, stimuli: NewStim) -> Driver<S::Real, S, NewStim> {
Driver {
state: self.state,
renderer: self.renderer,
render_pool: self.render_pool,
measurements: self.measurements,
stimuli: StimAccess::new(self.diag.clone(), stimuli),
sim_end_time: self.sim_end_time,
diag: self.diag,
last_diag_time: self.last_diag_time,
}
}
pub fn with_concrete_stimulus<T>(self) -> Driver<S::Real, S, DriverStimulusVec<T>> {
self.with_stimulus(DriverStimulusVec::<T>::default())
}
pub fn with_modulated_stimulus<T>(self) -> Driver<S::Real, S, DriverStimulusModulated<S::Real, T>> {
self.with_stimulus(DriverStimulusModulated::<S::Real, T>::default())
}
}
impl<R, S, Stim> Driver<R, S, Stim> {
/// when we step the simulation N times, we do so with a constant stimulus over those N frames.
/// lower-resolution quantization of stimuli lets us batch more step calls (critical to perf)
/// but at the cost of precision.
pub fn set_steps_per_stimulus(&mut self, steps: u64) {
self.stimuli.steps_per_stimulus = steps;
}
}
impl<S: AbstractSim, Stim> Driver<S::Real, S, Stim> {
pub fn fill_region<Reg: Region, M: Into<S::Material> + Clone>(&mut self, region: &Reg, mat: M) { pub fn fill_region<Reg: Region, M: Into<S::Material> + Clone>(&mut self, region: &Reg, mat: M) {
self.state.fill_region(region, mat); self.state.fill_region(region, mat);
} }
pub fn test_region_filled<Reg: Region, M: Into<S::Material> + Clone>(&mut self, region: &Reg, mat: M) -> bool { pub fn test_region_filled<Reg: Region, M>(&mut self, region: &Reg, mat: M) -> bool
where
M: Into<S::Material> + Clone,
S::Material: PartialEq
{
self.state.test_region_filled(region, mat) self.state.test_region_filled(region, mat)
} }
}
impl<S: SampleableSim> Driver<S> {
pub fn size(&self) -> Index { pub fn size(&self) -> Index {
self.state.size() self.state.size()
} }
@@ -111,13 +153,23 @@ impl<S: SampleableSim> Driver<S> {
pub fn time(&self) -> f32 { pub fn time(&self) -> f32 {
self.state.time() self.state.time()
} }
pub fn add_classical_boundary<C: Coord>(&mut self, thickness: C)
where S::Material: From<mat::IsomorphicConductor<S::Real>>
{
let timestep = self.state.timestep();
self.state.fill_boundary_using(thickness, |boundary_ness| {
let b = boundary_ness.elem_pow(3.0);
let cond = b * (0.5 / timestep);
let iso_cond = cond.x() + cond.y() + cond.z();
let iso_conductor = mat::IsomorphicConductor::new(iso_cond.cast());
iso_conductor
});
}
} }
impl<S: SampleableSim + Send + Sync + 'static> Driver<S> { impl<S: AbstractSim + 'static, Stim> Driver<S::Real, S, Stim> {
pub fn dyn_state(&mut self) -> &mut dyn SampleableSim {
&mut self.state
}
fn add_renderer<Rend: Renderer<S> + 'static>( fn add_renderer<Rend: Renderer<S> + 'static>(
&mut self, renderer: Rend, name: &str, step_frequency: u64, frame_limit: Option<u64> &mut self, renderer: Rend, name: &str, step_frequency: u64, frame_limit: Option<u64>
) { ) {
@@ -140,114 +192,118 @@ impl<S: SampleableSim + Send + Sync + 'static> Driver<S> {
} }
} }
impl<S: SampleableSim + Send + Sync + Serialize + 'static> Driver<S> { impl<S: AbstractSim + Serialize + 'static, Stim> Driver<S::Real, S, Stim> {
pub fn add_serializer_renderer(&mut self, out_base: &str, step_frequency: u64, frame_limit: Option<u64>) { pub fn add_serializer_renderer(&mut self, out_base: &str, step_frequency: u64, frame_limit: Option<u64>) {
let fmt_str = format!("{out_base}{{step_no}}.bc", out_base=out_base); let fmt_str = format!("{out_base}{{step_no}}.bc", out_base=out_base);
self.add_renderer(render::SerializerRenderer::new_static(&*fmt_str), &*fmt_str, step_frequency, frame_limit); self.add_renderer(render::SerializerRenderer::new_generic(&*fmt_str), &*fmt_str, step_frequency, frame_limit);
} }
} }
impl<S: SampleableSim + Send + Sync + Serialize + for<'a> Deserialize<'a> + 'static> Driver<S> { impl<S, Stim> Driver<S::Real, S, Stim>
where
S: AbstractSim + Send + Sync + Serialize + for<'a> Deserialize<'a> + 'static
{
/// instruct the driver to periodically save the simulation state to the provided path. /// instruct the driver to periodically save the simulation state to the provided path.
/// also attempts to load an existing state file, returning `true` on success. /// also attempts to load an existing state file, returning `true` on success.
pub fn add_state_file(&mut self, state_file: &str, snapshot_frequency: u64) -> bool { pub fn add_state_file(&mut self, state_file: &str, snapshot_frequency: u64) -> bool {
let ser = render::SerializerRenderer::new(state_file); let ser = render::SerializerRenderer::new(state_file);
let loaded = match ser.try_load() { let loaded = ser.try_load().map(|s| {
Some(state) => { self.state = s.state;
self.state = state.state; self.state.use_diagnostics(self.diag.clone());
true }).is_some();
},
None => false,
};
self.add_renderer(ser, state_file, snapshot_frequency, None); self.add_renderer(ser, state_file, snapshot_frequency, None);
loaded loaded
} }
} }
impl<S: GenericSim + Clone + Default + Send + Sync + 'static> Driver<S> { impl<S, Stim> Driver<S::Real, S, Stim>
where
S: AbstractSim + Clone + Default + Send + 'static,
Stim: DriverStimulus<S::Real> + Send + 'static,
{
fn render(&mut self) { fn render(&mut self) {
let prep_start = Instant::now(); let their_state = self.diag.instrument_render_prep(|| {
let their_state = self.state.clone(); if self.render_pool.num_workers() != 3 {
let their_measurements = self.measurements.clone(); let diag = self.diag.clone();
let renderer = self.renderer.clone(); // TODO: these measurements will come to differ from the ones in the Driver,
let time_spent_rendering = self.time_spent_rendering.clone(); // if the user calls `add_measurement`!
let sender = self.render_channel.0.clone(); let measurements = self.measurements.clone();
self.render_pool.execute(move || { let renderer = self.renderer.clone();
// unblock the main thread (this limits the number of renders in-flight at any time self.render_pool.spawn_workers(3, move |state| {
sender.send(()).unwrap(); // unblock the main thread (this limits the number of renders in-flight at any time
trace!("render begin"); trace!("render begin");
let start_time = Instant::now(); diag.instrument_render_cpu_side(|| {
renderer.render(&their_state, &*their_measurements, Default::default()); let meas: Vec<&dyn AbstractMeasurement<S>> = measurements.iter().map(|m| &**m).collect();
*time_spent_rendering.lock().unwrap() += start_time.elapsed(); renderer.render(&state, &*meas, Default::default());
trace!("render end"); });
trace!("render end");
});
}
self.state.clone()
});
// TODO: this instrumentation is not 100% accurate.
// - 'prep' and 'blocked' have effectively been folded together.
// - either delete 'prep', or change this block to use a `try_send` (prep) followed by a
// `send` (blocking)
self.diag.instrument_render_blocked(|| {
self.render_pool.tend();
self.render_pool.send(their_state);
}); });
self.time_spent_prepping_render += prep_start.elapsed();
let block_start = Instant::now();
self.render_channel.1.recv().unwrap();
self.time_spent_blocked_on_render += block_start.elapsed();
} }
/// Return the number of steps actually stepped /// Return the number of steps actually stepped
fn step_at_most(&mut self, at_most: u32) -> u32 { fn step_at_most(&mut self, at_most: u32) -> u32 {
assert!(at_most != 0); let diag = self.diag.clone();
let start_step = self.state.step_no(); diag.instrument_driver(move || {
if self.stimuli.should_apply(start_step) { assert!(at_most != 0);
self.stimuli.real_time = self.state.time(); let start_step = self.state.step_no();
self.stimuli.time_step = self.state.timestep();
trace!("updating stimuli");
}
if self.renderer.any_work_for_frame(start_step) { if self.renderer.any_work_for_frame(start_step) {
self.render(); self.render();
} }
let mut can_step = 1; // maybe the renderer or stimulus needs servicing before the max frame the user asked for.
while can_step < at_most && !self.renderer.any_work_for_frame(start_step + can_step as u64) { // step less than `at_most`, in that case.
can_step += 1; let next_frame_for_user = start_step + at_most as u64;
} let next_frame_to_render = self.renderer.next_frame_for_work(start_step);
trace!("step begin"); let next_frame_for_stim = self.stimuli.next_frame_for_work(start_step);
let start_time = Instant::now(); let step_to = [Some(next_frame_for_user), next_frame_to_render, Some(next_frame_for_stim)]
self.state.step_multiple(can_step, &self.stimuli); .into_iter()
self.time_spent_stepping += start_time.elapsed(); .flatten()
trace!("step end"); .min()
if self.last_diag_time.elapsed().as_secs_f64() >= 5.0 { .unwrap();
self.last_diag_time = Instant::now(); let steps_this_time = (step_to - start_step).try_into().unwrap();
let step = self.state.step_no();
let step_time = self.time_spent_stepping.as_secs_f64(); let meta = self.state.meta();
let stim_time = self.time_spent_on_stimuli.as_secs_f64(); let stim = self.stimuli.get_for(meta, start_step);
let render_time = self.time_spent_rendering.lock().unwrap().as_secs_f64(); // prefetch the next stimulus, in the background.
let render_prep_time = self.time_spent_prepping_render.as_secs_f64(); self.diag.instrument_stimuli_prep(|| {
let block_time = self.time_spent_blocked_on_render.as_secs_f64(); self.stimuli.start_job(meta, step_to);
let overall_time = self.start_time.elapsed().as_secs_f64(); });
let fps = (self.state.step_no() as f64) / overall_time;
let sim_time = self.state.time() as f64; trace!("step begin");
let percent_complete = match self.sim_end_time { self.diag.instrument_step(steps_this_time as u64, || {
Some(t) => format!("[{:.1}%] ", 100.0 * self.state.time() / *t.to_seconds(self.timestep())), self.state.step_multiple(steps_this_time, &stim);
None => "".to_owned(), });
}; trace!("step end");
info!(
"{}t={:.2e} frame {:06} fps: {:6.2} (sim: {:.1}s, stim: {:.1}s, [render: {:.1}s], blocked: {:.1}s, render_prep: {:.1}s, other: {:.1}s)", if self.last_diag_time.elapsed().as_secs_f64() >= 5.0 {
percent_complete, self.last_diag_time = Instant::now();
sim_time, let step = self.state.step_no();
step, let diagstr = self.diag.format();
fps, let sim_time = self.state.time() as f64;
step_time, let percent_complete = self.sim_end_time.map(|t| {
stim_time, format!("[{:.1}%] ", 100.0 * self.state.time() / *t.to_seconds(self.timestep()))
render_time, }).unwrap_or_default();
block_time, info!(
render_prep_time, "{}t={:.2e} frame {:06} {}",
overall_time - step_time - stim_time - block_time - render_prep_time, percent_complete, sim_time, step, diagstr
); );
} }
can_step as u32 steps_this_time
} })
pub fn step_multiple(&mut self, num_steps: u32) {
let mut steps_remaining = num_steps;
while steps_remaining != 0 {
steps_remaining -= self.step_at_most(steps_remaining);
}
} }
pub fn step(&mut self) { pub fn step(&mut self) {
self.step_multiple(1); self.step_at_most(1);
} }
/// Returns the number of timesteps needed to reach the end time /// Returns the number of timesteps needed to reach the end time
@@ -261,101 +317,237 @@ impl<S: GenericSim + Clone + Default + Send + Sync + 'static> Driver<S> {
let sim_end_time = sim_end_time.to_frame(self.state.timestep()); let sim_end_time = sim_end_time.to_frame(self.state.timestep());
self.sim_end_time = Some(sim_end_time); self.sim_end_time = Some(sim_end_time);
let mut stepped = false; let mut stepped = false;
while self.dyn_state().step_no() < *sim_end_time { while self.state.step_no() < *sim_end_time {
self.step_multiple(100); let steps_left = *sim_end_time - self.state.step_no();
// sanity limit: don't try to step too much at once else we may lock up the GPU/etc.
self.step_at_most(steps_left.min(1000) as u32);
stepped = true; stepped = true;
} }
if stepped { if stepped {
// render the final frame -- unless we already *have* // render the final frame -- unless we already *have*
self.render(); self.render();
} }
self.render_pool.join(); self.render_pool.join_workers();
self.sim_end_time = None; self.sim_end_time = None;
} }
} }
impl<S: MaterialSim> Driver<S> { // this is effectively `Cow`, but without the `ToOwned` (Clone) requirement
pub fn add_pml_boundary<C: Coord, R: Real>(&mut self, thickness: C) pub enum ValueOrRef<'a, T> {
where S::Material: From<Pml<R>> Value(T),
{ Ref(&'a T),
let timestep = self.state.timestep();
self.state.fill_boundary_using(thickness, |boundary_ness| {
let b = boundary_ness.elem_pow(3.0);
let conductivity = b * (0.5 / timestep);
Pml::new(conductivity)
});
}
pub fn add_classical_boundary<C: Coord>(&mut self, thickness: C)
where S::Material: From<mat::IsomorphicConductor<f32>>
{
self.add_classical_boundary_explicit::<f32, _>(thickness)
}
/// the CPU code is parameterized over `Real`: you'll need to use this interface to get access
/// to that, if using a CPU driver. otherwise, use `add_classical_boundary`
pub fn add_classical_boundary_explicit<R: Real, C: Coord>(&mut self, thickness: C)
where S::Material: From<mat::IsomorphicConductor<R>>
{
let timestep = self.state.timestep();
self.state.fill_boundary_using(thickness, |boundary_ness| {
let b = boundary_ness.elem_pow(3.0);
let cond = b * (0.5 / timestep);
let iso_cond = cond.x() + cond.y() + cond.z();
let iso_conductor = mat::IsomorphicConductor::new(iso_cond.cast());
iso_conductor
});
}
} }
impl<'a, T> AsRef<T> for ValueOrRef<'a, T> {
/// Adapts the stimuli to be applied only every so often, to improve perf fn as_ref(&self) -> &T {
struct StimuliAdapter { match self {
stim: Vec<Box<dyn AbstractStimulus>>, ValueOrRef::Value(x) => &x,
/// How many frames to go between applications of the stimulus. ValueOrRef::Ref(x) => x,
frame_interval: u64,
real_time: f32,
time_step: f32,
}
impl AbstractStimulus for StimuliAdapter {
fn at(&self, t_sec: f32, pos: Meters) -> (Vec3<f32>, Vec3<f32>) {
self.stim.at(t_sec, pos)
// TODO: remove this stuff (here only for testing)
/*
if true {
// interpolation unaware (i.e. let the Sim backend do it)
} else if false {
// delta-fn "interpolation"
self.stim.at(t_sec, pos) * (self.frame_interval as f32)
} else if false {
// step-fn "interpolation"
self.stim.at(self.real_time, pos)
} else {
// linear interpolation
let interp_width = self.frame_interval as f32 * self.time_step;
let prev = self.stim.at(self.real_time, pos);
let next = self.stim.at(self.real_time + interp_width, pos);
let interp = (t_sec - self.real_time) / interp_width;
prev * (1.0 - interp) + next * interp
} }
*/
} }
} }
impl StimuliAdapter { /// gives an opportunity to optimize a Stimulus for a specific setting
fn new() -> Self { /// before passing it off to the simulation.
pub trait DriverStimulus<R: Real> {
type Optimized: Stimulus<R>;
fn optimized_for<'a>(
&'a self, meta: SimMeta<f32>, _step: u64
) -> ValueOrRef<'a, Self::Optimized>;
}
pub trait Pushable<T> {
fn push(&mut self, meta: SimMeta<f32>, t: T);
}
pub struct DriverStimulusVec<T>(StimuliVec<T>);
impl<T> Default for DriverStimulusVec<T> {
fn default() -> Self {
Self(Default::default())
}
}
impl<R: Real, T: Stimulus<R>> DriverStimulus<R> for DriverStimulusVec<T> {
type Optimized = StimuliVec<T>;
fn optimized_for<'a>(
&'a self, _meta: SimMeta<f32>, _step: u64
) -> ValueOrRef<'a, Self::Optimized> {
ValueOrRef::Ref(&self.0)
}
}
impl<T> Pushable<T> for DriverStimulusVec<T> {
fn push(&mut self, _meta: SimMeta<f32>, t: T) {
self.0.push(t)
}
}
#[derive(Default)]
pub struct DriverStimulusDynVec<R>(DynStimuli<R>);
impl<R: Real> DriverStimulus<R> for DriverStimulusDynVec<R> {
type Optimized = DynStimuli<R>;
fn optimized_for<'a>(
&'a self, _meta: SimMeta<f32>, _step: u64
) -> ValueOrRef<'a, Self::Optimized> {
ValueOrRef::Ref(&self.0)
}
}
impl<R: Real, T: Stimulus<R> + Send + 'static> Pushable<T> for DriverStimulusDynVec<R> {
fn push(&mut self, _meta: SimMeta<f32>, t: T) {
self.0.push(Box::new(t))
}
}
/// optimized stimulus which will evaluate the vector fields _only once_
pub struct DriverStimulusModulated<R, T>(StimuliVec<ModulatedStaticField<R, T>>);
impl<R: Real, T> Default for DriverStimulusModulated<R, T> {
fn default() -> Self {
Self(Default::default())
}
}
impl<R: Real, V: VectorField<R>, T> Pushable<ModulatedVectorField<V, T>> for DriverStimulusModulated<R, T> {
fn push(&mut self, meta: SimMeta<f32>, stim: ModulatedVectorField<V, T>) {
let (vfield, timef) = stim.into_inner();
let dim = meta.dim();
let mut storage = Vec::new();
storage.resize_with(dim.product_sum_usize(), Fields::default);
let mut view = DimSlice::new(dim, storage);
let mut_view: DimSlice<&mut [_]> = view.as_mut();
for (loc, value) in mut_view.enumerated() {
*value = vfield.at(meta.feature_size().cast(), loc.into());
}
self.0.push(ModulatedStaticField::new(view, timef))
}
}
impl<R: Real, T: TimeVarying<R>> DriverStimulus<R> for DriverStimulusModulated<R, T> {
type Optimized = StimuliVec<ModulatedStaticField<R, FieldMags<R>>>;
fn optimized_for<'a>(
&'a self, meta: SimMeta<f32>, step: u64,
) -> ValueOrRef<'a, Self::Optimized> {
let t_sec = meta.time_step().cast::<R>() * R::from_primitive(step);
let opt = self.0.iter().map(|modulated| ModulatedVectorField::new(
// TODO: remove this costly clone!
(*modulated.fields()).clone(),
modulated.modulation().at(t_sec),
)).collect();
ValueOrRef::Value(StimuliVec::from_vec(opt))
}
}
/// a Stimulus where the field has been pre-calculated
pub type ModulatedStaticField<R, T> = ModulatedVectorField<DimSlice<Vec<Fields<R>>>, T>;
/// wraps a Stimulus to help provide async functionality on top of it.
/// the caller can request evaluation at a specific time, and either block on that or
/// come back and re-request that time later, expecting that it's been evaluated in the background.
struct StimAccess<R, T> {
stim: Arc<Mutex<T>>,
steps_per_stimulus: u64,
diag: SyncDiagnostics,
/// is the background thread doing work (or, has it completed work and placed it on the return
/// queue)?
/// A.K.A. "can i safely do a blocking recv on response_channel".
outstanding: Cell<bool>,
worker: JobPool<(SimMeta<f32>, u64), (SimMeta<f32>, u64, RenderedStimulus<R>)>,
}
impl<R, T> StimAccess<R, T> {
fn new(diag: SyncDiagnostics, stim: T) -> Self {
Self { Self {
stim: Default::default(), stim: Arc::new(Mutex::new(stim)),
frame_interval: 1, steps_per_stimulus: 1,
real_time: 0.0, diag,
time_step: 0.0, outstanding: Cell::new(false),
worker: JobPool::new(1),
} }
} }
fn should_apply(&self, frame: u64) -> bool { fn into_inner(self) -> T {
(frame % self.frame_interval == 0) && self.stim.len() != 0 let _ = self.maybe_wait_for_job(Default::default(), 0);
// with the worker joined, there should be no outstanding handles on the arc.
Arc::try_unwrap(self.stim).ok().unwrap().into_inner().unwrap()
} }
fn push(&mut self, s: Box<dyn AbstractStimulus>) { fn next_frame_for_work(&self, after: u64) -> u64 {
self.stim.push(s) let f = after + self.steps_per_stimulus;
f - f % self.steps_per_stimulus
}
/// used internally.
/// waits for an outstanding job (if any).
/// if the response matches the request, return the response,
/// else discard the response.
fn maybe_wait_for_job(&self, meta: SimMeta<f32>, step: u64) -> Option<RenderedStimulus<R>> {
if !self.outstanding.get() {
return None;
}
// block until job is complete and receive the result
let completed = self.diag.instrument_stimuli_blocked(|| {
self.worker.recv()
});
let (job_meta, job_step, rendered) = completed;
self.outstanding.set(false);
Some(rendered)
.filter(|_| (job_meta, job_step) == (meta, step))
}
}
impl<R: Real, T: DriverStimulus<R> + Send + 'static> StimAccess<R, T> {
fn get_for(&mut self, meta: SimMeta<f32>, step: u64) -> RenderedStimulus<R> {
// either claim the outstanding job (if it exists and matches)...
self.maybe_wait_for_job(meta, step).unwrap_or_else(|| {
// or start a job and wait for it to complete inline
self.start_job(meta, step);
self.maybe_wait_for_job(meta, step).unwrap()
})
}
// begin rendering the stimulus in the background
fn start_job(&mut self, meta: SimMeta<f32>, step: u64) {
// only one in-progress job allowed!
assert!(!self.outstanding.get());
self.outstanding.set(true);
self.ensure_worker();
self.worker.send((meta, step));
}
fn ensure_worker(&mut self) {
if self.worker.num_workers() != 0 {
return;
}
let stim = self.stim.clone();
let diag = self.diag.clone();
self.worker.spawn_worker(move |(meta, step)| {
let stim = diag.instrument_stimuli(|| {
let stim = stim.lock().unwrap();
let opt = stim.optimized_for(meta, step);
opt.as_ref().rendered(
meta.time_step().cast(),
// TODO: convert this to an integer
meta.time_step().cast::<R>() * R::from_primitive(step),
meta.feature_size().cast(),
meta.dim()
).into_owned()
//^ this 'into_owned' ought to be a no-op.
//^ it would only ever be borrowed if we accidentally called `rendered` twice.
});
(meta, step, stim)
});
}
}
impl<R, S, T: Pushable<S>> Pushable<S> for StimAccess<R, T> {
fn push(&mut self, meta: SimMeta<f32>, t: S) {
// invalidate any outstanding jobs (because the stimulus will have changed)
let _ = self.maybe_wait_for_job(Default::default(), 0);
self.stim.lock().unwrap().push(meta, t)
} }
} }

View File

@@ -1,5 +1,5 @@
use crate::geom::Vec2; use coremem_cross::real::Real;
use crate::real::Real; use coremem_cross::vec::Vec2;
use std::ops::Add; use std::ops::Add;

View File

@@ -6,8 +6,22 @@ mod units;
pub use line::Line2d; pub use line::Line2d;
pub use polygon::Polygon2d; pub use polygon::Polygon2d;
pub use region::{ pub use region::{
Cube, CylinderZ, Dilate, InvertedRegion, Memoize, Region, Sphere, Spiral, SwapXZ, SwapYZ, Torus, Translate, Union, WorldRegion, Wrap Cube,
CylinderZ,
Dilate,
HasCrossSection,
InvertedRegion,
Memoize,
Region,
Sphere,
Spiral,
SwapXZ,
SwapYZ,
Torus,
Translate,
Union,
WorldRegion,
Wrap,
}; };
pub use units::{Coord, Meters, OrdMeters, Index}; pub use units::{Coord, Meters, OrdMeters, Index};
pub use coremem_types::vec::{Vec2, Vec3, Vec3u};

View File

@@ -1,5 +1,6 @@
use crate::geom::{Line2d, Vec2}; use crate::geom::Line2d;
use crate::real::Real; use coremem_cross::real::Real;
use coremem_cross::vec::Vec2;
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct Polygon2d<R> { pub struct Polygon2d<R> {

View File

@@ -0,0 +1,97 @@
use crate::geom::Meters;
use super::{
and_not,
Cube,
HasCrossSection,
Intersection,
InvertedRegion,
Region,
Torus,
Union,
Union4,
};
use coremem_cross::vec::Vec3;
/// it's a torus, but elongated around its axis to resemble a pill shape.
///
/// ```
/// _______
/// / \
/// | |
/// \_______/
/// ```
pub struct ElongatedTorus(Union4<
Intersection<Torus, InvertedRegion<Cube>>, // rounded top
Intersection<Torus, InvertedRegion<Cube>>, // rounded bottom
Cube, // left connection between top/bot
Cube, // right connection between top/bot
>);
impl ElongatedTorus {
pub fn new_xz(center: Meters, length: f32, major_rad: f32, minor_rad: f32) -> Self {
let body = Cube::new_centered(
center,
Meters::new(2.0 * (major_rad + minor_rad), 2.0 * minor_rad, length),
);
let top = and_not(
Torus::new_xz(
center + Meters::new(0.0, 0.0, 0.5 * length),
major_rad,
minor_rad,
),
body,
);
let bot = and_not(
Torus::new_xz(
center - Meters::new(0.0, 0.0, 0.5 * length),
major_rad,
minor_rad,
),
body,
);
// TODO: these should be cylinders
let left = Cube::new_centered(
center - Meters::new(major_rad, 0.0, 0.0),
Meters::new(2.0 * minor_rad, 2.0 * minor_rad, length),
);
let right = Cube::new_centered(
center + Meters::new(major_rad, 0.0, 0.0),
Meters::new(2.0 * minor_rad, 2.0 * minor_rad, length),
);
Self(Union::new4(
top,
bot,
left,
right,
))
}
}
impl Region for ElongatedTorus {
fn contains(&self, p: Meters) -> bool {
self.0.contains(p)
}
}
impl HasCrossSection for ElongatedTorus {
fn cross_section_normal(&self, p: Meters) -> Vec3<f32> {
let top = self.0.region0_of_4();
let bot = self.0.region1_of_4();
let right = self.0.region3_of_4();
let left = self.0.region2_of_4();
let bridge_area =
(right.x_range().end - right.x_range().start)
* (right.y_range().start - right.y_range().end);
if top.contains(p) {
top.region0_of_2().cross_section_normal(p)
} else if bot.contains(p) {
bot.region0_of_2().cross_section_normal(p)
} else if right.contains(p) {
Vec3::new(0.0, 0.0, bridge_area)
} else if left.contains(p) {
Vec3::new(0.0, 0.0, -bridge_area)
} else {
Vec3::default()
}
}
}

View File

@@ -1,31 +1,39 @@
use crate::geom::{Coord, Meters, OrdMeters}; use crate::geom::{Coord, Meters, OrdMeters};
use dyn_clone::{self, DynClone}; use coremem_cross::vec::Vec3;
use rayon::prelude::*; use rayon::prelude::*;
use serde::{Serialize, Deserialize}; use serde::{Serialize, Deserialize};
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::sync::Arc; use std::sync::Arc;
mod constructed;
pub use constructed::*;
mod primitives; mod primitives;
pub use primitives::*; pub use primitives::*;
#[typetag::serde(tag = "type")] pub trait Region: Send + Sync {
pub trait Region: Send + Sync + DynClone {
fn contains(&self, p: Meters) -> bool; fn contains(&self, p: Meters) -> bool;
} }
dyn_clone::clone_trait_object!(Region);
pub fn and<T1: Region + 'static, T2: Region + 'static>(r1: T1, r2: T2) -> Intersection { /// some (volume) which has a tangent vector everywhere inside/on it.
Intersection::new().and(r1).and(r2) /// for example, a cylinder has tangents everywhere except its axis.
/// the returned vector should represent the area of the cross section.
pub trait HasCrossSection {
fn cross_section_normal(&self, p: Meters) -> Vec3<f32>;
} }
pub fn and_not<T1: Region + 'static, T2: Region + 'static>(r1: T1, r2: T2) -> Intersection { pub fn and<T1: Region + 'static, T2: Region + 'static>(r1: T1, r2: T2) -> Intersection<T1, T2> {
Intersection::new2(r1, r2)
}
pub fn and_not<T1: Region + 'static, T2: Region + 'static>(r1: T1, r2: T2) -> Intersection<T1, InvertedRegion<T2>> {
and(r1, InvertedRegion::new(r2)) and(r1, InvertedRegion::new(r2))
} }
pub fn union<T1: Region + 'static, T2: Region + 'static>(r1: T1, r2: T2) -> Union { pub fn union<T1: Region + 'static, T2: Region + 'static>(r1: T1, r2: T2) -> Union<T1, T2> {
Union::new().with(r1).with(r2) Union::new2(r1, r2)
} }
/// returns true if there's a path (via the cardinal directions) from p0 to p1 within this region. /// returns true if there's a path (via the cardinal directions) from p0 to p1 within this region.
@@ -67,137 +75,169 @@ pub fn distance_to<R: Region, C: Coord>(r: &R, p0: C, p1: C, feat_size: f32) ->
} }
/// Region describing the entire simulation space /// Region describing the entire simulation space
#[derive(Copy, Clone, Serialize, Deserialize)] #[derive(Copy, Clone, Default, Serialize, Deserialize)]
pub struct WorldRegion; pub struct WorldRegion;
#[typetag::serde]
impl Region for WorldRegion { impl Region for WorldRegion {
fn contains(&self, _: Meters) -> bool { fn contains(&self, _: Meters) -> bool {
true true
} }
} }
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Default, Serialize, Deserialize)]
pub struct InvertedRegion(Box<dyn Region>); pub struct InvertedRegion<R>(R);
impl InvertedRegion { impl<R> InvertedRegion<R> {
pub fn new<R: Region + 'static>(r: R) -> Self { pub fn new(r: R) -> Self {
Self(Box::new(r)) Self(r)
} }
} }
#[typetag::serde] impl<R: Region> Region for InvertedRegion<R> {
impl Region for InvertedRegion {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
!self.0.contains(p) !self.0.contains(p)
} }
} }
#[derive(Clone, Default, Serialize, Deserialize)] #[derive(Clone, Default, Serialize, Deserialize)]
pub struct Union(Vec<Box<dyn Region>>); pub struct Union<R1, R2>(R1, R2);
impl Union { pub type Union3<R1, R2, R3> = Union<Union<R1, R2>, R3>;
pub fn new() -> Self { pub type Union4<R1, R2, R3, R4> = Union<Union3<R1, R2, R3>, R4>;
Self(Vec::new())
impl<R1, R2> Union<R1, R2> {
pub fn with<R: Region>(self, r: R) -> Union<Self, R> {
Union::new2(self, r)
} }
pub fn new_with<R: Region + 'static>(r: R) -> Self { pub fn new2(r1: R1, r2: R2) -> Self {
Self::new().with(r) Self(r1, r2)
} }
pub fn with<R: Region + 'static>(self, r: R) -> Self { pub fn new3<R3: Region>(r1: R1, r2: R2, r3: R3) -> Union<Self, R3> {
self.with_box(Box::new(r)) Union::new2(r1, r2).with(r3)
} }
pub fn with_box(mut self, r: Box<dyn Region>) -> Self { pub fn new4<R3: Region, R4: Region>(r1: R1, r2: R2, r3: R3, r4: R4) -> Union<Union<Self, R3>, R4> {
self.0.push(r); Union::new2(r1, r2).with(r3).with(r4)
self
} }
} }
#[typetag::serde] impl<R0, R1> Union<R0, R1> {
impl Region for Union { pub fn region0_of_2(&self) -> &R0 {
&self.0
}
pub fn region1_of_2(&self) -> &R1 {
&self.1
}
}
impl<R0, R1, R2> Union3<R0, R1, R2> {
pub fn region0_of_3(&self) -> &R0 {
self.0.region0_of_2()
}
pub fn region1_of_3(&self) -> &R1 {
self.0.region1_of_2()
}
pub fn region2_of_3(&self) -> &R2 {
&self.1
}
}
impl<R0, R1, R2, R3> Union4<R0, R1, R2, R3> {
pub fn region0_of_4(&self) -> &R0 {
self.0.region0_of_3()
}
pub fn region1_of_4(&self) -> &R1 {
self.0.region1_of_3()
}
pub fn region2_of_4(&self) -> &R2 {
self.0.region2_of_3()
}
pub fn region3_of_4(&self) -> &R3 {
&self.1
}
}
impl<R1: Region, R2: Region> Region for Union<R1, R2> {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
self.0.iter().any(|r| r.contains(p)) self.0.contains(p) || self.1.contains(p)
} }
} }
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Default, Serialize, Deserialize)]
pub struct Intersection(Vec<Box<dyn Region>>); pub struct Intersection<R1, R2>(R1, R2);
impl Intersection { impl<R1, R2> Intersection<R1, R2> {
pub fn new() -> Self { pub fn and<R3: Region>(self, r: R3) -> Intersection<Self, R3> {
Self(Vec::new()) Intersection::new2(self, r)
} }
pub fn new_with<R: Region + 'static>(r: R) -> Self { pub fn new2(r1: R1, r2: R2) -> Self {
Self::new().and(r) Self(r1, r2)
} }
pub fn and<R: Region + 'static>(self, r: R) -> Self { pub fn region0_of_2(&self) -> &R1 {
self.and_box(Box::new(r)) &self.0
} }
pub fn and_box(mut self, r: Box<dyn Region>) -> Self { pub fn region1_of_2(&self) -> &R2 {
self.0.push(r); &self.1
self
} }
} }
#[typetag::serde] impl<R1: Region, R2: Region> Region for Intersection<R1, R2> {
impl Region for Intersection {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
self.0.iter().all(|r| r.contains(p)) self.0.contains(p) && self.1.contains(p)
} }
} }
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Default, Serialize, Deserialize)]
pub struct Translate { pub struct Translate<R> {
inner: Box<dyn Region>, inner: R,
shift: Meters, shift: Meters,
} }
impl Translate { impl<R> Translate<R> {
pub fn new<T: Region + 'static>(inner: T, shift: Meters) -> Self { pub fn new(inner: R, shift: Meters) -> Self {
Self { inner: Box::new(inner), shift } Self { inner, shift }
} }
} }
#[typetag::serde] impl<R: Region> Region for Translate<R> {
impl Region for Translate {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
self.inner.contains(p - self.shift) self.inner.contains(p - self.shift)
} }
} }
impl<R: HasCrossSection> HasCrossSection for Translate<R> {
#[derive(Clone, Serialize, Deserialize)] fn cross_section_normal(&self, p: Meters) -> Vec3<f32> {
pub struct SwapXZ { self.inner.cross_section_normal(p - self.shift)
inner: Box<dyn Region>,
}
impl SwapXZ {
pub fn new<T: Region + 'static>(inner: T) -> Self {
Self { inner: Box::new(inner) }
} }
} }
#[typetag::serde] #[derive(Clone, Default, Serialize, Deserialize)]
impl Region for SwapXZ { pub struct SwapXZ<R> {
inner: R,
}
impl<R> SwapXZ<R> {
pub fn new(inner: R) -> Self {
Self { inner }
}
}
impl<R: Region> Region for SwapXZ<R> {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
let p = Meters::new(p.z(), p.y(), p.z()); let p = Meters::new(p.z(), p.y(), p.z());
self.inner.contains(p) self.inner.contains(p)
} }
} }
#[derive(Clone, Default, Serialize, Deserialize)]
#[derive(Clone, Serialize, Deserialize)] pub struct SwapYZ<R> {
pub struct SwapYZ { inner: R,
inner: Box<dyn Region>,
} }
impl SwapYZ { impl<R> SwapYZ<R> {
pub fn new<T: Region + 'static>(inner: T) -> Self { pub fn new(inner: R) -> Self {
Self { inner: Box::new(inner) } Self { inner }
} }
} }
#[typetag::serde] impl<R: Region> Region for SwapYZ<R> {
impl Region for SwapYZ {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
let mapped = Meters::new(p.x(), p.z(), p.y()); let mapped = Meters::new(p.x(), p.z(), p.y());
self.inner.contains(mapped) self.inner.contains(mapped)
@@ -210,20 +250,20 @@ impl Region for SwapYZ {
/// the resulting region is mapped onto the original region y=[0, y_max]. x is just the radius /// the resulting region is mapped onto the original region y=[0, y_max]. x is just the radius
/// so that (0, 0) is mapped to (0, 0), and (1, 0) is mapped to (1, 0) and (0, 1) is mapped to /// so that (0, 0) is mapped to (0, 0), and (1, 0) is mapped to (1, 0) and (0, 1) is mapped to
/// (1, 0.5*y_max) and (-5, 0) is mapped to (5, 0.5*y_max). /// (1, 0.5*y_max) and (-5, 0) is mapped to (5, 0.5*y_max).
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Default, Serialize, Deserialize)]
pub struct Wrap { pub struct Wrap<R> {
inner: Box<dyn Region>, inner: R,
y_max: f32, y_max: f32,
about: Meters, about: Meters,
} }
impl Wrap { impl<R> Wrap<R> {
pub fn new<T: Region + 'static>(inner: T, y_max: f32) -> Self { pub fn new(inner: R, y_max: f32) -> Self {
Self::new_about(inner, y_max, Meters::new(0.0, 0.0, 0.0)) Self::new_about(inner, y_max, Meters::new(0.0, 0.0, 0.0))
} }
pub fn new_about<T: Region + 'static>(inner: T, y_max: f32, about: Meters) -> Self { pub fn new_about(inner: R, y_max: f32, about: Meters) -> Self {
Self { inner: Box::new(inner), y_max, about } Self { inner, y_max, about }
} }
fn map(&self, p: Meters) -> Meters { fn map(&self, p: Meters) -> Meters {
@@ -235,28 +275,26 @@ impl Wrap {
} }
} }
#[typetag::serde] impl<R: Region> Region for Wrap<R> {
impl Region for Wrap {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
self.inner.contains(self.map(p)) self.inner.contains(self.map(p))
} }
} }
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Default, Serialize, Deserialize)]
pub struct Dilate { pub struct Dilate<R> {
inner: Box<dyn Region>, inner: R,
rad: f32, rad: f32,
res: f32, res: f32,
} }
impl Dilate { impl<R> Dilate<R> {
pub fn new<T: Region + 'static>(inner: T, rad: f32, res: f32) -> Self { pub fn new(inner: R, rad: f32, res: f32) -> Self {
Self { inner: Box::new(inner), rad, res } Self { inner, rad, res }
} }
} }
#[typetag::serde] impl<R: Region> Region for Dilate<R> {
impl Region for Dilate {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
let rad_iters = (self.rad / self.res).ceil() as i32; let rad_iters = (self.rad / self.res).ceil() as i32;
let rad_range = -rad_iters..=rad_iters; let rad_range = -rad_iters..=rad_iters;
@@ -279,24 +317,78 @@ impl Region for Dilate {
} }
} }
#[derive(Clone, Serialize, Deserialize)] pub struct Rotate<R> {
pub struct Memoize { region: R,
#[serde(skip)] /// angle (radians) about the +x axis
lut: Arc<dashmap::DashMap<OrdMeters, bool>>, about_x: f32,
inner: Box<dyn Region>, /// angle (radians) about the +y axis
about_y: f32,
/// angle (radians) about the +z axis
about_z: f32,
} }
impl Memoize { impl<R> Rotate<R> {
pub fn new<R: Region + 'static>(inner: R) -> Self { pub fn about_x(about_x: f32, region: R) -> Self {
Self::about_x_y_z(about_x, 0.0, 0.0, region)
}
pub fn about_y(about_y: f32, region: R) -> Self {
Self::about_x_y_z(0.0, about_y, 0.0, region)
}
pub fn about_z(about_z: f32, region: R) -> Self {
Self::about_x_y_z(0.0, 0.0, about_z, region)
}
pub fn about_x_y_z(about_x: f32, about_y: f32, about_z: f32, region: R) -> Self {
Self {
region, about_x, about_y, about_z
}
}
fn rotate_into_region(&self, global: Vec3<f32>) -> Vec3<f32> {
global
.rotate_yz(-self.about_x)
.rotate_xz(-self.about_y)
.rotate_xy(-self.about_z)
}
fn rotate_out_of_region(&self, local: Vec3<f32>) -> Vec3<f32> {
local
.rotate_yz(self.about_x)
.rotate_xz(self.about_y)
.rotate_xy(self.about_z)
}
}
impl<R: Region> Region for Rotate<R> {
fn contains(&self, p: Meters) -> bool {
self.region.contains(Meters(self.rotate_into_region(p.0)))
}
}
impl<R: HasCrossSection> HasCrossSection for Rotate<R> {
fn cross_section_normal(&self, p: Meters) -> Vec3<f32> {
self.rotate_out_of_region(
self.region.cross_section_normal(
Meters(self.rotate_into_region(p.0))
)
)
}
}
#[derive(Clone, Default, Serialize, Deserialize)]
pub struct Memoize<R> {
#[serde(skip)]
lut: Arc<dashmap::DashMap<OrdMeters, bool>>,
inner: R,
}
impl<R> Memoize<R> {
pub fn new(inner: R) -> Self {
Self { Self {
lut: Arc::new(dashmap::DashMap::new()), lut: Arc::new(dashmap::DashMap::new()),
inner: Box::new(inner), inner,
} }
} }
} }
#[typetag::serde] impl<R: Region> Region for Memoize<R> {
impl Region for Memoize {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
*self.lut.entry(OrdMeters(p)).or_insert_with(|| self.inner.contains(p)) *self.lut.entry(OrdMeters(p)).or_insert_with(|| self.inner.contains(p))
} }
@@ -307,7 +399,7 @@ mod test {
use super::*; use super::*;
use float_eq::assert_float_eq; use float_eq::assert_float_eq;
fn assert_map(w: &Wrap, from: Meters, to: Meters) { fn assert_map<R>(w: &Wrap<R>, from: Meters, to: Meters) {
let mapped = w.map(from); let mapped = w.map(from);
assert_float_eq!(mapped.x(), to.x(), abs <= 0.01); assert_float_eq!(mapped.x(), to.x(), abs <= 0.01);
assert_float_eq!(mapped.y(), to.y(), abs <= 0.01); assert_float_eq!(mapped.y(), to.y(), abs <= 0.01);

View File

@@ -1,13 +1,14 @@
use crate::geom::{Meters, Vec2, Vec3}; use crate::geom::Meters;
use crate::real::Real as _; use coremem_cross::real::Real as _;
use coremem_cross::vec::{Vec2, Vec3};
use serde::{Serialize, Deserialize}; use serde::{Serialize, Deserialize};
use std::fmt::{self, Display}; use std::fmt::{self, Display};
use std::ops::Range; use std::ops::Range;
use super::Region; use super::{HasCrossSection, Region};
#[derive(Copy, Clone, Serialize, Deserialize)] #[derive(Copy, Clone, Default, Serialize, Deserialize)]
pub struct CylinderZ { pub struct CylinderZ {
center: Vec2<f32>, center: Vec2<f32>,
radius: f32, radius: f32,
@@ -23,7 +24,6 @@ impl CylinderZ {
} }
} }
#[typetag::serde]
impl Region for CylinderZ { impl Region for CylinderZ {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
p.xy().distance_sq(self.center) <= self.radius * self.radius p.xy().distance_sq(self.center) <= self.radius * self.radius
@@ -36,6 +36,31 @@ impl Display for CylinderZ {
} }
} }
/// describes all 3d space which falls within a given angular space, relative to the Z axis.
///
/// The wedge is unbounded along z and in radius: membership depends only on the azimuthal
/// angle of the point's (x, y) projection.
#[derive(Copy, Clone, Default, Serialize, Deserialize)]
pub struct WedgeZ {
    // lower bound of the accepted azimuth range (radians)
    arg_min: f32,
    // upper bound of the accepted azimuth range (radians); may exceed pi (see `contains`)
    arg_max: f32,
}
impl WedgeZ {
    /// Create a wedge accepting azimuths in `[arg_min, arg_max]` (radians).
    pub fn new(arg_min: f32, arg_max: f32) -> Self {
        Self { arg_min, arg_max }
    }
}
impl Region for WedgeZ {
    fn contains(&self, p: Meters) -> bool {
        let arg = p.xy().arg();
        // arg is [-pi, pi).
        // if the user supplied some desired range where arg_max > pi, then we need to rotate
        // one revolution "into" that range.
        // NOTE(review): ranges with arg_min < -pi are NOT handled symmetrically — confirm
        // callers never construct such a wedge.
        let arg_next = arg + f32::two_pi();
        (arg >= self.arg_min && arg <= self.arg_max) ||
        (arg_next >= self.arg_min && arg_next <= self.arg_max)
    }
}
#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)] #[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)]
pub struct Torus { pub struct Torus {
center: Meters, center: Meters,
@@ -76,7 +101,6 @@ impl Torus {
} }
} }
#[typetag::serde]
impl Region for Torus { impl Region for Torus {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
// a torus is the set of all points < distance `r` from the circle of radius `R`, // a torus is the set of all points < distance `r` from the circle of radius `R`,
@@ -86,7 +110,7 @@ impl Region for Torus {
// 2. Find the point `q` on the circle which is nearest to `p`. // 2. Find the point `q` on the circle which is nearest to `p`.
// 3. Consider the distance from `p` to `q`. // 3. Consider the distance from `p` to `q`.
let rel_p = *p - *self.center; let rel_p = *p - *self.center;
let p_on_plane = rel_p - self.normal.with_mag(self.normal.dot(rel_p)); let p_on_plane = rel_p - self.normal.with_mag(self.normal.dot(rel_p)).unwrap();
let q = if p_on_plane == Vec3::zero() { let q = if p_on_plane == Vec3::zero() {
// avoid division by zero. // avoid division by zero.
// The point is precisely on the axis of the torus. // The point is precisely on the axis of the torus.
@@ -94,16 +118,25 @@ impl Region for Torus {
// and they all give the same answer. // and they all give the same answer.
// Such a point is given by rotating the normal axis by 90 degrees in ANY DIRECTION // Such a point is given by rotating the normal axis by 90 degrees in ANY DIRECTION
let off_axis = self.normal.arbitrary_orthogonal_vector(); let off_axis = self.normal.arbitrary_orthogonal_vector();
off_axis.with_mag(self.major_rad) off_axis.with_mag(self.major_rad).unwrap()
} else { } else {
p_on_plane.with_mag(self.major_rad) p_on_plane.with_mag(self.major_rad).unwrap()
}; };
let distance_to_circle_sq = rel_p.distance_sq(q); let distance_to_circle_sq = rel_p.distance_sq(q);
distance_to_circle_sq < self.minor_rad * self.minor_rad distance_to_circle_sq < self.minor_rad * self.minor_rad
} }
} }
#[derive(Copy, Clone, Serialize, Deserialize)] impl HasCrossSection for Torus {
fn cross_section_normal(&self, coord: Meters) -> Vec3<f32> {
let axis = self.axis();
let to_coord = *coord - *self.center();
// this creates a normal which always points "counter-clockwise" along the shape
axis.cross(to_coord).with_mag(self.cross_section()).unwrap_or_default()
}
}
#[derive(Copy, Clone, Default, Serialize, Deserialize)]
pub struct Sphere { pub struct Sphere {
center: Meters, center: Meters,
rad: f32, rad: f32,
@@ -118,7 +151,6 @@ impl Sphere {
} }
} }
#[typetag::serde]
impl Region for Sphere { impl Region for Sphere {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
p.distance_sq(*self.center) < self.rad * self.rad p.distance_sq(*self.center) < self.rad * self.rad
@@ -227,7 +259,6 @@ impl Cube {
} }
} }
#[typetag::serde]
impl Region for Cube { impl Region for Cube {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
self.x_range().contains(&p.x()) && self.x_range().contains(&p.x()) &&
@@ -237,7 +268,7 @@ impl Region for Cube {
} }
/// a Spiral traces out a circle on the xy plane as z increases. /// a Spiral traces out a circle on the xy plane as z increases.
#[derive(Copy, Clone, Serialize, Deserialize)] #[derive(Copy, Clone, Default, Serialize, Deserialize)]
pub struct Spiral { pub struct Spiral {
/// radius of the spiral /// radius of the spiral
major: f32, major: f32,
@@ -256,7 +287,6 @@ impl Spiral {
} }
} }
#[typetag::serde]
impl Region for Spiral { impl Region for Spiral {
fn contains(&self, p: Meters) -> bool { fn contains(&self, p: Meters) -> bool {
let revs = p.z() / self.period; let revs = p.z() / self.period;

View File

@@ -1,6 +1,6 @@
use crate::real::ToFloat; use coremem_cross::real::ToFloat;
use coremem_cross::vec::{Vec3, Vec3u};
use serde::{Serialize, Deserialize}; use serde::{Serialize, Deserialize};
use super::{Vec3, Vec3u};
use std::fmt::{self, Display}; use std::fmt::{self, Display};
use std::cmp::Ordering; use std::cmp::Ordering;
use std::ops::{Add, Deref, Div, Mul, Neg, Sub}; use std::ops::{Add, Deref, Div, Mul, Neg, Sub};
@@ -188,6 +188,17 @@ impl Index {
} }
} }
/// Unwrap an `Index` into its underlying `Vec3u`.
///
/// Implemented as `From` (rather than a hand-written `Into`) so that both
/// `Vec3u::from(ix)` and `ix.into()` work — the standard library's blanket
/// `impl<T, U: From<T>> Into<U> for T` supplies the `Into` direction for free,
/// so this is strictly more general than the previous `impl Into<Vec3u> for Index`
/// (clippy: `from_over_into`).
impl From<Index> for Vec3u {
    fn from(ix: Index) -> Vec3u {
        ix.0
    }
}
/// Wrap a raw `Vec3u` cell coordinate as an `Index`.
impl From<Vec3u> for Index {
    fn from(v: Vec3u) -> Self {
        Self(v)
    }
}
impl Coord for Index { impl Coord for Index {
fn to_meters(&self, feature_size: f32) -> Meters { fn to_meters(&self, feature_size: f32) -> Meters {
Meters(Vec3::from(self.0) * feature_size) Meters(Vec3::from(self.0) * feature_size)

View File

@@ -7,19 +7,20 @@
use log::info; use log::info;
mod diagnostics;
pub mod driver; pub mod driver;
pub mod geom; pub mod geom;
pub mod mat;
pub mod meas; pub mod meas;
pub mod render; pub mod render;
pub mod sim; pub mod sim;
pub mod stim; pub mod stim;
pub mod util; pub mod worker;
pub use driver::*; pub use driver::*;
pub use mat::*;
pub use sim::*; pub use sim::*;
pub use coremem_types::real; pub use coremem_cross as cross;
pub use coremem_cross::real;
pub use coremem_cross::mat;
// Some things to keep in mind: // Some things to keep in mind:
// B = mu_r*H + M // B = mu_r*H + M

View File

@@ -1,362 +0,0 @@
use crate::CellState;
use crate::geom::{Line2d, Vec2, Vec3, Polygon2d};
use crate::mat::Material;
use crate::real::Real;
use crate::sim::StepParametersMut;
use lazy_static::lazy_static;
use log::trace;
use serde::{Serialize, Deserialize};
use std::any::{Any, TypeId};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::sync::Mutex;
/// Advance the magnetization `m_mut` of a hysteretic material in response to a change in B,
/// by walking each axis independently along the material's M(H) curve.
///
/// `context` supplies the current H field; `delta_b` is the requested change in B for this step.
fn step_linear_ferro<R: Real>(m_mut: &mut Vec3<R>, mh_curve: &MHCurve<R>, context: &CellState<R>, delta_b: Vec3<R>) {
    trace!("step_b enter");
    let (h, m) = (context.h(), *m_mut);
    // B = mu0*(H + M), so after applying delta_b the quantity H + M must reach
    // (H + M) + delta_b/mu0 on each axis.
    let target_hm = h + m + delta_b * R::mu0_inv();
    // TODO: this is probably not the best way to generalize a BH curve into 3d.
    let (_hx, mx) = mh_curve.move_to(
        h.x(),
        m.x(),
        target_hm.x(),
    );
    let (_hy, my) = mh_curve.move_to(
        h.y(),
        m.y(),
        target_hm.y(),
    );
    let (_hz, mz) = mh_curve.move_to(
        h.z(),
        m.z(),
        target_hm.z(),
    );
    // only M is persisted; the per-axis H values returned by move_to are discarded
    *m_mut = Vec3::new(mx, my, mz);
    // let ret = Vec3::new(hx, hy, hz);
    trace!("step_b end");
}
/// M as a function of H
///
/// The hysteresis loop is stored as a closed polygon in (H, M) space; stepping the material
/// means walking the current (H, M) point along the polygon's edges.
#[derive(Clone, PartialEq)]
struct MHCurve<R> {
    geom: Polygon2d<R>,
}
#[allow(unused)]
impl<R: Real> MHCurve<R> {
    /// Construct a M(H) curve from a sweep from M = 0 to Ms and back down to M = 0.
    /// The curve below M = 0 is derived by symmetry.
    fn new<R2: Real>(points: &[Vec2<R2>]) -> Self {
        // mirror the supplied points through the origin to obtain the full closed loop
        let full_pts: Vec<_> =
            points.iter().cloned()
                .chain(points.iter().cloned().map(|p| -p))
                .map(|p| p.cast())
                .collect();
        Self {
            geom: Polygon2d::new(full_pts)
        }
    }
    /// Build the curve from (H, B) samples, converting each B to M via M = B/mu0 - H.
    fn from_bh<R2: Real>(points: &[(R2, R2)]) -> Self {
        let mh_points: Vec<_> = points.iter().cloned().map(|(h, b)| {
            Vec2::new(h, b / R2::mu0() - h)
        }).collect();
        Self::new(&*mh_points)
    }
    /// Build the curve directly from (H, M) samples.
    fn from_mh<R2: Real>(points: &[(R2, R2)]) -> Self {
        let mh_points: Vec<_> = points.iter().cloned().map(|(h, m)| {
            Vec2::new(h, m)
        }).collect();
        Self::new(&*mh_points)
    }
    /// Return (Hmax, Mmax)
    pub fn extremes(&self) -> Vec2<R> {
        Vec2::new(self.geom.max_x(), self.geom.max_y())
    }
    /// Moves (h, m) towards some location in the MH curve where H + M = target_hm.
    /// Returns `Ok((h, m))` if complete; `Err((h, m))` if there's more work to be done (call it
    /// again).
    fn step_toward(&self, h: R, m: R, target_hm: R) -> Result<Vec2<R>, Vec2<R>> {
        // direction of travel: are we increasing or decreasing H + M?
        let is_ascending = match target_hm.partial_cmp(&(h + m)).unwrap_or_else(|| panic!("{} {}", h, m)) {
            Ordering::Greater => true,
            Ordering::Less => false,
            _ => return Ok(Vec2::new(h, m))
        };
        if (is_ascending && m == self.geom.max_y()) || (!is_ascending && m == self.geom.min_y()) {
            // Fully saturated. m is fixed, while h moves freely
            return Ok(Vec2::new(target_hm - m, m));
        }
        // Locate the segment which would contain the current point
        let mut segments = self.geom.segments();
        let active_segment = loop {
            let line = segments.next().unwrap_or_else(|| {
                panic!("failed to find segment for h:{}, m:{}, {:?}", h, m, self.geom.segments().collect::<Vec<_>>());
            });
            if line.contains_y(m) && line.is_ascending() == is_ascending {
                if line.contains_x(h) && line.distance_sq(Vec2::new(h, m)) < R::from_primitive(1.0e-6) {
                    // (h, m) resides on this line
                    break line;
                } else {
                    // need to move the point toward this line
                    let h_intercept = line.x(m);
                    break Line2d::new(Vec2::new(h, m), Vec2::new(h_intercept, m));
                }
            }
        };
        trace!("active segment: {:?}", active_segment);
        // Find some m(h) on the active_segment such that sum(h) = h + m(h) = target_hm
        let sum_h = active_segment + Line2d::new(Vec2::zero(), Vec2::unit());
        trace!("sum_h: {:?}", sum_h);
        let new_h = if sum_h.to().y() != sum_h.from().y() {
            sum_h.move_toward_y_unclamped(h, target_hm)
        } else {
            // avoid a division-by-zero.
            // We could be anywhere along this line, but we prefer the endpoint
            // so as to escape out of any permanent loops
            active_segment.to().x()
        };
        trace!("new_h: {}", new_h);
        if sum_h.contains_x(new_h) {
            // the segment contains a point with the target H+M
            Ok(active_segment.at_x(new_h))
        } else {
            // the segment doesn't contain the desired point: clamp and try the next segment
            Err(active_segment.clamp_by_x(new_h))
        }
    }
    /// Drive (h, m) until H + M = target_hm, iterating `step_toward` across as many polygon
    /// segments as needed. Panics if convergence takes an implausible number of iterations
    /// (guards against float-rounding livelock; see `regression_no_convergence_3r1`).
    fn move_to(&self, mut h: R, mut m: R, target_hm: R) -> (R, R) {
        let mut i = 0;
        loop {
            i += 1;
            match self.step_toward(h, m, target_hm) {
                Ok(v) => break (v.x(), v.y()),
                Err(v) => {
                    h = v.x();
                    m = v.y();
                },
            }
            if i % 2048 == 0 {
                panic!("unusually high iteration count without converging: {}. args: {}, {}, {}", i, h, m, target_hm);
            }
        }
    }
}
/// Hysteretic ferrite material modeled on Ferroxcube 3R1 (see `mat::db::ferroxcube_3r1`).
#[derive(Default, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct Ferroxcube3R1<R> {
    // per-cell magnetization state
    m: Vec3<R>,
}
impl<R: Real> Ferroxcube3R1<R> {
    pub fn new() -> Self {
        Self::default()
    }
}
impl<R: Real> Ferroxcube3R1<R> {
    /// Lazily-built, process-wide M(H) curve, keyed by the concrete `R` type so that f32 and
    /// f64 instantiations each get their own cached curve.
    fn curve() -> &'static MHCurve<R> {
        lazy_static! {
            static ref CURVES: Mutex<HashMap<TypeId, Box<dyn Any + Send>>> = Mutex::new(HashMap::new());
        }
        let mut lock = CURVES.lock().unwrap();
        let curve = lock.entry(TypeId::of::<R>()).or_insert_with(|| {
            // digitized B(H) sweep: rising branch first, then the falling branch
            Box::new(MHCurve::<R>::from_bh(&[
                (  35.0, 0.0),
                (  50.0, 0.250),
                ( 100.0, 0.325),
                ( 200.0, 0.350),
                (1000.0, 0.390),
                // Falling
                ( 200.0, 0.360),
                ( 100.0, 0.345),
                (  50.0, 0.340),
                (   0.0, 0.325),
            ]))
        }).downcast_ref::<MHCurve<R>>().unwrap();
        // SAFETY(review): extends the borrow to 'static. This relies on (a) CURVES being a
        // process-lifetime static, (b) entries never being removed (no removal exists in this
        // code), and (c) the value being boxed, so its heap address is stable even if the map
        // rehashes. TODO confirm no future code path removes/replaces entries.
        unsafe { std::mem::transmute::<&MHCurve<R>, &'static MHCurve<R>>(curve) }
    }
}
impl<R: Real> Material<R> for Ferroxcube3R1<R> {
    fn step_b(&mut self, context: &CellState<R>, delta_b: Vec3<R>) {
        step_linear_ferro(&mut self.m, Self::curve(), context, delta_b)
    }
    fn m(&self) -> Vec3<R> {
        self.m
    }
    fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
        // small fixed conductivity on all axes
        StepParametersMut::default().with_conductivity(Vec3::uniform(1e-3))
    }
}
/// Simple, square-loop ferrite
///
/// Idealized material with a nearly-vertical B(H) transition — useful for experiments that
/// want unambiguous switching rather than a realistic curve.
#[derive(Default, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct MinimalSquare<R> {
    // per-cell magnetization state
    m: Vec3<R>,
}
impl<R: Real> MinimalSquare<R> {
    /// Lazily-built, process-wide M(H) curve, keyed by the concrete `R` type
    /// (same caching scheme as `Ferroxcube3R1::curve`).
    fn curve() -> &'static MHCurve<R> {
        lazy_static! {
            static ref CURVES: Mutex<HashMap<TypeId, Box<dyn Any + Send>>> = Mutex::new(HashMap::new());
        }
        let mut lock = CURVES.lock().unwrap();
        let curve = lock.entry(TypeId::of::<R>()).or_insert_with(|| {
            Box::new(MHCurve::<R>::from_bh(&[
                ( 1.0, 0.0),
                ( 2.0, 1000000.0),
                // Falling
                ( 0.0, 900000.0),
            ]))
        }).downcast_ref::<MHCurve<R>>().unwrap();
        // SAFETY(review): same invariant as Ferroxcube3R1::curve — static map, entries never
        // removed by this code, boxed value has a stable address.
        unsafe { std::mem::transmute::<&MHCurve<R>, &'static MHCurve<R>>(curve) }
    }
}
impl<R: Real> Material<R> for MinimalSquare<R> {
    fn step_b(&mut self, context: &CellState<R>, delta_b: Vec3<R>) {
        step_linear_ferro(&mut self.m, Self::curve(), context, delta_b)
    }
    fn m(&self) -> Vec3<R> {
        self.m
    }
    fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
        // small fixed conductivity on all axes
        StepParametersMut::default().with_conductivity(Vec3::uniform(1e-3))
    }
}
#[cfg(test)]
mod test {
    use super::*;
    /// Hysteresis-loop fixture shared by the tests below.
    fn mh_curve_for_test() -> MHCurve<f32> {
        MHCurve::new(&[
            // rising
            Vec2::new( 10.0, 0.0),
            Vec2::new( 20.0, 100.0),
            Vec2::new( 30.0, 150.0),
            // falling
            Vec2::new(  0.0, 120.0),
            // negative rising
            Vec2::new(-10.0, 0.0),
            Vec2::new(-20.0, -100.0),
            Vec2::new(-30.0, -150.0),
            // negative falling
            Vec2::new(  0.0, -120.0),
        ])
    }
    /// Assert one `step_toward` call AND its mirror image (all signs negated), exploiting the
    /// curve's origin symmetry to double the coverage of each case.
    fn assert_step_toward_symmetric(h: f32, m: f32, target_mh: f32, target: Result<Vec2<f32>, Vec2<f32>>) {
        let curve = mh_curve_for_test();
        // identity rebind; kept for parallelism with neg_target below
        let target = match target {
            Ok(v) => Ok(v),
            Err(v) => Err(v),
        };
        let neg_target = match target {
            Ok(v) => Ok(-v),
            Err(v) => Err(-v),
        };
        assert_eq!(curve.step_toward(h, m, target_mh), target);
        assert_eq!(curve.step_toward(-h, -m, -target_mh), neg_target);
    }
    /// Like `assert_step_toward_symmetric`, but for the fully-converging `move_to`.
    fn assert_move_to_symmetric(h: f32, m: f32, target_mh: f32, target: (f32, f32)) {
        let curve = mh_curve_for_test();
        assert_eq!(curve.move_to(h, m, target_mh), target);
        assert_eq!(curve.move_to(-h, -m, -target_mh), (-target.0, -target.1));
    }
    // Inside the loop, M is pinned: only H moves to meet the target H+M.
    #[test]
    fn mh_curve_move_from_inner_to_inner() {
        assert_step_toward_symmetric(0.0, 0.0, 5.0, Ok(Vec2::new(5.0, 0.0)));
        assert_step_toward_symmetric(0.0, 5.0, 10.0, Ok(Vec2::new(5.0, 5.0)));
        assert_step_toward_symmetric(-5.0, 5.0, -3.0, Ok(Vec2::new(-8.0, 5.0)));
        assert_step_toward_symmetric(-5.0, 5.0, 7.0, Ok(Vec2::new(2.0, 5.0)));
        assert_step_toward_symmetric(5.0, -5.0, -3.0, Ok(Vec2::new(2.0, -5.0)));
        assert_step_toward_symmetric(5.0, -5.0, 3.0, Ok(Vec2::new(8.0, -5.0)));
    }
    #[test]
    fn mh_curve_magnetize_along_edge() {
        // start of segment NOOP
        assert_step_toward_symmetric(10.0, 0.0, 10.0, Ok(Vec2::new(10.0, 0.0)));
        // start of segment to middle of segment
        assert_step_toward_symmetric(10.0, 0.0, 32.0, Ok(Vec2::new(12.0, 20.0)));
        // middle of segment NOOP
        assert_step_toward_symmetric(12.0, 20.0, 32.0, Ok(Vec2::new(12.0, 20.0)));
        // middle of segment to middle of segment
        assert_step_toward_symmetric(12.0, 20.0, 54.0, Ok(Vec2::new(14.0, 40.0)));
        // middle of segment to end of segment
        assert_step_toward_symmetric(12.0, 20.0, 120.0, Err(Vec2::new(20.0, 100.0)));
    }
    #[test]
    fn mh_curve_demagnetize_along_edge() {
        // start of segment NOOP
        assert_step_toward_symmetric(30.0, 150.0, 180.0, Ok(Vec2::new(30.0, 150.0)));
        // start of segment to middle of segment
        assert_step_toward_symmetric(30.0, 150.0, 160.0, Ok(Vec2::new(20.0, 140.0)));
        // middle of segment NOOP
        assert_step_toward_symmetric(20.0, 140.0, 160.0, Ok(Vec2::new(20.0, 140.0)));
        // middle of segment to middle of segment
        assert_step_toward_symmetric(20.0, 140.0, 140.0, Ok(Vec2::new(10.0, 130.0)));
        // middle of segment to end of segment
        assert_step_toward_symmetric(20.0, 140.0, 120.0, Err(Vec2::new(0.0, 120.0)));
    }
    #[test]
    fn mh_curve_magnetize_across_edges() {
        // Rising from start to middle
        assert_move_to_symmetric(10.0, 0.0, 132.0, (22.0, 110.0));
        // Rising from start to saturation
        assert_move_to_symmetric(10.0, 0.0, 180.0, (30.0, 150.0));
        // Rising from start to post-saturation
        assert_move_to_symmetric(10.0, 0.0, 400.0, (250.0, 150.0));
        // Rising from negative saturation to start
        assert_move_to_symmetric(-30.0, -150.0, 10.0, (10.0, 0.0));
        // Rising from negative post-saturation to start
        assert_move_to_symmetric(-250.0, -150.0, 10.0, (10.0, 0.0));
        // Rising from negative middle to middle
        assert_move_to_symmetric(-22.0, -110.0, 132.0, (22.0, 110.0));
    }
    #[test]
    fn mh_curve_demagnetize_across_edges() {
        // Falling from saturation to start
        assert_move_to_symmetric(30.0, 150.0, 120.0, (0.0, 120.0));
        // Falling from post-saturation to post-saturation
        assert_move_to_symmetric(250.0, 150.0, 200.0, (50.0, 150.0));
        // Falling from post-saturation to saturation
        assert_move_to_symmetric(250.0, 150.0, 180.0, (30.0, 150.0));
        // Falling from post-saturation to start
        assert_move_to_symmetric(250.0, 150.0, 120.0, (0.0, 120.0));
        // Falling from post-saturation to negative saturation
        assert_move_to_symmetric(250.0, 150.0, -180.0, (-30.0, -150.0));
        // Falling from post-saturation to negative post-saturation
        assert_move_to_symmetric(250.0, 150.0, -400.0, (-250.0, -150.0));
        // Falling from interior to middle
        assert_move_to_symmetric(28.0, 130.0, 140.0, (10.0, 130.0));
        // Falling from interior to middle
        assert_move_to_symmetric(28.0, 130.0, 130.0, (5.0, 125.0));
    }
    /// Float rounding would cause `inf`s, which manifested as infinite looping.
    #[test]
    fn regression_no_convergence_3r1() {
        let curve = Ferroxcube3R1::curve();
        curve.move_to(-202.04596, -278400.53, -278748.66);
    }
}

View File

@@ -1,35 +0,0 @@
//! database of common materials
use crate::geom::Vec3;
use crate::mat::{AnisomorphicConductor, IsomorphicConductor, LinearMagnet, Ferroxcube3R1, MinimalSquare};
use crate::real::Real;
/// Isotropic conductor with the given conductivity (copper below uses 5e7, consistent with
/// S/m — TODO confirm units project-wide).
pub fn conductor<R: Real, R2: Real>(conductivity: R2) -> IsomorphicConductor<R> {
    IsomorphicConductor::new(conductivity.cast())
}
/// Conductor with per-axis conductivity.
pub fn anisotropic_conductor<R>(conductivity: Vec3<R>) -> AnisomorphicConductor<R> {
    AnisomorphicConductor::new(conductivity)
}
/// Copper: isotropic conductor at 5.0e7.
pub fn copper<R: Real>() -> IsomorphicConductor<R> {
    conductor(50_000_000.0)
}
// See https://en.wikipedia.org/wiki/Permeability_(electromagnetism)#Values_for_some_common_materials
/// This is a simplified form of iron annealed in H.
pub fn linear_annealed_iron<R: Real>() -> LinearMagnet<R> {
    LinearMagnet::new(200_000.0)
}
/// This is a simplified form of iron
pub fn linear_iron<R: Real>() -> LinearMagnet<R> {
    LinearMagnet::new(5000.0)
}
/// https://www.ferroxcube.com/upload/media/product/file/MDS/3r1.pdf
pub fn ferroxcube_3r1<R: Real>() -> Ferroxcube3R1<R> {
    Ferroxcube3R1::default()
}
/// Idealized square-loop ferrite (see `MinimalSquare`).
pub fn minimal_square_ferrite<R: Real>() -> MinimalSquare<R> {
    MinimalSquare::default()
}

View File

@@ -1,101 +0,0 @@
use crate::CellState;
use crate::geom::Vec3;
use crate::mat::Material;
use crate::real::Real;
use serde::{Serialize, Deserialize};
/// Material which can be magnetized, but has no hysteresis and no coercivity.
#[derive(Copy, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct LinearMagnet<R> {
    /// \mu_r
    relative_permeability: Vec3<R>,
    // accumulated magnetization
    m: Vec3<R>,
}
impl<R: Real> LinearMagnet<R> {
    /// Isotropic magnet with uniform relative permeability \mu_r.
    pub fn new<R2: Real>(relative_permeability: R2) -> Self {
        Self {
            relative_permeability: Vec3::uniform(relative_permeability).cast(),
            m: Vec3::zero(),
        }
    }
    /// Magnet with a distinct \mu_r per axis.
    pub fn new_anisotropic<R2: Real>(relative_permeability: Vec3<R2>) -> Self {
        Self {
            relative_permeability: relative_permeability.cast(),
            m: Vec3::zero()
        }
    }
}
impl<R: Real> Material<R> for LinearMagnet<R> {
    fn m(&self) -> Vec3<R> {
        self.m
    }
    fn step_b(&mut self, _context: &CellState<R>, delta_b: Vec3<R>) {
        // Derivation of delta_M from delta_B:
        //```tex
        // $B = \mu_0 (H + M) = \mu_0 \mu_r H$
        // $\mu_r H = H + M$
        // $M = (\mu_r - 1) H$
        // $B = \mu_0 (1/(\mu_r - 1) M + M)$
        // $B = \mu_0 \mu_r/(\mu_r - 1) M$
        //```
        let mu_r = self.relative_permeability;
        let delta_m = (delta_b*R::mu0_inv()).elem_mul(mu_r - Vec3::unit()).elem_div(mu_r);
        self.m += delta_m;
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use float_eq::assert_float_eq;
    // High-mu_r magnet: M should track B/mu0 * (mu_r-1)/mu_r and be fully reversible.
    #[test]
    fn linear_magnet_steep() {
        let mut mag = LinearMagnet::<f64>::new(5000.0);
        // M = B/mu0 * (mu_r-1)/(mu_r)
        mag.step_b(&CellState::default(), Vec3::uniform(1.0));
        assert_float_eq!(mag.m().x(), 795615.56, abs <= 1.0);
        mag.step_b(&CellState::default(), Vec3::uniform(1.0));
        assert_float_eq!(mag.m().x(), 1591231.12, abs <= 1.0);
        mag.step_b(&CellState::default(), Vec3::uniform(-1.0));
        assert_float_eq!(mag.m().x(), 795615.56, abs <= 1.0);
        mag.step_b(&CellState::default(), Vec3::uniform(-1.0));
        assert_float_eq!(mag.m().x(), 0.0, abs <= 1.0);
    }
    // Low mu_r: half of delta_B/mu0 goes into M (mu_r = 2 => (mu_r-1)/mu_r = 0.5).
    #[test]
    fn linear_magnet_shallow() {
        let mut mag = LinearMagnet::<f64>::new(2.0);
        mag.step_b(&CellState::default(), Vec3::uniform(1.0));
        assert_float_eq!(mag.m().x(), 397887.36, abs <= 1.0);
        mag.step_b(&CellState::default(), Vec3::uniform(-3.0));
        assert_float_eq!(mag.m().x(), -795774.72, abs <= 1.0);
    }
    // Accumulate many small f32 steps up and back down; measures drift of M against the
    // analytic value.
    #[test]
    fn linear_magnet_accuracy() {
        let mut mag = LinearMagnet::<f32>::new(5000.0);
        let mut b = Vec3::zero();
        while b.x() < 1.0 {
            let delta_b = Vec3::uniform(0.00002);
            mag.step_b(&CellState::default(), delta_b);
            b += delta_b;
        }
        while b.x() > 0.0 {
            let delta_b = Vec3::uniform(-0.00001);
            mag.step_b(&CellState::default(), delta_b);
            b += delta_b;
        }
        // TODO: This error is WAY too big!
        // Need to make sure that M+H == mu0*B always
        assert_float_eq!(mag.m().x(), b.x() * f32::mu0_inv(), abs <= 900.0);
    }
}

View File

@@ -1,195 +0,0 @@
use crate::geom::{Line2d, Vec2, Vec3};
use crate::mat::Material;
use crate::real::Real;
use crate::sim::CellState;
use serde::{Serialize, Deserialize};
/// M(B) parallelogram
///
///```text
///     ____________
///    /           /
///   /     .     /
///  /           /
/// /___________/
/// ```
///
/// The `.` depicts (0, 0). X axis is B; y axis is M.
/// As B increases, M remains constant until it hits an edge.
/// Then M rises up to its max.
/// Same thing happens on the left edge, as B decreases and M falls to its min.
#[derive(Default, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct MBPgram<R> {
    /// Vertical range of the graph
    pub max_m: R,
    /// X coordinate at which the upward slope starts
    pub b_start: R,
    /// X coordinate at which the upward slope ends
    pub b_end: R,
}
impl<R: Real> MBPgram<R> {
    pub fn new(b_start: R, b_end: R, max_m: R) -> Self {
        Self { b_start, b_end, max_m }
    }
    /// Return the new `M`
    pub fn move_b(&self, m: R, target_b: R) -> R {
        // rising edge on the right of the parallelogram: lower bound on M at target_b
        let right_edge = Line2d::new(
            Vec2::new(self.b_start, -self.max_m),
            Vec2::new(self.b_end, self.max_m),
        );
        // falling edge on the left: upper bound on M at target_b
        let left_edge = Line2d::new(
            Vec2::new(-self.b_start, self.max_m),
            Vec2::new(-self.b_end, -self.max_m),
        );
        // m must be at least this much:
        let min_m = right_edge.clamp_by_x(target_b).y();
        // m must be no more than this:
        let max_m = left_edge.clamp_by_x(target_b).y();
        // keep the old m if it already lies within [min_m, max_m]; otherwise clamp
        m.max_or_undefined(min_m).min_or_undefined(max_m)
    }
}
/// CPU-native ferromagnet whose magnetization follows an [`MBPgram`] M(B) curve,
/// tracked independently along each axis.
#[derive(Default, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct NativeMBFerromagnet<R> {
// current magnetization (one component per axis)
m: Vec3<R>,
// the M(B) hysteresis parallelogram, shared by all three axes
curve: MBPgram<R>,
}
impl<R: Real> NativeMBFerromagnet<R> {
    /// Build an un-magnetized ferromagnet from curve parameters given in any real type.
    pub fn new<R2: Real>(b_start: R2, b_end: R2, max_m: R2) -> Self {
        let curve = MBPgram::new(b_start.cast(), b_end.cast(), max_m.cast());
        Self { m: Vec3::zero(), curve }
    }
    /// The M(B) curve this material follows.
    pub fn curve(&self) -> MBPgram<R> {
        self.curve
    }
}
impl<R: Real> Material<R> for NativeMBFerromagnet<R> {
    fn step_b(&mut self, context: &CellState<R>, delta_b: Vec3<R>) {
        // Where B will be after this step, given the cell's fields and our current M.
        let target = context.with_m(self.m).b() + delta_b;
        // Each axis follows the hysteresis curve independently.
        let curve = self.curve;
        let step = |m, b| curve.move_b(m, b);
        self.m = Vec3::new(
            step(self.m.x(), target.x()),
            step(self.m.y(), target.y()),
            step(self.m.z(), target.z()),
        );
    }
    fn m(&self) -> Vec3<R> {
        self.m
    }
}
/// Wrapper around [`NativeMBFerromagnet`] whose `step_b` goes through the
/// `coremem_types` curve implementation, so CPU runs exercise the same code
/// path the spirv backend uses (see the XXX note below the impls).
#[derive(Default, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct SpirvMBFerromagnet<R>(NativeMBFerromagnet<R>);
impl<R: Real> SpirvMBFerromagnet<R> {
    /// Construct with the same parameters as [`NativeMBFerromagnet::new`].
    pub fn new<R2: Real>(b_start: R2, b_end: R2, max_m: R2) -> Self {
        let inner = NativeMBFerromagnet::new(b_start, b_end, max_m);
        Self(inner)
    }
    /// The M(B) curve of the wrapped material.
    pub fn curve(&self) -> MBPgram<R> {
        self.0.curve()
    }
}
impl<R: Real> Material<R> for SpirvMBFerromagnet<R> {
fn step_b(&mut self, context: &CellState<R>, delta_b: Vec3<R>) {
// B after this step, accounting for this material's magnetization.
let target_b = context.with_m(self.m()).b() + delta_b;
// Rebuild the curve as the coremem_types (spirv-shared) MBPgram so that this
// CPU path runs the same move_b implementation the GPU backend does.
let curve = coremem_types::mat::MBPgram::new(
self.0.curve.b_start,
self.0.curve.b_end,
self.0.curve.max_m,
);
// println!("step_b {}", target_b);
// Advance each axis independently along the hysteresis curve.
self.0.m = Vec3::new(
curve.move_b(self.0.m.x(), target_b.x()),
curve.move_b(self.0.m.y(), target_b.y()),
curve.move_b(self.0.m.z(), target_b.z()),
);
}
fn m(&self) -> Vec3<R> {
self.0.m()
}
}
// XXX: for debugging, use the same MBFerromagnet impl as we do in spirv impl.
// pub type MBFerromagnet<R> = SpirvMBFerromagnet<R>;
/// The M(B) ferromagnet implementation used by the rest of the crate; swap in the
/// commented alias above to debug against the spirv-shared implementation.
pub type MBFerromagnet<R> = NativeMBFerromagnet<R>;
#[cfg(test)]
mod test {
use super::*;
use float_eq::assert_float_eq;
// Targets strictly inside the parallelogram (b_start=4, b_end=6, max_m=20):
// M must pass through unchanged.
#[test]
fn curve_interior() {
let curve = MBPgram::new(4.0, 6.0, 20.0f32);
assert_float_eq!(curve.move_b(0.0, 2.0), 0.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(0.0, 5.0), 0.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(1.0, 5.0), 1.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(20.0, 5.0), 20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(-20.0, 4.0), -20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(-20.0, -6.0), -20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(20.0, -4.0), 20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(10.0, -2.0), 10.0, abs <= 1e-5);
}
// Targets outside the parallelogram: M must be clamped onto the crossed edge
// (saturating at +/-max_m, or landing partway up a sloped edge).
#[test]
fn curve_exterior() {
let curve = MBPgram::new(4.0, 6.0, 20.0f32);
assert_float_eq!(curve.move_b(0.0, 6.0), 20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(0.0, 7.0), 20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(0.0, -6.0), -20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(0.0, -7.0), -20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(2.0, -6.0), -20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(20.0, -6.0), -20.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(20.0, -5.0), 0.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(20.0, -4.5), 10.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(-15.0, 4.5), -10.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(-15.0, 5.0), 0.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(-15.0, 5.5), 10.0, abs <= 1e-5);
assert_float_eq!(curve.move_b(-15.0, 7.5), 20.0, abs <= 1e-5);
}
// A parameterization approximating the Ferroxcube 3R1 material; expected values
// are compared against the corresponding B(H)-curve model in the comments.
#[test]
fn curve_3r1() {
// slope of 3r1 is about M=793210*B
// This is almost identical to iron (795615)!
let curve = MBPgram::new(-0.3899, 0.3900, 310000f32);
// magnetizing:
// v.s. 198893 in B(H) curve
assert_float_eq!(curve.move_b(0.0, 0.250), 198703.0, abs <= 1.0);
// v.s. 278321 in B(H) curve
assert_float_eq!(curve.move_b(198703.0, 0.350), 278201.0, abs <= 1.0);
assert_float_eq!(curve.move_b(278201.0, 0.390), 310000.0, abs <= 1.0);
// de-magnetizing:
// From saturation, decreasing B causes NO decrease in M: instead, it causes a decrease in
// H. This is probably BAD: in the B(H) curve, a large change in H always causes a large
// change in B. The movement of H here is likely to induce current, whereas it SHOULDN'T.
assert_float_eq!(curve.move_b(310000.0, 0.38995), 310000.0, abs <= 1.0);
// v.s. 258626 in B(H); H = 220
assert_float_eq!(curve.move_b(310000.0, 0.325), 258406.0, abs <= 1.0);
// here's where H crosses 0 (v.s. B=0.325 in the B(H) curve... quite a difference)
assert_float_eq!(curve.move_b(310000.0, 0.050), 39788.438, abs <= 1.0);
// v.s. 35.0 in B(H)
assert_float_eq!(curve.move_b(310000.0, 0.0), 39.75, abs <= 1.0);
// negative magnetization:
// erase the magnetization: H = -40
assert_float_eq!(curve.move_b(310000.0, -0.00005), 0.0, abs <= 0.1);
// the magnetization has been completely erased:
assert_float_eq!(curve.move_b(310000.0, -0.25), -198703.0, abs <= 1.0);
}
}

View File

@@ -1,327 +0,0 @@
use crate::CellState;
use crate::geom::Vec3;
use crate::real::Real;
use crate::sim::{PmlParameters, PmlState, StepParameters, StepParametersMut};
use enum_dispatch::enum_dispatch;
use serde::{Serialize, Deserialize};
pub mod db;
mod bh_ferromagnet;
mod mb_ferromagnet;
mod linear;
pub use bh_ferromagnet::*;
pub use mb_ferromagnet::*;
pub use coremem_types::mat::{AnisomorphicConductor, Ferroxcube3R1MH, IsoConductorOr, IsomorphicConductor, MHPgram};
pub use linear::*;
/// A cell material: supplies step parameters (conductivity, optional PML state)
/// to the field update, and may carry magnetization state that evolves as B changes.
/// All methods have vacuum-like defaults, so trivial materials implement nothing.
#[enum_dispatch]
pub trait Material<R: Real> {
fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
// by default, behave as a vacuum
StepParametersMut::default()
}
/// Return the magnetization.
fn m(&self) -> Vec3<R> {
Vec3::zero()
}
/// Called just before magnetic field is updated. Optionally change any internal state (e.g. magnetization).
fn step_b(&mut self, _context: &CellState<R>, _delta_b: Vec3<R>) {
}
}
/// Read-only conveniences derived from [`Material`]; blanket-implemented for every
/// `Material` below.
pub trait MaterialExt<R> {
fn step_parameters<'a>(&'a self) -> StepParameters<'a, R>;
fn conductivity(&self) -> Vec3<R>;
}
impl<R: Real, M: Material<R>> MaterialExt<R> for M {
fn step_parameters<'a>(&'a self) -> StepParameters<'a, R> {
// XXX(review): casting a shared `&self` to `&mut` is undefined behavior in
// Rust, regardless of whether the mutable reference is ever written through.
// It presumably "works" because step_parameters_mut doesn't mutate on this
// path, but this should be redesigned (e.g. a `&self` accessor on the trait,
// or interior mutability) — TODO confirm and fix.
unsafe { &mut *(self as *const M as *mut M) }.step_parameters_mut().into()
}
fn conductivity(&self) -> Vec3<R> {
self.step_parameters().conductivity()
}
}
/// Capable of capturing all field-related information about a material at any
/// snapshot moment-in-time. Useful for serializing state.
#[derive(Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct Static<R> {
// conductivity at the moment of capture
pub conductivity: Vec3<R>,
// pub pml: Option<(PmlState, PmlParameters)>,
// magnetization at the moment of capture
pub m: Vec3<R>,
}
impl<R: Real> Static<R> {
    /// Capture a field-state snapshot of any material.
    pub fn from_material<M: Material<R>>(m: &M) -> Self {
        let params = m.step_parameters();
        let conductivity = params.conductivity();
        Self {
            conductivity,
            // pml: params.pml().map(|(s, p)| (*s, p)),
            m: m.m(),
        }
    }
    // pub fn from_pml(pseudo_conductivity: Vec3<flt::Real>) -> Self {
    //     Self::from_material(&Pml::new(pseudo_conductivity))
    // }
}
impl<R: Real> Material<R> for Static<R> {
fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
// a snapshot hands back only its captured conductivity; PML state is not
// preserved across serialization (see the commented-out field above).
StepParametersMut::new(
self.conductivity,
None, // self.pml.as_mut().map(|(s, p)| (s, *p)),
)
}
fn m(&self) -> Vec3<R> {
self.m
}
}
impl<R: Real, T> From<T> for Static<R>
where T: Into<GenericMaterial<R>>
{
    /// Snapshot anything representable as a [`GenericMaterial`].
    fn from(mat: T) -> Self {
        let generic: GenericMaterial<R> = mat.into();
        Self::from_material(&generic)
    }
}
/// Perfectly-Matched-Layer boundary material: pairs mutable PML state with its
/// fixed parameters.
#[derive(Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct Pml<R>(PmlState<R>, PmlParameters<R>);
impl<R: Real> Pml<R> {
    /// Create a PML cell with fresh state and the given pseudo-conductivity.
    pub fn new<R2: Real>(pseudo_conductivity: Vec3<R2>) -> Self {
        let params = PmlParameters::new(pseudo_conductivity);
        Self(PmlState::new(), params)
    }
}
impl<R: Real> Material<R> for Pml<R> {
fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
StepParametersMut::default().with_pml(&mut self.0, self.1)
}
}
// #[enum_dispatch(Material)]
/// Closed set of every material a simulation cell can host (including PML boundaries).
#[derive(Clone, PartialEq, Serialize, Deserialize)]
pub enum GenericMaterial<R> {
Conductor(AnisomorphicConductor<R>),
LinearMagnet(LinearMagnet<R>),
Pml(Pml<R>),
MBFerromagnet(MBFerromagnet<R>),
Ferroxcube3R1(Ferroxcube3R1<R>),
MinimalSquare(MinimalSquare<R>),
}
impl<R: Real> Default for GenericMaterial<R> {
fn default() -> Self {
Self::Conductor(Default::default())
}
}
// Conversions so each concrete material can be lifted into the GenericMaterial enum.
impl<R> From<AnisomorphicConductor<R>> for GenericMaterial<R> {
fn from(inner: AnisomorphicConductor<R>) -> Self {
Self::Conductor(inner)
}
}
// An isomorphic conductor of any real type is re-cast to this enum's real type,
// then widened to the anisomorphic representation.
impl<R: Real, V: Real> From<IsomorphicConductor<V>> for GenericMaterial<R> {
fn from(inner: IsomorphicConductor<V>) -> Self {
let iso_r = IsomorphicConductor::new(inner.iso_conductivity().cast::<R>());
Self::Conductor(iso_r.into())
}
}
impl<R> From<LinearMagnet<R>> for GenericMaterial<R> {
fn from(inner: LinearMagnet<R>) -> Self {
Self::LinearMagnet(inner)
}
}
impl<R> From<Pml<R>> for GenericMaterial<R> {
fn from(inner: Pml<R>) -> Self {
Self::Pml(inner)
}
}
impl<R> From<MBFerromagnet<R>> for GenericMaterial<R> {
fn from(inner: MBFerromagnet<R>) -> Self {
Self::MBFerromagnet(inner)
}
}
impl<R> From<Ferroxcube3R1<R>> for GenericMaterial<R> {
fn from(inner: Ferroxcube3R1<R>) -> Self {
Self::Ferroxcube3R1(inner)
}
}
impl<R> From<MinimalSquare<R>> for GenericMaterial<R> {
fn from(inner: MinimalSquare<R>) -> Self {
Self::MinimalSquare(inner)
}
}
impl<R: Real> Material<R> for GenericMaterial<R> {
// Manual delegation to the active variant; enum_dispatch is currently disabled
// (see the commented attribute on the enum).
fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
use GenericMaterial::*;
match self {
Conductor(inner) => inner.step_parameters_mut(),
LinearMagnet(inner) => inner.step_parameters_mut(),
Pml(inner) => inner.step_parameters_mut(),
MBFerromagnet(inner) => inner.step_parameters_mut(),
Ferroxcube3R1(inner) => inner.step_parameters_mut(),
MinimalSquare(inner) => inner.step_parameters_mut(),
}
}
/// Return the magnetization.
fn m(&self) -> Vec3<R> {
use GenericMaterial::*;
match self {
Conductor(inner) => inner.m(),
LinearMagnet(inner) => inner.m(),
Pml(inner) => inner.m(),
MBFerromagnet(inner) => inner.m(),
// fully-qualified: these types also have an inherent or cross-crate `m`
Ferroxcube3R1(inner) => Material::m(inner),
MinimalSquare(inner) => Material::m(inner),
}
}
/// Called just before magnetic field is updated. Optionally change any internal state (e.g. magnetization).
fn step_b(&mut self, context: &CellState<R>, delta_b: Vec3<R>) {
use GenericMaterial::*;
match self {
Conductor(inner) => inner.step_b(context, delta_b),
LinearMagnet(inner) => inner.step_b(context, delta_b),
Pml(inner) => inner.step_b(context, delta_b),
MBFerromagnet(inner) => inner.step_b(context, delta_b),
Ferroxcube3R1(inner) => inner.step_b(context, delta_b),
MinimalSquare(inner) => inner.step_b(context, delta_b),
}
}
}
// #[enum_dispatch(Material)]
/// Like [`GenericMaterial`], but excluding the PML boundary variant.
#[derive(Clone, Serialize, Deserialize)]
pub enum GenericMaterialNoPml<R> {
Conductor(AnisomorphicConductor<R>),
LinearMagnet(LinearMagnet<R>),
MBFerromagnet(MBFerromagnet<R>),
Ferroxcube3R1(Ferroxcube3R1<R>),
MinimalSquare(MinimalSquare<R>),
}
impl<R: Real> Default for GenericMaterialNoPml<R> {
// default: a default-constructed conductor, matching GenericMaterial::default
fn default() -> Self {
AnisomorphicConductor::default().into()
}
}
impl<R> From<AnisomorphicConductor<R>> for GenericMaterialNoPml<R> {
fn from(inner: AnisomorphicConductor<R>) -> Self {
Self::Conductor(inner)
}
}
impl<R: Real> Material<R> for GenericMaterialNoPml<R> {
// Manual delegation to the active variant (enum_dispatch disabled).
fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
use GenericMaterialNoPml::*;
match self {
Conductor(inner) => inner.step_parameters_mut(),
LinearMagnet(inner) => inner.step_parameters_mut(),
MBFerromagnet(inner) => inner.step_parameters_mut(),
Ferroxcube3R1(inner) => inner.step_parameters_mut(),
MinimalSquare(inner) => inner.step_parameters_mut(),
}
}
/// Return the magnetization.
fn m(&self) -> Vec3<R> {
use GenericMaterialNoPml::*;
match self {
Conductor(inner) => inner.m(),
LinearMagnet(inner) => inner.m(),
MBFerromagnet(inner) => inner.m(),
// fully-qualified: these types also have an inherent or cross-crate `m`
Ferroxcube3R1(inner) => Material::m(inner),
MinimalSquare(inner) => Material::m(inner),
}
}
/// Called just before magnetic field is updated. Optionally change any internal state (e.g. magnetization).
fn step_b(&mut self, context: &CellState<R>, delta_b: Vec3<R>) {
use GenericMaterialNoPml::*;
match self {
Conductor(inner) => inner.step_b(context, delta_b),
LinearMagnet(inner) => inner.step_b(context, delta_b),
MBFerromagnet(inner) => inner.step_b(context, delta_b),
Ferroxcube3R1(inner) => inner.step_b(context, delta_b),
MinimalSquare(inner) => inner.step_b(context, delta_b),
}
}
}
/// Materials which have only 1 Vec3.
// #[enum_dispatch(Material)]
#[derive(Clone, Serialize, Deserialize)]
pub enum GenericMaterialOneField<R> {
Conductor(AnisomorphicConductor<R>),
Ferroxcube3R1(Ferroxcube3R1<R>),
MinimalSquare(MinimalSquare<R>),
}
impl<R: Real> Default for GenericMaterialOneField<R> {
// default: a default-constructed conductor, matching the other Generic* enums
fn default() -> Self {
AnisomorphicConductor::default().into()
}
}
impl<R> From<AnisomorphicConductor<R>> for GenericMaterialOneField<R> {
fn from(inner: AnisomorphicConductor<R>) -> Self {
Self::Conductor(inner)
}
}
impl<R: Real> Material<R> for GenericMaterialOneField<R> {
// Manual delegation to the active variant (enum_dispatch disabled).
fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
use GenericMaterialOneField::*;
match self {
Conductor(inner) => inner.step_parameters_mut(),
Ferroxcube3R1(inner) => inner.step_parameters_mut(),
MinimalSquare(inner) => inner.step_parameters_mut(),
}
}
/// Return the magnetization.
fn m(&self) -> Vec3<R> {
use GenericMaterialOneField::*;
match self {
Conductor(inner) => inner.m(),
// fully-qualified: these types also have an inherent or cross-crate `m`
Ferroxcube3R1(inner) => Material::m(inner),
MinimalSquare(inner) => Material::m(inner),
}
}
/// Called just before magnetic field is updated. Optionally change any internal state (e.g. magnetization).
fn step_b(&mut self, context: &CellState<R>, delta_b: Vec3<R>) {
use GenericMaterialOneField::*;
match self {
Conductor(inner) => inner.step_b(context, delta_b),
Ferroxcube3R1(inner) => inner.step_b(context, delta_b),
MinimalSquare(inner) => inner.step_b(context, delta_b),
}
}
}
// coremem_types adapters
impl<R: Real> Material<R> for AnisomorphicConductor<R> {
    fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
        // vacuum defaults, with conductivity delegated to the shared coremem_types impl
        StepParametersMut::default()
            .with_conductivity(coremem_types::mat::Material::conductivity(self))
    }
}
impl<R: Real> Material<R> for IsomorphicConductor<R> {
    fn step_parameters_mut<'a>(&'a mut self) -> StepParametersMut<'a, R> {
        // vacuum defaults, with conductivity delegated to the shared coremem_types impl
        StepParametersMut::default()
            .with_conductivity(coremem_types::mat::Material::conductivity(self))
    }
}

View File

@@ -1,84 +1,217 @@
use crate::geom::{Meters, Region, Torus, Vec3, WorldRegion}; use crate::geom::{HasCrossSection, Meters, Region, Torus, WorldRegion};
use crate::real::{Real as _, ToFloat as _}; use crate::real::{Real as _, ToFloat as _};
use crate::sim::SampleableSim; use crate::cross::vec::{Vec3, Vec3u};
use dyn_clone::{self, DynClone}; use crate::sim::AbstractSim;
use indexmap::IndexMap;
use serde::{Serialize, Deserialize}; use serde::{Serialize, Deserialize};
use std::ops::AddAssign;
// TODO: remove this Clone and Send requirement? Have Measurements be shared by-reference across // TODO: do we really need both Send and Sync?
// threads? i.e. Sync, and no Clone pub trait AbstractMeasurement<S>: Send + Sync {
#[typetag::serde(tag = "type")] fn key_value(&self, state: &S) -> Vec<Measurement>;
pub trait AbstractMeasurement: Send + Sync + DynClone {
fn eval(&self, state: &dyn SampleableSim) -> String;
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String>;
} }
dyn_clone::clone_trait_object!(AbstractMeasurement);
pub fn eval_multiple_kv(state: &dyn SampleableSim, meas: &[Box<dyn AbstractMeasurement>]) -> IndexMap<String, String> { pub fn as_dyn_measurements<S, M: AbstractMeasurement<S>>(meas: &[M]) -> Vec<&dyn AbstractMeasurement<S>> {
let mut r = IndexMap::new(); meas.into_iter().map(|m| m as &dyn AbstractMeasurement<S>).collect()
for m in meas { }
let other = m.key_value(state);
r.extend(other.into_iter());
/// combine several measurements
pub fn eval_multiple<S>(state: &S, meas: &[&dyn AbstractMeasurement<S>]) -> Vec<Measurement> {
meas.into_iter().flat_map(|m| m.key_value(state).into_iter()).collect()
}
/// The typed payload of a `Measurement`.
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum MeasurementValue {
// a sampled vector field value
Field(Vec3<f32>),
// a scalar quantity
Float(f32),
// an integer quantity (e.g. a step counter)
Int(u64),
// an integer 3d extent (e.g. a grid dimension)
Dim(Vec3u),
}
// From conversions so Measurement constructors can accept any payload type directly.
impl From<Vec3<f32>> for MeasurementValue {
fn from(v: Vec3<f32>) -> Self {
Self::Field(v)
}
}
impl From<f32> for MeasurementValue {
fn from(v: f32) -> Self {
Self::Float(v)
}
}
impl From<u64> for MeasurementValue {
fn from(v: u64) -> Self {
Self::Int(v)
}
}
impl From<Vec3u> for MeasurementValue {
fn from(v: Vec3u) -> Self {
Self::Dim(v)
}
}
/// A single named, typed, unit-tagged datum sampled from the simulation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Measurement {
name: String,
value: MeasurementValue,
/// e.g. "A" for Amps
unit: String,
}
impl Measurement {
    /// Build a measurement from any payload convertible to [`MeasurementValue`].
    fn new<T: Into<MeasurementValue>>(name: &str, value: T, unit: &str) -> Self {
        Self {
            name: name.to_owned(),
            value: value.into(),
            unit: unit.to_owned(),
        }
    }
    /// Like [`Self::new`], for dimensionless quantities.
    fn new_unitless<T: Into<MeasurementValue>>(name: &str, value: T) -> Self {
        Self::new(name, value, "")
    }
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Human-oriented rendering; scalars with a unit get an SI-prefixed form.
    pub fn pretty_print(&self) -> String {
        use MeasurementValue::*;
        match self.value {
            Field(v) => format!("{}{}", v, self.unit),
            // idiom fix: `is_empty()` instead of comparing against ""
            Float(f) => if !self.unit.is_empty() {
                SiScale::format_short(f, &self.unit)
            } else {
                f.to_string()
            },
            Int(u) => format!("{}{}", u, self.unit),
            Dim(v) => format!("{}x{}x{}{}", v.x(), v.y(), v.z(), self.unit),
        }
    }
    /// format the Measurement in a way that could be parseable later.
    /// one major use case for this is in dumping the type to a CSV.
    pub fn machine_readable(&self) -> String {
        use MeasurementValue::*;
        match self.value {
            Field(v) => format!("{},{},{}", v.x(), v.y(), v.z()),
            Float(f) => f.to_string(),
            Int(u) => u.to_string(),
            Dim(v) => format!("{},{},{}", v.x(), v.y(), v.z()),
        }
    }
    /// retrieve the float value of this measurement -- if it's of float type.
    /// useful for tests
    pub fn get_float(&self) -> Option<f32> {
        match self.value {
            MeasurementValue::Float(f) => Some(f),
            _ => None,
        }
    }
}
impl<S> AbstractMeasurement<S> for Measurement {
fn key_value(&self, _state: &S) -> Vec<Measurement> {
vec![self.clone()]
}
}
/// SI magnitude prefixes used when pretty-printing scalar measurements.
// NOTE(review): "Terra" is a typo for "Tera", but renaming touches every use site.
enum SiScale {
Pico,
Nano,
Micro,
Milli,
// no prefix
Unit,
Kilo,
Mega,
Giga,
Terra,
}
impl SiScale {
fn for_value(v: f32) -> Self {
use SiScale::*;
match v.abs() {
v if v < 1e-12 => Unit,
v if v < 1e-9 => Pico,
v if v < 1e-6 => Nano,
v if v < 1e-3 => Micro,
v if v < 1e0 => Milli,
v if v < 1e3 => Unit,
v if v < 1e6 => Kilo,
v if v < 1e9 => Mega,
v if v < 1e12 => Giga,
v if v < 1e15 => Terra,
_ => Unit
}
}
/// return the numerical scale of this prefix.
/// e.g. `scale(&Pico) -> 1e-12
fn scale(&self) -> f32 {
use SiScale::*;
match *self {
Pico => 1e-12,
Nano => 1e-9,
Micro => 1e-6,
Milli => 1e-3,
Unit => 1.0,
Kilo => 1e3,
Mega => 1e6,
Giga => 1e9,
Terra => 1e12,
}
}
/// return the short string for this scale.
/// e.g. `shortcode(Pico) -> "p"`
fn shortcode(&self) -> &'static str {
use SiScale::*;
match *self {
Pico => "p",
Nano => "n",
Micro => "u",
Milli => "m",
Unit => "",
Kilo => "k",
Mega => "M",
Giga => "G",
Terra => "T",
}
}
/// format `v`, with the provided unit.
/// e.g. `format_short(1234, "A") -> "1.23 kA"
fn format_short(v: f32, unit: &str) -> String {
let si = SiScale::for_value(v);
let scaled = v / si.scale();
format!("{:.2} {}{}", scaled, si.shortcode(), unit)
} }
r
} }
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct Time; pub struct Time;
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for Time {
impl AbstractMeasurement for Time { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String { vec![
format!("{:.3e}s (step {})", state.time(), state.step_no()) Measurement::new_unitless("step", state.step_no()),
} Measurement::new("time", state.time(), "s"),
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { ]
[
("step".to_string(), state.step_no().to_string()),
("time".to_string(), state.time().to_string()),
].into_iter().collect()
} }
} }
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct Meta; pub struct Meta;
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for Meta {
impl AbstractMeasurement for Meta { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String { vec![
format!("{}x{}x{} feat: {:.1e}m", state.width(), state.height(), state.depth(), state.feature_size()) Measurement::new_unitless("dim", state.size().0),
} Measurement::new("feature_size", state.feature_size(), "m"),
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { ]
[
("width".to_string(), state.width().to_string()),
("height".to_string(), state.height().to_string()),
("depth".to_string(), state.depth().to_string()),
("feature_size".to_string(), state.feature_size().to_string()),
].into_iter().collect()
} }
} }
#[derive(Clone, Serialize, Deserialize)]
pub struct Label(pub String);
impl Label {
pub fn new<S: Into<String>>(s: S) -> Self {
Self(s.into())
}
}
#[typetag::serde]
impl AbstractMeasurement for Label {
fn eval(&self, _state: &dyn SampleableSim) -> String {
self.0.clone()
}
fn key_value(&self, _state: &dyn SampleableSim) -> IndexMap<String, String> {
[
(self.0.clone(), self.0.clone()),
].into_iter().collect()
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct Volume { pub struct Volume {
name: String, name: String,
region: Box<dyn Region>, region: Box<dyn Region>,
@@ -92,29 +225,21 @@ impl Volume {
} }
} }
/// Returns the volume of the region, in units of um^3 /// Returns the volume of the region, in units of um^3
fn data(&self, state: &dyn SampleableSim) -> f32 { fn data<S: AbstractSim>(&self, state: &S) -> f32 {
let feat_um = state.feature_size() as f64 * 1e6; let feat_um = state.feature_size() as f64 * 1e6;
(state.volume_of_region(&*self.region) as f64 * feat_um * feat_um * feat_um) as f32 (state.volume_of_region(&*self.region) as f64 * feat_um * feat_um * feat_um) as f32
} }
} }
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for Volume {
impl AbstractMeasurement for Volume { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String { vec![
format!("Vol({}): {:.2e} um^3", Measurement::new(&format!("Vol({})", self.name), self.data(state), "um^3"),
self.name, ]
self.data(state),
)
}
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> {
[
(format!("Vol({})", self.name), self.data(state).to_string()),
].into_iter().collect()
} }
} }
#[derive(Clone, Serialize, Deserialize)]
pub struct Current { pub struct Current {
name: String, name: String,
region: Box<dyn Region>, region: Box<dyn Region>,
@@ -127,7 +252,7 @@ impl Current {
region: Box::new(r) region: Box::new(r)
} }
} }
fn data(&self, state: &dyn SampleableSim) -> (f32, Vec3<f32>) { fn data<S: AbstractSim>(&self, state: &S) -> (f32, Vec3<f32>) {
let FieldSample(volume, current_mag, current_vec) = state.map_sum_over_enumerated(&*self.region, |coord: Meters, _cell| { let FieldSample(volume, current_mag, current_vec) = state.map_sum_over_enumerated(&*self.region, |coord: Meters, _cell| {
let current = state.current(coord); let current = state.current(coord);
FieldSample(1, current.mag().cast(), current.cast()) FieldSample(1, current.mag().cast(), current.cast())
@@ -138,6 +263,24 @@ impl Current {
} }
} }
// TODO: clean up these FieldSample types
/// Pairwise sum accumulator: summing an iterator of `TupleSum((a, b))` sums the
/// two components independently.
#[derive(Default)]
struct TupleSum<T>(T);
impl<T0: Default + AddAssign, T1: Default + AddAssign> std::iter::Sum for TupleSum<(T0, T1)> {
fn sum<I>(iter: I) -> Self
where I: Iterator<Item = Self>
{
let mut s = Self::default();
for TupleSum((a0, a1)) in iter {
s.0.0 += a0;
s.0.1 += a1;
}
s
}
}
#[derive(Default)] #[derive(Default)]
struct FieldSample(u32, f64, Vec3<f64>); struct FieldSample(u32, f64, Vec3<f64>);
@@ -183,65 +326,79 @@ impl std::iter::Sum for FieldSamples<[FieldSample; 3]> {
} }
} }
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for Current {
impl AbstractMeasurement for Current { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let (mean_current_mag, mean_current_vec) = self.data(state); let (mean_current_mag, mean_current_vec) = self.data(state);
format!("I/cell({}): {:.2e} {:.2e}", vec![
self.name, Measurement::new(
mean_current_mag, &format!("Imag/cell({})", self.name),
mean_current_vec) mean_current_mag,
} "A",
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { ),
let (mean_current_mag, mean_current_vec) = self.data(state); Measurement::new(
[ &format!("I/cell({})", self.name),
(format!("Imag/cell({})", self.name), mean_current_mag.to_string()), mean_current_vec,
(format!("I/cell({})", self.name), mean_current_vec.to_string()), "A",
].into_iter().collect() ),
]
} }
} }
/// Measures the current directed around a closed loop /// Measures the current directed around a closed loop
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct CurrentLoop { pub struct CurrentLoop<R> {
name: String, name: String,
region: Torus region: R,
} }
impl CurrentLoop { impl<R> CurrentLoop<R> {
pub fn new(name: &str, r: Torus) -> Self { pub fn new(name: &str, r: R) -> Self {
Self { Self {
name: name.into(), name: name.into(),
region: r, region: r,
} }
} }
fn data(&self, state: &dyn SampleableSim) -> f32 { }
let FieldSample(volume, directed_current, _current_vec) = state.map_sum_over_enumerated(&self.region, |coord: Meters, _cell| { impl<R: Region + HasCrossSection> CurrentLoop<R> {
let normal = self.region.axis(); fn data<S: AbstractSim>(&self, state: &S) -> f32 {
let to_coord = *coord - *self.region.center(); // - current exists as a property of a 2d surface.
let tangent = normal.cross(to_coord).norm(); // - the user has provided us a 3d volume which behaves as though it's an extruded surface:
let current = state.current(coord); // for any point in the volume we can query the normal vector of the cross section
let directed_current = current.dot(tangent.cast()); // containing that point.
FieldSample(1, directed_current.cast(), current.cast()) // - we choose that measuring the "current" on such a volume means to measure the average
// current through all its cross sections (for most boring materials, each
// cross section has nearly identical current).
// - therefore, enumerate the entire volume and compute the "net" current (the sum over
// each cell of whatever current in that cell is along the cross-section normal).
// then divide by the number of complete cross sections we measured, to average.
let feature_area = state.feature_size() * state.feature_size();
let TupleSum((net_current, cross_sections)) = state.map_sum_over_enumerated(&self.region, move |coord: Meters, _cell| {
// `normal` represents both the size of the cross section (m^2) this cell belongs to,
// and the normal direction of the cross section.
let normal = self.region.cross_section_normal(coord); // [m^2]
// calculate the amount of normal current through this specific cell
let current_density = state.current_density(coord); // [A/m^2]
let cross_sectional_current = feature_area * current_density.dot(normal.norm()); // [A]
// keep track of how many cross sections we enumerate, since each additional cross
// sections represents a double-count of the current.
let num_cross_sections_filled = feature_area / normal.mag();
TupleSum((cross_sectional_current, num_cross_sections_filled))
}); });
let mean_directed_current = directed_current.cast::<f32>() / f32::from_primitive(volume); let mean_cross_sectional_current = net_current.cast::<f32>() / cross_sections;
let cross_section = self.region.cross_section() / (state.feature_size() * state.feature_size()); mean_cross_sectional_current
let cross_sectional_current = mean_directed_current * cross_section;
cross_sectional_current
} }
} }
#[typetag::serde] impl<R: Region + HasCrossSection, S: AbstractSim> AbstractMeasurement<S> for CurrentLoop<R> {
impl AbstractMeasurement for CurrentLoop { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let cross_sectional_current = self.data(state); let cross_sectional_current = self.data(state);
format!("I({}): {:.2e}", self.name, cross_sectional_current) vec![
} Measurement::new(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("I({})", self.name),
let cross_sectional_current = self.data(state); cross_sectional_current,
[ "A"
(format!("I({})", self.name), cross_sectional_current.to_string()), ),
].into_iter().collect() ]
} }
} }
@@ -260,7 +417,7 @@ impl MagneticLoop {
region: r, region: r,
} }
} }
fn data(&self, state: &dyn SampleableSim) -> (f32, f32, f32) { fn data<S: AbstractSim>(&self, state: &S) -> (f32, f32, f32) {
let FieldSamples([ let FieldSamples([
FieldSample(volume, directed_m, _m_vec), FieldSample(volume, directed_m, _m_vec),
FieldSample(_, directed_b, _b_vec), FieldSample(_, directed_b, _b_vec),
@@ -300,29 +457,18 @@ impl MagneticLoop {
} }
} }
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for MagneticLoop {
impl AbstractMeasurement for MagneticLoop { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let (mean_directed_m, mean_directed_b, mean_directed_h) = self.data(state); let (mean_directed_m, mean_directed_b, mean_directed_h) = self.data(state);
format!( vec![
"M({}): {:.2e}; B({}): {:.2e}; H({}): {:.2e}", Measurement::new_unitless(&format!("M({})", self.name), mean_directed_m),
self.name, mean_directed_m, Measurement::new_unitless(&format!("B({})", self.name), mean_directed_b),
self.name, mean_directed_b, Measurement::new_unitless(&format!("H({})", self.name), mean_directed_h),
self.name, mean_directed_h, ]
)
}
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> {
let (mean_directed_m, mean_directed_b, mean_directed_h) = self.data(state);
[
(format!("M({})", self.name), mean_directed_m.to_string()),
(format!("B({})", self.name), mean_directed_b.to_string()),
(format!("H({})", self.name), mean_directed_h.to_string()),
].into_iter().collect()
} }
} }
/// mean M over a region /// mean M over a region
#[derive(Clone, Serialize, Deserialize)]
pub struct MagneticFlux { pub struct MagneticFlux {
name: String, name: String,
region: Box<dyn Region>, region: Box<dyn Region>,
@@ -335,7 +481,7 @@ impl MagneticFlux {
region: Box::new(r) region: Box::new(r)
} }
} }
fn data(&self, state: &dyn SampleableSim) -> Vec3<f32> { fn data<S: AbstractSim>(&self, state: &S) -> Vec3<f32> {
let FieldSample(volume, _directed_mag, mag_vec) = state.map_sum_over(&*self.region, |cell| { let FieldSample(volume, _directed_mag, mag_vec) = state.map_sum_over(&*self.region, |cell| {
let b = cell.b(); let b = cell.b();
let mag = b.mag(); let mag = b.mag();
@@ -346,22 +492,19 @@ impl MagneticFlux {
} }
} }
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for MagneticFlux {
impl AbstractMeasurement for MagneticFlux { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let mean_mag = self.data(state); let mean_mag = self.data(state);
format!("Bavg({}): {:.2e}", self.name, mean_mag) vec![
} Measurement::new_unitless(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("Bavg({})", self.name),
let mean_mag = self.data(state); mean_mag,
[ )
(format!("Bavg({})", self.name), mean_mag.to_string()), ]
].into_iter().collect()
} }
} }
/// mean B over a region /// mean B over a region
#[derive(Clone, Serialize, Deserialize)]
pub struct Magnetization { pub struct Magnetization {
name: String, name: String,
region: Box<dyn Region>, region: Box<dyn Region>,
@@ -374,7 +517,7 @@ impl Magnetization {
region: Box::new(r) region: Box::new(r)
} }
} }
fn data(&self, state: &dyn SampleableSim) -> Vec3<f32> { fn data<S: AbstractSim>(&self, state: &S) -> Vec3<f32> {
let FieldSample(volume, _directed_mag, mag_vec) = state.map_sum_over(&*self.region, |cell| { let FieldSample(volume, _directed_mag, mag_vec) = state.map_sum_over(&*self.region, |cell| {
let m = cell.m(); let m = cell.m();
let mag = m.mag(); let mag = m.mag();
@@ -385,17 +528,14 @@ impl Magnetization {
} }
} }
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for Magnetization {
impl AbstractMeasurement for Magnetization { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let mean_mag = self.data(state); let mean_mag = self.data(state);
format!("Mavg({}): {:.2e}", self.name, mean_mag) vec![
} Measurement::new_unitless(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("Mavg({})", self.name), mean_mag
let mean_mag = self.data(state); ),
[ ]
(format!("Mavg({})", self.name), mean_mag.to_string()),
].into_iter().collect()
} }
} }
@@ -407,17 +547,12 @@ fn loc(v: Meters) -> String {
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct MagnetizationAt(pub Meters); pub struct MagnetizationAt(pub Meters);
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for MagnetizationAt {
impl AbstractMeasurement for MagnetizationAt { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let m = state.sample(self.0).m(); let m = state.sample(self.0).m();
format!("M{}: {:.2e}", loc(self.0), m) vec![
} Measurement::new_unitless(&format!("M{}", loc(self.0)), m.cast())
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { ]
let m = state.sample(self.0).m();
[
(format!("M{}", loc(self.0)), m.to_string()),
].into_iter().collect()
} }
} }
@@ -425,17 +560,14 @@ impl AbstractMeasurement for MagnetizationAt {
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct MagneticFluxAt(pub Meters); pub struct MagneticFluxAt(pub Meters);
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for MagneticFluxAt {
impl AbstractMeasurement for MagneticFluxAt { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let b = state.sample(self.0).b(); let b = state.sample(self.0).b();
format!("B{}: {:.2e}", loc(self.0), b) vec![
} Measurement::new_unitless(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("B{}", loc(self.0)), b.cast()
let b = state.sample(self.0).b(); )
[ ]
(format!("B{}", loc(self.0)), b.to_string()),
].into_iter().collect()
} }
} }
@@ -443,38 +575,31 @@ impl AbstractMeasurement for MagneticFluxAt {
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct MagneticStrengthAt(pub Meters); pub struct MagneticStrengthAt(pub Meters);
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for MagneticStrengthAt {
impl AbstractMeasurement for MagneticStrengthAt { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let h = state.sample(self.0).h(); let h = state.sample(self.0).h();
format!("H{}: {:.2e}", loc(self.0), h) vec![
} Measurement::new_unitless(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("H{}", loc(self.0)), h.cast()
let h = state.sample(self.0).h(); )
[ ]
(format!("H{}", loc(self.0)), h.to_string()),
].into_iter().collect()
} }
} }
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct ElectricField(pub Meters); pub struct ElectricField(pub Meters);
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for ElectricField {
impl AbstractMeasurement for ElectricField { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let e = state.sample(self.0).e(); let e = state.sample(self.0).e();
format!("E{}: {:.2e}", loc(self.0), e) vec![
} Measurement::new_unitless(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("E{}", loc(self.0)), e.cast()
let e = state.sample(self.0).e(); )
[ ]
(format!("E{}", loc(self.0)), e.to_string()),
].into_iter().collect()
} }
} }
#[derive(Clone, Serialize, Deserialize)]
pub struct Energy { pub struct Energy {
name: String, name: String,
region: Box<dyn Region>, region: Box<dyn Region>,
@@ -490,7 +615,7 @@ impl Energy {
region: Box::new(region), region: Box::new(region),
} }
} }
fn data(&self, state: &dyn SampleableSim) -> f32 { pub(crate) fn data<S: AbstractSim>(&self, state: &S) -> f32 {
// Potential energy stored in a E/M field: // Potential energy stored in a E/M field:
// https://en.wikipedia.org/wiki/Magnetic_energy // https://en.wikipedia.org/wiki/Magnetic_energy
// https://en.wikipedia.org/wiki/Electric_potential_energy#Energy_stored_in_an_electrostatic_field_distribution // https://en.wikipedia.org/wiki/Electric_potential_energy#Energy_stored_in_an_electrostatic_field_distribution
@@ -507,21 +632,17 @@ impl Energy {
} }
} }
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for Energy {
impl AbstractMeasurement for Energy { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let e = self.data(state); let e = self.data(state);
format!("U({}): {:.2e}", self.name, e) vec![
} Measurement::new(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("U({})", self.name), e, "J"
let e = self.data(state); )
[ ]
(format!("U({})", self.name), e.to_string()),
].into_iter().collect()
} }
} }
#[derive(Clone, Serialize, Deserialize)]
pub struct Power { pub struct Power {
name: String, name: String,
region: Box<dyn Region> region: Box<dyn Region>
@@ -537,7 +658,7 @@ impl Power {
region: Box::new(region), region: Box::new(region),
} }
} }
fn data(&self, state: &dyn SampleableSim) -> f32 { fn data<S: AbstractSim>(&self, state: &S) -> f32 {
// Power is P = IV = A*J*V = L^2*J.(LE) = L^3 J.E // Power is P = IV = A*J*V = L^2*J.(LE) = L^3 J.E
// where L is feature size. // where L is feature size.
#[allow(non_snake_case)] #[allow(non_snake_case)]
@@ -549,16 +670,186 @@ impl Power {
} }
} }
#[typetag::serde] impl<S: AbstractSim> AbstractMeasurement<S> for Power {
impl AbstractMeasurement for Power { fn key_value(&self, state: &S) -> Vec<Measurement> {
fn eval(&self, state: &dyn SampleableSim) -> String {
let power = self.data(state); let power = self.data(state);
format!("P({}): {:.2e}", self.name, power) vec![
} Measurement::new(
fn key_value(&self, state: &dyn SampleableSim) -> IndexMap<String, String> { &format!("P({})", self.name), power, "W"
let power = self.data(state); )
[ ]
(format!("P({})", self.name), power.to_string()), }
].into_iter().collect() }
#[cfg(test)]
pub mod test {
use super::*;
use crate::cross::mat::AnisomorphicConductor;
use crate::cross::step::SimMeta;
use crate::geom::Index;
use crate::sim::{Fields, GenericSim};
use crate::stim::Stimulus;
struct MockSim {
e_field: Vec3<f32>,
dim: Vec3u,
feature_size: f32,
mat: AnisomorphicConductor<f32>,
}
impl AbstractSim for MockSim {
type Real = f32;
type Material = AnisomorphicConductor<f32>;
fn meta(&self) -> SimMeta<f32> {
SimMeta::new(self.dim, self.feature_size, 1e-9)
}
fn step_no(&self) -> u64 {
unimplemented!()
}
fn fields_at_index(&self, _pos: Index) -> Fields<Self::Real> {
Fields::new(self.e_field, Vec3::zero(), Vec3::zero())
}
fn get_material_index(&self, _at: Index) -> &Self::Material {
&self.mat
}
fn put_material_index(&mut self, _at: Index, _m: Self::Material) {
unimplemented!()
}
fn step_multiple<S: Stimulus<f32>>(&mut self, _num_steps: u32, _s: &S) {
unimplemented!()
}
fn to_generic(&self) -> GenericSim<Self::Real> {
unimplemented!()
}
}
struct MockRegion {
normal: Vec3<f32>,
}
impl HasCrossSection for MockRegion {
fn cross_section_normal(&self, _p: Meters) -> Vec3<f32> {
self.normal
}
}
impl Region for MockRegion {
fn contains(&self, _p: Meters) -> bool {
true
}
}
#[test]
fn current_loop_trivial() {
let sim = MockSim {
e_field: Vec3::new(1.0, 0.0, 0.0),
dim: Vec3u::new(1, 1, 1),
feature_size: 1.0,
mat: AnisomorphicConductor::new(Vec3::new(1.0, 1.0, 1.0)),
};
let region = MockRegion {
normal: Vec3::new(1.0, 0.0, 0.0),
};
let kv = CurrentLoop::new("test", region).key_value(&sim);
assert_eq!(kv.len(), 1);
// measured area is 1 m^2
// region cross-section is 1 m^2
// conductivity is 1 S/m
assert_eq!(kv[0].get_float().unwrap(), 1.0);
}
#[test]
fn current_loop_multi_cell() {
let sim = MockSim {
e_field: Vec3::new(1.0, 0.0, 0.0),
dim: Vec3u::new(4, 4, 4),
feature_size: 0.25,
mat: AnisomorphicConductor::new(Vec3::new(1.0, 1.0, 1.0)),
};
let region = MockRegion {
normal: Vec3::new(1.0, 0.0, 0.0),
};
let kv = CurrentLoop::new("test", region).key_value(&sim);
assert_eq!(kv.len(), 1);
// measured area is 1 m^2
// region cross-section is 1 m^2
// conductivity is 1 S/m
assert_eq!(kv[0].get_float().unwrap(), 1.0);
}
#[test]
fn current_loop_off_conductor() {
let sim = MockSim {
e_field: Vec3::new(1.0, 1.0, 1.0),
dim: Vec3u::new(4, 4, 4),
feature_size: 0.25,
mat: AnisomorphicConductor::new(Vec3::new(0.0, 1.0, 1.0)),
};
let region = MockRegion {
normal: Vec3::new(1.0, 0.0, 0.0),
};
let kv = CurrentLoop::new("test", region).key_value(&sim);
assert_eq!(kv.len(), 1);
// material is not conductive in the direction being queried
assert_eq!(kv[0].get_float().unwrap(), 0.0);
}
#[test]
fn current_loop_e_field() {
let sim = MockSim {
e_field: Vec3::new(4.0, 2.0, 1.0),
dim: Vec3u::new(4, 4, 4),
feature_size: 0.25,
mat: AnisomorphicConductor::new(Vec3::new(1.0, 1.0, 1.0)),
};
let region = MockRegion {
normal: Vec3::new(1.0, 0.0, 0.0),
};
let kv = CurrentLoop::new("test", region).key_value(&sim);
assert_eq!(kv.len(), 1);
// measured area is 1 m^2
// region cross-section is 1 m^2
// conductivity is 1 S/m
// e field is 4 V/m
assert_eq!(kv[0].get_float().unwrap(), 4.0);
}
#[test]
fn current_loop_conductivity() {
let sim = MockSim {
e_field: Vec3::new(4.0, 2.0, 1.0),
dim: Vec3u::new(4, 4, 4),
feature_size: 0.25,
mat: AnisomorphicConductor::new(Vec3::new(3.0, 1.0, 1.0)),
};
let region = MockRegion {
normal: Vec3::new(1.0, 0.0, 0.0),
};
let kv = CurrentLoop::new("test", region).key_value(&sim);
assert_eq!(kv.len(), 1);
// measured area is 1 m^2
// region cross-section is 1 m^2
// conductivity is 3 S/m
// e field is 4 V/m
assert_eq!(kv[0].get_float().unwrap(), 3.0*4.0);
}
#[test]
fn current_loop_cross_section() {
let sim = MockSim {
e_field: Vec3::new(4.0, 2.0, 1.0),
dim: Vec3u::new(4, 4, 4),
feature_size: 0.5,
mat: AnisomorphicConductor::new(Vec3::new(3.0, 1.0, 1.0)),
};
let region = MockRegion {
normal: Vec3::new(16.0, 0.0, 0.0),
};
let kv = CurrentLoop::new("test", region).key_value(&sim);
assert_eq!(kv.len(), 1);
// measured area is 2 m^2
// region cross-section is 16 m^2
// conductivity is 3 S/m
// e field is 4 V/m
assert_eq!(kv[0].get_float().unwrap(), 3.0*4.0*16.0);
} }
} }

View File

@@ -1,7 +1,8 @@
use crate::geom::{Meters, Vec2, Vec3}; use crate::geom::Index;
use crate::real::ToFloat as _; use crate::real::ToFloat as _;
use crate::sim::{SampleableSim, Sample, StaticSim}; use crate::cross::vec::{Vec2, Vec3};
use crate::meas::{self, AbstractMeasurement}; use crate::sim::{AbstractSim, GenericSim, Sample};
use crate::meas::{self, AbstractMeasurement, Measurement};
use crossterm::{cursor, QueueableCommand as _}; use crossterm::{cursor, QueueableCommand as _};
use crossterm::style::{style, Color, PrintStyledContent, Stylize as _}; use crossterm::style::{style, Color, PrintStyledContent, Stylize as _};
use font8x8::{BASIC_FONTS, GREEK_FONTS, UnicodeFonts as _}; use font8x8::{BASIC_FONTS, GREEK_FONTS, UnicodeFonts as _};
@@ -11,6 +12,8 @@ use image::{RgbImage, Rgb};
use imageproc::{pixelops, drawing}; use imageproc::{pixelops, drawing};
use rayon::prelude::*; use rayon::prelude::*;
use serde::{Serialize, Deserialize}; use serde::{Serialize, Deserialize};
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::fs::{File, OpenOptions}; use std::fs::{File, OpenOptions};
use std::io::{BufReader, BufWriter, Seek as _, SeekFrom, Write as _}; use std::io::{BufReader, BufWriter, Seek as _, SeekFrom, Write as _};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@@ -51,10 +54,10 @@ fn scale_unsigned_to_u8(x: f32, typ: f32) -> u8 {
/// Scale a vector to have magnitude between [0, 1). /// Scale a vector to have magnitude between [0, 1).
fn scale_vector(x: Vec2<f32>, typical_mag: f32) -> Vec2<f32> { fn scale_vector(x: Vec2<f32>, typical_mag: f32) -> Vec2<f32> {
let new_mag = scale_unsigned(x.mag(), typical_mag); let new_mag = scale_unsigned(x.mag(), typical_mag);
x.with_mag(new_mag) x.with_mag(new_mag).unwrap_or_default()
} }
fn im_size<S: SampleableSim>(state: &S, max_w: u32, max_h: u32) -> (u32, u32) { fn im_size<S: AbstractSim>(state: &S, max_w: u32, max_h: u32) -> (u32, u32) {
let mut width = max_w; let mut width = max_w;
let mut height = width * state.height() / state.width(); let mut height = width * state.height() / state.width();
if height > max_h { if height > max_h {
@@ -71,6 +74,7 @@ pub enum FieldDisplayMode {
EzBxy, EzBxy,
BCurrent, BCurrent,
M, M,
Material,
} }
impl FieldDisplayMode { impl FieldDisplayMode {
@@ -80,17 +84,19 @@ impl FieldDisplayMode {
BzExy => EzBxy, BzExy => EzBxy,
EzBxy => BCurrent, EzBxy => BCurrent,
BCurrent => M, BCurrent => M,
M => BzExy, M => Material,
Material => BzExy,
} }
} }
pub fn prev(self) -> Self { pub fn prev(self) -> Self {
use FieldDisplayMode::*; use FieldDisplayMode::*;
match self { match self {
BzExy => M, BzExy => Material,
EzBxy => BzExy, EzBxy => BzExy,
BCurrent => EzBxy, BCurrent => EzBxy,
M => BCurrent, M => BCurrent,
Material => M,
} }
} }
} }
@@ -128,20 +134,22 @@ impl RenderConfig {
struct RenderSteps<'a, S> { struct RenderSteps<'a, S> {
im: RgbImage, im: RgbImage,
sim: &'a S, sim: &'a S,
meas: &'a [Box<dyn AbstractMeasurement>], meas: &'a [&'a dyn AbstractMeasurement<S>],
/// Simulation z coordinate to sample /// Simulation z coordinate to sample
z: u32, z: u32,
} }
impl<'a, S: SampleableSim> RenderSteps<'a, S> { impl<'a, S: AbstractSim> RenderSteps<'a, S> {
// TODO: this could probably be a single measurement, and we just let collections of
// measurements also behave as measurements
/// Render using default configuration constants /// Render using default configuration constants
fn render(state: &'a S, measurements: &'a [Box<dyn AbstractMeasurement>], z: u32) -> RgbImage { fn render(state: &'a S, measurements: &'a [&'a dyn AbstractMeasurement<S>], z: u32) -> RgbImage {
Self::render_configured(state, measurements, z, (640, 480), RenderConfig::default()) Self::render_configured(state, measurements, z, (640, 480), RenderConfig::default())
} }
/// Render, controlling things like the size. /// Render, controlling things like the size.
fn render_configured( fn render_configured(
state: &'a S, state: &'a S,
measurements: &'a [Box<dyn AbstractMeasurement>], measurements: &'a [&'a dyn AbstractMeasurement<S>],
z: u32, z: u32,
max_size: (u32, u32), max_size: (u32, u32),
config: RenderConfig, config: RenderConfig,
@@ -173,11 +181,14 @@ impl<'a, S: SampleableSim> RenderSteps<'a, S> {
FieldDisplayMode::M => { FieldDisplayMode::M => {
me.render_m(config.scale); me.render_m(config.scale);
} }
FieldDisplayMode::Material => {
me.render_mat(config.scale);
}
} }
me.render_measurements(); me.render_measurements();
me.im me.im
} }
fn new(sim: &'a S, meas: &'a [Box<dyn AbstractMeasurement>], width: u32, height: u32, z: u32) -> Self { fn new(sim: &'a S, meas: &'a [&'a dyn AbstractMeasurement<S>], width: u32, height: u32, z: u32) -> Self {
RenderSteps { RenderSteps {
im: RgbImage::new(width, height), im: RgbImage::new(width, height),
sim, sim,
@@ -186,13 +197,10 @@ impl<'a, S: SampleableSim> RenderSteps<'a, S> {
} }
} }
fn get_at_px(&self, x_px: u32, y_px: u32) -> Sample { fn get_at_px<'b>(&'b self, x_px: u32, y_px: u32) -> Sample<'b, S::Real, S::Material> {
let x_prop = x_px as f32 / self.im.width() as f32; let x_idx = x_px * self.sim.width() / self.im.width();
let x_m = x_prop * (self.sim.width() as f32 * self.sim.feature_size() as f32); let y_idx = y_px * self.sim.height() / self.im.height();
let y_prop = y_px as f32 / self.im.height() as f32; self.sim.sample(Index::new(x_idx, y_idx, self.z))
let y_m = y_prop * (self.sim.height() as f32 * self.sim.feature_size() as f32);
let z_m = self.z as f32 * self.sim.feature_size() as f32;
self.sim.sample(Meters(Vec3::new(x_m, y_m, z_m)))
} }
////////////// Ex/Ey/Bz configuration //////////// ////////////// Ex/Ey/Bz configuration ////////////
@@ -229,7 +237,22 @@ impl<'a, S: SampleableSim> RenderSteps<'a, S> {
self.render_vector_field(Rgb([0xff, 0xff, 0xff]), 1.0e5 * scale, |cell| cell.m().xy().to_f32()); self.render_vector_field(Rgb([0xff, 0xff, 0xff]), 1.0e5 * scale, |cell| cell.m().xy().to_f32());
} }
fn render_vector_field<F: Fn(&Sample) -> Vec2<f32>>(&mut self, color: Rgb<u8>, typical: f32, measure: F) { fn render_mat(&mut self, scale: f32) {
unsafe fn to_bytes<T>(d: &T) -> &[u8] {
std::slice::from_raw_parts(d as *const T as *const u8, std::mem::size_of::<T>())
}
self.render_scalar_field(scale, false, 1, |cell| {
let mut hasher = DefaultHasher::new();
let as_bytes = unsafe { to_bytes(cell.material()) };
std::hash::Hash::hash_slice(as_bytes, &mut hasher);
hasher.finish() as f32 / (-1i64 as u64 as f32)
});
}
fn render_vector_field<F>(&mut self, color: Rgb<u8>, typical: f32, measure: F)
where
F: Fn(&Sample<'_, S::Real, S::Material>) -> Vec2<f32>
{
let w = self.im.width(); let w = self.im.width();
let h = self.im.height(); let h = self.im.height();
let vec_spacing = 10; let vec_spacing = 10;
@@ -244,7 +267,10 @@ impl<'a, S: SampleableSim> RenderSteps<'a, S> {
} }
} }
} }
fn render_scalar_field<F: Fn(&Sample) -> f32 + Sync>(&mut self, typical: f32, signed: bool, slot: u32, measure: F) { fn render_scalar_field<F>(&mut self, typical: f32, signed: bool, slot: u32, measure: F)
where
F: Fn(&Sample<'_, S::Real, S::Material>) -> f32 + Sync
{
// XXX: get_at_px borrows self, so we need to clone the image to operate on it mutably. // XXX: get_at_px borrows self, so we need to clone the image to operate on it mutably.
let mut im = self.im.clone(); let mut im = self.im.clone();
let w = im.width(); let w = im.width();
@@ -268,8 +294,8 @@ impl<'a, S: SampleableSim> RenderSteps<'a, S> {
self.im = im; self.im = im;
} }
fn render_measurements(&mut self) { fn render_measurements(&mut self) {
for (meas_no, m) in self.meas.iter().enumerate() { for (meas_no, m) in meas::eval_multiple(self.sim, &self.meas).into_iter().enumerate() {
let meas_string = m.eval(self.sim); let meas_string = m.pretty_print();
for (i, c) in meas_string.chars().enumerate() { for (i, c) in meas_string.chars().enumerate() {
let glyph = BASIC_FONTS.get(c) let glyph = BASIC_FONTS.get(c)
.or_else(|| GREEK_FONTS.get(c)) .or_else(|| GREEK_FONTS.get(c))
@@ -290,7 +316,10 @@ impl<'a, S: SampleableSim> RenderSteps<'a, S> {
} }
} }
fn field_vector<F: Fn(&Sample) -> Vec2<f32>>(&self, xidx: u32, yidx: u32, size: u32, measure: &F) -> Vec2<f32> { fn field_vector<F>(&self, xidx: u32, yidx: u32, size: u32, measure: &F) -> Vec2<f32>
where
F: Fn(&Sample<'_, S::Real, S::Material>) -> Vec2<f32>
{
let mut field = Vec2::default(); let mut field = Vec2::default();
let w = self.im.width(); let w = self.im.width();
let h = self.im.height(); let h = self.im.height();
@@ -339,25 +368,25 @@ impl ImageRenderExt for RgbImage {
} }
pub trait Renderer<S>: Send + Sync { pub trait Renderer<S>: Send + Sync {
fn render_z_slice(&self, state: &S, z: u32, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig); fn render_z_slice(&self, state: &S, z: u32, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig);
// { // {
// self.render_with_image(state, &RenderSteps::render(state, measurements, z), measurements); // self.render_with_image(state, &RenderSteps::render(state, measurements, z), measurements);
// } // }
fn render(&self, state: &S, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig); fn render(&self, state: &S, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig);
/// Not intended to be called directly by users; implement this if you want the image to be /// Not intended to be called directly by users; implement this if you want the image to be
/// computed using default settings and you just manage where to display/save it. /// computed using default settings and you just manage where to display/save it.
fn render_with_image(&self, state: &S, _im: &RgbImage, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render_with_image(&self, state: &S, _im: &RgbImage, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
self.render(state, measurements, config); self.render(state, measurements, config);
} }
} }
fn default_render_z_slice<S: SampleableSim, R: Renderer<S>>( fn default_render_z_slice<S: AbstractSim, R: Renderer<S>>(
me: &R, state: &S, z: u32, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig, me: &R, state: &S, z: u32, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig,
) { ) {
me.render_with_image(state, &RenderSteps::render(state, measurements, z), measurements, config); me.render_with_image(state, &RenderSteps::render(state, measurements, z), measurements, config);
} }
fn default_render<S: SampleableSim, R: Renderer<S>>( fn default_render<S: AbstractSim, R: Renderer<S>>(
me: &R, state: &S, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig me: &R, state: &S, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig
) { ) {
me.render_z_slice(state, state.depth() / 2, measurements, config); me.render_z_slice(state, state.depth() / 2, measurements, config);
} }
@@ -365,7 +394,7 @@ fn default_render<S: SampleableSim, R: Renderer<S>>(
// pub struct NumericTermRenderer; // pub struct NumericTermRenderer;
// //
// impl Renderer for NumericTermRenderer { // impl Renderer for NumericTermRenderer {
// fn render(&mut self, state: &SimSnapshot, _measurements: &[Box<dyn AbstractMeasurement>]) { // fn render(&mut self, state: &SimSnapshot, _measurements: &[&dyn AbstractMeasurement<S>]) {
// for y in 0..state.height() { // for y in 0..state.height() {
// for x in 0..state.width() { // for x in 0..state.width() {
// let cell = state.get((x, y).into()); // let cell = state.get((x, y).into());
@@ -385,17 +414,18 @@ fn default_render<S: SampleableSim, R: Renderer<S>>(
#[derive(Default)] #[derive(Default)]
pub struct ColorTermRenderer; pub struct ColorTermRenderer;
impl<S: SampleableSim> Renderer<S> for ColorTermRenderer { impl<S: AbstractSim> Renderer<S> for ColorTermRenderer {
fn render(&self, state: &S, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render(&self, state: &S, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
default_render(self, state, measurements, config) default_render(self, state, measurements, config)
} }
fn render_z_slice( fn render_z_slice(
&self, &self,
state: &S, state: &S,
z: u32, z: u32,
measurements: &[Box<dyn AbstractMeasurement>], measurements: &[&dyn AbstractMeasurement<S>],
config: RenderConfig, config: RenderConfig,
) { ) {
let measurements = meas::eval_multiple(state, measurements);
let (max_w, mut max_h) = crossterm::terminal::size().unwrap(); let (max_w, mut max_h) = crossterm::terminal::size().unwrap();
max_h = max_h.saturating_sub(2 + measurements.len() as u16); max_h = max_h.saturating_sub(2 + measurements.len() as u16);
let im = RenderSteps::render_configured(state, &[], z, (max_w as _, max_h as _), config); let im = RenderSteps::render_configured(state, &[], z, (max_w as _, max_h as _), config);
@@ -424,7 +454,7 @@ impl<S: SampleableSim> Renderer<S> for ColorTermRenderer {
for m in measurements { for m in measurements {
// Measurements can be slow to compute // Measurements can be slow to compute
stdout.flush().unwrap(); stdout.flush().unwrap();
let meas_string = m.eval(state); let meas_string = format!("{}: \t{}", m.name(), m.pretty_print());
stdout.queue(cursor::MoveDown(1)).unwrap(); stdout.queue(cursor::MoveDown(1)).unwrap();
stdout.queue(cursor::MoveToColumn(1)).unwrap(); stdout.queue(cursor::MoveToColumn(1)).unwrap();
stdout.queue(PrintStyledContent(style(meas_string))).unwrap(); stdout.queue(PrintStyledContent(style(meas_string))).unwrap();
@@ -447,14 +477,14 @@ impl Y4MRenderer {
} }
} }
impl<S: SampleableSim> Renderer<S> for Y4MRenderer { impl<S: AbstractSim> Renderer<S> for Y4MRenderer {
fn render_z_slice(&self, state: &S, z: u32, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render_z_slice(&self, state: &S, z: u32, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
default_render_z_slice(self, state, z, measurements, config) default_render_z_slice(self, state, z, measurements, config)
} }
fn render(&self, state: &S, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render(&self, state: &S, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
default_render(self, state, measurements, config) default_render(self, state, measurements, config)
} }
fn render_with_image(&self, _state: &S, im: &RgbImage, _meas: &[Box<dyn AbstractMeasurement>], _config: RenderConfig) { fn render_with_image(&self, _state: &S, im: &RgbImage, _meas: &[&dyn AbstractMeasurement<S>], _config: RenderConfig) {
{ {
let mut enc = self.encoder.lock().unwrap(); let mut enc = self.encoder.lock().unwrap();
if enc.is_none() { if enc.is_none() {
@@ -505,6 +535,14 @@ impl<S> MultiRendererElement<S> {
Some(end) => frame < end, Some(end) => frame < end,
} }
} }
fn next_frame_for_work(&self, after: u64) -> Option<u64> {
let max_frame = after + self.step_frequency;
let max_frame = max_frame - max_frame % self.step_frequency;
match self.step_limit {
None => Some(max_frame),
Some(end) => Some(max_frame).filter(|&f| f < end)
}
}
} }
pub struct MultiRenderer<S> { pub struct MultiRenderer<S> {
@@ -537,19 +575,22 @@ impl<S> MultiRenderer<S> {
pub fn any_work_for_frame(&self, frame: u64) -> bool { pub fn any_work_for_frame(&self, frame: u64) -> bool {
self.renderers.read().unwrap().iter().any(|m| m.work_this_frame(frame)) self.renderers.read().unwrap().iter().any(|m| m.work_this_frame(frame))
} }
pub fn next_frame_for_work(&self, after: u64) -> Option<u64> {
self.renderers.read().unwrap().iter().flat_map(|m| m.next_frame_for_work(after)).min()
}
} }
impl<S: SampleableSim> Renderer<S> for MultiRenderer<S> { impl<S: AbstractSim> Renderer<S> for MultiRenderer<S> {
fn render_z_slice(&self, state: &S, z: u32, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render_z_slice(&self, state: &S, z: u32, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
default_render_z_slice(self, state, z, measurements, config) default_render_z_slice(self, state, z, measurements, config)
} }
fn render(&self, state: &S, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render(&self, state: &S, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
if self.renderers.read().unwrap().len() != 0 { if self.renderers.read().unwrap().len() != 0 {
self.render_with_image(state, &RenderSteps::render(state, measurements, state.depth() / 2), measurements, config); self.render_with_image(state, &RenderSteps::render(state, measurements, state.depth() / 2), measurements, config);
} }
} }
fn render_with_image(&self, state: &S, im: &RgbImage, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render_with_image(&self, state: &S, im: &RgbImage, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
for r in &*self.renderers.read().unwrap() { for r in &*self.renderers.read().unwrap() {
if r.work_this_frame(state.step_no()) { if r.work_this_frame(state.step_no()) {
r.renderer.render_with_image(state, im, measurements, config); r.renderer.render_with_image(state, im, measurements, config);
@@ -559,25 +600,28 @@ impl<S: SampleableSim> Renderer<S> for MultiRenderer<S> {
} }
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct SerializedFrame<S=StaticSim> { pub struct SerializedFrame<S> {
pub state: S, pub state: S,
/// although not generally necessary to load the sim, saving the measurements is beneficial for /// although not generally necessary to load the sim, saving the measurements is beneficial for
/// post-processing. /// post-processing.
pub measurements: Vec<Box<dyn AbstractMeasurement>>, pub measurements: Vec<Measurement>,
} }
impl<S: SampleableSim> SerializedFrame<S> { impl<S: AbstractSim> SerializedFrame<S> {
pub fn to_static(self) -> SerializedFrame<StaticSim> { pub fn to_generic(self) -> SerializedFrame<GenericSim<S::Real>> {
SerializedFrame { SerializedFrame {
state: SampleableSim::to_static(&self.state), state: AbstractSim::to_generic(&self.state),
measurements: self.measurements, measurements: self.measurements,
} }
} }
} }
/// this serializes the simulation state plus measurements to disk.
/// it can either convert the state to a generic, material-agnostic format (generic)
/// or dump it as-is.
pub struct SerializerRenderer { pub struct SerializerRenderer {
fmt_str: String, fmt_str: String,
prefer_static: bool, prefer_generic: bool,
} }
impl SerializerRenderer { impl SerializerRenderer {
@@ -586,47 +630,50 @@ impl SerializerRenderer {
pub fn new(fmt_str: &str) -> Self { pub fn new(fmt_str: &str) -> Self {
Self { Self {
fmt_str: fmt_str.into(), fmt_str: fmt_str.into(),
prefer_static: false, prefer_generic: false,
} }
} }
/// Same as `new`, but cast to StaticSim before serializing. This yields a file that's easier /// Same as `new`, but cast to GenericSim before serializing. This yields a file that's easier
/// for post-processing, and may be smaller in size. /// for post-processing.
pub fn new_static(fmt_str: &str) -> Self { pub fn new_generic(fmt_str: &str) -> Self {
Self { Self {
fmt_str: fmt_str.into(), fmt_str: fmt_str.into(),
prefer_static: true, prefer_generic: true,
} }
} }
} }
impl SerializerRenderer { impl SerializerRenderer {
fn serialize<S: SampleableSim + Serialize>(&self, state: &S, measurements: &[Box<dyn AbstractMeasurement>]) { fn serialize<S: AbstractSim + Serialize>(&self, state: &S, measurements: Vec<Measurement>) {
let frame = SerializedFrame { let frame = SerializedFrame {
state, state,
measurements: measurements.iter().cloned().collect(), measurements,
}; };
let name = self.fmt_str.replace("{step_no}", &*frame.state.step_no().to_string()); let name = self.fmt_str.replace("{step_no}", &*frame.state.step_no().to_string());
let out = BufWriter::new(File::create(name).unwrap()); // serialize to a temporary file -- in case we run out of disk space, etc.
//serde_cbor::to_writer(out, &snap).unwrap(); let temp_name = format!("{}.incomplete", name);
let out = BufWriter::new(File::create(&temp_name).unwrap());
bincode::serialize_into(out, &frame).unwrap(); bincode::serialize_into(out, &frame).unwrap();
// atomically complete the write.
std::fs::rename(temp_name, name).unwrap();
} }
pub fn try_load<S: SampleableSim + for <'a> Deserialize<'a>>(&self) -> Option<SerializedFrame<S>> { pub fn try_load<S: AbstractSim + for <'a> Deserialize<'a>>(&self) -> Option<SerializedFrame<S>> {
let mut reader = BufReader::new(File::open(&*self.fmt_str).ok()?); let mut reader = BufReader::new(File::open(&*self.fmt_str).ok()?);
bincode::deserialize_from(&mut reader).ok() bincode::deserialize_from(&mut reader).ok()
} }
} }
impl<S: SampleableSim + Serialize> Renderer<S> for SerializerRenderer { impl<S: AbstractSim + Serialize> Renderer<S> for SerializerRenderer {
fn render_z_slice(&self, state: &S, z: u32, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render_z_slice(&self, state: &S, z: u32, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
default_render_z_slice(self, state, z, measurements, config) default_render_z_slice(self, state, z, measurements, config)
} }
fn render(&self, state: &S, measurements: &[Box<dyn AbstractMeasurement>], _config: RenderConfig) { fn render(&self, state: &S, measurements: &[&dyn AbstractMeasurement<S>], _config: RenderConfig) {
if self.prefer_static { if self.prefer_generic {
self.serialize(&state.to_static(), measurements); self.serialize(&state.to_generic(), meas::eval_multiple(state, measurements));
} else { } else {
self.serialize(state, measurements); self.serialize(state, meas::eval_multiple(state, measurements));
} }
} }
} }
@@ -661,12 +708,12 @@ impl CsvRenderer {
} }
} }
impl<S: SampleableSim> Renderer<S> for CsvRenderer { impl<S: AbstractSim> Renderer<S> for CsvRenderer {
fn render_z_slice(&self, state: &S, z: u32, measurements: &[Box<dyn AbstractMeasurement>], config: RenderConfig) { fn render_z_slice(&self, state: &S, z: u32, measurements: &[&dyn AbstractMeasurement<S>], config: RenderConfig) {
default_render_z_slice(self, state, z, measurements, config) default_render_z_slice(self, state, z, measurements, config)
} }
fn render(&self, state: &S, measurements: &[Box<dyn AbstractMeasurement>], _config: RenderConfig) { fn render(&self, state: &S, measurements: &[&dyn AbstractMeasurement<S>], _config: RenderConfig) {
let row = meas::eval_multiple_kv(state, measurements); let row = meas::eval_multiple(state, measurements);
let step = state.step_no(); let step = state.step_no();
let mut lock = self.state.lock().unwrap(); let mut lock = self.state.lock().unwrap();
let mut writer = match lock.take().unwrap() { let mut writer = match lock.take().unwrap() {
@@ -700,13 +747,13 @@ impl<S: SampleableSim> Renderer<S> for CsvRenderer {
file.set_len(0).unwrap(); file.set_len(0).unwrap();
let mut writer = csv::Writer::from_writer(BufWriter::new(file)); let mut writer = csv::Writer::from_writer(BufWriter::new(file));
// write the header // write the header
writer.write_record(row.keys()).unwrap(); writer.write_record(row.iter().map(|m| m.name())).unwrap();
writer writer
} }
}, },
CsvState::Writing(writer) => writer, CsvState::Writing(writer) => writer,
}; };
writer.write_record(row.values()).unwrap(); writer.write_record(row.iter().map(|m| m.machine_readable())).unwrap();
writer.flush().unwrap(); writer.flush().unwrap();
*lock = Some(CsvState::Writing(writer)); *lock = Some(CsvState::Writing(writer));
} }

File diff suppressed because it is too large Load Diff

View File

@@ -1,251 +0,0 @@
use serde::de::Deserializer;
use serde::ser::Serializer;
use serde::{Deserialize, Serialize};
use crate::mat::{AnisomorphicConductor, IsoConductorOr, IsomorphicConductor, Ferroxcube3R1MH, MaterialExt as _, MBFerromagnet, MBPgram, MHPgram, Static};
use crate::geom::{Index, Vec3, Vec3u};
/// hide the actual spirv backend structures inside a submodule to make their use/boundary clear.
/// everything re-exported here is the GPU-side ("ffi") representation; the rest of this file
/// converts between these and the CPU-side ("lib") types.
mod ffi {
    pub use spirv_backend::entry_points;
    pub use spirv_backend::sim::SerializedSimMeta;
    pub use spirv_backend::support::Optional;
    pub use spirv_backend::mat::FullyGenericMaterial;
    pub use coremem_types::mat::MBPgram;
}
// conversion traits for types defined cross-lib

/// Convert a CPU-side ("lib") value into its spirv-side ("ffi") representation.
pub trait IntoFfi {
    type Ffi;
    fn into_ffi(self) -> Self::Ffi;
}

/// Inverse of [`IntoFfi`]: convert a spirv-side value back into its lib representation.
pub trait IntoLib {
    type Lib;
    fn into_lib(self) -> Self::Lib;
}
/// For types whose lib and ffi representations are the same type, implement
/// both conversions as no-ops. Optional generic parameters (`T0, T1, =>`)
/// are themselves required to be convertible so the impl stays bounded.
macro_rules! identity {
    ($($param:ident,)* => $t:ty) => {
        impl<$($param: IntoFfi),*> IntoFfi for $t {
            type Ffi = $t;
            fn into_ffi(self) -> Self::Ffi {
                self
            }
        }
        impl<$($param: IntoLib),*> IntoLib for $t {
            type Lib = $t;
            fn into_lib(self) -> Self::Lib {
                self
            }
        }
    };
}
// primitive / cross-lib types that are shared verbatim between lib and ffi.
// XXX: should work for any other lifetime, not just 'static
identity!(=> f32);
identity!(=> &'static str);
identity!(T0, T1, => (T0, T1));
identity!(=> Vec3u);
identity!(T, => Vec3<T>);
/// `Option<L>` crosses the boundary as the ffi `Optional`, converting the
/// payload when one is present. The `Default` bound exists because
/// `ffi::Optional::none()` must be able to fill in a placeholder payload.
impl<L: IntoFfi> IntoFfi for Option<L>
where L::Ffi: Default
{
    type Ffi = ffi::Optional<L::Ffi>;
    fn into_ffi(self) -> Self::Ffi {
        self.map_or_else(
            ffi::Optional::none,
            |inner| ffi::Optional::some(inner.into_ffi()),
        )
    }
}
/// Translate the ffi `Optional` back into a native `Option`, converting the
/// payload. `Copy` is required because `is_some` borrows and `unwrap` consumes.
impl<F: Copy + IntoLib> IntoLib for ffi::Optional<F> {
    type Lib = Option<F::Lib>;
    fn into_lib(self) -> Self::Lib {
        match self.is_some() {
            true => Some(self.unwrap().into_lib()),
            false => None,
        }
    }
}
/// `MBPgram` is declared separately on each side but is field-for-field
/// identical, so the conversions just rebuild it through its constructor.
impl IntoFfi for MBPgram<f32> {
    type Ffi = ffi::MBPgram<f32>;
    fn into_ffi(self) -> Self::Ffi {
        Self::Ffi::new(self.b_start, self.b_end, self.max_m)
    }
}

impl IntoLib for ffi::MBPgram<f32> {
    type Lib = MBPgram<f32>;
    fn into_lib(self) -> Self::Lib {
        Self::Lib::new(self.b_start, self.b_end, self.max_m)
    }
}

// these material types are shared verbatim across the lib/ffi boundary.
identity!( => MHPgram<f32>);
identity!( => Ferroxcube3R1MH);
identity!(R, M, => IsoConductorOr<R, M>);
/// CPU-side mirror of `ffi::FullyGenericMaterial`: a material described by an
/// anisotropic conductivity plus optional M(B) / M(H) magnetization curves.
/// Unset curves mean the material is non-magnetic along that model.
#[derive(Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct FullyGenericMaterial {
    pub conductivity: Vec3<f32>,
    pub m_b_curve: Option<MBPgram<f32>>,
    pub m_h_curve: Option<MHPgram<f32>>,
}

impl IntoFfi for FullyGenericMaterial {
    type Ffi = ffi::FullyGenericMaterial;
    fn into_ffi(self) -> Self::Ffi {
        Self::Ffi {
            conductivity: self.conductivity.into_ffi(),
            m_b_curve: self.m_b_curve.into_ffi(),
            m_h_curve: self.m_h_curve.into_ffi(),
        }
    }
}

impl IntoLib for ffi::FullyGenericMaterial {
    type Lib = FullyGenericMaterial;
    fn into_lib(self) -> Self::Lib {
        Self::Lib {
            conductivity: self.conductivity.into_lib(),
            m_b_curve: self.m_b_curve.into_lib(),
            m_h_curve: self.m_h_curve.into_lib(),
        }
    }
}
// conversions from each concrete material type into the generic material.
// conductor-like materials populate only `conductivity`; ferromagnets
// populate exactly one of the two magnetization-curve slots.

impl From<Static<f32>> for FullyGenericMaterial {
    fn from(m: Static<f32>) -> Self {
        FullyGenericMaterial {
            conductivity: m.conductivity(),
            .. Default::default()
        }
    }
}

impl From<AnisomorphicConductor<f32>> for FullyGenericMaterial {
    fn from(m: AnisomorphicConductor<f32>) -> Self {
        FullyGenericMaterial {
            conductivity: m.conductivity(),
            .. Default::default()
        }
    }
}

impl From<IsomorphicConductor<f32>> for FullyGenericMaterial {
    fn from(m: IsomorphicConductor<f32>) -> Self {
        FullyGenericMaterial {
            conductivity: m.conductivity(),
            .. Default::default()
        }
    }
}

impl From<MBFerromagnet<f32>> for FullyGenericMaterial {
    fn from(m: MBFerromagnet<f32>) -> Self {
        FullyGenericMaterial {
            m_b_curve: Some(m.curve()),
            .. Default::default()
        }
    }
}

impl From<MHPgram<f32>> for FullyGenericMaterial {
    fn from(m: MHPgram<f32>) -> Self {
        FullyGenericMaterial {
            m_h_curve: Some(m),
            .. Default::default()
        }
    }
}

impl From<Ferroxcube3R1MH> for FullyGenericMaterial {
    fn from(m: Ferroxcube3R1MH) -> Self {
        // Ferroxcube 3R1 is defined by its M(H) curve; route through that.
        let curve: MHPgram<f32> = m.into();
        curve.into()
    }
}
// this is bitwise- and type-compatible with the spirv SimMeta, except we need serde traits
#[derive(Clone, Default, Serialize, Deserialize)]
pub struct SimMeta {
    // grid dimensions, in cells
    pub(crate) dim: Index,
    // 1.0 / feature_size, cached to avoid divisions on the hot path
    pub(crate) inv_feature_size: f32,
    // seconds advanced per simulation step
    pub(crate) time_step: f32,
    // physical size of one cell, in meters
    pub(crate) feature_size: f32,
}

impl IntoFfi for SimMeta {
    type Ffi = ffi::SerializedSimMeta;
    fn into_ffi(self) -> Self::Ffi {
        Self::Ffi {
            dim: self.dim.0.into_ffi(),
            inv_feature_size: self.inv_feature_size,
            time_step: self.time_step,
            feature_size: self.feature_size,
        }
    }
}
/// Store the FFI form in memory, but serialize via the lib form.
/// This lets hot-path code read the GPU-ready representation directly (via
/// `Deref`) while snapshots on disk stay in the stable, serde-friendly format.
#[derive(Clone, Default, PartialEq)]
pub struct Remote<F>(F);

impl<F> Remote<F> {
    /// Unwrap the stored FFI value.
    pub fn into_inner(self) -> F {
        self.0
    }
}

/// Any lib value can be wrapped by converting it to its FFI form up front.
impl<L: IntoFfi> From<L> for Remote<L::Ffi> {
    fn from(l: L) -> Self {
        Remote(l.into_ffi())
    }
}

impl<F> std::ops::Deref for Remote<F> {
    type Target = F;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Serialize by round-tripping through the lib representation, so the on-disk
/// format is independent of the FFI layout. Requires `Clone` because
/// `into_lib` consumes the value.
impl<F> Serialize for Remote<F>
where F: Clone + IntoLib,
      F::Lib: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let local = self.0.clone().into_lib();
        local.serialize(serializer)
    }
}

/// Deserialize the lib representation and immediately convert it back into
/// the in-memory FFI form.
impl<'de, F> Deserialize<'de> for Remote<F>
where F: IntoLib,
      F::Lib: Deserialize<'de> + IntoFfi<Ffi=F>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let local: F::Lib = Deserialize::deserialize(deserializer)?;
        Ok(Remote(local.into_ffi()))
    }
}
// FUNCTION BINDINGS

/// Look up the spirv entry-point names for the material type `L`, going
/// through its FFI representation. Returns `None` when the backend exposes
/// no entry points for that material.
pub fn entry_points<L>() -> Option<(&'static str, &'static str)>
where
    L: IntoFfi,
    L::Ffi: 'static
{
    ffi::entry_points::<L::Ffi>().into_lib()
}

View File

@@ -0,0 +1,49 @@
use crate::diagnostics::SyncDiagnostics;
use coremem_cross::mat::Material;
use coremem_cross::real::Real;
use coremem_cross::step::{SimMeta, StepEContext, StepHContext};
use coremem_cross::vec::{Vec3, Vec3u};
use super::SimBackend;
/// Reference (single-threaded) CPU implementation of the simulation stepper.
#[derive(Default)]
pub struct CpuBackend;

impl<R: Real, M: Material<R>> SimBackend<R, M> for CpuBackend {
    /// Advance the E/H/M fields by `num_steps` FDTD steps, applying the
    /// provided per-cell stimuli. `e`, `h`, `m` are updated in place; the
    /// diagnostics handle is unused on the CPU path.
    fn step_n(
        &mut self,
        _diag: &SyncDiagnostics,
        meta: SimMeta<R>,
        mat: &[M],
        stim_e: &[Vec3<R>],
        stim_h: &[Vec3<R>],
        e: &mut [Vec3<R>],
        h: &mut [Vec3<R>],
        m: &mut [Vec3<R>],
        num_steps: u32,
    ) {
        for _ in 0..num_steps {
            // step E field
            apply_all_cells(meta.dim(), |idx| {
                StepEContext::step_flat_view(meta, mat, stim_e, e, h, idx);
            });
            // step H field (must see the fully-updated E field, hence two passes)
            apply_all_cells(meta.dim(), |idx| {
                StepHContext::step_flat_view(meta, mat, stim_h, e, h, m, idx);
            });
        }
    }
}
/// Invoke `f` exactly once per cell of a `dim`-sized volume, with x as the
/// innermost (fastest-varying) coordinate.
fn apply_all_cells<F: FnMut(Vec3u)>(dim: Vec3u, mut f: F) {
    let (nx, ny, nz) = (dim.x(), dim.y(), dim.z());
    for plane in 0..nz {
        for row in 0..ny {
            for col in 0..nx {
                f(Vec3u::new(col, row, plane));
            }
        }
    }
}

View File

@@ -0,0 +1,497 @@
use futures::FutureExt as _;
use log::info;
use std::borrow::Cow;
use std::num::NonZeroU64;
use wgpu;
use wgpu::util::DeviceExt as _;
use crate::diagnostics::SyncDiagnostics;
use coremem_cross::vec::{Vec3, Vec3u};
use coremem_cross::step::SimMeta;
use spirv_backend::HasEntryPoints;
use super::SimBackend;
/// GPU implementation of the simulation stepper, driven through wgpu.
/// The device is opened lazily on first use; the cached entry-point names
/// let us detect if a later call arrives with a different material type.
#[derive(Default)]
pub struct WgpuBackend {
    handles: Option<(&'static str /* step_h */, &'static str /* step_e */, WgpuHandles)>,
}

/// Long-lived wgpu objects: device/queue plus the two compute pipelines and
/// the bind-group layout they share.
struct WgpuHandles {
    step_bind_group_layout: wgpu::BindGroupLayout,
    step_e_pipeline: wgpu::ComputePipeline,
    step_h_pipeline: wgpu::ComputePipeline,
    device: wgpu::Device,
    queue: wgpu::Queue,
}
impl WgpuHandles {
    /// Open a device sized for a `dim`-cell simulation of material `M` and
    /// compile the step-E/step-H compute pipelines from the embedded shader.
    fn open<R, M: HasEntryPoints<R>>(dim: Vec3u) -> Self {
        info!("WgpuHandles::open({})", dim);
        use std::mem::size_of;
        // the largest buffer we bind holds one element per cell; size the
        // device limits for whichever element type is biggest.
        let volume = dim.product_sum_usize() as u64;
        let max_elem_size = size_of::<M>().max(size_of::<Vec3<R>>());
        let max_array_size = volume * max_elem_size as u64;
        let max_buf_size = max_array_size + 0x1000; // allow some overhead
        let (device, queue) = futures::executor::block_on(open_device(max_buf_size));
        let shader_binary = get_shader();
        // SAFETY: passthrough spirv is not validated by wgpu; we trust our own
        // compiled shader module.
        let shader_module = unsafe { device.create_shader_module_spirv(&shader_binary) };
        let (step_bind_group_layout, step_h_pipeline, step_e_pipeline) = make_pipelines(
            &device, &shader_module, M::step_h(), M::step_e()
        );
        WgpuHandles {
            step_bind_group_layout,
            step_h_pipeline,
            step_e_pipeline,
            device,
            queue,
        }
    }
}
// TODO: these bounds aren't 100% right. we're sending R and M over to the GPU by a bitwise copy.
// that probably means the types should be Send + Copy
impl<R: Copy, M: Send + Sync + HasEntryPoints<R>> SimBackend<R, M> for WgpuBackend {
    /// Run `num_steps` simulation steps on the GPU: upload mat/stim/field
    /// buffers, record alternating step-E/step-H dispatches (with timestamp
    /// queries around each), then read the E/H/M fields back in place.
    fn step_n(
        &mut self,
        diag: &SyncDiagnostics,
        meta: SimMeta<R>,
        mat: &[M],
        stim_cpu_e: &[Vec3<R>],
        stim_cpu_h: &[Vec3<R>],
        e: &mut [Vec3<R>],
        h: &mut [Vec3<R>],
        m: &mut [Vec3<R>],
        num_steps: u32,
    ) {
        let dim = meta.dim();
        // NOTE(review): sized with Vec3<f32> even though the fields are Vec3<R>;
        // assumes R == f32 here — confirm before using with a different R.
        let field_bytes = dim.product_sum() as usize * std::mem::size_of::<Vec3<f32>>();
        // lazily open the device the first time we're called
        let (step_h, step_e, handles) = self.handles.get_or_insert_with(|| (
            M::step_h(),
            M::step_e(),
            WgpuHandles::open::<R, M>(dim)
        ));
        // if device is opened, make sure we're open for the right types
        assert_eq!(*step_h, M::step_h());
        assert_eq!(*step_e, M::step_e());
        let device = &handles.device;
        let queue = &handles.queue;
        let step_bind_group_layout = &handles.step_bind_group_layout;
        let step_e_pipeline = &handles.step_e_pipeline;
        let step_h_pipeline = &handles.step_h_pipeline;
        let timestamp_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("timestamps"),
            // each timestamp is 8 bytes, and we do 4 per step
            size: 8 * 4 * num_steps as u64,
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: true,
        });
        timestamp_buffer.unmap();
        // upload the inputs: metadata, stimuli, and materials are read-only on
        // the GPU side; the three field buffers are read-write.
        let sim_meta_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("gpu-side simulation metadata"),
            contents: to_bytes(&[meta][..]),
            usage: wgpu::BufferUsages::STORAGE,
        });
        let stim_e_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("gpu-side stimulus e field"),
            contents: to_bytes(stim_cpu_e),
            usage: wgpu::BufferUsages::STORAGE
        });
        let stim_h_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("gpu-side stimulus h field"),
            contents: to_bytes(stim_cpu_h),
            usage: wgpu::BufferUsages::STORAGE
        });
        let mat_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("gpu-side materials matrix"),
            contents: to_bytes(mat),
            // Can be used by the GPU and copied back to the CPU
            usage: wgpu::BufferUsages::STORAGE,
        });
        let e_field_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("gpu-side in/out e field"),
            contents: to_bytes(e),
            usage: wgpu::BufferUsages::STORAGE.union(wgpu::BufferUsages::COPY_SRC),
        });
        let h_field_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("gpu-side in/out h field"),
            contents: to_bytes(h),
            // Can be used by the GPU and copied back to the CPU
            usage: wgpu::BufferUsages::STORAGE.union(wgpu::BufferUsages::COPY_SRC),
        });
        let m_field_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("gpu-side in/out m field"),
            contents: to_bytes(m),
            // Can be used by the GPU and copied back to the CPU
            usage: wgpu::BufferUsages::STORAGE.union(wgpu::BufferUsages::COPY_SRC),
        });
        // staging buffers the GPU results are copied into for CPU mapping
        let e_readback_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("cpu-side copy of e output buffer"),
            size: field_bytes as wgpu::BufferAddress,
            // Can be read to the CPU, and can be copied from the shader's storage buffer
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let h_readback_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("cpu-side copy of h output buffer"),
            size: field_bytes as wgpu::BufferAddress,
            // Can be read to the CPU, and can be copied from the shader's storage buffer
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let m_readback_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("cpu-side copy of m output buffer"),
            size: field_bytes as wgpu::BufferAddress,
            // Can be read to the CPU, and can be copied from the shader's storage buffer
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // bindings 0-6 here must match make_pipelines' layout order
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: None,
            layout: &step_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: sim_meta_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: stim_e_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: stim_h_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 3,
                    resource: mat_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 4,
                    resource: e_field_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 5,
                    resource: h_field_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 6,
                    resource: m_field_buffer.as_entire_binding(),
                },
            ],
        });
        let queries = device.create_query_set(&wgpu::QuerySetDescriptor {
            label: None,
            count: 4 * num_steps,
            ty: wgpu::QueryType::Timestamp,
        });
        // ceil-divide each axis by the shader's 4x4x4 workgroup size
        let workgroups = ((dim.x()+3) / 4, (dim.y()+3) / 4, (dim.z()+3) / 4);
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        for step in 0..num_steps {
            // separate compute passes act as a barrier between E and H updates
            {
                let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
                cpass.set_bind_group(0, &bind_group, &[]);
                cpass.set_pipeline(&step_e_pipeline);
                cpass.write_timestamp(&queries, 4*step);
                cpass.dispatch(workgroups.0, workgroups.1, workgroups.2);
                cpass.write_timestamp(&queries, 4*step + 1);
            }
            {
                let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
                cpass.set_bind_group(0, &bind_group, &[]);
                cpass.set_pipeline(&step_h_pipeline);
                cpass.write_timestamp(&queries, 4*step + 2);
                cpass.dispatch(workgroups.0, workgroups.1, workgroups.2);
                cpass.write_timestamp(&queries, 4*step + 3);
            }
        }
        encoder.copy_buffer_to_buffer(
            &e_field_buffer,
            0,
            &e_readback_buffer,
            0,
            field_bytes as u64,
        );
        encoder.copy_buffer_to_buffer(
            &h_field_buffer,
            0,
            &h_readback_buffer,
            0,
            field_bytes as u64,
        );
        encoder.copy_buffer_to_buffer(
            &m_field_buffer,
            0,
            &m_readback_buffer,
            0,
            field_bytes as u64,
        );
        encoder.resolve_query_set(&queries, 0..4*num_steps, &timestamp_buffer, 0);
        diag.instrument_write_device(move || {
            queue.submit(Some(encoder.finish()));
        });
        // queue async readbacks; the futures copy mapped GPU memory back into
        // the caller's slices once `device.poll` completes the mapping.
        let e_readback_slice = e_readback_buffer.slice(..);
        let e_readback_future = e_readback_slice.map_async(wgpu::MapMode::Read).then(|_| async {
            e.copy_from_slice(unsafe {
                from_bytes(e_readback_slice.get_mapped_range().as_ref())
            });
            e_readback_buffer.unmap();
        });
        let h_readback_slice = h_readback_buffer.slice(..);
        let h_readback_future = h_readback_slice.map_async(wgpu::MapMode::Read).then(|_| async {
            h.copy_from_slice(unsafe {
                from_bytes(h_readback_slice.get_mapped_range().as_ref())
            });
            h_readback_buffer.unmap();
        });
        let m_readback_slice = m_readback_buffer.slice(..);
        let m_readback_future = m_readback_slice.map_async(wgpu::MapMode::Read).then(|_| async {
            m.copy_from_slice(unsafe {
                from_bytes(m_readback_slice.get_mapped_range().as_ref())
            });
            m_readback_buffer.unmap();
        });
        // let timestamp_period = queue.get_timestamp_period();
        let timestamp_readback_slice = timestamp_buffer.slice(..);
        let timestamp_readback_future = timestamp_readback_slice.map_async(wgpu::MapMode::Read).then(|_| async {
            {
                let mapped = timestamp_readback_slice.get_mapped_range();
                let timings: &[u64] = unsafe {
                    from_bytes(mapped.as_ref())
                };
                println!("timings: {:?}", timings);
            }
            timestamp_buffer.unmap();
        });
        // optimization note: it may be possible to use `WaitForSubmission`
        // and copy data to/from even as the GPU begins executing the next job.
        device.poll(wgpu::Maintain::Wait);
        diag.instrument_read_device(move || {
            futures::executor::block_on(futures::future::join(
                e_readback_future, futures::future::join(
                    h_readback_future, futures::future::join(
                        m_readback_future, timestamp_readback_future
                    )
                )
            ));
        });
    }
}
/// Convert an arbitrary slice into a byte slice
fn to_bytes<T>(slice: &[T]) -> &[u8] {
unsafe {
std::slice::from_raw_parts(slice.as_ptr() as *const u8, slice.len() * std::mem::size_of::<T>())
}
}
/// Convert a byte slice into a T slice
unsafe fn from_bytes<T>(slice: &[u8]) -> &[T] {
let elem_size = std::mem::size_of::<T>();
let new_len = slice.len() / elem_size;
assert_eq!(new_len * elem_size, slice.len());
std::slice::from_raw_parts(slice.as_ptr() as *const T, new_len)
}
/// Loads the shader
fn get_shader() -> wgpu::ShaderModuleDescriptorSpirV<'static> {
let data = spirv_backend_runner::spirv_module();
let spirv = Cow::Owned(wgpu::util::make_spirv_raw(&data).into_owned());
wgpu::ShaderModuleDescriptorSpirV {
label: None,
source: spirv,
}
}
/// Open a high-performance adapter/device with limits raised enough to bind
/// our 7 storage buffers, the largest of which may be `max_buf_size` bytes.
/// Panics if no suitable adapter or device is available.
async fn open_device(max_buf_size: u64) -> (wgpu::Device, wgpu::Queue) {
    // based on rust-gpu/examples/runners/wgpu/src/compute.rs:start_internal
    let instance = wgpu::Instance::new(wgpu::Backends::PRIMARY);
    info!("open_device: got instance");
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::HighPerformance,
            force_fallback_adapter: false,
            compatible_surface: None,
        })
        .await
        .expect("Failed to find an appropriate adapter");
    info!("open_device: got adapter");
    // XXX not all adapters will support non-default limits, and it could
    // cause perf degradations even on the ones that do. May want to consider
    // folding some buffers together to avoid this.
    let mut limits = wgpu::Limits::default();
    //limits.max_bind_groups = 5;
    //limits.max_dynamic_storage_buffers_per_pipeline_layout = 5;
    limits.max_storage_buffers_per_shader_stage = 7;
    //limits.max_storage_buffer_binding_size = 128 MiB.
    //limits.max_storage_buffer_binding_size = 1024 * (1 << 20);
    limits.max_storage_buffer_binding_size = max_buf_size.try_into().unwrap();
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                // passthrough spirv for our precompiled shader; timestamps for profiling
                features: (
                    wgpu::Features::empty()
                        .union(wgpu::Features::SPIRV_SHADER_PASSTHROUGH)
                        .union(wgpu::Features::TIMESTAMP_QUERY)
                ),
                limits,
            },
            None,
        )
        .await
        .expect("Failed to create device");
    info!("open_device: got device");
    (device, queue)
}
fn make_pipelines(
device: &wgpu::Device,
shader_module: &wgpu::ShaderModule,
entry_step_h: &'static str,
entry_step_e: &'static str
) -> (
wgpu::BindGroupLayout, wgpu::ComputePipeline, wgpu::ComputePipeline
) {
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: &[
wgpu::BindGroupLayoutEntry {
// meta
binding: 0,
count: None,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: Some(NonZeroU64::new(1).unwrap()),
ty: wgpu::BufferBindingType::Storage { read_only: true },
},
},
wgpu::BindGroupLayoutEntry {
// stimulus(e)
binding: 1,
count: None,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: Some(NonZeroU64::new(1).unwrap()),
ty: wgpu::BufferBindingType::Storage { read_only: true },
},
},
wgpu::BindGroupLayoutEntry {
// stimulus(h)
binding: 2,
count: None,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: Some(NonZeroU64::new(1).unwrap()),
ty: wgpu::BufferBindingType::Storage { read_only: true },
},
},
wgpu::BindGroupLayoutEntry {
// materials
binding: 3,
count: None,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: Some(NonZeroU64::new(1).unwrap()),
ty: wgpu::BufferBindingType::Storage { read_only: true },
},
},
wgpu::BindGroupLayoutEntry {
// e field
binding: 4,
count: None,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: Some(NonZeroU64::new(1).unwrap()),
ty: wgpu::BufferBindingType::Storage { read_only: false },
},
},
wgpu::BindGroupLayoutEntry {
// h field
binding: 5,
count: None,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: Some(NonZeroU64::new(1).unwrap()),
ty: wgpu::BufferBindingType::Storage { read_only: false },
},
},
wgpu::BindGroupLayoutEntry {
// m field
binding: 6,
count: None,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
has_dynamic_offset: false,
min_binding_size: Some(NonZeroU64::new(1).unwrap()),
ty: wgpu::BufferBindingType::Storage { read_only: false },
},
},
],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&bind_group_layout],
push_constant_ranges: &[],
});
let compute_step_h_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: None,
layout: Some(&pipeline_layout),
module: shader_module,
entry_point: entry_step_h,
});
let compute_step_e_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
label: None,
layout: Some(&pipeline_layout),
module: shader_module,
entry_point: entry_step_e,
});
(bind_group_layout, compute_step_h_pipeline, compute_step_e_pipeline)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,381 +0,0 @@
use crate::real::*;
use crate::geom::{Meters, Region, Vec3};
use rand;
/// (E, H) field pair; the unit returned by every stimulus.
type Fields = (Vec3<f32>, Vec3<f32>);

pub trait AbstractStimulus: Sync {
    // TODO: might be cleaner to return some `Fields` type instead of a tuple
    /// Return the (E, H) field which should be added PER-SECOND to the provided position/time.
    fn at(&self, t_sec: f32, pos: Meters) -> Fields;
}

// impl<T: AbstractStimulus> AbstractStimulus for &T {
//     fn at(&self, t_sec: f32, pos: Meters) -> Vec3 {
//         (*self).at(t_sec, pos)
//     }
// }
/// A collection of stimuli acts as their superposition: the contributions of
/// every element are summed component-wise.
impl<T: AbstractStimulus> AbstractStimulus for Vec<T> {
    fn at(&self, t_sec: f32, pos: Meters) -> Fields {
        self.iter().fold(Fields::default(), |(mut e_acc, mut h_acc), stim| {
            let (de, dh) = stim.at(t_sec, pos);
            e_acc += de;
            h_acc += dh;
            (e_acc, h_acc)
        })
    }
}
/// Boxed trait objects forward to the inner stimulus.
impl AbstractStimulus for Box<dyn AbstractStimulus> {
    fn at(&self, t_sec: f32, pos: Meters) -> Fields {
        (**self).at(t_sec, pos)
    }
}

/// A stimulus which applies no field anywhere.
pub struct NoopStimulus;
impl AbstractStimulus for NoopStimulus {
    fn at(&self, _t_sec: f32, _pos: Meters) -> Fields {
        Fields::default()
    }
}
/// Constant (E, H) field applied identically at every position and time.
pub struct UniformStimulus {
    e: Vec3<f32>,
    h: Vec3<f32>,
}

impl UniformStimulus {
    pub fn new(e: Vec3<f32>, h: Vec3<f32>) -> Self {
        Self { e, h }
    }
    /// Convenience constructor: E-field only, zero H.
    pub fn new_e(e: Vec3<f32>) -> Self {
        Self::new(e, Vec3::zero())
    }
}

impl AbstractStimulus for UniformStimulus {
    fn at(&self, t_sec: f32, _pos: Meters) -> Fields {
        // position-independent; delegate to the time-varying view
        TimeVarying3::at(self, t_sec)
    }
}

impl TimeVarying for UniformStimulus {}
impl TimeVarying3 for UniformStimulus {
    fn at(&self, _t_sec: f32) -> Fields {
        (self.e, self.h)
    }
}
/// Deterministic pseudo-random stimulus: the field at any (t, pos) is a pure
/// function of the seed, so repeated queries at the same point agree.
pub struct RngStimulus {
    seed: u64,
    e_scale: f32,
    h_scale: f32,
}

impl RngStimulus {
    pub fn new(seed: u64) -> Self {
        Self { seed, e_scale: 1e15, h_scale: 1e15 }
    }
    /// Random E field only; H held at zero.
    pub fn new_e(seed: u64) -> Self {
        Self { seed, e_scale: 1e15, h_scale: 0.0 }
    }
    /// Derive a per-(t, pos, salt) RNG and draw a uniform vector in
    /// [-scale, scale]^3. The bit-mixing is ad-hoc but deterministic.
    fn gen(&self, t_sec: f32, pos: Meters, scale: f32, salt: u64) -> Vec3<f32> {
        use rand::{Rng as _, SeedableRng as _};
        let seed = self.seed
            ^ (t_sec.to_bits() as u64)
            ^ ((pos.x().to_bits() as u64) << 8)
            ^ ((pos.y().to_bits() as u64) << 16)
            ^ ((pos.z().to_bits() as u64) << 24)
            ^ (salt << 32);
        let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
        Vec3::new(
            rng.gen_range(-scale..=scale),
            rng.gen_range(-scale..=scale),
            rng.gen_range(-scale..=scale),
        )
    }
}

impl AbstractStimulus for RngStimulus {
    fn at(&self, t_sec: f32, pos: Meters) -> Fields {
        // distinct salts decorrelate the E and H draws at the same point
        (self.gen(t_sec, pos, self.e_scale, 0), self.gen(t_sec, pos, self.h_scale, 0x7de3))
    }
}
/// Apply a time-varying stimulus uniformly across some region
/// (zero field everywhere outside the region).
#[derive(Clone)]
pub struct Stimulus<R, T> {
    region: R,
    stim: T,
}

impl<R, T> Stimulus<R, T> {
    pub fn new(region: R, stim: T) -> Self {
        Self {
            region, stim
        }
    }
}

impl<R: Region + Sync, T: TimeVarying3 + Sync> AbstractStimulus for Stimulus<R, T> {
    fn at(&self, t_sec: f32, pos: Meters) -> Fields {
        if self.region.contains(pos) {
            self.stim.at(t_sec)
        } else {
            Fields::default()
        }
    }
}
/// Apply a time-varying stimulus across some region.
/// The stimulus seen at each point is based on its angle about the specified ray.
#[derive(Clone)]
pub struct CurlStimulus<R, T> {
    region: R,
    stim: T,
    // point the rotation is taken about
    center: Meters,
    // direction of the rotation axis through `center`
    axis: Meters,
}

impl<R, T> CurlStimulus<R, T> {
    pub fn new(region: R, stim: T, center: Meters, axis: Meters) -> Self {
        Self { region, stim, center, axis }
    }
}

impl<R: Region + Sync, T: TimeVarying1 + Sync> AbstractStimulus for CurlStimulus<R, T> {
    fn at(&self, t_sec: f32, pos: Meters) -> Fields {
        if self.region.contains(pos) {
            let (amt_e, amt_h) = self.stim.at(t_sec);
            // tangential direction about the axis at this point; its magnitude
            // is then forced to the scalar stimulus amplitude.
            let from_center_to_point = *pos - *self.center;
            let rotational = from_center_to_point.cross(*self.axis);
            let impulse_e = rotational.with_mag(amt_e.cast());
            let impulse_h = rotational.with_mag(amt_h.cast());
            (impulse_e, impulse_h)
        } else {
            Fields::default()
        }
    }
}
/// Base trait for time-varying signals: provides the combinators for delaying
/// (`shifted`) and windowing (`gated`) a signal.
pub trait TimeVarying: Sized {
    fn shifted(self, new_start: f32) -> Shifted<Self> {
        Shifted::new(self, new_start)
    }
    fn gated(self, from: f32, to: f32) -> Gated<Self> {
        Gated::new(self, from, to)
    }
}

/// Scalar-valued signal.
pub trait TimeVarying1: TimeVarying {
    /// Retrieve the (E, H) impulse to apply PER-SECOND at the provided time (in seconds).
    fn at(&self, t_sec: f32) -> (f32, f32);
}

/// Vector-valued signal.
pub trait TimeVarying3: TimeVarying {
    /// Retrieve the (E, H) impulse to apply PER-SECOND at the provided time (in seconds).
    fn at(&self, t_sec: f32) -> Fields;
}

// assumed to represent the E field
impl TimeVarying for f32 {}
impl TimeVarying1 for f32 {
    fn at(&self, _t_sec: f32) -> (f32, f32) {
        (*self, 0.0)
    }
}
/// E field which changes magnitude sinusoidally as a function of t
#[derive(Clone)]
pub struct Sinusoid<A> {
    // peak amplitude (scalar or vector, depending on A)
    amp: A,
    // angular frequency (rad/s)
    omega: f32,
}
pub type Sinusoid1 = Sinusoid<f32>;
pub type Sinusoid3 = Sinusoid<Vec3<f32>>;

impl<A> Sinusoid<A> {
    pub fn new(amp: A, freq: f32) -> Self {
        Self {
            amp,
            omega: freq * f32::two_pi(),
        }
    }
    /// NOTE: "wavelength" here is the temporal period (seconds), i.e. 1/freq.
    pub fn from_wavelength(amp: A, lambda: f32) -> Self {
        Self::new(amp, 1.0/lambda)
    }
    pub fn freq(&self) -> f32 {
        self.omega / f32::two_pi()
    }
    pub fn wavelength(&self) -> f32 {
        1.0 / self.freq()
    }
    /// Window the signal to exactly one full period starting at t=0.
    pub fn one_cycle(self) -> Gated<Self> {
        let wl = self.wavelength();
        Gated::new(self, 0.0, wl)
    }
    /// Window the signal to the first half-period (one polarity only).
    pub fn half_cycle(self) -> Gated<Self> {
        let wl = self.wavelength();
        Gated::new(self, 0.0, 0.5 * wl)
    }
}

impl<A> TimeVarying for Sinusoid<A> {}
impl TimeVarying1 for Sinusoid1 {
    fn at(&self, t_sec: f32) -> (f32, f32) {
        (
            self.amp * (t_sec * self.omega).sin(),
            0.0,
        )
    }
}
impl TimeVarying3 for Sinusoid3 {
    fn at(&self, t_sec: f32) -> Fields {
        (
            self.amp * (t_sec * self.omega).sin(),
            Vec3::zero(),
        )
    }
}
/// E field with magnitude that decays exponentially over t.
#[derive(Clone)]
pub struct Exp<A> {
    amp: A,
    // decay rate derived from the half-life: amp * exp(-tau * t)
    tau: f32,
}
pub type Exp1 = Exp<f32>;
pub type Exp3 = Exp<Vec3<f32>>;

impl<A> Exp<A> {
    pub fn new(amp: A, half_life: f32) -> Self {
        let tau = std::f32::consts::LN_2/half_life;
        Self { amp, tau }
    }
    /// Decay starting at `start`, truncated after 100 half-lives (by which
    /// point the amplitude is negligible).
    pub fn new_at(amp: A, start: f32, half_life: f32) -> Shifted<Gated<Self>> {
        Self::new(amp, half_life)
            .gated(0.0, half_life*100.0)
            .shifted(start)
    }
}

impl<A> TimeVarying for Exp<A> {}
impl TimeVarying1 for Exp1 {
    fn at(&self, t_sec: f32) -> (f32, f32) {
        (
            self.amp * (t_sec * -self.tau).exp(),
            0.0,
        )
    }
}
impl TimeVarying3 for Exp3 {
    fn at(&self, t_sec: f32) -> Fields {
        (
            self.amp * (t_sec * -self.tau).exp(),
            Vec3::zero(),
        )
    }
}
/// Window a signal to the half-open interval `[start, end)`; outside the
/// window the signal reads as zero.
#[derive(Clone)]
pub struct Gated<T> {
    inner: T,
    start: f32,
    end: f32,
}

impl<T> Gated<T> {
    pub fn new(inner: T, start: f32, end: f32) -> Self {
        Self { inner, start, end }
    }
}

impl<T> TimeVarying for Gated<T> {}

impl<T: TimeVarying1> TimeVarying1 for Gated<T> {
    fn at(&self, t_sec: f32) -> (f32, f32) {
        let in_window = self.start <= t_sec && t_sec < self.end;
        if in_window { self.inner.at(t_sec) } else { Default::default() }
    }
}

impl<T: TimeVarying3> TimeVarying3 for Gated<T> {
    fn at(&self, t_sec: f32) -> Fields {
        let in_window = self.start <= t_sec && t_sec < self.end;
        if in_window { self.inner.at(t_sec) } else { Default::default() }
    }
}
/// Delay a signal so that its t=0 occurs at `start_at` instead.
#[derive(Clone)]
pub struct Shifted<T> {
    inner: T,
    start_at: f32,
}

impl<T> Shifted<T> {
    pub fn new(inner: T, start_at: f32) -> Self {
        Self { inner, start_at }
    }
}

impl<T> TimeVarying for Shifted<T> {}

impl<T: TimeVarying1> TimeVarying1 for Shifted<T> {
    fn at(&self, t_sec: f32) -> (f32, f32) {
        let local_t = t_sec - self.start_at;
        self.inner.at(local_t)
    }
}

impl<T: TimeVarying3> TimeVarying3 for Shifted<T> {
    fn at(&self, t_sec: f32) -> Fields {
        let local_t = t_sec - self.start_at;
        self.inner.at(local_t)
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // compare an (E, H) tuple against expected values with a small absolute
    // tolerance (sin at period boundaries doesn't hit exactly zero in f32)
    macro_rules! assert_approx_eq {
        ($x:expr, $e:expr, $h:expr) => {
            let x = $x;
            let e = $e;
            let h = $h;
            let diff_e = (x.0 - e).mag();
            assert!(diff_e <= 0.001, "{:?} != {:?}", x, e);
            let diff_h = (x.1 - h).mag();
            assert!(diff_h <= 0.001, "{:?} != {:?}", x, h);
        }
    }

    #[test]
    fn sinusoid3() {
        // 1 kHz sine: check t = 0, quarter-, half-, and three-quarter-period
        let s = Sinusoid3::new(Vec3::new(10.0, 1.0, -100.0), 1000.0);
        assert_eq!(s.at(0.0), (Vec3::zero(), Vec3::zero()));
        assert_approx_eq!(s.at(0.00025),
            Vec3::new(10.0, 1.0, -100.0), Vec3::zero()
        );
        assert_approx_eq!(s.at(0.00050), Vec3::zero(), Vec3::zero());
        assert_approx_eq!(s.at(0.00075), Vec3::new(-10.0, -1.0, 100.0), Vec3::zero());
    }

    #[test]
    fn sinusoid3_from_wavelength() {
        // same signal specified by its 1 ms period instead of frequency
        let s = Sinusoid3::from_wavelength(Vec3::new(10.0, 1.0, -100.0), 0.001);
        assert_eq!(s.at(0.0), (Vec3::zero(), Vec3::zero()));
        assert_approx_eq!(s.at(0.00025), Vec3::new(10.0, 1.0, -100.0), Vec3::zero());
        assert_approx_eq!(s.at(0.00050), Vec3::zero(), Vec3::zero());
        assert_approx_eq!(s.at(0.00075), Vec3::new(-10.0, -1.0, 100.0), Vec3::zero());
    }
}

View File

@@ -0,0 +1,123 @@
//! supporting types for basic Stimulus trait/impls
use crate::real::Real;
use crate::cross::vec::Vec3;
/// field densities
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub struct Fields<R> {
pub e: Vec3<R>,
pub h: Vec3<R>,
}
impl<R> Fields<R> {
pub fn new_eh(e: Vec3<R>, h: Vec3<R>) -> Self {
Self { e, h}
}
}
impl<R: Real> Fields<R> {
pub fn e(&self) -> Vec3<R> {
self.e
}
pub fn h(&self) -> Vec3<R> {
self.h
}
pub fn new_e(e: Vec3<R>) -> Self {
Self::new_eh(e, Vec3::zero())
}
pub fn new_h(h: Vec3<R>) -> Self {
Self::new_eh(Vec3::zero(), h)
}
pub fn elem_mul(self, other: FieldMags<R>) -> Fields<R> {
Fields {
e: self.e * other.e,
h: self.h * other.h,
}
}
}
/// Component-wise accumulation of field pairs.
impl<R: Real> std::ops::AddAssign for Fields<R> {
    fn add_assign(&mut self, rhs: Self) {
        self.e += rhs.e;
        self.h += rhs.h;
    }
}

/// Component-wise sum, defined in terms of `+=`.
impl<R: Real> std::ops::Add for Fields<R> {
    type Output = Self;
    fn add(self, rhs: Self) -> Self::Output {
        let mut out = self;
        out += rhs;
        out
    }
}

/// Uniform scaling of both fields by a scalar.
impl<R: Real> std::ops::Mul<R> for Fields<R> {
    type Output = Self;
    fn mul(self, scale: R) -> Self::Output {
        Self {
            e: self.e * scale,
            h: self.h * scale,
        }
    }
}
/// field magnitude densities (really, signed magnitude)
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub struct FieldMags<R> {
    /// signed E-field magnitude
    pub e: R,
    /// signed H-field magnitude
    pub h: R,
}
impl<R: Real> std::ops::AddAssign for FieldMags<R> {
    /// component-wise accumulation.
    fn add_assign(&mut self, other: Self) {
        self.e += other.e;
        self.h += other.h;
    }
}
impl<R: Real> std::ops::Add for FieldMags<R> {
    type Output = Self;
    /// component-wise sum, implemented in terms of AddAssign.
    fn add(mut self, other: Self) -> Self::Output {
        self += other;
        self
    }
}
impl<R: Real> std::ops::Mul<R> for FieldMags<R> {
    type Output = Self;
    /// scale both magnitudes by the same scalar.
    fn mul(self, scale: R) -> Self::Output {
        FieldMags {
            e: self.e * scale,
            h: self.h * scale,
        }
    }
}
impl<R> FieldMags<R> {
    /// construct from explicit E and H magnitudes.
    pub fn new_eh(e: R, h: R) -> Self {
        Self { e, h }
    }
}
impl<R: Real> FieldMags<R> {
    /// construct with only the E magnitude set; H is zero.
    pub fn new_e(e: R) -> Self {
        Self { e, h: R::zero() }
    }
    /// construct with only the H magnitude set; E is zero.
    pub fn new_h(h: R) -> Self {
        Self { e: R::zero(), h }
    }
    /// copy of the E magnitude.
    pub fn e(&self) -> R {
        self.e
    }
    /// copy of the H magnitude.
    pub fn h(&self) -> R {
        self.h
    }
    /// element-wise product of the two magnitude pairs.
    pub fn elem_mul(self, other: Self) -> Self {
        Self {
            e: self.e * other.e,
            h: self.h * other.h,
        }
    }
}

View File

@@ -0,0 +1,295 @@
use crate::cross::vec::Vec3;
use crate::geom::{Coord as _, Index, Meters};
use crate::real::Real;
use coremem_cross::dim::DimSlice;
use coremem_cross::vec::Vec3u;
use std::borrow::Cow;
use std::ops::Deref;
use rand;
mod fields;
mod time_varying;
mod vector_field;
pub use fields::{Fields, FieldMags};
pub use time_varying::{
Exp,
Gated,
Pulse,
Scaled,
Shifted,
Sinusoid,
Summed,
TimeVarying,
TimeVaryingExt,
UnitEH,
};
pub use vector_field::{
CurlVectorField,
RegionGated,
VectorField,
};
pub trait Stimulus<R: Real>: Sync {
    /// Return the (E, H) field which should be added PER-SECOND to the provided position/time.
    fn at(&self, t_sec: R, feat_size: R, loc: Index) -> Fields<R>;
    /// compute the value of this stimulus across all the simulation space.
    /// default: evaluate `at` for every cell; RenderedStimulus overrides this to reuse a
    /// matching previous render.
    fn rendered<'a>(
        &'a self, scale: R, t_sec: R, feature_size: R, dim: Vec3u
    ) -> Cow<'a, RenderedStimulus<R>> {
        Cow::Owned(render_stim(self, scale, t_sec, feature_size, dim))
    }
}
/// evaluate `stim` at every cell of a `dim`-sized grid, scaling each sample by `scale`.
/// work is parallelized one (y, z) row at a time via rayon.
fn render_stim<R: Real, S: Stimulus<R> + ?Sized>(
    stim: &S, scale: R, t_sec: R, feature_size: R, dim: Vec3u
) -> RenderedStimulus<R> {
    let dim_len = dim.product_sum_usize();
    let mut e = Vec::new();
    e.resize_with(dim_len, Default::default);
    let mut h = Vec::new();
    h.resize_with(dim_len, Default::default);
    rayon::scope(|s| {
        // carve the flat buffers into disjoint x-rows with split_at_mut so each
        // spawned task owns exclusive &mut access to its row (no locking needed).
        let mut undispatched_e = &mut e[..];
        let mut undispatched_h = &mut h[..];
        for z in 0..dim.z() {
            for y in 0..dim.y() {
                let (this_e, this_h);
                (this_e, undispatched_e) = undispatched_e.split_at_mut(dim.x() as usize);
                (this_h, undispatched_h) = undispatched_h.split_at_mut(dim.x() as usize);
                s.spawn(move |_| {
                    for (x, (out_e, out_h)) in this_e.iter_mut().zip(this_h.iter_mut()).enumerate() {
                        let Fields { e, h } = stim.at(t_sec, feature_size, Index::new(x as u32, y, z));
                        *out_e = e * scale;
                        *out_h = h * scale;
                    }
                });
            }
        }
    });
    // the scope joins all row tasks before we repackage the buffers.
    let field_e = DimSlice::new(dim, e);
    let field_h = DimSlice::new(dim, h);
    RenderedStimulus::new(
        field_e, field_h, scale, feature_size, t_sec
    )
}
/// a stimulus pre-evaluated over the whole grid at one instant, so repeated
/// lookups become array indexing instead of recomputation.
#[derive(Clone)]
pub struct RenderedStimulus<R> {
    e: DimSlice<Vec<Vec3<R>>>,
    h: DimSlice<Vec<Vec3<R>>>,
    // parameters this render was evaluated at; used to detect cache hits in `rendered`.
    scale: R,
    feature_size: R,
    t_sec: R,
}
impl<R> RenderedStimulus<R> {
    pub fn new(
        e: DimSlice<Vec<Vec3<R>>>,
        h: DimSlice<Vec<Vec3<R>>>,
        scale: R,
        feature_size: R,
        t_sec: R,
    ) -> Self {
        Self { e, h, scale, feature_size, t_sec }
    }
    /// borrowed view of the rendered E field.
    pub fn e<'a>(&'a self) -> DimSlice<&'a [Vec3<R>]> {
        self.e.as_ref()
    }
    /// borrowed view of the rendered H field.
    pub fn h<'a>(&'a self) -> DimSlice<&'a [Vec3<R>]> {
        self.h.as_ref()
    }
}
impl<R: Real> RenderedStimulus<R> {
    /// scale factor the fields were multiplied by when rendered.
    pub fn scale(&self) -> R {
        self.scale
    }
    /// feature size used when rendering.
    pub fn feature_size(&self) -> R {
        self.feature_size
    }
    /// simulation time this render corresponds to.
    pub fn time(&self) -> R {
        self.t_sec
    }
}
// TODO: is this necessary?
impl<R: Real> VectorField<R> for RenderedStimulus<R> {
    /// lookup from the pre-rendered arrays; `feat_size` was baked in at render time.
    fn at(&self, _feat_size: R, loc: Index) -> Fields<R> {
        Fields::new_eh(self.e[loc.into()], self.h[loc.into()])
    }
}
impl<R: Real> Stimulus<R> for RenderedStimulus<R> {
    /// lookup from the pre-rendered arrays; time and feature size were baked in at render time.
    fn at(&self, _t_sec: R, _feat_size: R, loc: Index) -> Fields<R> {
        Fields::new_eh(self.e[loc.into()], self.h[loc.into()])
    }
    fn rendered<'a>(
        &'a self, scale: R, t_sec: R, feature_size: R, dim: Vec3u
    ) -> Cow<'a, RenderedStimulus<R>> {
        // cache hit: this render already matches every requested parameter, so borrow it
        // instead of re-evaluating the whole grid.
        if (self.scale, self.t_sec, self.feature_size, self.e.dim()) == (scale, t_sec, feature_size, dim) {
            Cow::Borrowed(self)
        } else {
            Cow::Owned(render_stim(self, scale, t_sec, feature_size, dim))
        }
    }
}
impl<R: Real> Stimulus<R> for Fields<R> {
    /// a bare Fields value acts as a stimulus that's uniform in both space and time.
    fn at(&self, _t_sec: R, _feat_size: R, _loc: Index) -> Fields<R> {
        *self
    }
}
/// a VectorField type whose amplitude is modulated by a TimeVarying component.
/// users will almost always use this as their stimulus implementation
pub struct ModulatedVectorField<V, T> {
    fields: V,
    modulation: T,
}
impl<V, T> ModulatedVectorField<V, T> {
    pub fn new(fields: V, modulation: T) -> Self {
        Self { fields, modulation }
    }
    /// decompose back into the (field, modulation) pair.
    pub fn into_inner(self) -> (V, T) {
        (self.fields, self.modulation)
    }
    pub fn fields(&self) -> &V {
        &self.fields
    }
    pub fn modulation(&self) -> &T {
        &self.modulation
    }
}
impl<R: Real, V: VectorField<R> + Sync, T: TimeVarying<R> + Sync> Stimulus<R> for ModulatedVectorField<V, T> {
    /// spatial field sampled at `loc`, scaled element-wise by the modulation at `t_sec`.
    fn at(&self, t_sec: R, feat_size: R, loc: Index) -> Fields<R> {
        self.fields.at(feat_size, loc).elem_mul(self.modulation.at(t_sec))
    }
}
/// used as a MapVisitor in order to evaluate each Stimulus in a List at a specific time/place.
// struct StimulusEvaluator {
// fields: Fields,
// t_sec: f32,
// feat_size: f32,
// loc: Index,
// }
//
// impl<S: Stimulus> Visitor<&S> for &mut StimulusEvaluator {
// fn visit(&mut self, next: &S) {
// self.fields += next.at(self.t_sec, self.feat_size, self.loc);
// }
// }
//
// impl<L: Sync> Stimulus for L
// where
// for<'a, 'b> &'a L: Visit<&'b mut StimulusEvaluator>,
// {
// fn at(&self, t_sec: f32, pos: Meters) -> Fields {
// let mut ev = StimulusEvaluator { t_sec, pos, fields: Fields::default()};
// self.visit(&mut ev);
// ev.fields
// }
// }
// conflicts with List implementation
// impl<T: Stimulus> Stimulus for &T {
// fn at(&self, t_sec: f32, feat_size: f32, loc: Index) -> Fields {
// (*self).at(t_sec, feat_size, loc)
// }
// }
/// ordered collection of stimuli; as a Stimulus it sums their contributions.
pub struct StimuliVec<S>(Vec<S>);
/// heterogeneous (type-erased) stimulus collection.
pub type DynStimuli<R> = StimuliVec<Box<dyn Stimulus<R> + Send>>;
impl<S> Default for StimuliVec<S> {
    fn default() -> Self {
        StimuliVec(Vec::default())
    }
}
impl<S> Deref for StimuliVec<S> {
    type Target = Vec<S>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl<S> StimuliVec<S> {
    /// empty collection.
    pub fn new() -> Self {
        Default::default()
    }
    /// wrap an existing Vec of stimuli.
    pub fn from_vec(stim: Vec<S>) -> Self {
        Self(stim)
    }
    /// append one stimulus to the end.
    pub fn push(&mut self, a: S) {
        self.0.push(a)
    }
}
impl<R: Real, S: Stimulus<R>> Stimulus<R> for StimuliVec<S> {
    /// superposition: the sum of every member's contribution at this point/time.
    fn at(&self, t_sec: R, feat_size: R, loc: Index) -> Fields<R> {
        let mut total = Fields::default();
        for stim in self.0.iter() {
            total += stim.at(t_sec, feat_size, loc);
        }
        total
    }
}
impl<R: Real> Stimulus<R> for Box<dyn Stimulus<R> + Send> {
    /// forward to the boxed implementation.
    fn at(&self, t_sec: R, feat_size: R, loc: Index) -> Fields<R> {
        self.as_ref().at(t_sec, feat_size, loc)
    }
}
/// stimulus which contributes nothing, anywhere, ever.
pub struct NoopStimulus;
impl<R: Real> Stimulus<R> for NoopStimulus {
    fn at(&self, _t_sec: R, _feat_size: R, _loc: Index) -> Fields<R> {
        Default::default()
    }
}
/// stimulus producing pseudo-random but deterministic field vectors.
pub struct RngStimulus {
    seed: u64,
    e_scale: f32,
    h_scale: f32,
}
impl RngStimulus {
    /// random E and H fields, each component in [-1e15, 1e15].
    pub fn new(seed: u64) -> Self {
        Self { seed, e_scale: 1e15, h_scale: 1e15 }
    }
    /// random E field only; H is always zero.
    pub fn new_e(seed: u64) -> Self {
        Self { seed, e_scale: 1e15, h_scale: 0.0 }
    }
    /// deterministic vector for a given (time, position, salt): the RNG is re-seeded
    /// for every query by folding the inputs into `self.seed`.
    /// NOTE(review): the shifted-XOR mixing overlaps bits across coordinates, which is
    /// weak as a hash — presumably acceptable for a stimulus source; confirm if stronger
    /// decorrelation is ever needed.
    fn gen(&self, t_sec: f32, pos: Meters, scale: f32, salt: u64) -> Vec3<f32> {
        use rand::{Rng as _, SeedableRng as _};
        let seed = self.seed
            ^ (t_sec.to_bits() as u64)
            ^ ((pos.x().to_bits() as u64) << 8)
            ^ ((pos.y().to_bits() as u64) << 16)
            ^ ((pos.z().to_bits() as u64) << 24)
            ^ (salt << 32);
        let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
        Vec3::new(
            rng.gen_range(-scale..=scale),
            rng.gen_range(-scale..=scale),
            rng.gen_range(-scale..=scale),
        )
    }
}
impl<R: Real> Stimulus<R> for RngStimulus {
    fn at(&self, t_sec: R, feat_size: R, loc: Index) -> Fields<R> {
        // distinct salts decorrelate the E and H samples at the same point/time.
        Fields {
            e: self.gen(t_sec.cast(), loc.to_meters(feat_size.cast()), self.e_scale, 0).cast(),
            h: self.gen(t_sec.cast(), loc.to_meters(feat_size.cast()), self.h_scale, 0x7de3).cast(),
        }
    }
}

View File

@@ -0,0 +1,252 @@
//! time-varying portions of a Stimulus
use crate::real::{self, Real};
use crate::stim::FieldMags;
/// a scalar (E, H) signal as a function of time only.
pub trait TimeVarying<R> {
    fn at(&self, t_sec: R) -> FieldMags<R>;
}
/// combinators for composing TimeVarying signals; blanket-implemented for every type.
pub trait TimeVaryingExt<R>: Sized {
    /// delay the signal so its t=0 lands at `new_start`.
    fn shifted(self, new_start: R) -> Shifted<R, Self> {
        Shifted::new(self, new_start)
    }
    /// zero the signal outside of [from, to).
    fn gated(self, from: R, to: R) -> Gated<R, Self> {
        Gated::new(Pulse::new(from, to), self)
    }
    /// multiply element-wise by another signal.
    fn scaled<T: TimeVarying<R>>(self, scale: T) -> Scaled<Self, T> {
        Scaled::new(self, scale)
    }
    /// add another signal element-wise.
    fn summed<T: TimeVarying<R>>(self, with: T) -> Summed<Self, T> {
        Summed::new(self, with)
    }
}
impl<R, T> TimeVaryingExt<R> for T {}
impl<R: Real> TimeVarying<R> for FieldMags<R> {
    /// constant in time.
    fn at(&self, _t_sec: R) -> FieldMags<R> {
        *self
    }
}
// assumed to represent the E field
impl<R: Real> TimeVarying<real::Finite<R>> for real::Finite<R> {
    fn at(&self, _t_sec: real::Finite<R>) -> FieldMags<real::Finite<R>> {
        FieldMags::new_e(*self)
    }
}
// bare floats likewise act as a constant, E-only signal.
impl TimeVarying<f32> for f32 {
    fn at(&self, _t_sec: f32) -> FieldMags<f32> {
        FieldMags::new_e(*self)
    }
}
impl TimeVarying<f64> for f64 {
    fn at(&self, _t_sec: f64) -> FieldMags<f64> {
        FieldMags::new_e(*self)
    }
}
// Vec<T> at any `t_sec` behaves as the sum of all its components at that time.
impl<R: Real, T: TimeVarying<R>> TimeVarying<R> for Vec<T> {
    fn at(&self, t_sec: R) -> FieldMags<R> {
        self.iter().fold(FieldMags::default(), |acc, i| acc + i.at(t_sec))
    }
}
/// constant unit signal: E = H = 1 at every instant.
pub struct UnitEH;
impl<R: Real> TimeVarying<R> for UnitEH {
    fn at(&self, _t_sec: R) -> FieldMags<R> {
        let unit = R::one();
        FieldMags::new_eh(unit, unit)
    }
}
/// E field which changes magnitude sinusoidally as a function of t
#[derive(Clone)]
pub struct Sinusoid<R> {
    // angular frequency (rad/s) = 2*pi*freq
    omega: R,
}
impl<R: Real> Sinusoid<R> {
    pub fn new(freq: R) -> Self {
        Self {
            omega: freq * R::two_pi(),
        }
    }
    /// `lambda` here is the temporal period in seconds (see tests), not a spatial wavelength.
    pub fn from_wavelength(lambda: R) -> Self {
        Self::new(lambda.inv())
    }
    pub fn freq(&self) -> R {
        self.omega / R::two_pi()
    }
    /// the period, in seconds.
    pub fn wavelength(&self) -> R {
        self.freq().inv()
    }
    /// gate to exactly one full period starting at t=0.
    pub fn one_cycle(self) -> Gated<R, Self> {
        let wl = self.wavelength();
        self.gated(R::zero(), wl)
    }
    /// gate to the first half period (the positive lobe).
    pub fn half_cycle(self) -> Gated<R, Self> {
        let wl = self.wavelength();
        self.gated(R::zero(), R::half() * wl)
    }
}
impl<R: Real> TimeVarying<R> for Sinusoid<R> {
    fn at(&self, t_sec: R) -> FieldMags<R> {
        // note: despite the struct doc, the sinusoid is applied to both E and H here.
        let v = (t_sec * self.omega).sin();
        FieldMags::new_eh(v, v)
    }
}
/// E field that decays exponentially over t.
/// zero for all t < 0
#[derive(Clone)]
pub struct Exp<R> {
    // decay rate: ln(2) / half_life, so the amplitude halves every `half_life` seconds.
    tau: R,
}
impl<R: Real + TimeVarying<R>> Exp<R> {
    pub fn new(half_life: R) -> Self {
        let tau = R::ln2()/half_life;
        Self { tau }
    }
    /// decay of amplitude `amp` beginning at `start`, gated off after 100 half-lives
    /// (by which point the amplitude is a negligible 2^-100 of `amp`).
    pub fn new_at(amp: R, start: R, half_life: R) -> Shifted<R, Gated<R, Scaled<Self, R>>> {
        Self::new(half_life).scaled(amp).gated(R::zero(), half_life*100f32.cast::<R>()).shifted(start)
    }
}
impl<R: Real> TimeVarying<R> for Exp<R> {
    fn at(&self, t_sec: R) -> FieldMags<R> {
        let a = if t_sec < R::zero() {
            // queries for very negative `t_sec` could cause `a` to explode
            // and IEEE 754 makes exp(LARGE) be infinity.
            // later, these queries are gated with a multiply-by-zero,
            // but 0 times INF is NaN.
            // so make this zero-valued before the moment of interest.
            // (an alternative would be to set it to 1.0).
            R::zero()
        } else {
            (t_sec * -self.tau).exp()
        };
        FieldMags::new_eh(a, a)
    }
}
/// pulses E=1.0 and H=1.0 over the provided duration.
/// this is used as a building block to gate some VectorField over a specific time.
#[derive(Clone)]
pub struct Pulse<R> {
    start: R,
    end: R,
}
impl<R> Pulse<R> {
    /// active over the half-open interval [start, end).
    pub fn new(start: R, end: R) -> Self {
        Self { start, end }
    }
}
impl<R: Real> Pulse<R> {
    /// true iff `t` falls inside the active window.
    fn contains(&self, t: R) -> bool {
        self.start <= t && t < self.end
    }
}
impl<R: Real> TimeVarying<R> for Pulse<R> {
    fn at(&self, t: R) -> FieldMags<R> {
        let level = if self.contains(t) { R::one() } else { R::zero() };
        FieldMags::new_eh(level, level)
    }
}
/// a signal zeroed outside a Pulse window: the element-wise product with the pulse.
pub type Gated<R, T> = Scaled<Pulse<R>, T>;
/// delays an inner signal so that its t=0 lands at `start_at`.
#[derive(Clone)]
pub struct Shifted<R, T> {
    start_at: R,
    inner: T,
}
impl<R, T> Shifted<R, T> {
    pub fn new(inner: T, start_at: R) -> Self {
        Self { inner, start_at }
    }
}
impl<R: Real, T: TimeVarying<R>> TimeVarying<R> for Shifted<R, T> {
    fn at(&self, t_sec: R) -> FieldMags<R> {
        // evaluate the inner signal in its own (shifted) time frame.
        self.inner.at(t_sec - self.start_at)
    }
}
/// element-wise product of two time-varying signals.
#[derive(Clone)]
pub struct Scaled<A, B>(A, B);
impl<A, B> Scaled<A, B> {
    pub fn new(a: A, b: B) -> Self {
        Scaled(a, b)
    }
}
impl<R: Real, A: TimeVarying<R>, B: TimeVarying<R>> TimeVarying<R> for Scaled<A, B> {
    fn at(&self, t_sec: R) -> FieldMags<R> {
        let Scaled(lhs, rhs) = self;
        lhs.at(t_sec).elem_mul(rhs.at(t_sec))
    }
}
/// element-wise sum of two time-varying signals.
#[derive(Clone)]
pub struct Summed<A, B>(A, B);
impl<A, B> Summed<A, B> {
    pub fn new(a: A, b: B) -> Self {
        Summed(a, b)
    }
}
impl<R: Real, A: TimeVarying<R>, B: TimeVarying<R>> TimeVarying<R> for Summed<A, B> {
    fn at(&self, t_sec: R) -> FieldMags<R> {
        let Summed(lhs, rhs) = self;
        lhs.at(t_sec) + rhs.at(t_sec)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // asserts both components of the FieldMags `$x` are within 1e-3 of ($e, $h).
    macro_rules! assert_approx_eq {
        ($x:expr, $e:expr, $h:expr) => {
            let x = $x;
            let e = $e;
            let h = $h;
            let diff_e = (x.e - e).abs();
            assert!(diff_e <= 0.001, "{:?} != {:?}", x, e);
            let diff_h = (x.h - h).abs();
            assert!(diff_h <= 0.001, "{:?} != {:?}", x, h);
        }
    }
    #[test]
    fn sinusoid() {
        // 1 kHz: quarter-period samples hit peak, zero, trough.
        let s = Sinusoid::new(1000.0);
        assert_eq!(s.at(0.0), FieldMags::default());
        assert_approx_eq!(s.at(0.00025), 1.0, 1.0);
        assert_approx_eq!(s.at(0.00050), 0.0, 0.0);
        assert_approx_eq!(s.at(0.00075), -1.0, -1.0);
    }
    #[test]
    fn sinusoid_from_wavelength() {
        // a period of 1 ms describes the same signal as 1 kHz above.
        let s = Sinusoid::from_wavelength(0.001);
        assert_eq!(s.at(0.0), FieldMags::default());
        assert_approx_eq!(s.at(0.00025), 1.0, 1.0);
        assert_approx_eq!(s.at(0.00050), 0.0, 0.0);
        assert_approx_eq!(s.at(0.00075), -1.0, -1.0);
    }
}

View File

@@ -0,0 +1,125 @@
use crate::geom::{Coord as _, HasCrossSection, Index, Region};
use crate::real::Real;
use crate::stim::Fields;
use coremem_cross::dim::DimSlice;
use coremem_cross::vec::Vec3u;
/// a static vector field. different value at each location, but constant in time.
/// often used as a building block by wrapping it in something which modulates the fields over
/// time.
pub trait VectorField<R> {
    fn at(&self, feat_size: R, loc: Index) -> Fields<R>;
}
// uniform vector field
impl<R: Real> VectorField<R> for Fields<R> {
    fn at(&self, _feat_size: R, _loc: Index) -> Fields<R> {
        *self
    }
}
// could broaden this and implement directly on T, but blanket impls
// are unwieldy
impl<R: Real, T> VectorField<R> for DimSlice<T>
where
    DimSlice<T>: core::ops::Index<Vec3u, Output=Fields<R>>
{
    /// direct per-cell lookup: the stored data already encodes position.
    fn at(&self, _feat_size: R, loc: Index) -> Fields<R> {
        self[loc.into()]
    }
}
/// restrict the VectorField to just the specified region, letting it be zero everywhere else
#[derive(Clone)]
pub struct RegionGated<G, V> {
    region: G,
    field: V,
}
impl<G, V> RegionGated<G, V> {
    pub fn new(region: G, field: V) -> Self {
        Self { region, field }
    }
}
impl<R: Real, G: Region + Sync, V: VectorField<R>> VectorField<R> for RegionGated<G, V> {
    /// zero outside the region; delegate to the inner field inside it.
    fn at(&self, feat_size: R, loc: Index) -> Fields<R> {
        let pos = loc.to_meters(feat_size.cast());
        if !self.region.contains(pos) {
            return Fields::default();
        }
        self.field.at(feat_size, loc)
    }
}
/// VectorField whose field at each point is based on its angle about the specified ray.
/// the field has equal E and H vectors. if you want just one, filter it out with `Scaled`.
#[derive(Clone)]
pub struct CurlVectorField<G> {
    region: G,
}
impl<G> CurlVectorField<G> {
    pub fn new(region: G) -> Self {
        Self { region }
    }
}
impl<R: Real, G: Region + HasCrossSection> VectorField<R> for CurlVectorField<G> {
    fn at(&self, feat_size: R, loc: Index) -> Fields<R> {
        let pos = loc.to_meters(feat_size.cast());
        if self.region.contains(pos) {
            // TODO: do we *want* this to be normalized?
            // unit-length tangent around the region's cross-section at `pos`,
            // written into both E and H.
            let rotational = self.region.cross_section_normal(pos).norm().cast();
            Fields::new_eh(rotational, rotational)
        } else {
            // outside the region the field is zero.
            Fields::default()
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::cross::vec::Vec3;
    use crate::geom::Meters;
    // region covering all of space whose cross-section normal is a fixed vector.
    struct MockRegion {
        normal: Vec3<f32>,
    }
    impl HasCrossSection for MockRegion {
        fn cross_section_normal(&self, _p: Meters) -> Vec3<f32> {
            self.normal
        }
    }
    impl Region for MockRegion {
        fn contains(&self, _p: Meters) -> bool {
            true
        }
    }
    #[test]
    fn curl_stimulus_trivial() {
        let region = MockRegion {
            normal: Vec3::new(1.0, 0.0, 0.0)
        };
        let stim = CurlVectorField::new(region);
        // an already-unit normal passes through unchanged, into both E and H.
        assert_eq!(stim.at(1.0, Index::new(0, 0, 0)), Fields {
            e: Vec3::new(1.0, 0.0, 0.0),
            h: Vec3::new(1.0, 0.0, 0.0),
        });
    }
    #[test]
    fn curl_stimulus_multi_axis() {
        let region = MockRegion {
            normal: Vec3::new(0.0, -1.0, 1.0)
        };
        let stim = CurlVectorField::new(region);
        let Fields { e, h } = stim.at(1.0, Index::new(0, 0, 0));
        // E and H always match, and the output is the normalized normal.
        assert_eq!(e, h);
        assert!(e.distance(Vec3::new(0.0, -1.0, 1.0).norm()) < 1e-6);
    }
}

View File

@@ -1,74 +0,0 @@
use log::trace;
use serde::{de::DeserializeOwned, Serialize};
/// placeholder supplier type for caches constructed without one.
pub struct NoSupplier;
/// append-only key/value cache persisted to a single bincode file.
/// every insert rewrites the whole file; intended for small sets of expensive-to-compute results.
pub struct DiskCache<K, V, S=NoSupplier> {
    path: String,
    entries: Vec<(K, V)>,
    supplier: S,
}
impl<K: DeserializeOwned, V: DeserializeOwned> DiskCache<K, V, NoSupplier> {
    pub fn new(path: &str) -> Self {
        Self::new_with_supplier(path, NoSupplier)
    }
}
impl<K: DeserializeOwned, V: DeserializeOwned, S> DiskCache<K, V, S> {
    /// `supplier` computes values for missing keys (see `get_or_insert_from_supplier`).
    /// a missing or unreadable file silently yields an empty cache.
    pub fn new_with_supplier(path: &str, supplier: S) -> Self {
        let entries = Self::load_from_disk(path).unwrap_or_default();
        Self {
            path: path.into(),
            entries,
            supplier,
        }
    }
    fn load_from_disk(path: &str) -> Option<Vec<(K, V)>> {
        let reader = std::io::BufReader::new(std::fs::File::open(path).ok()?);
        bincode::deserialize_from(reader).ok()
    }
}
impl<K: PartialEq, V, S> DiskCache<K, V, S> {
    /// lookup `k`, scanning newest-first so that a key inserted more than once
    /// resolves to its most recent value. (entries are append-only: `insert` pushes
    /// duplicates rather than replacing, so a forward scan would keep returning the
    /// stale first value forever, making updates silently ineffective.)
    pub fn get(&self, k: &K) -> Option<&V> {
        self.entries.iter().rev().find(|(comp_k, _v): &&(K, V)| comp_k == k).map(|(_k, v)| v)
    }
}
impl<K: Serialize, V: Serialize, S> DiskCache<K, V, S> {
    /// append the pair and persist the whole cache to disk immediately.
    pub fn insert(&mut self, k: K, v: V) {
        self.entries.push((k, v));
        self.flush();
    }
    /// serialize every entry to `self.path`.
    /// panics if the file cannot be created or written.
    fn flush(&self) {
        let file = std::fs::File::create(&self.path).unwrap();
        bincode::serialize_into(std::io::BufWriter::new(file), &self.entries).unwrap();
    }
}
impl<K: PartialEq + Serialize, V: Serialize + Clone, S> DiskCache<K, V, S> {
    /// return the cached value for `k`, or compute it with `f`, persist, and return it.
    pub fn get_or_insert_with<F: FnOnce() -> V>(&mut self, k: K, f: F) -> V {
        if let Some(v) = self.get(&k) {
            return v.clone();
        }
        let v = f();
        self.insert(k, v.clone());
        v
    }
}
impl<K: PartialEq + Serialize, V: Serialize + Clone, S: FnMut(&K) -> V> DiskCache<K, V, S> {
    /// like `get_or_insert_with`, but computes misses via the stored supplier closure.
    pub fn get_or_insert_from_supplier(&mut self, k: K) -> V {
        if let Some(v) = self.get(&k) {
            trace!("get_or_insert_from_supplier hit");
            return v.clone();
        }
        trace!("get_or_insert_from_supplier miss");
        let v = (self.supplier)(&k);
        self.insert(k, v.clone());
        v
    }
}

View File

@@ -1 +0,0 @@
pub mod cache;

View File

@@ -0,0 +1,216 @@
//! consumer/producer primitives
use crossbeam::channel::{self, Receiver, Sender};
/// thread pool fed by bounded channels: send commands in, receive responses out.
/// responses arrive in completion order, not submission order.
pub struct JobPool<C, R> {
    command_chan: Sender<C>,
    response_chan: Receiver<R>,
    // kept so newly spawned workers can clone their channel endpoints.
    worker_command_chan: Receiver<C>,
    worker_response_chan: Sender<R>,
    handles: Vec<std::thread::JoinHandle<()>>,
}
/// per-thread state: drains commands, applies `work_fn`, emits responses.
struct Worker<C, R, W> {
    command_chan: Receiver<C>,
    response_chan: Sender<R>,
    work_fn: W,
}
impl<C, R, W: Clone> Clone for Worker<C, R, W> {
    fn clone(&self) -> Self {
        Self {
            command_chan: self.command_chan.clone(),
            response_chan: self.response_chan.clone(),
            work_fn: self.work_fn.clone(),
        }
    }
}
impl<C, R: Send, W: Fn(C) -> R> Worker<C, R, W> {
    /// loop until the command channel is drained and every sender has hung up.
    /// send errors are ignored: the pool may have dropped its receiver (see join_workers).
    fn to_completion(self) {
        for cmd in &self.command_chan {
            let resp = (self.work_fn)(cmd);
            let _ = self.response_chan.send(resp);
        }
    }
}
impl<C, R> JobPool<C, R> {
    /// `buffer` is the channel capacity; 0 makes every send rendezvous with a ready worker.
    pub fn new(buffer: usize) -> Self {
        let (cmd_send, cmd_recv) = channel::bounded(buffer);
        let (resp_send, resp_recv) = channel::bounded(buffer);
        Self {
            command_chan: cmd_send,
            response_chan: resp_recv,
            worker_command_chan: cmd_recv,
            worker_response_chan: resp_send,
            handles: vec![],
        }
    }
    pub fn num_workers(&self) -> u32 {
        self.handles.len().try_into().unwrap()
    }
    /// block for the next response.
    pub fn recv(&self) -> R {
        self.response_chan.recv().unwrap()
    }
    /// `try_recv`. named `tend` because this is often used when we want to ensure no workers are
    /// blocked due to lack of space in the output queue.
    pub fn tend(&self) -> Option<R> {
        self.response_chan.try_recv().ok()
    }
    /// stop and join every worker thread. any commands or responses still queued are LOST,
    /// because both channels are replaced with fresh ones (the tests rely on this).
    pub fn join_workers(&mut self) {
        // hang up the sender, to signal workers to exit.
        let cap = self.command_chan.capacity().unwrap_or(0);
        (self.command_chan, self.worker_command_chan) = channel::bounded(cap);
        // dropping the old response receiver also unblocks workers stuck on a full response
        // queue: their send fails and is ignored (see Worker::to_completion).
        (self.worker_response_chan, self.response_chan) = channel::bounded(cap);
        for h in self.handles.drain(..) {
            h.join().unwrap();
        }
    }
}
impl<C: Send + 'static, R: Send + 'static> JobPool<C, R> {
    /// spawn `n` identical workers sharing one cloneable work function.
    pub fn spawn_workers<W: Fn(C) -> R + Send + Clone + 'static>(&mut self, n: u32, work_fn: W) {
        for _ in 0..n {
            self.spawn_worker(work_fn.clone());
        }
    }
}
impl<C: Send + 'static, R: Send + 'static> JobPool<C, R> {
    /// spawn one worker thread running `work_fn` over the shared command channel.
    pub fn spawn_worker<W: Fn(C) -> R + Send + 'static>(&mut self, work_fn: W) {
        let worker = Worker {
            command_chan: self.worker_command_chan.clone(),
            response_chan: self.worker_response_chan.clone(),
            work_fn,
        };
        self.handles.push(std::thread::spawn(move || {
            worker.to_completion()
        }));
    }
}
impl<C, R> Drop for JobPool<C, R> {
    /// ensure worker threads are joined rather than left detached.
    fn drop(&mut self) {
        self.join_workers();
    }
}
impl<C: Send + 'static, R> JobPool<C, R> {
    /// submit a command; blocks while the command queue is full.
    pub fn send(&self, cmd: C) {
        self.command_chan.send(cmd).unwrap();
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn lifecycle_no_workers() {
        let _pool: JobPool<(), ()> = JobPool::new(0);
    }
    #[test]
    fn lifecycle_some_workers() {
        let mut pool: JobPool<(), ()> = JobPool::new(0);
        pool.spawn_worker(|_| ());
        pool.spawn_workers(2, |_| ());
    }
    #[test]
    fn single_worker() {
        let mut pool: JobPool<u32, u32> = JobPool::new(0);
        pool.spawn_worker(|x| x*2);
        pool.send(5);
        assert_eq!(pool.recv(), 10);
        pool.send(4);
        assert_eq!(pool.recv(), 8);
    }
    #[test]
    fn multi_worker() {
        use std::sync::{Arc, Mutex};
        let mutex = Arc::new(Mutex::new(()));
        let worker_mutex = mutex.clone();
        let mut pool: JobPool<u32, u32> = JobPool::new(0);
        pool.spawn_workers(2, move |x| {
            // wait until caller unlocks us
            let _ = worker_mutex.lock().unwrap();
            x*2
        });
        pool.send(1);
        assert_eq!(pool.recv(), 2);
        {
            // with both workers stalled on the lock, each can still accept one
            // rendezvous'd command — so two sends must not block.
            let _lock = mutex.lock().unwrap();
            pool.send(4);
            pool.send(5); // shouldn't block
        }
        // responses may arrive in either order: sort before comparing.
        let mut replies = [pool.recv(), pool.recv()];
        replies.sort();
        assert_eq!(replies, [8, 10]);
    }
    #[test]
    fn exit_with_unclaimed_responses() {
        // Drop must join cleanly even though nobody ever recv'd.
        let mut pool: JobPool<u32, u32> = JobPool::new(0);
        pool.spawn_workers(2, |x| x*2);
        pool.send(5);
        pool.send(6);
    }
    #[test]
    fn num_workers() {
        let mut pool: JobPool<u32, u32> = JobPool::new(0);
        assert_eq!(pool.num_workers(), 0);
        pool.spawn_workers(2, |x| x*2);
        assert_eq!(pool.num_workers(), 2);
        pool.spawn_workers(1, |x| x*2);
        assert_eq!(pool.num_workers(), 3);
        pool.send(5);
        pool.send(6);
        assert_eq!(pool.num_workers(), 3);
        pool.recv();
        pool.recv();
        assert_eq!(pool.num_workers(), 3);
    }
    #[test]
    fn test_bounded() {
        let pool: JobPool<u32, u32> = JobPool::new(2);
        // we can do this without blocking even when there are no consumers
        // because it just gets buffered
        pool.send(1);
        pool.send(2);
    }
    #[test]
    fn join_workers() {
        let mut pool: JobPool<u32, u32> = JobPool::new(1);
        pool.spawn_workers(2, |x| x*2);
        pool.send(5);
        pool.join_workers();
        pool.spawn_workers(2, |x| x*2);
        pool.send(4);
        // the earlier response to '5' should be lost in the channel
        assert_eq!(pool.recv(), 8);
        // one message in the response queue; one in the send queue, 2 in the worker threads
        pool.send(3); pool.send(2); pool.send(1); pool.send(0);
        // should still be able to join even though everyone's blocked.
        pool.join_workers();
        pool.spawn_workers(1, |x| x*2);
        pool.send(7);
        // the old '0' command should be lost in the channel
        assert_eq!(pool.recv(), 14);
    }
}

19
crates/cross/Cargo.toml Normal file
View File

@@ -0,0 +1,19 @@
[package]
name = "coremem_cross"
version = "0.2.0"
authors = ["Colin <colin@uninsane.org>"]
edition = "2021"
[features]
# some functionality does not compile for the spirv target, so we feature gate these.
serde = [ "dep:serde" ]
fmt = []
iter = []
std = []
[dependencies]
serde = { version = "1.0", optional = true } # MIT or Apache 2.0
[dev-dependencies]
coremem_cross = { path = ".", default-features = false, features = ["iter", "fmt", "std"] }
float_eq = "1.0" # MIT or Apache 2.0

View File

@@ -0,0 +1,370 @@
use crate::compound::peano::{P0, Peano, PNext};
use crate::compound::list::{self, Indexable, IntoList, List};
// TODO: we can probably simplify a lot of this by using the newer List traits
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// implement for something which supports being called for this specific variant
pub trait Visitor<N: Peano, Arg, Output> {
    fn call(self, a: Arg) -> Output;
}
/// anything which can encode a discriminant up to *but not including* P
pub trait DiscriminantCodable<P: Peano>: Sized {
    fn decode_discr(&self) -> Discr<P>;
    fn encode_discr(d: Discr<P>) -> Self;
    /// overwrite self with the encoding of `d`.
    fn set_discr(&mut self, d: Discr<P>) {
        *self = Self::encode_discr(d)
    }
}
/// discriminant which encodes up to *but not including* P.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Discr<P: Peano>(u32, P::Unit);
impl<P: Peano> DiscriminantCodable<P> for Discr<P> {
    /// a Discr encodes itself trivially.
    fn decode_discr(&self) -> Self {
        *self
    }
    fn encode_discr(d: Discr<P>) -> Self {
        d
    }
}
impl<P: Peano> Discr<P> {
    /// panics if `u` is out of range for P variants.
    pub fn new(u: u32) -> Self {
        assert!(u < P::VALUE);
        Self::new_unchecked(u)
    }
    pub fn value(&self) -> u32 {
        self.0
    }
    fn new_unchecked(u: u32) -> Self {
        Self(u, Default::default())
    }
}
/// dispatch a runtime discriminant value to the statically-typed handler for that variant.
pub trait DiscrDispatch<P: Peano> {
    fn dispatch<H: DiscrHandler<P, Output>, Output>(&self, h: H) -> Output;
}
// inductive case: peel one variant off at a time until the value matches.
impl<P: Peano> DiscrDispatch<PNext<P>> for Discr<PNext<P>>
where
    Discr<P>: DiscrDispatch<P>
{
    fn dispatch<H: DiscrHandler<PNext<P>, O>, O>(&self, h: H) -> O {
        match self.value() {
            // consider P=2: we want to dispatch values of 0 and 1, but handle 2:
            // so dispatch v < P
            v if v < P::VALUE => Discr::<P>::new_unchecked(v).dispatch(h.prev()),
            v if v == P::VALUE => h.call(),
            _ => unreachable!(),
        }
    }
}
// base case: Discr<P0> has no valid values, so dispatch can never be reached.
impl DiscrDispatch<P0> for Discr<P0> {
    fn dispatch<H: DiscrHandler<P0, O>, O>(&self, _h: H) -> O {
        unreachable!()
    }
}
/// something which can be called with any value up to (but not including) N
pub trait DiscrHandler<N: Peano, R> {
    type PrevOrPanic: DiscrHandler<N::PrevOrZero, R>;
    /// called when the discriminant has value N-1
    fn call(self) -> R;
    /// discriminant is < N-1: dispatch to the next handler.
    /// in the case that N = 1, this path *should* be unreachable,
    /// so a panic would be allowed.
    fn prev(self) -> Self::PrevOrPanic;
}
/// helper used to call F with some (yet-to-be-determined) index of I
pub struct DispatchIndexable<I, F> {
    indexable: I,
    f: F,
}
impl<I, F> DispatchIndexable<I, F> {
    fn new(indexable: I, f: F) -> Self {
        Self { indexable, f }
    }
}
// base case: we tried to index all cases >= 0 and failed.
// this should be unreachable.
impl<'a, I, F, R> DiscrHandler<P0, R> for DispatchIndexable<I, F>
{
    type PrevOrPanic = Self;
    fn call(self) -> R {
        unreachable!()
    }
    fn prev(self) -> Self::PrevOrPanic {
        unreachable!()
    }
}
// inductive case: if we know how dispatch up through P, and the collection is P+1-indexable, then we can
// index up through P+1
impl<'a, I, F, P: Peano, R> DiscrHandler<PNext<P>, R> for DispatchIndexable<&'a I, F>
where
    I: Indexable<P>,
    I::Element: Copy,
    F: Visitor<P, I::Element, R>,
    Self: DiscrHandler<P, R>,
{
    type PrevOrPanic = Self;
    /// hand the P'th element (by value) to the visitor.
    fn call(self) -> R {
        self.f.call(self.indexable.get())
    }
    fn prev(self) -> Self::PrevOrPanic {
        self
    }
}
// mutable indexing case: if we have a mutable handle to the Indexable,
// then assume the variants want to have mutable references to the items.
impl<'a, I, F, P: Peano, R> DiscrHandler<PNext<P>, R> for DispatchIndexable<&'a mut I, F>
where
    I: Indexable<P>,
    I::Element: 'a,
    F: Visitor<P, &'a mut I::Element, R>,
    Self: DiscrHandler<P, R>,
{
    type PrevOrPanic = Self;
    /// hand the P'th element (by mutable reference) to the visitor.
    fn call(self) -> R {
        self.f.call(self.indexable.get_mut())
    }
    fn prev(self) -> Self::PrevOrPanic {
        self
    }
}
// struct-based enum: discriminant storage D plus a List L holding every variant's payload.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Enum<D, L>(D, L);
/// Users should prefer this type rather than rely on the internal Enum struct implementation.
#[allow(dead_code)]
pub type InternallyDiscriminated<Args> = Enum<(), List<Args>>;
impl<P: Peano, L> Enum<(Discr<P>,), L> {
    #![allow(dead_code)]
    /// enum with an explicit out-of-band discriminant (initially variant 0).
    pub fn new<Variants>(v: Variants) -> Self
    where
        Variants: IntoList<List=L>,
        L: list::Meta<Length=P>,
    {
        Enum((Discr::default(),), v.into_list())
    }
}
impl<L> Enum<(), L> {
    #![allow(dead_code)]
    /// enum whose first variant doubles as the discriminant (see DiscriminantCodable).
    pub fn internally_discriminated<Variants>(v: Variants) -> Self
    where
        Variants: IntoList<List=L>,
    {
        Enum((), v.into_list())
    }
}
/// common interface over both discriminant-storage strategies.
pub trait EnumRequirements {
    type NumVariants: Peano;
    fn decode_discr(&self) -> Discr<Self::NumVariants>;
    fn encode_discr(&mut self, d: Discr<Self::NumVariants>);
}
// out-of-band discriminant: stored in the leading tuple slot.
impl<D, L> EnumRequirements for Enum<(D,), L>
where
    D: DiscriminantCodable<<L as list::Meta>::Length>,
    L: list::Meta,
{
    type NumVariants = <L as list::Meta>::Length;
    fn decode_discr(&self) -> Discr<Self::NumVariants> {
        self.0.0.decode_discr()
    }
    fn encode_discr(&mut self, d: Discr<Self::NumVariants>) {
        self.0.0.set_discr(d)
    }
}
// internal discriminant: encoded within element 0 of the variant list itself.
impl<L> EnumRequirements for Enum<(), L>
where
    L: list::Meta + Indexable<P0>,
    list::ElementAt<P0, L>: DiscriminantCodable<<L as list::Meta>::Length>,
{
    type NumVariants = <L as list::Meta>::Length;
    fn decode_discr(&self) -> Discr<Self::NumVariants> {
        self.1.get_ref().decode_discr()
    }
    fn encode_discr(&mut self, d: Discr<Self::NumVariants>) {
        self.1.get_mut().set_discr(d)
    }
}
impl<D, L> Enum<D, L>
where
    Self: EnumRequirements
{
    /// invoke the closure on the active variant, passing the variant by-value
    pub fn dispatch<'a, F, R>(&'a self, f: F) -> R
    where
        DispatchIndexable<&'a L, F>: DiscrHandler<<Self as EnumRequirements>::NumVariants, R>,
        // TODO: this trait bound shouldn't be necessary. Discr ALWAYS implements DiscrDispatch
        Discr<<Self as EnumRequirements>::NumVariants>: DiscrDispatch<<Self as EnumRequirements>::NumVariants>,
    {
        self.decode_discr().dispatch(DispatchIndexable::new(&self.1, f))
    }
    /// invoke the closure on the active variant, passing the variant by mutable reference
    #[allow(dead_code)]
    pub fn dispatch_mut<'a, F, R>(&'a mut self, f: F) -> R
    where
        DispatchIndexable<&'a mut L, F>: DiscrHandler<<Self as EnumRequirements>::NumVariants, R>,
        Discr<<Self as EnumRequirements>::NumVariants>: DiscrDispatch<<Self as EnumRequirements>::NumVariants>,
    {
        self.decode_discr().dispatch(DispatchIndexable::new(&mut self.1, f))
    }
    /// make variant P active, storing `value` as its payload.
    pub fn set<P>(&mut self, value: L::Element)
    where
        P: Peano,
        L: Indexable<P>,
    {
        self.encode_discr(Discr::new(P::VALUE));
        self.1.set(value);
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::compound::peano::{P1, P2, P3};
    use crate::compound::list::List;
    // visitor that returns the active variant's value as an i32.
    struct ReadReceiver;
    impl<P: Peano, T: TryInto<i32>> Visitor<P, T, i32> for ReadReceiver {
        fn call(self, v: T) -> i32 {
            unsafe {
                v.try_into().unwrap_unchecked()
            }
        }
    }
    // visitor returning value + variant index + 5: proves the index P is threaded through.
    struct AddIndexPlus5Receiver;
    impl<P: Peano, T: TryInto<i32>> Visitor<P, T, i32> for AddIndexPlus5Receiver {
        fn call(self, v: T) -> i32 {
            unsafe {
                v.try_into().unwrap_unchecked() + P::VALUE as i32 + 5
            }
        }
    }
    // mutating visitor: adds 4 to the active variant in place.
    struct Add4Receiver;
    impl<P: Peano, T: TryInto<i32> + TryFrom<i32> + Copy> Visitor<P, &mut T, ()> for Add4Receiver {
        fn call(self, v: &mut T) -> () {
            unsafe {
                *v = ((*v).try_into().unwrap_unchecked() + 4).try_into().unwrap_unchecked();
            }
        }
    }
    #[test]
    fn dispatch() {
        let mut e: Enum<(Discr<P3>,), List<(u32, i32, u8)>> = Enum::default();
        assert_eq!(e.dispatch(AddIndexPlus5Receiver), 5);
        e.encode_discr(Discr::new(1));
        assert_eq!(e.dispatch(AddIndexPlus5Receiver), 6);
        e.encode_discr(Discr::new(2));
        assert_eq!(e.dispatch(AddIndexPlus5Receiver), 7);
    }
    #[test]
    fn dispatch_mut() {
        let mut e: Enum<(Discr<P3>,), List<(u32, i32, u8)>> = Enum::default();
        e.dispatch_mut(Add4Receiver);
        assert_eq!(e.dispatch(AddIndexPlus5Receiver), 9);
        // variant 1 was not mutated, so it still holds its default (0).
        e.encode_discr(Discr::new(1));
        assert_eq!(e.dispatch(AddIndexPlus5Receiver), 6);
    }
    #[test]
    fn set() {
        let mut e: Enum<(Discr<P3>,), List<(u32, i32, u8)>> = Enum::default();
        e.set::<P0>(2u32);
        assert_eq!(e.dispatch(ReadReceiver), 2);
        e.set::<P1>(3i32);
        assert_eq!(e.dispatch(ReadReceiver), 3);
    }
    // f32 wrapper used for internal discrimination: negative values encode a
    // discriminant; non-negative values mean "variant 0 is active".
    #[derive(Copy, Clone, Default, PartialEq)]
    struct BoxedF32(f32);
    impl Into<i32> for BoxedF32 {
        fn into(self) -> i32 {
            self.0 as i32
        }
    }
    impl From<i32> for BoxedF32 {
        fn from(v: i32) -> Self {
            Self(v as f32)
        }
    }
    impl<P: Peano> DiscriminantCodable<P> for BoxedF32 {
        fn decode_discr(&self) -> Discr<P> {
            match self.0 {
                v if v < 0f32 => Discr::new((-v) as u32),
                _non_negative => Discr::new(0),
            }
        }
        fn encode_discr(d: Discr<P>) -> Self {
            Self(-(d.value() as f32))
        }
    }
    #[test]
    fn internal_discr() {
        type E = Enum<(), List<(BoxedF32, i32, u8)>>;
        assert_eq!(<E as EnumRequirements>::NumVariants::VALUE, 3);
        let mut e: E = Enum::default();
        assert_eq!(e.dispatch(ReadReceiver), 0);
        e.set::<P0>(BoxedF32(16f32));
        assert_eq!(e.dispatch(ReadReceiver), 16);
        e.set::<P1>(5);
        assert_eq!(e.dispatch(ReadReceiver), 5);
        e.set::<P2>(8);
        assert_eq!(e.dispatch(ReadReceiver), 8);
        e.set::<P0>(BoxedF32(0f32));
        assert_eq!(e.dispatch(ReadReceiver), 0);
    }
    #[test]
    fn new() {
        type E = Enum<(Discr<P2>,), List<(u32, i32)>>;
        assert_eq!(<E as EnumRequirements>::NumVariants::VALUE, 2);
        let e: E = Enum::new((5u32, 4i32));
        assert_eq!(e.dispatch(ReadReceiver), 5);
        let e = Enum::internally_discriminated((BoxedF32(4f32), -1i32));
        assert_eq!(e.dispatch(ReadReceiver), 4);
    }
}

View File

@@ -0,0 +1,352 @@
//! list implementation where indexing is done non-recursively.
//! this puts a hard limit on the size of a list which can still be indexed (based on macro impl)
//! but works around a limitation in rust-gpu's spirv codegen which otherwise makes lists
//! containing ZSTs break the compiler.
//! this ZST bug should be fixed in later rust-gpu revisions. see: https://github.com/EmbarkStudios/rust-gpu/commit/03f89e8ba6f236218b3c5f9b18fe03c25a4d6a5c
use crate::compound::list::{Indexable, Meta};
use crate::compound::peano::{P0, Peano, PNext};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// one cell of a heterogeneous cons-list: a single element (`head`) plus the
/// rest of the list (`tail`, itself a chain of `Node`s terminated by `Null`).
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Node<H, T: ?Sized> {
    // the first element of the list
    head: H,
    // the remainder of the list
    tail: T,
}
/// the empty list / list terminator.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Null;
impl<H, T> Node<H, T> {
pub(crate) fn new(head: H, tail: T) -> Self {
Self { head, tail }
}
pub fn get<P: Peano>(&self) -> <Self as Indexable<P>>::Element
where
Self: Indexable<P>,
<Self as Indexable<P>>::Element: Copy,
{
Indexable::<P>::get(self)
}
}
/// conversion from a tuple (or unit) into the equivalent `Node`/`Null` list.
pub trait IntoList {
    /// the list type this value converts into.
    type List;
    fn into_list(self) -> Self::List;
}
/// the empty tuple converts to the empty list.
impl IntoList for () {
    type List = Null;
    fn into_list(self) -> Self::List {
        Null
    }
}
/// expands to the type name for a list with the provided types
/// ```text
/// list_for!(E0, E1, E2, T) => Node<E0, Node<E1, Node<E2, T>>>
/// ```
/// the final argument is used as-is (it becomes the innermost tail), so pass
/// `Null` last for a complete list.
macro_rules! list_for {
    ($head:ident) => ($head);
    ($head:ident, $($rest:ident),+) => (Node<$head, list_for!($($rest),+)>);
}
/// given N idents (after the literal `P0` marker), expands to the peano type
/// for the index of the *last* one, i.e. P`N-1` (one ident yields `P0`):
/// ```text
/// peano_for!(P0 a, b, c) => P2
/// ```
macro_rules! peano_for {
    (P0 $head:ident) => (P0);
    (P0 $head:ident, $($rest:ident),+) => (PNext<peano_for!(P0 $($rest),+)>);
}
/// expands to the last item in the sequence
/// ```text
/// last!(a, b, c) => c
/// ```
macro_rules! last {
    ($last:ident) => ($last);
    ($first:ident, $($rest:ident),+) => (last!($($rest),+));
}
/// transforms a list of idents into a `self.tail.[...].tail.head` field-access
/// expression of the same length: the first ident becomes `self`, the last
/// becomes `head`, and everything in between becomes `tail`.
/// ```text
/// member_index!(self tail head a, b, c, d, e) => self.tail.tail.tail.head
/// ```
macro_rules! member_index {
    // entry point: process `self` (requires at least two trailing idents)
    ($self:ident tail head $first:ident, $($rest:ident),+) => (
        member_index!(@partial tail head [$self] $($rest),+)
    );
    // recursion: replace `$next` with "tail" and repeat
    (@partial tail head [$($converted:ident),+] $next:ident, $($rest:ident),+) => (
        member_index!(@partial tail head [$($converted),+, tail] $($rest),+)
    );
    // process the head: replace `$last` with "head" and then trigger the concat
    (@partial tail head [$($converted:ident),+] $last:ident) => (
        member_index!(@partial [$($converted),+, head])
    );
    // base case: all items have been replaced with "tail" or "head":
    // join them with `.` into a single expression
    (@partial [$($converted:ident),+]) => ($($converted).+);
}
/// implements the Indexable trait, at the index of the last element provided,
/// for any list starting with the provided element prefix.
/// ```text
/// impl_indexable!(E0, E1, E2)
/// => impl<E0, E1, E2, T> Indexable<P2> for Node<E0, Node<E1, Node<E2, T>>> { ... }
/// ```
/// the trailing generic `T` lets the same impl apply to every longer list
/// sharing this prefix; the accessor bodies index non-recursively via
/// `member_index!` (see the module doc for why recursion is avoided).
macro_rules! impl_indexable {
    ($($elems:ident),+) => (
        impl<$($elems),+, T> Indexable<peano_for!(P0 $($elems),+)> for list_for!($($elems),+, T) {
            type Element = last!($($elems),+);
            // the extra `H` gives member_index one trailing ident to turn
            // into the final `.head`
            fn get(&self) -> Self::Element where Self::Element: Copy {
                member_index!(self tail head $($elems),+, H)
            }
            fn get_ref(&self) -> &Self::Element {
                &member_index!(self tail head $($elems),+, H)
            }
            fn get_mut(&mut self) -> &mut Self::Element {
                &mut member_index!(self tail head $($elems),+, H)
            }
            fn set(&mut self, v: Self::Element) {
                member_index!(self tail head $($elems),+, H) = v;
            }
        }
    );
}
/// implements the IntoList trait for the tuple of the provided elements.
/// ```text
/// impl_into_list!(E0, E1, E2)
/// => impl<E0, E1, E2> IntoList for (E0, E1, E2) { ... }
/// ```
macro_rules! impl_into_list {
    // syntax irregularities around the 1-tuple (`(E0,)`) means we need to
    // special-case this.
    ($only:ident) => (
        impl<$only> IntoList for ($only,) {
            type List = list_for!($only, Null);
            fn into_list(self) -> Self::List {
                let (only,) = self;
                Node::new(only, ().into_list())
            }
        }
    );
    ($first:ident, $($next:ident),+) => (
        impl<$first, $($next),+> IntoList for ($first, $($next),+) {
            type List = list_for!($first, $($next),+, Null);
            fn into_list(self) -> Self::List {
                // the tuple fields are bound to the type-parameter idents,
                // hence the non_snake_case allowance
                #[allow(non_snake_case)]
                let ( $first, $($next),+ ) = self;
                Node::new($first, ( $($next),+, ).into_list())
            }
        }
    );
}
/// implements both Indexable (at the index of the last provided element) and
/// IntoList (for the tuple of all provided elements) in one go;
/// invoked once per supported list arity below.
macro_rules! impl_list_traits {
    ($($elems:ident),+) => (
        impl_indexable!($($elems),+);
        impl_into_list!($($elems),+);
    )
}
// enumerate Indexable + IntoList impls for every list arity from 1 through
// 100: each invocation below adds one more element to the prefix.
// NOTE(review): these appear to be spelled out flat (rather than generated by
// a recursive driver macro) deliberately — presumably to keep macro expansion
// depth bounded; confirm before collapsing them.
impl_list_traits!(E0);
impl_list_traits!(E0, E1);
impl_list_traits!(E0, E1, E2);
impl_list_traits!(E0, E1, E2, E3);
impl_list_traits!(E0, E1, E2, E3, E4);
impl_list_traits!(E0, E1, E2, E3, E4, E5);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92, E93);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92, E93, E94);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92, E93, E94, E95);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92, E93, E94, E95, E96);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92, E93, E94, E95, E96, E97);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92, E93, E94, E95, E96, E97, E98);
impl_list_traits!(E0, E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13, E14, E15, E16, E17, E18, E19, E20, E21, E22, E23, E24, E25, E26, E27, E28, E29, E30, E31, E32, E33, E34, E35, E36, E37, E38, E39, E40, E41, E42, E43, E44, E45, E46, E47, E48, E49, E50, E51, E52, E53, E54, E55, E56, E57, E58, E59, E60, E61, E62, E63, E64, E65, E66, E67, E68, E69, E70, E71, E72, E73, E74, E75, E76, E77, E78, E79, E80, E81, E82, E83, E84, E85, E86, E87, E88, E89, E90, E91, E92, E93, E94, E95, E96, E97, E98, E99);
/// the type of `Tail` with `E` pushed onto the front.
pub type Prepended<E, Tail> = Node<E, Tail>;
/// lists which can grow by one element at the front.
pub trait Prependable {
    fn prepend<E>(self, e: E) -> Node<E, Self>;
}
impl Prependable for Null {
    fn prepend<E>(self, e: E) -> Node<E, Self> {
        Node::new(e, self)
    }
}
impl<H, T> Prependable for Node<H, T> {
    fn prepend<E>(self, e: E) -> Node<E, Self> {
        Node::new(e, self)
    }
}
pub type Appended<Head, Next> = <Head as Appendable<Next>>::Result;
pub trait Appendable<E> {
// XXX can't move the E parameter inside without Generic Associated Types
type Result;
fn append(self, e: E) -> Self::Result;
}
impl<E> Appendable<E> for Null {
type Result = Node<E, Null>;
fn append(self, e: E) -> Self::Result {
Node::new(e, Null)
}
}
impl<H, T, E> Appendable<E> for Node<H, T>
where T: Appendable<E>
{
type Result = Node<H, T::Result>;
fn append(self, e: E) -> Self::Result {
Node::new(self.head, self.tail.append(e))
}
}
// the empty list has type-level length zero.
impl Meta for Null {
    type Length = P0;
}
// a node's length is the successor of its tail's length, computed at the type level.
impl<H, T: Meta> Meta for Node<H, T> {
    type Length = PNext<T::Length>;
}
/// decompose a non-empty list into its first element and the remaining list.
/// implemented only for `Node` (never for `Null`), so splitting is statically
/// guaranteed to succeed.
pub trait SplitHead {
    type Head;
    type Tail;
    /// consume the list, returning the head element and the tail by value.
    fn split(self) -> (Self::Head, Self::Tail);
    /// borrow the head element and the tail.
    // lifetimes are elided: with a single `&self` input, both output borrows
    // get `self`'s lifetime — identical to the previous explicit `<'a>` form.
    fn split_ref(&self) -> (&Self::Head, &Self::Tail);
}
impl<H, T> SplitHead for Node<H, T> {
    type Head = H;
    type Tail = T;
    fn split(self) -> (Self::Head, Self::Tail) {
        (self.head, self.tail)
    }
    fn split_ref(&self) -> (&Self::Head, &Self::Tail) {
        (&self.head, &self.tail)
    }
}
/// these are exported for the convenience of potential consumers: not needed internally
pub(crate) mod exports {
    #![allow(dead_code)]
    use super::{IntoList, Node, Null};
    // resolve a tuple type to its list representation via `IntoList`.
    pub type List<Args> = <Args as IntoList>::List;
    // fixed-arity aliases, each built by prepending one element onto the next-smaller list.
    pub type List1<E0> = Node<E0, Null>;
    pub type List2<E0, E1> = Node<E0, List1<E1>>;
    pub type List3<E0, E1, E2> = Node<E0, List2<E1, E2>>;
    pub type List4<E0, E1, E2, E3> = Node<E0, List3<E1, E2, E3>>;
}
#[cfg(test)]
mod test {
    use super::*;
    // `append` grows the list at the back one element at a time, matching the
    // list built directly from the equivalent tuple.
    #[test]
    fn append() {
        let l0 = ().into_list();
        let l1 = l0.append(5u32);
        assert!(l1 == (5u32,).into_list());
        let l2 = l1.append(());
        assert!(l2 == (5u32, ()).into_list());
        let l3 = l2.append(4f32);
        assert!(l3 == (5u32, (), 4f32).into_list());
    }
}

View File

@@ -0,0 +1,130 @@
use crate::compound::peano::{P0, Peano, PeanoNonZero};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// interior node of the list: one element plus the remainder of the list.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Middle<H, T> {
    head: H,
    tail: T,
}

/// final node of the list: the last element, with no tail.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Terminal<H> {
    head: H,
    // XXX: needed to handle ZSTs in spirv, else we can't hand out a reference to &self
    _pad: u32,
}

impl<H, T> Middle<H, T> {
    /// build an interior node from an element and the rest of the list.
    fn new(head: H, tail: T) -> Self {
        Self { head, tail }
    }
}

impl<H> Terminal<H> {
    /// build the final node of a list from its last element.
    fn new(head: H) -> Self {
        Self { head, _pad: 0 }
    }
}
// Self is a Superlist of L
/// relates a list to the sublist found `Distance` elements past its head.
/// `Distance` is a type-level (Peano) index, so the relation is resolved
/// entirely at compile time.
pub trait Superlist<Distance: Peano> {
    type Of: ListOps;
    fn as_sublist(&self) -> &Self::Of;
}
// base case: every list is a Superlist of itself at distance zero.
impl<L: ListOps> Superlist<P0> for L {
    type Of = L;
    fn as_sublist(&self) -> &Self::Of {
        self
    }
}
// if our tail T0 is a Superlist<P-1> of T1,
// then we are a Superlist<P> of T1.
impl<H0, T0: ListOps, T1: ListOps, P: PeanoNonZero> Superlist<P> for Middle<H0, T0>
where
    T0: Superlist<P::Prev, Of=T1>
{
    type Of = T1;
    fn as_sublist(&self) -> &T1 {
        self.tail.as_sublist()
    }
}
/// operations available on any non-empty list: read the head element, or
/// reach a deeper element via the `Superlist` relation.
pub trait ListOps {
    type Element;
    /// copy out the head element.
    fn read(&self) -> Self::Element where Self::Element: Copy;
    /// borrow the sublist starting at type-level index `P`.
    fn index<P: Peano>(&self) -> &<Self as Superlist<P>>::Of
    where Self: Superlist<P>
    {
        self.as_sublist()
    }
    /// copy out the element at type-level index `P`.
    fn get<P: Peano>(&self) -> <<Self as Superlist<P>>::Of as ListOps>::Element
    where
        Self: Superlist<P>,
        <<Self as Superlist<P>>::Of as ListOps>::Element: Copy
    {
        self.index::<P>().read()
    }
}
impl<H> ListOps for Terminal<H> {
    type Element = H;
    fn read(&self) -> Self::Element where Self::Element: Copy {
        self.head
    }
}
impl<H, T> ListOps for Middle<H, T> {
    type Element = H;
    fn read(&self) -> Self::Element where Self::Element: Copy {
        self.head
    }
}
/// conversion from a tuple into the equivalent list type.
pub trait IntoList {
    type List;
    fn into_list(self) -> Self::List;
}
// base case: a one-element tuple becomes the list terminator.
impl<E0> IntoList for (E0,) {
    type List = Terminal<E0>;
    fn into_list(self) -> Self::List {
        Terminal::new(self.0)
    }
}
// recursive cases: peel the first element into a `Middle` node and convert the rest.
impl<E0, E1> IntoList for (E0, E1) {
    type List = Middle<E0, <(E1,) as IntoList>::List>;
    fn into_list(self) -> Self::List {
        Middle::new(self.0, (self.1,).into_list())
    }
}
impl<E0, E1, E2> IntoList for (E0, E1, E2) {
    type List = Middle<E0, <(E1, E2) as IntoList>::List>;
    fn into_list(self) -> Self::List {
        Middle::new(self.0, (self.1, self.2).into_list())
    }
}
/// convenience to name the list type corresponding to tuple `Args`.
pub type List<Args> = <Args as IntoList>::List;
#[cfg(test)]
mod test {
    use super::*;
    use crate::compound::peano::{P0, P1, P2};
    // indexing by Peano number selects each element in turn, with the expected type.
    #[test]
    fn list_index() {
        let l = (1u32, 3f32, 4u32).into_list();
        assert_eq!(l.read(), 1u32);
        assert_eq!(l.index::<P0>().read(), 1u32);
        assert_eq!(l.index::<P1>().read(), 3f32);
        assert_eq!(l.index::<P2>().read(), 4u32);
    }
}

View File

@@ -0,0 +1,724 @@
use crate::compound::peano::{Peano, P0};
// the flat representation is the active list implementation; `linked` and
// `tuple_consumer` are alternative experiments, currently disabled.
mod flat;
// mod linked;
// mod tuple_consumer;
// pub use tuple_consumer::*;
// pub use linked::*;
pub use flat::{IntoList, Appendable, Appended, Prependable, Prepended};
pub use flat::exports::*;
use flat::{Node, SplitHead};
/// the zero-length list.
pub type Empty = flat::Null;
/// something which can be indexed at `P`.
/// a List of length N is expected to implement `Indexable<P>` for all `P < N`.
pub trait Indexable<P: Peano> {
    type Element;
    /// copy out the element at `P`.
    fn get(&self) -> Self::Element where Self::Element: Copy;
    /// borrow the element at `P`.
    fn get_ref(&self) -> &Self::Element;
    /// mutably borrow the element at `P`.
    fn get_mut(&mut self) -> &mut Self::Element;
    /// overwrite the element at `P`.
    fn set(&mut self, v: Self::Element);
}
/// turbofish-friendly mirror of `Indexable`: `list.get::<P1>()` rather than
/// `<L as Indexable<P1>>::get(&list)`. blanket-implemented for every type;
/// each method is only callable where the corresponding `Indexable` bound holds.
pub trait IndexableExplicit {
    fn get<P: Peano>(&self) -> <Self as Indexable<P>>::Element
    where
        Self: Indexable<P>,
        Self::Element: Copy,
    {
        Indexable::get(self)
    }
    fn get_ref<P: Peano>(&self) -> &<Self as Indexable<P>>::Element
    where
        Self: Indexable<P>,
    {
        Indexable::get_ref(self)
    }
    fn get_mut<P: Peano>(&mut self) -> &mut <Self as Indexable<P>>::Element
    where
        Self: Indexable<P>,
    {
        Indexable::get_mut(self)
    }
    fn set<P: Peano>(&mut self, v: Self::Element)
    where
        Self: Indexable<P>,
    {
        Indexable::set(self, v)
    }
    // shorthands for index zero (the first element).
    fn get_first(&self) -> <Self as Indexable<P0>>::Element
    where
        Self: Indexable<P0>,
        Self::Element: Copy,
    {
        Indexable::get(self)
    }
    fn get_first_ref(&self) -> &<Self as Indexable<P0>>::Element
    where
        Self: Indexable<P0>,
    {
        Indexable::get_ref(self)
    }
    fn get_first_mut(&mut self) -> &mut <Self as Indexable<P0>>::Element
    where
        Self: Indexable<P0>,
    {
        Indexable::get_mut(self)
    }
    fn set_first(&mut self, v: Self::Element)
    where
        Self: Indexable<P0>,
    {
        Indexable::set(self, v)
    }
}
impl<L> IndexableExplicit for L {}
/// convenience to lookup the type of the element at index `P` of list `L`.
pub type ElementAt<P, L> = <L as Indexable<P>>::Element;
/// implemented by any List (including the Null, empty list)
pub trait Meta {
    /// type-level length of the list.
    type Length: Peano;
    /// runtime view of the type-level length.
    fn len(&self) -> u32 {
        Self::Length::VALUE
    }
}
/// something which consumes an entire list `L` and produces an output.
pub trait ListConsumer<L> {
    type Output;
    fn consume(self, a: L) -> Self::Output;
}
/// implement on your own type to process one list value and return whatever state is necessary to
/// process the subsequent value (and by extension all values)
pub trait FoldOp<State, V> {
    type Output;
    fn feed(&mut self, prev: State, next: V) -> Self::Output;
}
/// pairs a `FoldOp` with its running state; drives the op across a whole list.
pub struct FoldImpl<Op, State>(Op, State);
//////// fold by-value
// base case: folding the empty list yields the accumulated state.
impl<Op, State> ListConsumer<Empty> for FoldImpl<Op, State> {
    type Output = State;
    fn consume(self, _l: Empty) -> Self::Output {
        self.1
    }
}
// recursive case: feed the head into the op, then fold the tail with the new
// state. note the state TYPE may change at every element.
impl<H, T, Op, State> ListConsumer<Node<H, T>> for FoldImpl<Op, State>
where
    Op: FoldOp<State, H>,
    FoldImpl<Op, Op::Output>: ListConsumer<T>,
{
    type Output = <FoldImpl<Op, Op::Output> as ListConsumer<T>>::Output;
    fn consume(self, l: Node<H, T>) -> Self::Output {
        let FoldImpl(mut op, state) = self;
        let (head, tail) = l.split();
        let next_state = op.feed(state, head);
        FoldImpl(op, next_state).consume(tail)
    }
}
//////// fold by-ref
// same scheme as above, but the op sees `&H` and the list is only borrowed.
impl<Op, State> ListConsumer<&Empty> for FoldImpl<Op, State> {
    type Output = State;
    fn consume(self, _l: &Empty) -> Self::Output {
        self.1
    }
}
impl<'a, H, T, Op, State> ListConsumer<&'a Node<H, T>> for FoldImpl<Op, State>
where
    Op: FoldOp<State, &'a H>,
    FoldImpl<Op, Op::Output>: ListConsumer<&'a T>,
{
    type Output = <FoldImpl<Op, Op::Output> as ListConsumer<&'a T>>::Output;
    fn consume(self, l: &'a Node<H, T>) -> Self::Output {
        let FoldImpl(mut op, state) = self;
        let (head, tail) = l.split_ref();
        let next_state = op.feed(state, head);
        FoldImpl(op, next_state).consume(tail)
    }
}
/// user-facing entry point: fold the list with `op`, starting from `init`.
pub trait Fold<Op, Init> {
    type Output;
    fn fold(self, op: Op, init: Init) -> Self::Output;
}
impl<L, Op, Init> Fold<Op, Init> for L
where
    FoldImpl<Op, Init>: ListConsumer<L>
{
    type Output = <FoldImpl<Op, Init> as ListConsumer<L>>::Output;
    fn fold(self, op: Op, init: Init) -> Self::Output {
        FoldImpl(op, init).consume(self)
    }
}
/// side-effecting callback invoked once per element.
pub trait Visitor<E> {
    fn visit(&mut self, v: E);
}
/// adapts a `Visitor` into a `FoldOp` whose state is `()`.
pub struct VisitOp<V>(V);
impl<V, Next> FoldOp<(), Next> for VisitOp<V>
where
    V: Visitor<Next>
{
    type Output = ();
    fn feed(&mut self, _prev: (), next: Next) {
        self.0.visit(next)
    }
}
/// invokes the Visitor `V` on every element of the list.
pub trait Visit<V> {
    fn visit(self, v: V);
}
impl<V, L> Visit<V> for L
where
    L: Fold<VisitOp<V>, (), Output=()>
{
    fn visit(self, v: V) {
        self.fold(VisitOp(v), ())
    }
}
/// fold op which prepends each element onto the accumulator list; since the
/// fold walks front-to-back, the accumulated list comes out reversed.
pub struct ReverseOp;
impl<Prev, Next> FoldOp<Prev, Next> for ReverseOp {
    type Output = Node<Next, Prev>;
    fn feed(&mut self, prev: Prev, next: Next) -> Self::Output {
        Node::new(next, prev)
    }
}
/// reverse the order of the list's elements.
pub trait Reverse {
    type Output;
    fn reverse(self) -> Self::Output;
}
impl<L> Reverse for L
where
    L: Fold<ReverseOp, Empty>
{
    type Output = L::Output;
    fn reverse(self) -> Self::Output {
        self.fold(ReverseOp, Empty::default())
    }
}
/// fold op which adds each element onto the accumulator; heterogeneous sums
/// are supported so long as each `prev + next` is defined (the accumulator
/// type may change from step to step).
pub struct SumOp;
impl<Prev, Next> FoldOp<Prev, Next> for SumOp
where
    Prev: core::ops::Add<Next>,
{
    type Output = Prev::Output;
    fn feed(&mut self, prev: Prev, next: Next) -> Self::Output {
        prev + next
    }
}
/// sum all elements of the list onto `init`.
pub trait Sum<Init> {
    type Output;
    fn sum(self, init: Init) -> Self::Output;
}
impl<Init, L> Sum<Init> for L
where
    L: Fold<SumOp, Init>
{
    type Output = L::Output;
    fn sum(self, init: Init) -> Self::Output {
        self.fold(SumOp, init)
    }
}
#[cfg(feature = "std")]
mod into_vec {
    use super::*;
    /// fold op which pushes every element into a `Vec`; requires a homogeneous list.
    pub struct IntoVecOp;
    impl<Next> FoldOp<Vec<Next>, Next> for IntoVecOp {
        type Output = Vec<Next>;
        fn feed(&mut self, mut prev: Vec<Next>, next: Next) -> Self::Output {
            prev.push(next);
            prev
        }
    }
    /// collect a homogeneous list into a `Vec<T>`.
    pub trait IntoVec<T> {
        fn into_vec(self) -> Vec<T>;
    }
    impl<T, L> IntoVec<T> for L
    where
        L: Fold<IntoVecOp, Vec<T>, Output=Vec<T>>
    {
        fn into_vec(self) -> Vec<T> {
            self.fold(IntoVecOp, Vec::new())
        }
    }
}
#[cfg(feature = "std")]
pub use into_vec::{IntoVec, IntoVecOp};
/// user-supplied per-element transform for `Map`; may map each element type differently.
pub trait MapVisitor<V> {
    type Output;
    fn map(&self, elem: V) -> Self::Output;
}
/// fold op which appends each mapped element onto the accumulator list,
/// preserving the original element order.
pub struct MapOp<F>(F);
impl<Prev, Next, F> FoldOp<Prev, Next> for MapOp<F>
where
    F: MapVisitor<Next>,
    Prev: Appendable<F::Output>,
{
    type Output = Appended<Prev, F::Output>;
    fn feed(&mut self, prev: Prev, next: Next) -> Self::Output {
        prev.append(self.0.map(next))
    }
}
/// transform every element of the list with `Visitor`.
pub trait Map<Visitor> {
    type Output;
    fn map(self, op: Visitor) -> Self::Output;
}
impl<L, Visitor> Map<Visitor> for L
where
    L: Fold<MapOp<Visitor>, Empty>
{
    type Output = L::Output;
    fn map(self, visitor: Visitor) -> Self::Output {
        self.fold(MapOp(visitor), Empty::default())
    }
}
/// maps every element to itself; lets `Extend` reuse `MapOp`'s append-onto-accumulator behavior.
pub struct IdentityMapVisitor;
impl<V> MapVisitor<V> for IdentityMapVisitor {
    type Output = V;
    fn map(&self, elem: V) -> Self::Output {
        elem
    }
}
/// concatenate list `L` onto the end of `self`.
pub trait Extend<L> {
    type Output;
    fn extend(self, l: L) -> Self::Output;
}
impl<L0, L1> Extend<L1> for L0
where
    L1: Fold<MapOp<IdentityMapVisitor>, L0>
{
    type Output = L1::Output;
    fn extend(self, l: L1) -> Self::Output {
        // fold over the SECOND list, appending each of its elements onto `self`.
        l.fold(MapOp(IdentityMapVisitor), self)
    }
}
/// fold op which concatenates each element (itself a list) onto the accumulator.
pub struct FlattenOp;
impl<Prev, Next> FoldOp<Prev, Next> for FlattenOp
where
    Prev: Extend<Next>,
{
    type Output = Prev::Output;
    fn feed(&mut self, prev: Prev, next: Next) -> Self::Output {
        prev.extend(next)
    }
}
/// flatten one level of nesting: a list of lists becomes a single list.
pub trait Flatten {
    type Output;
    fn flatten(self) -> Self::Output;
}
impl<L> Flatten for L
where
    L: Fold<FlattenOp, Empty>
{
    type Output = L::Output;
    fn flatten(self) -> Self::Output {
        self.fold(FlattenOp, Empty::default())
    }
}
/// a value tagged at compile time with the Peano number `P` (e.g. its list
/// position). `Deref`s to the inner value.
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Tagged<P: Peano, V> {
    inner: V,
    // `P::Unit` is always (); it ties `P` into the struct without PhantomData.
    _p: P::Unit,
}
impl<P: Peano, V> Tagged<P, V> {
    pub fn new(inner: V) -> Self {
        Self { inner, _p: P::Unit::default() }
    }
    pub fn into_inner(self) -> V {
        self.inner
    }
}
impl<P: Peano, V> core::ops::Deref for Tagged<P, V> {
    type Target = V;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
impl<P: Peano, V> core::ops::DerefMut for Tagged<P, V> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// fold op which appends each element wrapped in `Tagged<index, _>`; the index
/// is recovered from the accumulator's type-level length at each step.
pub struct EnumerateOp;
impl<L, Next> FoldOp<L, Next> for EnumerateOp
where
    L: Meta,
    L: Appendable<Tagged<L::Length, Next>>,
{
    type Output = Appended<L, Tagged<L::Length, Next>>;
    fn feed(&mut self, prev: L, next: Next) -> Self::Output {
        prev.append(Tagged::new(next))
    }
}
/// tag every element with its type-level index: element `E` at position i becomes `Tagged<Pi, E>`.
pub trait Enumerate {
    type Output;
    fn enumerate(self) -> Self::Output;
}
impl<L> Enumerate for L
where
    L: Fold<EnumerateOp, Empty>
{
    type Output = L::Output;
    fn enumerate(self) -> Self::Output {
        self.fold(EnumerateOp, Empty::default())
    }
}
/// converts `Tagged<P, V>` into a runtime `(index, value)` pair; the index is
/// produced from `P::VALUE` via `From<u32>`.
pub struct MapTagToValueOp<T>(T /* unused */);
impl<P: Peano, V, T: From<u32>> MapVisitor<Tagged<P, V>> for MapTagToValueOp<T> {
    type Output = (T, V);
    fn map(&self, elem: Tagged<P, V>) -> Self::Output {
        (P::VALUE.into(), elem.into_inner())
    }
}
/// tag every element with its runtime index: element `E` becomes `(u32, E)`.
pub trait EnumerateU32 {
    type Output;
    fn enumerate_u32(self) -> Self::Output;
}
impl<L> EnumerateU32 for L
where
    L: Enumerate,
    L::Output: Map<MapTagToValueOp<u32>>,
{
    type Output = <L::Output as Map<MapTagToValueOp<u32>>>::Output;
    fn enumerate_u32(self) -> Self::Output {
        self.enumerate().map(MapTagToValueOp(0u32 /* unused */))
    }
}
#[cfg(test)]
mod test {
use super::*;
use crate::compound::peano::{P0, P1, P2};
struct SumVal;
impl FoldOp<i32, i32> for SumVal {
type Output = i32;
fn feed(&mut self, prev: i32, next: i32) -> Self::Output {
prev + next
}
}
#[test]
fn fold_prim() {
let list = (3, 4, 5i32).into_list();
assert_eq!(list.fold(SumVal, 2i32), 2+3+4+5);
}
impl FoldOp<i32, f32> for SumVal {
type Output = f32;
fn feed(&mut self, prev: i32, next: f32) -> Self::Output {
prev as f32 + next
}
}
impl FoldOp<f32, f32> for SumVal {
type Output = f32;
fn feed(&mut self, prev: f32, next: f32) -> Self::Output {
prev + next
}
}
impl FoldOp<f32, i32> for SumVal {
type Output = f32;
fn feed(&mut self, prev: f32, next: i32) -> Self::Output {
prev + next as f32
}
}
#[test]
fn fold_mixed() {
// we fold:
// 2i32 + 3i32
// 5i32 + 4i32
// 9i32 + 5.5f32
// 14.5f32 + 6.5f32
// 21f32 + 7i32
let list = (3i32, 4i32, 5.5f32, 6.5f32, 7i32).into_list();
assert_eq!(list.fold(SumVal, 2i32), 28f32);
}
#[derive(PartialEq)]
struct NotCopy(i32);
struct SumRef;
impl FoldOp<i32, &NotCopy> for SumRef {
type Output = i32;
fn feed(&mut self, prev: i32, next: &NotCopy) -> Self::Output {
prev + next.0
}
}
impl FoldOp<i32, &i32> for SumRef {
type Output = i32;
fn feed(&mut self, prev: i32, next: &i32) -> Self::Output {
prev + *next
}
}
#[test]
fn fold_ref() {
let list = &(3i32, NotCopy(4i32), 5i32).into_list();
assert_eq!(list.fold(SumRef, 2i32), 14i32);
assert!(list == list); // just check that it wasn't consumed
}
struct NoopVisitor;
impl<V> Visitor<V> for NoopVisitor {
fn visit(&mut self, _e: V) {}
}
#[test]
fn visit_noop() {
let list = (3f32, NotCopy(4i32), 5u32).into_list();
list.visit(NoopVisitor);
let list = &(3f32, NotCopy(4i32), 5u32).into_list();
list.visit(NoopVisitor);
}
struct AccumVisitor(i32);
impl Visitor<i32> for &mut AccumVisitor {
fn visit(&mut self, e: i32) {
self.0 += e * 2;
}
}
#[test]
fn visit_mut() {
let list = (3i32, 4i32, 5i32).into_list();
let mut v = AccumVisitor(0);
list.visit(&mut v);
assert_eq!(v.0, 24);
}
#[test]
fn into_vec() {
let list = (3i32, 4i32, 5i32).into_list();
assert_eq!(list.into_vec(), vec![3i32, 4, 5]);
}
#[test]
fn into_vec_ref() {
let list = &(3i32, 4i32, 5i32).into_list();
assert_eq!(list.into_vec(), vec![&3i32, &4, &5]);
}
#[test]
fn into_vec_empty() {
assert_eq!(IntoVec::<u32>::into_vec(Empty::default()), vec![]);
}
#[test]
fn sum() {
let list = (3i32, 4i32, 5i32).into_list();
assert_eq!(list.sum(2i32), 14i32);
}
struct SumA(f32);
#[derive(Debug, PartialEq)]
struct SumB(f32);
impl core::ops::Add<i32> for SumA {
type Output = SumB;
fn add(self, other: i32) -> Self::Output {
SumB(self.0 + other as f32)
}
}
impl core::ops::Add<f32> for SumB {
type Output = SumA;
fn add(self, other: f32) -> Self::Output {
SumA(self.0 + other)
}
}
#[test]
fn sum_mixed() {
let list = (3i32, 4f32, 5i32).into_list();
assert_eq!(list.sum(SumA(2f32)), SumB(14f32));
}
#[test]
fn reverse_empty() {
assert!(Empty::default().reverse() == Empty::default());
}
#[test]
fn reverse_non_empty() {
let list = (3i32, 4f32, 5u32).into_list();
let expected = (5u32, 4f32, 3i32).into_list();
assert!(list.reverse() == expected);
}
struct Double;
impl MapVisitor<i32> for Double {
type Output = i32;
fn map(&self, v: i32) -> Self::Output {
v + v
}
}
impl MapVisitor<f32> for Double {
type Output = f32;
fn map(&self, v: f32) -> Self::Output {
v + v
}
}
#[test]
fn map_empty() {
assert!(Empty::default().map(Double) == Empty::default());
}
#[test]
fn map_mixed() {
let list = (2i32, 3f32, 4i32).into_list();
let expected = (4i32, 6f32, 8i32).into_list();
assert!(list.map(Double) == expected);
}
#[test]
fn extend_empty() {
let l0 = Empty::default();
let l1 = Empty::default();
assert!(l0.extend(l1) == Empty::default());
}
#[test]
fn extend_with_empty() {
let l0 = (2u32, 3f32).into_list();
let l1 = Empty::default();
let expected = (2u32, 3f32).into_list();
assert!(l0.extend(l1) == expected);
}
#[test]
fn extend_from_empty() {
let l0 = Empty::default();
let l1 = (2u32, 3f32).into_list();
let expected = (2u32, 3f32).into_list();
assert!(l0.extend(l1) == expected);
}
#[test]
fn extend_mixed() {
let l0 = (2u32, 3f32).into_list();
let l1 = ("hello",).into_list();
let expected = (2u32, 3f32, "hello").into_list();
assert!(l0.extend(l1) == expected);
}
#[test]
fn flatten_empty() {
assert!(Empty::default().flatten() == Empty::default());
}
#[test]
fn flatten_inner_empty1() {
let l = (Empty::default(),).into_list();
assert!(l.flatten() == Empty::default());
}
#[test]
fn flatten_inner_empty2() {
let l = (Empty::default(), Empty::default()).into_list();
assert!(l.flatten() == Empty::default());
}
#[test]
fn flatten_mixed() {
let l = (
(2u32, 3f32).into_list(),
(4u32,).into_list(),
).into_list();
let expected = (
2u32,
3f32,
4u32,
).into_list();
assert!(l.flatten() == expected);
}
#[test]
fn flatten_nested() {
let l = (
(2u32, 3f32).into_list(),
("hello", ("every", "one").into_list()).into_list(),
(4u32,).into_list(),
).into_list();
let expected = (
2u32,
3f32,
"hello",
("every", "one").into_list(),
4u32,
).into_list();
assert!(l.flatten() == expected);
}
#[test]
fn enumerate_empty() {
assert!(Empty::default().enumerate() == Empty::default());
}
#[test]
fn enumerate_one() {
let list = (2i32,).into_list();
let expected = (Tagged::<P0, _>::new(2i32),).into_list();
assert!(list.enumerate() == expected);
}
#[test]
fn enumerate_multiple() {
let list = (2i32, (), 4f32).into_list();
let expected = (
Tagged::<P0, _>::new(2i32),
Tagged::<P1, _>::new(()),
Tagged::<P2, _>::new(4f32),
).into_list();
assert!(list.enumerate() == expected);
}
#[test]
fn enumerate_u32_multiple() {
let list = (2i32, (), 4f32).into_list();
let expected = (
(0u32, 2i32),
(1u32, ()),
(2u32, 4f32)
).into_list();
assert!(list.enumerate_u32() == expected);
}
}

View File

@@ -0,0 +1,161 @@
use crate::compound::peano::{P0, PNext};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// decompose a tuple into its first element and the tuple of the remainder.
/// implemented per arity; the `Tail` recursion bottoms out at `()`.
pub trait TuplePrims: Sized {
    type Head; // single element
    type Tail; // variably-sized tuple
    fn into_head(self) -> Self::Head;
    fn into_tail(self) -> Self::Tail;
}
impl<E0> TuplePrims for (E0,) {
    type Head = E0;
    type Tail = ();
    fn into_head(self) -> Self::Head {
        self.0
    }
    fn into_tail(self) -> Self::Tail {
        ()
    }
}
impl<E0, E1> TuplePrims for (E0, E1) {
    type Head = E0;
    type Tail = (E1,);
    fn into_head(self) -> Self::Head {
        self.0
    }
    fn into_tail(self) -> Self::Tail {
        (self.1,)
    }
}
impl<E0, E1, E2> TuplePrims for (E0, E1, E2) {
    type Head = E0;
    type Tail = (E1, E2);
    fn into_head(self) -> Self::Head {
        self.0
    }
    fn into_tail(self) -> Self::Tail {
        (self.1, self.2)
    }
}
impl<E0, E1, E2, E3> TuplePrims for (E0, E1, E2, E3) {
    type Head = E0;
    type Tail = (E1, E2, E3);
    fn into_head(self) -> Self::Head {
        self.0
    }
    fn into_tail(self) -> Self::Tail {
        (self.1, self.2, self.3)
    }
}
// note that this construction allows a zero-length list (Null),
// which is sort of interesting.
/// a list backed directly by a tuple of its elements.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct List<Args>(Args);
impl<Args> List<Args> {
    pub fn new(args: Args) -> Self {
        Self(args)
    }
}
/// list analogue of `TuplePrims`: split a `List` into its head element and tail `List`.
pub trait ListPrims: Sized {
    type Head; // single element
    type Tail; // variably-sized List
    fn into_head(self) -> Self::Head;
    fn into_tail(self) -> Self::Tail;
}
impl<Args: TuplePrims> ListPrims for List<Args> {
    type Head = Args::Head;
    type Tail = List<Args::Tail>;
    fn into_head(self) -> Self::Head {
        self.0.into_head()
    }
    fn into_tail(self) -> Self::Tail {
        List(self.0.into_tail())
    }
}
/// advance a list by `P` elements (a type-level "drop"), yielding the sublist
/// whose head is the element at index `P`.
pub trait Consumable<P> {
    type Result: ListPrims;
    fn consume(self) -> Self::Result;
}
// base case: consuming zero elements returns the list unchanged.
impl<Args: TuplePrims> Consumable<P0> for List<Args> {
    type Result = Self;
    fn consume(self) -> Self::Result {
        self
    }
}
// recursive case: drop the head, then consume P more from the tail.
impl<Args, P> Consumable<PNext<P>> for List<Args>
where
    Self: ListPrims,
    <Self as ListPrims>::Tail: Consumable<P>,
{
    type Result = <<Self as ListPrims>::Tail as Consumable<P>>::Result;
    fn consume(self) -> Self::Result {
        self.into_tail().consume()
    }
}
impl<Args: TuplePrims> List<Args> {
    /// drop the first `P` elements (turbofish-friendly wrapper over `Consumable`).
    pub fn consume<P>(self) -> <Self as Consumable<P>>::Result
    where Self: Consumable<P>
    {
        Consumable::<P>::consume(self)
    }
    /// extract the element at type-level index `P`, consuming the list.
    pub fn get<P>(self) -> <<Self as Consumable<P>>::Result as ListPrims>::Head
    where Self: Consumable<P>
    {
        self.consume().into_head()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::compound::peano::{P1, P2};
    // `get::<P>` extracts the element at type-level index `P`.
    // (stale commented-out duplicates of this test, and a `set` test for an API
    // that does not exist here, have been removed.)
    #[test]
    fn get() {
        let list = List((5u32, 4i32, 3f32));
        assert_eq!(list.get::<P0>(), 5u32);
        assert_eq!(list.get::<P1>(), 4i32);
        assert_eq!(list.get::<P2>(), 3f32);
    }
}

View File

@@ -0,0 +1,6 @@
// "compound" types: type-level containers and the helpers they are built from.
pub mod enumerated;
pub mod list;
mod optional;
pub mod peano;
pub use optional::Optional;

View File

@@ -0,0 +1,86 @@
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// This is a spirv-compatible option type.
/// The native rust Option type produces invalid spirv due to its enum nature; this custom option
/// type creates code which will actually compile.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, PartialEq)]
pub struct Optional<T> {
    // XXX: not a bool, because: "entrypoint parameter cannot contain a boolean"
    present: u8,
    data: T,
}

impl<T> Optional<T> {
    /// wrap `data` as a present value (analogous to `Some(data)`).
    pub fn some(data: T) -> Self {
        Self { present: 1, data }
    }
    /// construct an absent value which still carries `data` as its backing
    /// payload; useful when `T` has no `Default`.
    pub fn explicit_none(data: T) -> Self {
        Self { present: 0, data }
    }
    /// true when a value is present.
    pub fn is_some(self) -> bool {
        self.present != 0
    }
    /// extract the payload. presence is asserted in debug builds only; release
    /// builds return the backing payload unchecked.
    pub fn unwrap(self) -> T {
        debug_assert!(self.present != 0);
        self.data
    }
    /// transform a present value with `f`; absent values stay absent.
    pub fn map<U: Default, F: FnOnce(T) -> U>(self, f: F) -> Optional<U> {
        self.and_then(|inner| Optional::some(f(inner)))
    }
    /// monadic bind: feed a present value through `f`, else yield none.
    pub fn and_then<U: Default, F: FnOnce(T) -> Optional<U>>(self, f: F) -> Optional<U> {
        match self.present {
            0 => Optional::none(),
            _ => f(self.data),
        }
    }
    /// the payload when present, else `default`.
    pub fn unwrap_or(self, default: T) -> T {
        match self.present {
            0 => default,
            _ => self.data,
        }
    }
}

impl<T: Default> Optional<T> {
    /// an absent value backed by `T::default()`.
    pub fn none() -> Self {
        Self::explicit_none(T::default())
    }
    /// the payload when present, else `T::default()`.
    pub fn unwrap_or_default(self) -> T {
        self.unwrap_or(T::default())
    }
}

impl<T: Default> Default for Optional<T> {
    fn default() -> Self {
        Self::none()
    }
}

impl<T0: Default, T1: Default> Optional<(T0, T1)> {
    /// combine two optionals into one optional pair; absent if either side is absent.
    pub fn flatten((f0, f1): (Optional<T0>, Optional<T1>)) -> Self {
        match (f0.present, f1.present) {
            (0, _) | (_, 0) => Optional::none(),
            _ => Optional::some((f0.data, f1.data)),
        }
    }
}

View File

@@ -0,0 +1,76 @@
//! Peano numbers (also known as Church numerals) are type-level natural numbers.
//! each non-zero Peano number is defined as the unique successor of a previous Peano number.
//!
//! - given some Peano number I, we can derive its successor by S=PNext<I>.
//! - given a Peano number PNext<I>, we can define its predecessor as I=P.
//! - the base Peano number, which represents 0, is `P0`.
//! - the `Peano` trait exposes alternative syntaxes for these: `P::Next` and `P::PrevOrZero`
//!   (plus `PeanoNonZero`'s exact `P::Prev`), however the type system can reason less about
//!   these (they're just a convenience).
//!
//! the primary use of Peano numbers is to allow types to specialize on a specific natural number
//! out of some larger set of natural numbers. e.g. one might have a `struct List<Length: Peano>`
//! to allow constructing a list of compile-time constant length.
//!
//! this whole module will hopefully be obsoleted as Rust's type-level integers become more
//! capable, but in 2022 Peano numbers enable more operations (arithmetic, specialization) than type-level integers.
/// the successor of the Peano number `P` (i.e. `P + 1`).
#[derive(Copy, Clone, Default, PartialEq)]
pub struct PNext<P>(P);
/// the base Peano number: zero.
#[derive(Copy, Clone, Default, PartialEq)]
pub struct P0;
pub type P1 = PNext<P0>;
/// these are exported for the convenience of potential consumers: not needed internally
mod exports {
    #![allow(dead_code)]
    use super::{P1, PNext};
    pub type P2 = PNext<P1>;
    pub type P3 = PNext<P2>;
    pub type P4 = PNext<P3>;
    pub type P5 = PNext<P4>;
    pub type P6 = PNext<P5>;
    pub type P7 = PNext<P6>;
    pub type P8 = PNext<P7>;
    pub type P9 = PNext<P8>;
    pub type P10 = PNext<P9>;
    pub type P11 = PNext<P10>;
    pub type P12 = PNext<P11>;
    pub type P13 = PNext<P12>;
    pub type P14 = PNext<P13>;
    pub type P15 = PNext<P14>;
}
pub use exports::*;
/// operations common to every Peano number.
pub trait Peano: Copy + Clone + Default + PartialEq {
    /// the successor (this number plus one); always non-zero.
    type Next: PeanoNonZero;
    /// the predecessor, saturating at zero (`P0::PrevOrZero` is `P0`).
    type PrevOrZero: Peano;
    /// always set to ()
    /// this exists to allow Peano numbers to be used as struct parameters without PhantomData
    type Unit: Copy + Clone + Default + PartialEq;
    /// the runtime value of this type-level number.
    const VALUE: u32;
}
/// Peano numbers with an exact predecessor: everything except `P0`.
pub trait PeanoNonZero: Peano {
    type Prev: Peano;
}
impl Peano for P0 {
    type Next = P1;
    type PrevOrZero = P0;
    type Unit = ();
    const VALUE: u32 = 0;
}
impl<P: Peano> Peano for PNext<P> {
    type Next = PNext<PNext<P>>;
    type PrevOrZero = P;
    type Unit = ();
    const VALUE: u32 = 1 + P::VALUE;
}
impl<P: Peano> PeanoNonZero for PNext<P> {
    type Prev = P;
}
// A: LessThan<B> is satisfied only if A is strictly less than B.
//
// defined inductively over BOTH operands so the relation holds for any strictly
// smaller number, not just the immediate predecessor (the previous single impl
// `impl<P: Peano> LessThan<PNext<P>> for P` only gave `P < P+1`, so e.g.
// `P0: LessThan<P2>` never held, contradicting the comment above):
// - zero is less than every successor;
// - A+1 < B+1 whenever A < B.
pub trait LessThan<P: Peano> { }
impl<B: Peano> LessThan<PNext<B>> for P0 { }
impl<A: Peano, B: Peano> LessThan<PNext<B>> for PNext<A> where A: LessThan<B> { }

View File

@@ -0,0 +1,365 @@
use core::convert::{AsMut, AsRef};
use core::iter::Zip;
use core::ops::{Index, IndexMut};
use crate::vec::Vec3u;
/// use this to wrap a flat region of memory into something which can be indexed by coordinates in
/// 3d space.
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Clone, Default, PartialEq)]
pub struct DimSlice<T> {
    // extent along x/y/z; `items` is laid out with x varying fastest.
    dim: Vec3u,
    items: T,
}
impl<T> DimSlice<T> {
    pub fn new(dim: Vec3u, items: T) -> Self {
        Self { dim, items }
    }
    /// the 3d extent of the slice.
    pub fn dim(&self) -> Vec3u {
        self.dim
    }
    /// unwrap the backing storage.
    pub fn into_inner(self) -> T {
        self.items
    }
    /// iterator over every coordinate in `dim`, in memory order.
    pub fn indices(&self) -> DimIter {
        DimIter::new(self.dim)
    }
    /// re-borrow the slice with a different lifetime.
    pub fn as_ref<R: ?Sized>(&self) -> DimSlice<&R>
    where T: AsRef<R>
    {
        DimSlice::new(self.dim, self.items.as_ref())
    }
    /// re-borrow the slice with a different lifetime.
    pub fn as_mut<R: ?Sized>(&mut self) -> DimSlice<&mut R>
    where T: AsMut<R>
    {
        DimSlice::new(self.dim, self.items.as_mut())
    }
}
#[cfg(feature = "iter")]
impl<T: IntoIterator> DimSlice<T> {
    /// iterate `(coordinate, item)` pairs in memory order.
    pub fn enumerated(self) -> Zip<DimIter, T::IntoIter> {
        self.indices().zip(self.into_iter())
    }
}
/// map a 3d coordinate onto a flat index: x varies fastest, then y, then z
/// (index = (z*dim.y + y)*dim.x + x).
fn index(loc: Vec3u, dim: Vec3u) -> usize {
    ((loc.z()*dim.y() + loc.y())*dim.x() + loc.x()) as usize
}
// `Vec3u` indexing, delegating to the flat `index` computation, for each
// supported backing storage: shared borrows, mutable borrows, and (with the
// std feature) owned containers.
impl<'a, T: Index<usize> + ?Sized> Index<Vec3u> for DimSlice<&'a T> {
    type Output=T::Output;
    fn index(&self, idx: Vec3u) -> &Self::Output {
        let idx = index(idx, self.dim);
        &self.items[idx]
    }
}
impl<'a, T: Index<usize> + ?Sized> Index<Vec3u> for DimSlice<&'a mut T> {
    type Output=T::Output;
    fn index(&self, idx: Vec3u) -> &Self::Output {
        let idx = index(idx, self.dim);
        &self.items[idx]
    }
}
impl<'a, T: IndexMut<usize> + ?Sized> IndexMut<Vec3u> for DimSlice<&'a mut T> {
    fn index_mut(&mut self, idx: Vec3u) -> &mut Self::Output {
        let idx = index(idx, self.dim);
        &mut self.items[idx]
    }
}
#[cfg(feature = "std")]
impl<T> Index<Vec3u> for DimSlice<Vec<T>> {
    type Output=T;
    fn index(&self, idx: Vec3u) -> &Self::Output {
        let idx = index(idx, self.dim);
        &self.items[idx]
    }
}
#[cfg(feature = "std")]
impl<T> IndexMut<Vec3u> for DimSlice<Vec<T>> {
    fn index_mut(&mut self, idx: Vec3u) -> &mut Self::Output {
        let idx = index(idx, self.dim);
        &mut self.items[idx]
    }
}
#[cfg(feature = "std")]
impl<T> Index<Vec3u> for DimSlice<Box<[T]>> {
    type Output=T;
    fn index(&self, idx: Vec3u) -> &Self::Output {
        let idx = index(idx, self.dim);
        &self.items[idx]
    }
}
#[cfg(feature = "std")]
impl<T> IndexMut<Vec3u> for DimSlice<Box<[T]>> {
    fn index_mut(&mut self, idx: Vec3u) -> &mut Self::Output {
        let idx = index(idx, self.dim);
        &mut self.items[idx]
    }
}
// iteration is delegated to the backing storage (items come out in memory order).
impl<T: IntoIterator> IntoIterator for DimSlice<T> {
    type Item = T::Item;
    type IntoIter = T::IntoIter;
    fn into_iter(self) -> Self::IntoIter {
        self.items.into_iter()
    }
}
/// iterator over all coordinates of a 3d extent, x varying fastest.
pub struct DimIter {
    // next coordinate to yield; `idx.z == dim.z` marks exhaustion.
    idx: Vec3u,
    dim: Vec3u,
}
impl DimIter {
    fn new(dim: Vec3u) -> Self {
        Self {idx: Vec3u::default(), dim }
    }
}
#[cfg(feature = "iter")]
impl Iterator for DimIter {
    type Item = Vec3u;
    /// yields every coordinate in `dim` in memory order: x varies fastest,
    /// then y, then z. `idx.z == dim.z` marks exhaustion.
    fn next(&mut self) -> Option<Self::Item> {
        if self.dim.x() == 0 || self.dim.y() == 0 || self.dim.z() == 0 {
            // no items
            return None;
        }
        if self.idx.z() == self.dim.z() {
            // reached the end
            return None;
        }
        let cur = self.idx;
        self.idx = match cur.x()+1 {
            // need to increment y
            next_x if next_x == self.dim.x() => match cur.y() + 1 {
                // need to increment z
                next_y if next_y == self.dim.y() => Vec3u::new(0, 0, cur.z() + 1),
                next_y => Vec3u::new(0, next_y, cur.z()),
            },
            next_x => Vec3u::new(next_x, cur.y(), cur.z()),
        };
        Some(cur)
    }
    /// exact remaining count, so adaptors like `zip` and `collect` can
    /// pre-size their allocations (the default hint is `(0, None)`).
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.dim.x() == 0 || self.dim.y() == 0 || self.dim.z() == 0 {
            return (0, Some(0));
        }
        let total = (self.dim.x() * self.dim.y() * self.dim.z()) as usize;
        // flat index of the next coordinate to yield == number already consumed;
        // equals `total` exactly when the iterator is exhausted.
        let consumed = ((self.idx.z()*self.dim.y() + self.idx.y())*self.dim.x() + self.idx.x()) as usize;
        let remaining = total.saturating_sub(consumed);
        (remaining, Some(remaining))
    }
}
#[cfg(test)]
mod test {
use super::*;
// layout convention under test: x is the fastest-varying axis, z the slowest;
// flat index = x + dim.x()*(y + dim.y()*z)
#[test]
fn test_index() {
let dim = Vec3u::new(2, 3, 7);
assert_eq!(index(Vec3u::new(0, 0, 0), dim), 0);
assert_eq!(index(Vec3u::new(1, 0, 0), dim), 1);
assert_eq!(index(Vec3u::new(0, 1, 0), dim), 2);
assert_eq!(index(Vec3u::new(1, 1, 0), dim), 3);
assert_eq!(index(Vec3u::new(0, 2, 0), dim), 4);
// stepping z by 1 skips a full 2x3 plane (6 elements)
assert_eq!(index(Vec3u::new(0, 0, 1), dim), 6);
assert_eq!(index(Vec3u::new(1, 0, 1), dim), 7);
assert_eq!(index(Vec3u::new(0, 1, 1), dim), 8);
assert_eq!(index(Vec3u::new(1, 2, 1), dim), 11);
assert_eq!(index(Vec3u::new(1, 2, 2), dim), 17);
}
// re-borrowing via as_ref preserves both the dim and the contents.
#[test]
fn as_ref() {
let data = [1, 2];
let s = DimSlice::new(Vec3u::new(1, 2, 1), &data[..]);
assert_eq!(s.as_ref(), DimSlice::new(Vec3u::new(1, 2, 1), &data[..]));
}
// Vec3u indexing must agree with the flat layout documented above.
#[test]
fn dim_slice_index() {
// data laid out as two 3x2 z-planes
let data = [
0, 1, 2,
3, 4, 5,
0, 10,20,
30,40,50,
];
let s = DimSlice::new(Vec3u::new(3, 2, 2), &data);
assert_eq!(s[Vec3u::new(0, 0, 0)], 0);
assert_eq!(s[Vec3u::new(1, 0, 0)], 1);
assert_eq!(s[Vec3u::new(1, 1, 0)], 4);
assert_eq!(s[Vec3u::new(1, 1, 1)], 40);
assert_eq!(s[Vec3u::new(2, 1, 1)], 50);
}
// mutation through IndexMut writes through to the backing storage.
#[test]
fn dim_slice_index_mut() {
let mut data = [
0, 1, 2,
3, 4, 5,
0, 10,20,
30,40,50,
];
let mut s = DimSlice::new(Vec3u::new(3, 2, 2), &mut data);
s[Vec3u::new(0, 0, 0)] = 100;
s[Vec3u::new(0, 1, 1)] = 300;
assert_eq!(data, [
100,1, 2,
3, 4, 5,
0, 10, 20,
300,40,50,
]);
}
// into_iter visits elements in flat (storage) order.
#[test]
fn dim_slice_into_iter() {
let data = [1, 2, 3, 4, 5, 6];
let s = DimSlice::new(Vec3u::new(3, 1, 2), &data);
let mut i = s.into_iter();
assert_eq!(*i.next().unwrap(), 1);
assert_eq!(*i.next().unwrap(), 2);
assert_eq!(*i.next().unwrap(), 3);
assert_eq!(*i.next().unwrap(), 4);
assert_eq!(*i.next().unwrap(), 5);
assert_eq!(*i.next().unwrap(), 6);
assert_eq!(i.next(), None);
}
// a mutable into_iter yields &mut items that write back to the storage.
#[test]
fn dim_slice_into_iter_mut() {
let mut data = [1, 2, 3, 4, 5, 6];
let s = DimSlice::new(Vec3u::new(3, 1, 2), &mut data);
let mut i = s.into_iter();
*i.next().unwrap() = 10;
assert_eq!(*i.next().unwrap(), 2);
*i.next().unwrap() += 27;
assert_eq!(*i.next().unwrap(), 4);
*i.next().unwrap() *= 10;
assert_eq!(*i.next().unwrap(), 6);
assert_eq!(i.next(), None);
assert_eq!(data, [10,2,30,4,50,6]);
}
// indices() walks the full x-major order and is fused (stays None at the end).
// note: indices() never touches the item storage, so a 1-element backing is fine.
#[test]
fn dim_slice_indices() {
let s = DimSlice::new(Vec3u::new(4, 3, 2), &[()]);
let mut i = s.indices();
assert_eq!(i.next().unwrap(), Vec3u::new(0, 0, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(1, 0, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(2, 0, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(3, 0, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(0, 1, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(1, 1, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(2, 1, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(3, 1, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(0, 2, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(1, 2, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(2, 2, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(3, 2, 0));
assert_eq!(i.next().unwrap(), Vec3u::new(0, 0, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(1, 0, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(2, 0, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(3, 0, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(0, 1, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(1, 1, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(2, 1, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(3, 1, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(0, 2, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(1, 2, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(2, 2, 1));
assert_eq!(i.next().unwrap(), Vec3u::new(3, 2, 1));
assert_eq!(i.next(), None);
assert_eq!(i.next(), None);
}
// any zero extent -- on any axis, in any combination -- yields an empty iterator.
#[test]
fn dim_slice_indices_zero_dim() {
let s = DimSlice::new(Vec3u::new(4, 3, 0), &[()]);
assert_eq!(s.indices().next(), None);
assert_eq!(s.indices().next(), None);
let s = DimSlice::new(Vec3u::new(4, 0, 2), &[()]);
assert_eq!(s.indices().next(), None);
assert_eq!(s.indices().next(), None);
let s = DimSlice::new(Vec3u::new(0, 3, 2), &[()]);
assert_eq!(s.indices().next(), None);
assert_eq!(s.indices().next(), None);
let s = DimSlice::new(Vec3u::new(3, 0, 0), &[()]);
assert_eq!(s.indices().next(), None);
assert_eq!(s.indices().next(), None);
let s = DimSlice::new(Vec3u::new(0, 1, 0), &[()]);
assert_eq!(s.indices().next(), None);
assert_eq!(s.indices().next(), None);
let s = DimSlice::new(Vec3u::new(0, 0, 2), &[()]);
assert_eq!(s.indices().next(), None);
assert_eq!(s.indices().next(), None);
}
// enumerated() pairs each element with its x-major Vec3u index.
#[test]
fn dim_slice_enumerated() {
let data = [
10, 11,
20, 21,
30, 31,
];
let s = DimSlice::new(Vec3u::new(1, 2, 3), &data);
let mut i = s.enumerated();
assert_eq!(i.next().unwrap(), (Vec3u::new(0, 0, 0), &10));
assert_eq!(i.next().unwrap(), (Vec3u::new(0, 1, 0), &11));
assert_eq!(i.next().unwrap(), (Vec3u::new(0, 0, 1), &20));
assert_eq!(i.next().unwrap(), (Vec3u::new(0, 1, 1), &21));
assert_eq!(i.next().unwrap(), (Vec3u::new(0, 0, 2), &30));
assert_eq!(i.next().unwrap(), (Vec3u::new(0, 1, 2), &31));
assert_eq!(i.next(), None);
}
// mutable enumeration: writes through the yielded &mut land in the storage.
#[test]
fn dim_slice_enumerated_mut() {
let mut data = [
10, 11,
20, 21,
30, 31,
];
let s = DimSlice::new(Vec3u::new(2, 1, 3), &mut data);
let mut i = s.enumerated();
let (idx, v) = i.next().unwrap();
assert_eq!(idx, Vec3u::new(0, 0, 0));
*v = 100;
let (idx, v) = i.next().unwrap();
assert_eq!(idx, Vec3u::new(1, 0, 0));
*v = 110;
i.next().unwrap();
let (idx, v) = i.next().unwrap();
assert_eq!(idx, Vec3u::new(1, 0, 1));
*v = 210;
assert_eq!(data, [100, 110, 20, 210, 30, 31]);
}
}

View File

@@ -0,0 +1,7 @@
// dimension-indexed views over flat storage; only the public API is re-exported.
mod dim_slice;
mod offset_dim_slice;
// DimSlice: a 3d-addressable view over flat storage; DimIter: its index iterator.
pub use dim_slice::{
DimSlice,
DimIter,
};
// like DimSlice, but indexed relative to a fixed origin offset.
pub use offset_dim_slice::OffsetDimSlice;

View File

@@ -0,0 +1,223 @@
use core::convert::{AsMut, AsRef};
use core::iter::Zip;
use core::ops::{Index, IndexMut};
use crate::dim::{DimIter, DimSlice};
use crate::vec::Vec3u;
/// a DimSlice whose indices are shifted by a constant `offset`:
/// indexing with absolute coordinate `idx` reads the inner slice at `idx - offset`.
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Clone, Default, PartialEq)]
pub struct OffsetDimSlice<T> {
// origin of this view, in absolute coordinates
offset: Vec3u,
// the underlying zero-based view over the items
inner: DimSlice<T>,
}
impl<T> OffsetDimSlice<T> {
/// wrap `items` as a `dim`-shaped view whose first element lives at `offset`.
pub fn new(offset: Vec3u, dim: Vec3u, items: T) -> Self {
Self { offset, inner: DimSlice::new(dim, items) }
}
/// extent of the view (NOT including the offset).
pub fn dim(&self) -> Vec3u {
self.inner.dim()
}
/// absolute coordinate of the first element.
pub fn offset(&self) -> Vec3u {
self.offset
}
/// unwrap back to the raw item storage, discarding offset and dim.
pub fn into_inner(self) -> T {
self.inner.into_inner()
}
/// iterate the absolute coordinates of every cell, x-major.
pub fn indices(&self) -> OffsetDimIter {
OffsetDimIter::new(self.offset, self.inner.indices())
}
/// re-borrow the slice with a different lifetime.
pub fn as_ref<R: ?Sized>(&self) -> OffsetDimSlice<&R>
where T: AsRef<R>
{
OffsetDimSlice { offset: self.offset, inner: self.inner.as_ref()}
}
/// re-borrow the slice with a different lifetime.
pub fn as_mut<R: ?Sized>(&mut self) -> OffsetDimSlice<&mut R>
where T: AsMut<R>
{
OffsetDimSlice { offset: self.offset, inner: self.inner.as_mut()}
}
}
#[cfg(feature = "iter")]
impl<T: IntoIterator> OffsetDimSlice<T> {
/// iterate `(absolute index, item)` pairs in x-major order.
pub fn enumerated(self) -> Zip<OffsetDimIter, T::IntoIter> {
self.indices().zip(self.into_iter())
}
}
impl<T> Index<Vec3u> for OffsetDimSlice<T>
where
DimSlice<T>: Index<Vec3u>
{
type Output=<DimSlice<T> as Index<Vec3u>>::Output;
/// index by absolute coordinate; translated to the inner zero-based frame.
// NOTE(review): `idx - self.offset` presumably underflows (and panics in a
// debug build) when `idx < offset` on any axis -- confirm Vec3u's Sub semantics.
fn index(&self, idx: Vec3u) -> &Self::Output {
&self.inner[idx - self.offset]
}
}
impl<T> IndexMut<Vec3u> for OffsetDimSlice<T>
where
DimSlice<T>: IndexMut<Vec3u>
{
/// mutable indexing by absolute coordinate (same translation as `index`).
fn index_mut(&mut self, idx: Vec3u) -> &mut Self::Output {
&mut self.inner[idx - self.offset]
}
}
impl<T: IntoIterator> IntoIterator for OffsetDimSlice<T> {
type Item = T::Item;
type IntoIter = T::IntoIter;
/// iterate the raw items in flat storage order; the offset is irrelevant here.
fn into_iter(self) -> Self::IntoIter {
self.inner.into_iter()
}
}
/// iterator over the absolute coordinates of an `OffsetDimSlice`:
/// each zero-based index from the inner `DimIter`, shifted by `offset`.
pub struct OffsetDimIter {
offset: Vec3u,
inner: DimIter,
}
impl OffsetDimIter {
// private: only constructed via OffsetDimSlice::indices()
fn new(offset: Vec3u, inner: DimIter) -> Self {
Self { offset, inner }
}
}
#[cfg(feature = "iter")]
impl Iterator for OffsetDimIter {
    type Item = Vec3u;
    /// yield the next absolute coordinate: the inner zero-based index
    /// shifted by this view's origin offset.
    fn next(&mut self) -> Option<Self::Item> {
        let rel = self.inner.next()?;
        Some(rel + self.offset)
    }
}
#[cfg(test)]
mod test {
use super::*;
// indexing is absolute: coordinate (1,2,3) with offset (1,2,3) hits inner (0,0,0).
#[test]
fn offset_dim_slice_index() {
let data = [
0, 1, 2,
3, 4, 5,
0, 10,20,
30,40,50,
];
let s = OffsetDimSlice::new(Vec3u::new(1, 2, 3), Vec3u::new(3, 2, 2), &data);
assert_eq!(s[Vec3u::new(1, 2, 3)], 0);
assert_eq!(s[Vec3u::new(2, 2, 3)], 1);
assert_eq!(s[Vec3u::new(2, 3, 3)], 4);
assert_eq!(s[Vec3u::new(2, 3, 4)], 40);
assert_eq!(s[Vec3u::new(3, 3, 4)], 50);
}
// offset-aware mutation writes through to the backing storage.
#[test]
fn offset_dim_slice_index_mut() {
let mut data = [
0, 1, 2,
3, 4, 5,
0, 10,20,
30,40,50,
];
let mut s = OffsetDimSlice::new(Vec3u::new(1, 2, 3), Vec3u::new(3, 2, 2), &mut data);
s[Vec3u::new(1, 2, 3)] = 100;
s[Vec3u::new(1, 3, 4)] = 300;
assert_eq!(data, [
100,1, 2,
3, 4, 5,
0, 10, 20,
300,40,50,
]);
}
// into_iter is offset-agnostic: plain flat storage order.
#[test]
fn offset_dim_slice_into_iter() {
let data = [1, 2, 3, 4];
let s = OffsetDimSlice::new(Vec3u::new(1, 2, 3), Vec3u::new(2, 1, 2), &data);
let mut i = s.into_iter();
assert_eq!(*i.next().unwrap(), 1);
assert_eq!(*i.next().unwrap(), 2);
assert_eq!(*i.next().unwrap(), 3);
assert_eq!(*i.next().unwrap(), 4);
assert_eq!(i.next(), None);
}
// mutable into_iter yields &mut items that write back to the storage.
#[test]
fn offset_dim_slice_into_iter_mut() {
let mut data = [1, 2, 3, 4];
let s = OffsetDimSlice::new(Vec3u::new(1, 2, 3), Vec3u::new(2, 1, 2), &mut data);
let mut i = s.into_iter();
*i.next().unwrap() = 10;
assert_eq!(*i.next().unwrap(), 2);
*i.next().unwrap() += 27;
assert_eq!(*i.next().unwrap(), 4);
assert_eq!(i.next(), None);
assert_eq!(data, [10,2,30,4]);
}
// indices() yields absolute coordinates, starting at the offset, x-major.
#[test]
fn offset_dim_slice_indices() {
let s = OffsetDimSlice::new(Vec3u::new(10, 20, 30), Vec3u::new(2, 1, 2), &[()]);
let mut i = s.indices();
assert_eq!(i.next().unwrap(), Vec3u::new(10, 20, 30));
assert_eq!(i.next().unwrap(), Vec3u::new(11, 20, 30));
assert_eq!(i.next().unwrap(), Vec3u::new(10, 20, 31));
assert_eq!(i.next().unwrap(), Vec3u::new(11, 20, 31));
assert_eq!(i.next(), None);
}
// enumerated() pairs each element with its absolute coordinate.
#[test]
fn offset_dim_slice_enumerated() {
let data = [
10, 11,
20, 21,
30, 31,
];
let s = OffsetDimSlice::new(Vec3u::new(10, 20, 30), Vec3u::new(1, 2, 2), &data);
let mut i = s.enumerated();
assert_eq!(i.next().unwrap(), (Vec3u::new(10, 20, 30), &10));
assert_eq!(i.next().unwrap(), (Vec3u::new(10, 21, 30), &11));
assert_eq!(i.next().unwrap(), (Vec3u::new(10, 20, 31), &20));
assert_eq!(i.next().unwrap(), (Vec3u::new(10, 21, 31), &21));
assert_eq!(i.next(), None);
}
// mutable enumeration: writes through the yielded &mut land in the storage.
#[test]
fn offset_dim_slice_enumerated_mut() {
let mut data = [
10, 11,
20, 21,
];
let s = OffsetDimSlice::new(Vec3u::new(10, 20, 30), Vec3u::new(2, 1, 2), &mut data);
let mut i = s.enumerated();
let (idx, v) = i.next().unwrap();
assert_eq!(idx, Vec3u::new(10, 20, 30));
*v = 100;
i.next().unwrap();
i.next().unwrap();
let (idx, v) = i.next().unwrap();
assert_eq!(idx, Vec3u::new(11, 20, 31));
*v = 210;
assert_eq!(i.next(), None);
assert_eq!(data, [100, 11, 20, 210]);
}
}

11
crates/cross/src/lib.rs Normal file
View File

@@ -0,0 +1,11 @@
#![feature(core_intrinsics)]
// no_std unless the "std" feature is enabled -- this crate is shared with GPU targets.
#![cfg_attr(not(feature = "std"), no_std)]
pub mod compound;
pub mod dim;
pub mod mat;
pub mod real;
pub mod step;
pub mod vec;
// private because `vec` re-exports the important vecu constructs
mod vecu;

View File

@@ -0,0 +1,264 @@
use crate::compound::enumerated::{DiscriminantCodable, Enum, EnumRequirements, Visitor};
use crate::compound::list::{Indexable, List2, List3};
use crate::compound::peano::{Peano, P0, P1, P2, P3};
use crate::real::Real;
use crate::vec::Vec3;
use crate::mat::{
AnisomorphicConductor,
Ferroxcube3R1MH,
IsomorphicConductor,
Material,
MBPgram,
MHPgram,
Vacuum,
};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// a material which can take on 1 of N types.
/// it's assumed that the first type in the material list is capable of storing the discriminant
/// (i.e. that it implements DiscriminantCodable).
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct DiscrMat<Mats>(Enum<(), Mats>);
// convenience aliases for the 2- and 3-variant cases used below
pub type DiscrMat2<M0, M1> = DiscrMat<List2<M0, M1>>;
pub type DiscrMat3<M0, M1, M2> = DiscrMat<List3<M0, M1, M2>>;
impl<Mats: Default> DiscrMat<Mats> {
/// construct a DiscrMat holding variant `P` (a type-level index) with value `m`.
fn new<P: Peano>(m: Mats::Element) -> Self
where
Mats: Indexable<P>,
Enum<(), Mats>: EnumRequirements,
{
// start from the all-default enum, then select + populate the P'th variant.
let mut me = Self::default();
me.0.set::<P>(m);
me
}
}
// these visitors adapt single Material/Into methods to the Enum::dispatch
// machinery, which calls `Visitor::call` on whichever variant is active.
/// invokes Material::conductivity on any Material
struct ConductivityDispatcher;
impl<P: Peano, R: Real, T: Material<R>> Visitor<P, T, Vec3<R>> for ConductivityDispatcher {
fn call(self, v: T) -> Vec3<R> {
v.conductivity()
}
}
/// invokes Material::move_b_vec on any Material
struct MoveBVecDispatcher<R> {
// arguments forwarded to move_b_vec: current M vector and target B vector
m: Vec3<R>,
target_b: Vec3<R>,
}
impl<R: Real, P: Peano, T: Material<R>> Visitor<P, T, Vec3<R>> for MoveBVecDispatcher<R> {
fn call(self, v: T) -> Vec3<R> {
v.move_b_vec(self.m, self.target_b)
}
}
/// invokes Into<T>::into on any variant
struct IntoDispatcher;
impl<P: Peano, T: Into<I>, I> Visitor<P, T, I> for IntoDispatcher {
fn call(self, v: T) -> I {
v.into()
}
}
// convert the active variant of a 2-material enum into the fully-generic material.
// M0 must be DiscriminantCodable so the enum can recover which variant is active.
impl<R, M0, M1> Into<FullyGenericMaterial<R>> for DiscrMat2<M0, M1>
where
M0: DiscriminantCodable<P2> + Into<FullyGenericMaterial<R>> + Copy,
M1: Into<FullyGenericMaterial<R>> + Copy,
{
fn into(self) -> FullyGenericMaterial<R> {
self.0.dispatch(IntoDispatcher)
}
}
// Material for the 2-variant enum: forward each operation to the active variant.
impl<R: Real, M0, M1> Material<R> for DiscrMat2<M0, M1>
where
M0: DiscriminantCodable<P2> + Material<R> + Copy,
M1: Material<R> + Copy,
{
fn conductivity(&self) -> Vec3<R> {
self.0.dispatch(ConductivityDispatcher)
}
fn move_b_vec(&self, m: Vec3<R>, target_b: Vec3<R>) -> Vec3<R> {
self.0.dispatch(MoveBVecDispatcher { m, target_b })
}
}
// same as above, for the 3-variant enum (discriminant space P3).
impl<R, M0, M1, M2> Into<FullyGenericMaterial<R>> for DiscrMat3<M0, M1, M2>
where
M0: DiscriminantCodable<P3> + Into<FullyGenericMaterial<R>> + Copy,
M1: Into<FullyGenericMaterial<R>> + Copy,
M2: Into<FullyGenericMaterial<R>> + Copy,
{
fn into(self) -> FullyGenericMaterial<R> {
self.0.dispatch(IntoDispatcher)
}
}
impl<R: Real, M0, M1, M2> Material<R> for DiscrMat3<M0, M1, M2>
where
M0: DiscriminantCodable<P3> + Material<R> + Copy,
M1: Material<R> + Copy,
M2: Material<R> + Copy,
{
fn conductivity(&self) -> Vec3<R> {
self.0.dispatch(ConductivityDispatcher)
}
fn move_b_vec(&self, m: Vec3<R>, target_b: Vec3<R>) -> Vec3<R> {
self.0.dispatch(MoveBVecDispatcher { m, target_b })
}
}
/// represents a Material which is either an isomorphic conductor, or some other Material `M1`
pub type IsoConductorOr<R, M1> = DiscrMat2<IsomorphicConductor<R>, M1>;
// XXX: can't do this for generic M, because that creates duplicate `From` impls for the
// IsomorphicConductor itself
impl<R: Real> From<Ferroxcube3R1MH> for IsoConductorOr<R, Ferroxcube3R1MH> {
fn from(mat: Ferroxcube3R1MH) -> Self {
// P1 = second variant: the "other material" slot
IsoConductorOr::new::<P1>(mat)
}
}
impl<R: Real> From<IsomorphicConductor<R>> for IsoConductorOr<R, Ferroxcube3R1MH> {
fn from(mat: IsomorphicConductor<R>) -> Self {
// P0 = first variant: the conductor slot (also stores the discriminant)
IsoConductorOr::new::<P0>(mat)
}
}
/// muxes operations to either the conductor or the magnetic material
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct DualMaterial<C, M> {
conductor: C,
magnetic: M,
}
impl<C, M> DualMaterial<C, M> {
/// pair a conductive component with a magnetic component.
pub fn new(conductor: C, magnetic: M) -> Self {
Self { conductor, magnetic }
}
}
// electrical queries go to the conductor half; magnetic updates to the magnetic half.
impl<R: Real, C: Material<R>, M: Material<R>> Material<R> for DualMaterial<C, M> {
fn conductivity(&self) -> Vec3<R> {
self.conductor.conductivity()
}
fn move_b_vec(&self, m: Vec3<R>, target_b: Vec3<R>) -> Vec3<R> {
self.magnetic.move_b_vec(m, target_b)
}
}
/// Material which can encode any of the well-known magnetic materials (or Vacuum)
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, PartialEq)]
pub struct GenericMagnetic<R>(DiscrMat3<MBPgram<R>, MHPgram<R>, Vacuum>);
impl<R: Real> Default for GenericMagnetic<R> {
fn default() -> Self {
// N.B.: the default is not the first variant.
// we order the variants specifically so that the first one can store the discriminant, but
// we NEED Vacuum to be the default.
Vacuum.into()
}
}
// forward Material operations to whichever magnetic variant is active.
impl<R: Real> Material<R> for GenericMagnetic<R> {
fn conductivity(&self) -> Vec3<R> {
self.0.conductivity()
}
fn move_b_vec(&self, m: Vec3<R>, target_b: Vec3<R>) -> Vec3<R> {
self.0.move_b_vec(m, target_b)
}
}
// the variant order (P0..P2) here must match the DiscrMat3 parameter order above.
impl<R: Real> From<MBPgram<R>> for GenericMagnetic<R> {
fn from(mat: MBPgram<R>) -> Self {
GenericMagnetic(DiscrMat3::new::<P0>(mat))
}
}
impl<R: Real> From<MHPgram<R>> for GenericMagnetic<R> {
fn from(mat: MHPgram<R>) -> Self {
GenericMagnetic(DiscrMat3::new::<P1>(mat))
}
}
impl<R: Real> From<Vacuum> for GenericMagnetic<R> {
fn from(mat: Vacuum) -> Self {
GenericMagnetic(DiscrMat3::new::<P2>(mat))
}
}
/// "Fully Generic" in that one can set both the conductivity,
/// and set any of the well-known magnetic materials, simultaneously.
pub type FullyGenericMaterial<R> = DualMaterial<
AnisomorphicConductor<R>,
GenericMagnetic<R>,
>;
// lifting a pure conductor: magnetic half defaults to Vacuum.
impl<R: Real> From<AnisomorphicConductor<R>> for FullyGenericMaterial<R> {
fn from(mat: AnisomorphicConductor<R>) -> Self {
Self::new(mat, Default::default())
}
}
// lifting a pure magnetic material: conductor half defaults to zero conductivity.
impl<R: Real> From<MBPgram<R>> for FullyGenericMaterial<R> {
fn from(mat: MBPgram<R>) -> Self {
Self::new(Default::default(), mat.into())
}
}
impl<R: Real> From<MHPgram<R>> for FullyGenericMaterial<R> {
fn from(mat: MHPgram<R>) -> Self {
Self::new(Default::default(), mat.into())
}
}
impl<R: Real> From<Vacuum> for FullyGenericMaterial<R> {
fn from(mat: Vacuum) -> Self {
Self::new(Default::default(), mat.into())
}
}
// indirect conversions: widen to the canonical representation first, then lift.
impl<R: Real> From<IsomorphicConductor<R>> for FullyGenericMaterial<R> {
fn from(mat: IsomorphicConductor<R>) -> Self {
let mat: AnisomorphicConductor<R> = mat.into();
mat.into()
}
}
impl<R: Real> From<Ferroxcube3R1MH> for FullyGenericMaterial<R> {
fn from(mat: Ferroxcube3R1MH) -> Self {
let mat: MHPgram<R> = mat.into();
mat.into()
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::mat::AnisomorphicConductor;
use crate::real::R32;
// the conductor-or-ferrite enum routes conductivity to the active variant:
// a conductor reports its own value; the ferrite (non-conductive) reports zero.
#[test]
fn iso_conductor_or_3r1() {
let c: IsoConductorOr<f32, Ferroxcube3R1MH> = IsomorphicConductor::new(22.0f32).into();
assert!(c.conductivity() == Vec3::uniform(22.0f32));
let c: IsoConductorOr<f32, Ferroxcube3R1MH> = Ferroxcube3R1MH::new().into();
assert!(c.conductivity() == Vec3::zero());
}
// explicit variant selection via the type-level index (P0/P1) also works.
#[test]
fn iso_conductor_or_aniso() {
type I = IsoConductorOr<R32, AnisomorphicConductor<R32>>;
let c = I::new::<P0>(IsomorphicConductor::new(22f32.cast()));
assert!(c.conductivity() == Vec3::uniform(22.0).cast());
let c = I::new::<P1>(AnisomorphicConductor::new(
Vec3::new(2.0, 3.0, 4.0).cast()
));
assert!(c.conductivity() == Vec3::new(2.0, 3.0, 4.0).cast());
}
}

View File

@@ -0,0 +1,105 @@
use crate::compound::enumerated::{Discr, DiscriminantCodable};
use crate::compound::peano::Peano;
use crate::mat::Material;
use crate::real::Real;
use crate::vec::Vec3;
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// a purely conductive (non-magnetic) material, generic over its conductivity storage.
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Conductor<T>(T);
// per-axis conductivity
pub type AnisomorphicConductor<R> = Conductor<Vec3<R>>;
// one scalar conductivity applied to all three axes (stored as a 1-tuple)
pub type IsomorphicConductor<R> = Conductor<(R,)>;
impl<R> IsomorphicConductor<R> {
/// construct from the single scalar conductivity.
pub fn new(c: R) -> Self {
Self((c,))
}
}
impl<V: Clone> IsomorphicConductor<V> {
/// the scalar conductivity shared by all axes.
pub fn iso_conductivity(&self) -> V {
self.0.0.clone()
}
}
impl<R> AnisomorphicConductor<R> {
/// construct from a per-axis conductivity vector.
pub fn new(c: Vec3<R>) -> Self {
Self(c)
}
}
// widening conversion: replicate the scalar across all three axes.
impl<R: Real> Into<AnisomorphicConductor<R>> for IsomorphicConductor<R> {
fn into(self) -> AnisomorphicConductor<R> {
AnisomorphicConductor::new(Vec3::uniform(self.iso_conductivity()))
}
}
// conductors only override conductivity; move_b_vec keeps the trait default.
impl<R: Real> Material<R> for AnisomorphicConductor<R> {
fn conductivity(&self) -> Vec3<R> {
self.0
}
}
impl<R: Real> Material<R> for IsomorphicConductor<R> {
fn conductivity(&self) -> Vec3<R> {
Vec3::uniform(self.iso_conductivity())
}
}
// discriminant encoding trick: physical conductivities are nonnegative, so the
// negative range is free to carry an enum tag. discriminant d is stored as
// conductivity -d; a nonnegative conductivity decodes as discriminant 0
// (i.e. "this variant is the active one").
impl<R: Real, P: Peano> DiscriminantCodable<P> for AnisomorphicConductor<R> {
fn decode_discr(&self) -> Discr<P> {
// only the x component carries the tag (see encode_discr's Vec3::new_x)
let cond = self.conductivity().x();
let d = if cond < R::zero() {
(-cond.to_f32()) as u32
} else {
0
};
Discr::new(d)
}
fn encode_discr(d: Discr<P>) -> Self {
Self::new(Vec3::new_x(
R::from_primitive(-(d.value() as i32))
))
}
}
// same encoding for the isomorphic variant, using its single scalar.
impl<R: Real, P: Peano> DiscriminantCodable<P> for IsomorphicConductor<R> {
fn decode_discr(&self) -> Discr<P> {
let cond = self.iso_conductivity();
let d = if cond < R::zero() {
(-cond.to_f32()) as u32
} else {
0
};
Discr::new(d)
}
fn encode_discr(d: Discr<P>) -> Self {
Self::new(R::from_primitive(-(d.value() as i32)))
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::compound::peano::P6;
// round-trip the discriminant encoding, and check that ordinary
// (nonnegative) conductivities always decode as discriminant 0.
#[test]
fn iso_conductor_discr() {
type T = IsomorphicConductor<f32>;
let c = <T as DiscriminantCodable<P6>>::encode_discr(Discr::new(5));
assert_eq!(DiscriminantCodable::<P6>::decode_discr(&c).value(), 5);
let c = <T as DiscriminantCodable<P6>>::encode_discr(Discr::new(0));
assert_eq!(DiscriminantCodable::<P6>::decode_discr(&c).value(), 0);
let c = T::new(5.0);
assert_eq!(DiscriminantCodable::<P6>::decode_discr(&c).value(), 0);
let c = T::new(0.0);
assert_eq!(DiscriminantCodable::<P6>::decode_discr(&c).value(), 0);
}
}

View File

@@ -1,3 +1,5 @@
use crate::compound::enumerated::{Discr, DiscriminantCodable};
use crate::compound::peano::Peano;
use crate::mat::Material; use crate::mat::Material;
use crate::real::Real; use crate::real::Real;
use crate::vec::Vec3; use crate::vec::Vec3;
@@ -67,6 +69,24 @@ impl<R: Real> Material<R> for MBPgram<R> {
} }
} }
// same discriminant trick as the conductors: a physically-meaningful max_m is
// nonnegative, so a negative max_m carries the enum tag instead.
impl<R: Real, P: Peano> DiscriminantCodable<P> for MBPgram<R> {
fn decode_discr(&self) -> Discr<P> {
let max_m = self.max_m;
let d = if max_m < R::zero() {
(-max_m.to_f32()) as u32
} else {
0
};
Discr::new(d)
}
fn encode_discr(d: Discr<P>) -> Self {
Self {
max_m: R::from_primitive(-(d.value() as i32)),
..Default::default()
}
}
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {

View File

@@ -0,0 +1,35 @@
use crate::real::Real;
use crate::vec::Vec3;
mod compound;
mod conductor;
mod mb_pgram;
mod mh_pgram;
pub use compound::{FullyGenericMaterial, IsoConductorOr};
pub use conductor::{AnisomorphicConductor, IsomorphicConductor};
pub use mb_pgram::MBPgram;
pub use mh_pgram::{Ferroxcube3R1MH, MHPgram};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
/// the interface every simulated material implements.
/// both methods default to "inert" behavior, so implementors override only
/// the aspect (electrical or magnetic) they care about.
pub trait Material<R: Real>: Sized {
/// per-axis electrical conductivity; defaults to zero (a perfect insulator).
fn conductivity(&self) -> Vec3<R> {
Default::default()
}
/// returns the new M vector for this material
fn move_b_vec(&self, m: Vec3<R>, _target_b: Vec3<R>) -> Vec3<R> {
// XXX could return either 0, or `m`. they should be the same, but one might be more
// optimizable than the other (untested).
m
}
}
/// Default, non-interesting Material.
/// has 0 conductivity (electrical and magnetic)
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
pub struct Vacuum;
// Vacuum relies entirely on the trait defaults.
impl<R: Real> Material<R> for Vacuum {}

View File

@@ -18,13 +18,27 @@ pub trait ToFloat {
fn to_f64(&self) -> f64 { fn to_f64(&self) -> f64 {
self.to_f32() as _ self.to_f32() as _
} }
fn to_r32(&self) -> R32 {
R32::new(self.to_f32())
}
fn to_r64(&self) -> R64 {
R64::new(self.to_f64())
}
} }
#[cfg(feature = "fmt")]
pub trait RealFeatures: fmt::LowerExp + fmt::Display + fmt::Debug {} #[cfg(all(not(feature = "fmt"), not(feature = "serde")))]
#[cfg(not(feature = "fmt"))]
pub trait RealFeatures {} pub trait RealFeatures {}
#[cfg(all(not(feature = "fmt"), feature = "serde"))]
pub trait RealFeatures: Serialize + for<'a> Deserialize<'a> {}
#[cfg(all(feature = "fmt", not(feature = "serde")))]
pub trait RealFeatures: fmt::LowerExp + fmt::Display + fmt::Debug {}
#[cfg(all(feature = "fmt", feature = "serde"))]
pub trait RealFeatures: fmt::LowerExp + fmt::Display + fmt::Debug + Serialize + for<'a> Deserialize<'a> {}
/// This exists to allow configuration over # of bits (f32 v.s. f64) as well as /// This exists to allow configuration over # of bits (f32 v.s. f64) as well as
/// constraints. /// constraints.
pub trait Real: pub trait Real:
@@ -78,6 +92,12 @@ pub trait Real:
fn powf(self, p: Self) -> Self; fn powf(self, p: Self) -> Self;
fn sqrt(self) -> Self; fn sqrt(self) -> Self;
fn sin_cos(self) -> (Self, Self); fn sin_cos(self) -> (Self, Self);
fn sin(self) -> Self {
self.sin_cos().0
}
fn cos(self) -> Self {
self.sin_cos().1
}
fn min_max_or_undefined(self, other: Self) -> (Self, Self) { fn min_max_or_undefined(self, other: Self) -> (Self, Self) {
match self.partial_cmp(&other) { match self.partial_cmp(&other) {
@@ -99,6 +119,10 @@ pub trait Real:
self == Self::zero() self == Self::zero()
} }
fn inv(self) -> Self {
Self::one() / self
}
fn zero() -> Self; fn zero() -> Self;
fn one() -> Self; fn one() -> Self;
fn two() -> Self; fn two() -> Self;
@@ -109,12 +133,15 @@ pub trait Real:
fn half() -> Self; fn half() -> Self;
fn pi() -> Self; fn pi() -> Self;
fn two_pi() -> Self; fn two_pi() -> Self;
fn ln2() -> Self;
/// Speed of light in a vacuum; m/s. /// Speed of light in a vacuum; m/s.
/// Also equal to 1/sqrt(epsilon_0 mu_0) /// Also equal to 1/sqrt(epsilon_0 mu_0)
fn c() -> Self; fn c() -> Self;
fn c_inv() -> Self;
/// Vaccum Permittivity /// Vaccum Permittivity
fn eps0() -> Self; fn eps0() -> Self;
fn eps0_inv() -> Self;
fn twice_eps0() -> Self; fn twice_eps0() -> Self;
/// Vacuum Permeability /// Vacuum Permeability
fn mu0() -> Self; fn mu0() -> Self;
@@ -154,18 +181,27 @@ macro_rules! decl_consts {
fn two_pi() -> Self { fn two_pi() -> Self {
$wrap(6.283185307179586) $wrap(6.283185307179586)
} }
fn ln2() -> Self {
$wrap(0.6931471805599453)
}
/// Speed of light in a vacuum; m/s. /// Speed of light in a vacuum; m/s.
/// Also equal to 1/sqrt(epsilon_0 mu_0) /// Also equal to 1/sqrt(epsilon_0 mu_0)
fn c() -> Self { fn c() -> Self {
$wrap(299792458.0) $wrap(299792458.0)
} }
fn c_inv() -> Self {
$wrap(3.3356409519815204e-09)
}
/// Vaccum Permittivity /// Vaccum Permittivity
fn eps0() -> Self { fn eps0() -> Self {
$wrap(8.854187812813e-12) // F⋅m1 $wrap(8.854187812813e-12) // F/m
}
fn eps0_inv() -> Self {
$wrap(112940906737.1361) // m/F
} }
fn twice_eps0() -> Self { fn twice_eps0() -> Self {
$wrap(1.7708375625626e-11) // F⋅m1 $wrap(1.7708375625626e-11) // F/m
} }
/// Vacuum Permeability /// Vacuum Permeability
fn mu0() -> Self { fn mu0() -> Self {
@@ -292,7 +328,7 @@ impl<T: Real> Finite<T> {
} }
#[cfg(not(feature = "fmt"))] #[cfg(not(feature = "fmt"))]
fn handle_non_finite(_inner: T) -> ! { fn handle_non_finite(_inner: T) -> ! {
panic!("Finite<T> is not finite"); panic!(); // expected a finite real
} }
} }
@@ -371,6 +407,16 @@ impl<T: Real> Div for Finite<T> {
} }
} }
#[cfg(feature = "iter")]
// NB: path is `core::iter::Sum` (not `std::`) so this also compiles when the
// crate is built `no_std` (see `#![cfg_attr(not(feature = "std"), no_std)]`);
// `std::iter::Sum` is just a re-export of the same trait.
impl<T: Real> core::iter::Sum for Finite<T> {
    /// sum an iterator of finite reals, starting from zero.
    /// each intermediate `+` re-checks finiteness via `Finite`'s Add impl.
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>
    {
        iter.fold(Self::zero(), |a, b| a + b)
    }
}
impl<T: ToFloat> ToFloat for Finite<T> { impl<T: ToFloat> ToFloat for Finite<T> {
fn to_f32(&self) -> f32 { fn to_f32(&self) -> f32 {
@@ -384,7 +430,6 @@ impl<T: ToFloat> ToFloat for Finite<T> {
impl<T: RealFeatures> RealFeatures for Finite<T> {} impl<T: RealFeatures> RealFeatures for Finite<T> {}
impl<T: Real> Real for Finite<T> { impl<T: Real> Real for Finite<T> {
decl_consts!(Self::from_primitive);
fn from_primitive<P: ToFloat>(p: P) -> Self { fn from_primitive<P: ToFloat>(p: P) -> Self {
Self::new(T::from_primitive(p)) Self::new(T::from_primitive(p))
} }
@@ -422,6 +467,62 @@ impl<T: Real> Real for Finite<T> {
let (s, c) = self.0.sin_cos(); let (s, c) = self.0.sin_cos();
(Self::new(s), Self::new(c)) (Self::new(s), Self::new(c))
} }
// we would ideally use `decl_consts` here, but that produces f64 -> f32 casts for R32 code.
fn zero() -> Self {
Self::from_primitive(T::zero())
}
fn one() -> Self {
Self::from_primitive(T::one())
}
fn two() -> Self {
Self::from_primitive(T::two())
}
fn three() -> Self {
Self::from_primitive(T::three())
}
fn ten() -> Self {
Self::from_primitive(T::ten())
}
fn tenth() -> Self {
Self::from_primitive(T::tenth())
}
fn third() -> Self {
Self::from_primitive(T::third())
}
fn half() -> Self {
Self::from_primitive(T::half())
}
fn pi() -> Self {
Self::from_primitive(T::pi())
}
fn two_pi() -> Self {
Self::from_primitive(T::two_pi())
}
fn ln2() -> Self {
Self::from_primitive(T::ln2())
}
fn c() -> Self {
Self::from_primitive(T::c())
}
fn c_inv() -> Self {
Self::from_primitive(T::c_inv())
}
fn eps0() -> Self {
Self::from_primitive(T::eps0())
}
fn eps0_inv() -> Self {
Self::from_primitive(T::eps0_inv())
}
fn twice_eps0() -> Self {
Self::from_primitive(T::twice_eps0())
}
fn mu0() -> Self {
Self::from_primitive(T::mu0())
}
fn mu0_inv() -> Self {
Self::from_primitive(T::mu0_inv())
}
} }
impl ToFloat for i32 { impl ToFloat for i32 {

View File

@@ -0,0 +1,526 @@
use core::ops::{Index, IndexMut};
use crate::dim::DimSlice;
use crate::mat::Material;
use crate::real::Real;
use crate::vec::{Vec3, Vec3u};
mod support;
pub use support::{
SimMeta,
VolumeSampleNeg,
VolumeSamplePos,
};
// TODO: make the fields private and hide behind a constructor?
/// everything needed to advance the E field of one cell by one time step.
pub struct StepEContext<'a, R, M> {
// 1/feature_size: converts finite differences into spatial derivatives
pub inv_feature_size: R,
pub time_step: R,
// externally-injected E field ("stimulus") added after the update
pub stim_e: Vec3<R>,
pub mat: &'a M,
/// Input field sampled near this location
pub in_h: VolumeSampleNeg<R>,
// this cell's E field from the previous step
pub in_e: Vec3<R>,
}
impl<'a, R: Real, M: Material<R>> StepEContext<'a, R, M> {
/// given the simulation resoures (material, stimulus, fields),
/// advance the `e` field at the provided index.
/// this is a good toplevel function to use if you don't care about any of the details.
pub fn step_flat_view<RM, RF, WF>(
meta: SimMeta<R>,
mat: &RM,
stim_e: &RF,
e: &mut WF,
h: &RF,
idx: Vec3u,
)
where
RM: Index<usize, Output=M> + ?Sized,
RF: Index<usize, Output=Vec3<R>> + ?Sized,
WF: Index<usize, Output=Vec3<R>> + IndexMut<usize> + ?Sized,
{
// wrap the flat storage as 3d-indexable matrices
let dim = meta.dim();
let stim_e_matrix = DimSlice::new(dim, stim_e);
let mat_matrix = DimSlice::new(dim, mat);
let mut e_matrix = DimSlice::new(dim, e);
let h_matrix = DimSlice::new(dim, h);
// gather this cell's inputs (plus the H samples neighboring it)
let stim_e = stim_e_matrix[idx];
let mat = &mat_matrix[idx];
let in_e = e_matrix[idx];
let in_h = VolumeSampleNeg::from_indexable(&h_matrix, idx);
let update_state = StepEContext {
inv_feature_size: meta.inv_feature_size(),
time_step: meta.time_step(),
stim_e,
mat,
in_h,
in_e,
};
// compute and write back the advanced E field for this one cell
let new_e = update_state.step_e();
e_matrix[idx] = new_e;
}
/// return the e field for this cell at one time step from where it was previously evaluated.
pub fn step_e(self) -> Vec3<R> {
// ```tex
// Ampere's circuital law with Maxwell's addition, in SI units ("macroscopic version"):
// $\nabla x H = J_f + dD/dt$ (1)
// where $J_f$ = current density = $\sigma E$, $\sigma$ being a material parameter ("conductivity")
// note that $D = \epsilon_0 E + P$, but we don't simulate any material where $P \ne 0$.
//
// substitute $D = \epsilon_0 E$ into (1):
// $\nabla x H = J_f + \epsilon_0 dE/dt$
// expand with $J_f = \sigma E$:
// $\nabla x H = \sigma E + \epsilon_0 dE/dt$
// rearrange:
// $dE/dt = 1/\epsilon_0 (\nabla x H - \sigma E)$ (2)
//
// let $E_p$ be $E$ at $T - \Delta\!t$ and $E_n$ be $E$ at $T+\Delta\!t$.
// apply these substitutions into a linear expansion of (2):
// $(E_n - E_p)/(2\Delta\!t) = 1/\epsilon_0 (\nabla x H - \sigma (E_n + E_p)/2)$
// normalize:
// $E_n - E_p = 2\Delta\!t/\epsilon_0 (\nabla x H - \sigma (E_n + E_p)/2)$
// expand:
// $E_n - E_p = 2\Delta\!t/\epsilon_0 \nabla x H - \sigma \Delta\!t/\epsilon_0 (E_n + E_p)$
// replace $E_n = E_p + \Delta\!E$
// $\Delta\!E = 2\Delta\!t/\epsilon_0 \nabla x H - \sigma \Delta\!t/\epsilon_0 (2E_p + \Delta\!E)$
// rearrange:
// $\Delta\!E (1 + \sigma\Delta\!t/\epsilon_0) = 2\Delta\!t/\epsilon_0 (\nabla x H - \sigma E_p)$
// normalize:
// $\Delta\!E (\epsilon_0 + \sigma\Delta\!t) = 2\Delta\!t (\nabla x H - \sigma E_p)$ (3)
// then $\Delta\!E$ is trivially solved, and $E_n = E_p + \Delta\!E$
// ```
let twice_eps0 = R::twice_eps0();
let deltas = self.in_h.delta_h();
// \nabla x H
let nabla_h = deltas.nabla() * self.inv_feature_size;
let sigma = self.mat.conductivity();
let e_prev = self.in_e;
// evaluate (3)
// NB: the simulation uses $\Delta t$ to mean the time between E_p and E_n,
// whereas the math above denotes that as $2\Delta t$.
// hence, we actually compute:
// ```tex
// $\Delta\!E (\epsilon_0 + \frac{1}{2}\sigma\Delta\!t) = \Delta\!t (\nabla x H - \sigma E_p)$
// or:
// $\Delta\!E (2\epsilon_0 + \sigma\Delta\!t) = 2\Delta\!t (\nabla x H - \sigma E_p)$
// ```
// XXX: this evaluation is very prone to rounding error.
// - eps0 is *very* small -- about 1e-11.
// - combined with the division, i worry about error accumulation here.
let delta_e = (nabla_h - e_prev.elem_mul(sigma)).elem_div(
sigma*self.time_step + Vec3::uniform(twice_eps0)
)*(R::two()*self.time_step);
// E_n = E_p + \Delta E + user_injected_field
e_prev + delta_e + self.stim_e
}
}
/// everything needed to advance the H (and M) fields of one cell by one time step.
pub struct StepHContext<'a, R, M> {
// 1/feature_size: converts finite differences into spatial derivatives
pub inv_feature_size: R,
pub time_step: R,
// externally-injected H field ("stimulus") folded into the B update
pub stim_h: Vec3<R>,
pub mat: &'a M,
/// Input field sampled near this location
pub in_e: VolumeSamplePos<R>,
// this cell's H and M fields from the previous step
pub in_h: Vec3<R>,
pub in_m: Vec3<R>,
}
impl<'a, R: Real, M: Material<R>> StepHContext<'a, R, M> {
/// given the simulation resoures (material, stimulus, fields),
/// advance the `h` field at the provided index.
/// this is a good toplevel function to use if you don't care about any of the details.
pub fn step_flat_view<RM, RF, WF>(
meta: SimMeta<R>,
mat: &RM,
stim_h: &RF,
e: &RF,
h: &mut WF,
m: &mut WF,
idx: Vec3u,
)
where
RM: Index<usize, Output=M> + ?Sized,
RF: Index<usize, Output=Vec3<R>> + ?Sized,
WF: Index<usize, Output=Vec3<R>> + IndexMut<usize> + ?Sized,
{
// wrap the flat storage as 3d-indexable matrices
let dim = meta.dim();
let stim_h_matrix = DimSlice::new(dim, stim_h);
let mat_matrix = DimSlice::new(dim, mat);
let e_matrix = DimSlice::new(dim, e);
let mut h_matrix = DimSlice::new(dim, h);
let mut m_matrix = DimSlice::new(dim, m);
// gather this cell's inputs (plus the E samples neighboring it)
let stim_h = stim_h_matrix[idx];
let mat = &mat_matrix[idx];
let in_e = VolumeSamplePos::from_indexable(&e_matrix, dim, idx);
let in_h = h_matrix[idx];
let in_m = m_matrix[idx];
let update_state = StepHContext {
inv_feature_size: meta.inv_feature_size(),
time_step: meta.time_step(),
stim_h,
mat,
in_e,
in_h,
in_m,
};
// compute and write back the advanced H and M fields for this one cell
let (new_h, new_m) = update_state.step_h();
h_matrix[idx] = new_h;
m_matrix[idx] = new_m;
}
/// return the `(h, m)` fields for this cell at one time-step from where it was previously
/// evaluated.
pub fn step_h(self) -> (Vec3<R>, Vec3<R>) {
// ```tex
// Maxwell-Faraday equation:
// $\nabla x E = -dB/dt$ (1)
//
// where
// $B = \mu_0 (H + M)$ (2)
// ```
let mu0 = R::mu0();
let mu0_inv = R::mu0_inv();
let deltas = self.in_e.delta_e();
// \nabla x E
// TODO: inv_feature_size and time_step could be folded
let nabla_e = deltas.nabla() * self.inv_feature_size;
// dB over this step, from (1)
let delta_b = nabla_e * (-self.time_step);
// Relation between these is: B = mu0*(H + M)
let old_h = self.in_h;
let old_m = self.in_m;
let old_b = (old_h + old_m) * mu0;
// evaluate the next B value.
// eq (1) enforces this relation. H and M can vary freely within that context,
// so long as they sum to B as per (2).
// we let the Material dictate M, and from that compute H.
// TODO(opt): fold stim_h * mu0 into the old_b = ... * mu0 calculation above
let new_b = old_b + delta_b + self.stim_h * mu0;
let mat = self.mat; // XXX: copy off of self to avoid a rust-gpu bug around ZSTs.
let new_m = mat.move_b_vec(old_m, new_b);
// recover H from (2): H = B/mu0 - M
let new_h = new_b * mu0_inv - new_m;
(new_h, new_m)
}
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::compound::Optional;
    use crate::mat::{AnisomorphicConductor, Vacuum};
    use float_eq::assert_float_eq;
    use std::cell::Cell;

    /// assert per-component equality of two vectors, to a relative tolerance of 1e-9.
    fn assert_vec_eq(a: Vec3<f64>, b: Vec3<f64>) {
        assert_float_eq!(a.x(), b.x(), r2nd <= 1e-9, "{:?} != {:?}", a, b);
        assert_float_eq!(a.y(), b.y(), r2nd <= 1e-9, "{:?} != {:?}", a, b);
        assert_float_eq!(a.z(), b.z(), r2nd <= 1e-9, "{:?} != {:?}", a, b);
    }

    /// in vacuum with no stimulus and a uniform H field, E must not change.
    #[test]
    fn step_e_trivial() {
        let ctx = StepEContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_e: Vec3::zero(),
            mat: &Vacuum,
            in_h: VolumeSampleNeg::default(),
            in_e: Vec3::new(1.0, 2.0, 3.0),
        };
        let new_e = ctx.step_e();
        // when the h field has zero spatial derivative, E has zero time derivative.
        assert_vec_eq(new_e, Vec3::new(1.0, 2.0, 3.0));
    }

    /// the stimulus field is added directly onto E.
    #[test]
    fn step_e_applies_stimulus() {
        let ctx = StepEContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_e: Vec3::new(-3.0, -2.0, -1.0),
            mat: &Vacuum,
            in_h: VolumeSampleNeg::default(),
            in_e: Vec3::new(1.0, 2.0, 3.0),
        };
        let new_e = ctx.step_e();
        // the h field has zero spatial derivative, so the only change to E
        // is the externally injected stimulus, summed onto the old value.
        assert_vec_eq(new_e, Vec3::new(-2.0, 0.0, 2.0));
    }

    /// the E update evaluates the curl of H correctly.
    #[test]
    fn step_e_understands_nabla_h() {
        let mid = Vec3::new(4.0, 8.0, 12.0);
        let ctx = StepEContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_e: Vec3::zero(),
            mat: &Vacuum,
            in_h: VolumeSampleNeg {
                mid,
                xm1: Optional::some(mid - Vec3::new(1.0, 2.0, 3.0)),
                ym1: Optional::some(mid - Vec3::new(4.0, 5.0, 6.0)),
                zm1: Optional::some(mid - Vec3::new(7.0, 8.0, 9.0)),
            },
            in_e: Vec3::zero(),
        };
        //```tex
        // $\nabla x H$:
        // $[ \Delta\!H_z/\Delta\!y - \Delta\!H_y/\Delta\!z$
        // $ \Delta\!H_x/\Delta\!z - \Delta\!H_z/\Delta\!x$
        // $ \Delta\!H_y/\Delta\!x - \Delta\!H_x/\Delta\!y ]$
        // we take these values from `in_h`:
        // $[ 6.0 - 8.0 $
        // $ 7.0 - 3.0$
        // $ 2.0 - 4.0 ]$
        // this gives $dD/dt$, and then we scale by $\epsilon_0^{-1}$ to get $dE/dt$.
        //```
        let delta_d = Vec3::new(-2.0, 4.0, -2.0);
        let delta_e = delta_d * f64::eps0_inv();
        let new_e = ctx.step_e();
        // starting from E = 0, the new E is exactly one step worth of dE/dt.
        assert_vec_eq(new_e, delta_e);
    }

    /// a conductive material bleeds energy out of E proportionally to sigma.
    #[test]
    fn step_e_respects_material() {
        let cond = Vec3::new(4.0e-10, 5.0e-10, 6.0e-10);
        let mat = AnisomorphicConductor::new(cond);
        let in_e = Vec3::new(1.0, 2.0, 3.0);
        let ctx = StepEContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_e: Vec3::zero(),
            mat: &mat,
            in_h: VolumeSampleNeg::default(),
            in_e,
        };
        //```tex
        // guiding Maxwell equation:
        // $\nabla x H = J_f + dD/dt$
        // we've forced $\Nabla x H = 0$, so
        // $dD/dt = -J_f$
        // simplified:
        // $\epsilon_0 dE/dt = -\sigma E$
        //```
        let new_e = ctx.step_e();
        // the update is semi-implicit: the conduction current is evaluated at the
        // midpoint of the old and new E values (see the step_e derivation above).
        let mid_e = (in_e + new_e)*0.5;
        let delta_e = new_e - in_e;
        assert_vec_eq(delta_e, cond.elem_mul(mid_e) * -f64::eps0_inv());
    }

    /// feature_size scales the curl; time_step scales the whole delta.
    #[test]
    fn step_e_understands_time_scale_feature_size() {
        let cond = Vec3::new(4.0, 5.0, 6.0);
        let mat = AnisomorphicConductor::new(cond);
        let in_e = Vec3::new(1.0, 2.0, 3.0);
        let ctx = StepEContext {
            inv_feature_size: 1e6,
            time_step: 1e-10,
            stim_e: Vec3::zero(),
            mat: &mat,
            in_h: VolumeSampleNeg {
                mid: Vec3::zero(),
                xm1: Optional::some(-Vec3::new(1e-6, 2e-6, 3e-6)),
                ym1: Optional::some(-Vec3::new(4e-6, 5e-6, 6e-6)),
                zm1: Optional::some(-Vec3::new(7e-6, 8e-6, 9e-6)),
            },
            in_e,
        };
        //```tex
        // guiding Maxwell equation:
        // $\nabla x H = J_f + dD/dt$
        // where $D = \epsilon_0 E$
        // and $J_f = \sigma E$
        //```
        let new_e = ctx.step_e();
        let mid_e = (in_e + new_e)*0.5;
        let delta_e = new_e - in_e;
        // see step_e_understands_nabla_h
        // we pre-scaled the in_h values by feature_size in order to get the same nabla_h as before
        let nabla_h_component = Vec3::new(-2.0, 4.0, -2.0);
        let current_component = cond.elem_mul(mid_e);
        assert_vec_eq(delta_e, (nabla_h_component - current_component) * f64::eps0_inv() * 1e-10);
    }

    /// in vacuum with no stimulus and a uniform E field, H and M must not change.
    #[test]
    fn step_h_trivial() {
        let ctx = StepHContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_h: Vec3::zero(),
            mat: &Vacuum,
            in_e: VolumeSamplePos::default(),
            in_h: Vec3::new(1.0, 2.0, 3.0),
            in_m: Vec3::zero(),
        };
        let (new_h, new_m) = ctx.step_h();
        // when the e field has zero spatial derivative, B has zero time derivative.
        assert_vec_eq(new_h, Vec3::new(1.0, 2.0, 3.0));
        assert_vec_eq(new_m, Vec3::zero());
    }

    /// the stimulus field is added directly onto H.
    #[test]
    fn step_h_applies_stimulus() {
        let ctx = StepHContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_h: Vec3::new(-3.0, -2.0, -1.0),
            mat: &Vacuum,
            in_e: VolumeSamplePos::default(),
            in_h: Vec3::new(1.0, 2.0, 3.0),
            in_m: Vec3::zero(),
        };
        let (new_h, new_m) = ctx.step_h();
        // the e field has zero spatial derivative, so the only change to H
        // is the externally injected stimulus, summed onto the old value.
        assert_vec_eq(new_h, Vec3::new(-2.0, 0.0, 2.0));
        assert_vec_eq(new_m, Vec3::zero());
    }

    /// the H update evaluates the curl of E correctly.
    #[test]
    fn step_h_understands_nabla_e() {
        let mid = Vec3::new(4.0, 8.0, 12.0);
        let ctx = StepHContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_h: Vec3::zero(),
            mat: &Vacuum,
            in_e: VolumeSamplePos {
                mid,
                xp1: Optional::some(mid + Vec3::new(1.0, 2.0, 3.0)),
                yp1: Optional::some(mid + Vec3::new(4.0, 5.0, 6.0)),
                zp1: Optional::some(mid + Vec3::new(7.0, 8.0, 9.0)),
            },
            in_h: Vec3::zero(),
            in_m: Vec3::zero(),
        };
        //```tex
        // $\nabla x E$:
        // $[ \Delta\!E_z/\Delta\!y - \Delta\!E_y/\Delta\!z$
        // $ \Delta\!E_x/\Delta\!z - \Delta\!E_z/\Delta\!x$
        // $ \Delta\!E_y/\Delta\!x - \Delta\!E_x/\Delta\!y ]$
        // we take these values from `in_e`:
        // $[ 6.0 - 8.0 $
        // $ 7.0 - 3.0$
        // $ 2.0 - 4.0 ]$
        // this gets negated, and then scaled by $\mu_0^{-1}$
        //```
        let delta_b = -Vec3::new(-2.0, 4.0, -2.0);
        let (new_h, new_m) = ctx.step_h();
        assert_vec_eq(new_h, delta_b * f64::mu0_inv());
        assert_vec_eq(new_m, Vec3::zero());
    }

    /// feature_size scales the curl; time_step scales the whole delta.
    #[test]
    fn step_h_understands_time_scale_feature_size() {
        let mid = Vec3::new(4.0, 8.0, 12.0);
        let ctx = StepHContext {
            inv_feature_size: 5.0, // $\Delta x = 0.2$
            time_step: 1e-3,
            stim_h: Vec3::zero(),
            mat: &Vacuum,
            in_e: VolumeSamplePos {
                mid,
                xp1: Optional::some(mid + Vec3::new(1.0, 2.0, 3.0)),
                yp1: Optional::some(mid + Vec3::new(4.0, 5.0, 6.0)),
                zp1: Optional::some(mid + Vec3::new(7.0, 8.0, 9.0)),
            },
            in_h: Vec3::zero(),
            in_m: Vec3::zero(),
        };
        // see step_h_understands_nabla_e
        // the nabla is dE/dx, so divide by \Delta x.
        // nabla relates to dB/dt, so multiply by \Delta t to get \Delta B
        let delta_b = -Vec3::new(-2.0, 4.0, -2.0) * 1e-3 / 0.2;
        let (new_h, new_m) = ctx.step_h();
        assert_vec_eq(new_h, delta_b * f64::mu0_inv());
        assert_vec_eq(new_m, Vec3::zero());
    }

    /// a Material stub: always answers `move_b_vec` with `response`, and records
    /// the arguments of the most recent call so tests can inspect them.
    struct MockHMaterial {
        response: Vec3<f64>,
        called_with: Cell<(Vec3<f64>, Vec3<f64>)>,
    }

    impl Material<f64> for MockHMaterial {
        fn move_b_vec(&self, m: Vec3<f64>, target_b: Vec3<f64>) -> Vec3<f64> {
            self.called_with.set((m, target_b));
            self.response
        }
    }

    /// the material decides M, and H is derived from B and that M.
    #[test]
    fn step_h_respects_material() {
        let mock_mat = MockHMaterial {
            response: Vec3::new(1e-6, 2e-6, 3e-6),
            called_with: Default::default(),
        };
        let ctx = StepHContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_h: Vec3::zero(),
            mat: &mock_mat,
            in_e: VolumeSamplePos {
                mid: Vec3::zero(),
                xp1: Optional::some(Vec3::new(1.0, 2.0, 3.0)),
                yp1: Optional::some(Vec3::new(4.0, 5.0, 6.0)),
                zp1: Optional::some(Vec3::new(7.0, 8.0, 9.0)),
            },
            in_h: Vec3::zero(),
            in_m: Vec3::zero(),
        };
        // see step_h_understands_nabla_e
        let delta_b = -Vec3::new(-2.0, 4.0, -2.0);
        let (new_h, new_m) = ctx.step_h();
        assert_vec_eq(mock_mat.called_with.get().0, Vec3::zero()); // not magnetized
        assert_vec_eq(mock_mat.called_with.get().1, delta_b);
        assert_vec_eq(new_m, mock_mat.response);
        assert_vec_eq(new_h, delta_b * f64::mu0_inv() - new_m);
    }

    /// prior H and M contribute to the old B, which the delta is applied on top of.
    #[test]
    fn step_h_understands_previous_m_h() {
        let mock_mat = MockHMaterial {
            response: Vec3::new(1e-6, 2e-6, 3e-6),
            called_with: Default::default(),
        };
        let in_h = Vec3::new(4e-6, 5e-6, 6e-6);
        let in_m = Vec3::new(7e-6, 8e-6, 9e-6);
        let ctx = StepHContext {
            inv_feature_size: 1.0,
            time_step: 1.0,
            stim_h: Vec3::zero(),
            mat: &mock_mat,
            in_e: VolumeSamplePos {
                mid: Vec3::zero(),
                xp1: Optional::some(Vec3::new(1.0, 2.0, 3.0)),
                yp1: Optional::some(Vec3::new(4.0, 5.0, 6.0)),
                zp1: Optional::some(Vec3::new(7.0, 8.0, 9.0)),
            },
            in_h,
            in_m,
        };
        // see step_h_understands_nabla_e
        let delta_b = -Vec3::new(-2.0, 4.0, -2.0);
        let prev_b = (in_h + in_m) * f64::mu0();
        let new_b = prev_b + delta_b;
        let (new_h, new_m) = ctx.step_h();
        assert_vec_eq(mock_mat.called_with.get().0, in_m);
        assert_vec_eq(mock_mat.called_with.get().1, new_b);
        assert_vec_eq(new_m, mock_mat.response);
        assert_vec_eq(new_h, new_b * f64::mu0_inv() - new_m);
    }
}

View File

@@ -0,0 +1,257 @@
use core::ops::Index;
use crate::compound::Optional;
use crate::real::Real;
use crate::vec::{Vec3, Vec3u};
#[cfg(feature = "serde")]
use serde::{Serialize, Deserialize};
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
#[cfg_attr(feature = "fmt", derive(Debug))]
#[derive(Copy, Clone, Default, PartialEq)]
/// static description of one simulation: grid dimensions plus the spatial and
/// temporal discretization. shared by the CPU and spirv (GPU) stepping code.
pub struct SimMeta<R> {
    /// number of cells along each axis
    dim: Vec3u,
    /// cached `1/feature_size`, so per-cell stepping code can multiply instead of divide
    inv_feature_size: R,
    /// duration of one simulation step
    time_step: R,
    /// edge length of a single (cubic) cell
    feature_size: R,
}
impl<R: Real> SimMeta<R> {
    /// build metadata for a simulation of `dim` cells, each `feature_size`
    /// across, advanced by `time_step` per step.
    pub fn new(dim: Vec3u, feature_size: R, time_step: R) -> Self {
        // cache the reciprocal up front: the hot stepping paths only ever multiply by it
        let inv_feature_size = feature_size.inv();
        Self {
            dim,
            inv_feature_size,
            time_step,
            feature_size,
        }
    }
}
impl<R> SimMeta<R> {
    /// number of cells along each axis of the simulation volume
    pub fn dim(&self) -> Vec3u {
        self.dim
    }
}
impl<R: Copy> SimMeta<R> {
    /// reciprocal of [`Self::feature_size`], cached at construction
    pub fn inv_feature_size(&self) -> R {
        self.inv_feature_size
    }
    /// duration of one simulation step
    pub fn time_step(&self) -> R {
        self.time_step
    }
    /// edge length of a single simulation cell
    pub fn feature_size(&self) -> R {
        self.feature_size
    }
}
impl<R: Real> SimMeta<R> {
    /// convert the numeric representation of every field to `R2`
    /// (e.g. f32 -> f64); the grid dimensions carry over unchanged.
    pub fn cast<R2: Real>(self) -> SimMeta<R2> {
        let Self { dim, inv_feature_size, time_step, feature_size } = self;
        SimMeta {
            dim,
            inv_feature_size: inv_feature_size.cast(),
            time_step: time_step.cast(),
            feature_size: feature_size.cast(),
        }
    }
}
/// Package the field vectors adjacent to some particular location.
/// Particularly those at negative offsets from the midpoint.
/// This is used in step_e when looking at the H field deltas.
#[derive(Copy, Clone, Default)]
pub struct VolumeSampleNeg<R> {
    /// field at the location itself
    pub mid: Vec3<R>,
    // TODO(optimization): if we just force the boundary cells to be always E=0, H=0,
    // we can save a lot of bounds checks and special cases, at the slight expense that the
    // boundaries now reflect all energy and we use slightly more memory (6*N^2 extra cells).
    // having the edges be perfectly reflective might actually simplify testing though (e.g. energy
    // would actually be preserved?)
    /// field one cell in the -x direction; `none` at the x=0 boundary
    pub xm1: Optional<Vec3<R>>,
    /// field one cell in the -y direction; `none` at the y=0 boundary
    pub ym1: Optional<Vec3<R>>,
    /// field one cell in the -z direction; `none` at the z=0 boundary
    pub zm1: Optional<Vec3<R>>,
}
impl<R: Copy + Default> VolumeSampleNeg<R> {
    /// gather the field at `idx` plus its -x, -y and -z neighbors from any
    /// `Vec3u`-indexable field. neighbors that would fall outside the volume
    /// (any zero component of `idx`) come back as `none`.
    pub fn from_indexable<I: Index<Vec3u, Output=Vec3<R>>>(i: &I, idx: Vec3u) -> Self {
        VolumeSampleNeg {
            mid: i[idx],
            xm1: prev_x(idx).map(|idx| i[idx]),
            ym1: prev_y(idx).map(|idx| i[idx]),
            zm1: prev_z(idx).map(|idx| i[idx]),
        }
    }
}
/// index one step in the -x direction, or `none` at the x=0 boundary.
fn prev_x(idx: Vec3u) -> Optional<Vec3u> {
    let (x, y, z): (u32, u32, u32) = idx.into();
    if x == 0 {
        Optional::none()
    } else {
        Optional::some(Vec3u::new(x - 1, y, z))
    }
}
/// index one step in the -y direction, or `none` at the y=0 boundary.
fn prev_y(idx: Vec3u) -> Optional<Vec3u> {
    let (x, y, z): (u32, u32, u32) = idx.into();
    if y == 0 {
        Optional::none()
    } else {
        Optional::some(Vec3u::new(x, y - 1, z))
    }
}
/// index one step in the -z direction, or `none` at the z=0 boundary.
fn prev_z(idx: Vec3u) -> Optional<Vec3u> {
    let (x, y, z): (u32, u32, u32) = idx.into();
    if z == 0 {
        Optional::none()
    } else {
        Optional::some(Vec3u::new(x, y, z - 1))
    }
}
impl<R: Real> VolumeSampleNeg<R> {
    /// Calculate the delta in H values amongst this cell and its neighbors (left/up/out)
    ///
    /// any direction with no neighbor (volume boundary) contributes zero deltas,
    /// i.e. the field is treated as flat across the missing edge.
    /// `dfy_dx` reads as "change in the field's y component per step in x", etc.;
    /// note these are raw cell-to-cell differences, not yet divided by the feature size.
    pub fn delta_h(self) -> FieldDeltas<R> {
        let mid = self.mid;
        // NOTE(review): this is written with `is_some()` + repeated `unwrap()` instead of
        // the tidier `self.xm1.map(|xm1| ...).unwrap_or_default()` form, presumably
        // because closures don't survive the rust-gpu (SPIR-V) target -- confirm
        // before "cleaning" this back up.
        let (dfy_dx, dfz_dx) = if self.xm1.is_some() {
            (mid.y() - self.xm1.unwrap().y(), mid.z() - self.xm1.unwrap().z())
        } else {
            (R::zero(), R::zero())
        };
        let (dfx_dy, dfz_dy) = if self.ym1.is_some() {
            (mid.x() - self.ym1.unwrap().x(), mid.z() - self.ym1.unwrap().z())
        } else {
            (R::zero(), R::zero())
        };
        let (dfx_dz, dfy_dz) = if self.zm1.is_some() {
            (mid.x() - self.zm1.unwrap().x(), mid.y() - self.zm1.unwrap().y())
        } else {
            (R::zero(), R::zero())
        };
        FieldDeltas {
            dfy_dx,
            dfz_dx,
            dfx_dy,
            dfz_dy,
            dfx_dz,
            dfy_dz,
        }
    }
}
/// Package the field vectors adjacent to some particular location.
/// Particularly those at positive offsets from the midpoint.
/// This is used in step_h when looking at the E field deltas.
#[derive(Copy, Clone, Default)]
pub struct VolumeSamplePos<R> {
    /// field at the location itself
    pub mid: Vec3<R>,
    /// field one cell in the +x direction; `none` at the x=dim-1 boundary
    pub xp1: Optional<Vec3<R>>,
    /// field one cell in the +y direction; `none` at the y=dim-1 boundary
    pub yp1: Optional<Vec3<R>>,
    /// field one cell in the +z direction; `none` at the z=dim-1 boundary
    pub zp1: Optional<Vec3<R>>
}
impl<R: Copy + Default> VolumeSamplePos<R> {
    /// gather the field at `idx` plus its +x, +y and +z neighbors from any
    /// `Vec3u`-indexable field. unlike the negative-offset variant, this needs
    /// the volume dimensions `dim` to detect the upper boundary; out-of-volume
    /// neighbors come back as `none`.
    pub fn from_indexable<I: Index<Vec3u, Output=Vec3<R>>>(i: &I, dim: Vec3u, idx: Vec3u) -> Self {
        VolumeSamplePos {
            mid: i[idx],
            xp1: next_x(dim, idx).map(|idx| i[idx]),
            yp1: next_y(dim, idx).map(|idx| i[idx]),
            zp1: next_z(dim, idx).map(|idx| i[idx]),
        }
    }
}
/// index one step in the +x direction, or `none` when that leaves the `dim` volume.
fn next_x(dim: Vec3u, idx: Vec3u) -> Optional<Vec3u> {
    let (x, y, z): (u32, u32, u32) = idx.into();
    if x + 1 < dim.x() {
        Optional::some(Vec3u::new(x + 1, y, z))
    } else {
        Optional::none()
    }
}
/// index one step in the +y direction, or `none` when that leaves the `dim` volume.
fn next_y(dim: Vec3u, idx: Vec3u) -> Optional<Vec3u> {
    let (x, y, z): (u32, u32, u32) = idx.into();
    if y + 1 < dim.y() {
        Optional::some(Vec3u::new(x, y + 1, z))
    } else {
        Optional::none()
    }
}
/// index one step in the +z direction, or `none` when that leaves the `dim` volume.
fn next_z(dim: Vec3u, idx: Vec3u) -> Optional<Vec3u> {
    let (x, y, z): (u32, u32, u32) = idx.into();
    if z + 1 < dim.z() {
        Optional::some(Vec3u::new(x, y, z + 1))
    } else {
        Optional::none()
    }
}
impl<R: Real> VolumeSamplePos<R> {
    /// Calculate the delta in E values amongst this cell and its neighbors (right/down/in)
    ///
    /// any direction with no neighbor (volume boundary) contributes zero deltas,
    /// i.e. the field is treated as flat across the missing edge.
    /// `dfy_dx` reads as "change in the field's y component per step in x", etc.;
    /// note these are raw cell-to-cell differences, not yet divided by the feature size.
    pub fn delta_e(self) -> FieldDeltas<R> {
        let mid = self.mid;
        // NOTE(review): written with `is_some()` + repeated `unwrap()` instead of the
        // tidier `self.xp1.map(|xp1| ...).unwrap_or_default()` form, presumably
        // because closures don't survive the rust-gpu (SPIR-V) target -- confirm
        // before "cleaning" this back up.
        let (dfy_dx, dfz_dx) = if self.xp1.is_some() {
            (self.xp1.unwrap().y() - mid.y(), self.xp1.unwrap().z() - mid.z())
        } else {
            (R::zero(), R::zero())
        };
        let (dfx_dy, dfz_dy) = if self.yp1.is_some() {
            (self.yp1.unwrap().x() - mid.x(), self.yp1.unwrap().z() - mid.z())
        } else {
            (R::zero(), R::zero())
        };
        let (dfx_dz, dfy_dz) = if self.zp1.is_some() {
            (self.zp1.unwrap().x() - mid.x(), self.zp1.unwrap().y() - mid.y())
        } else {
            (R::zero(), R::zero())
        };
        FieldDeltas {
            dfy_dx,
            dfz_dx,
            dfx_dy,
            dfz_dy,
            dfx_dz,
            dfy_dz,
        }
    }
}
/// finite differences of a vector field between a cell and its neighbors,
/// as needed to evaluate a curl. `dfy_dx` is the change of the field's y
/// component per step in the x direction, and so on. these are raw
/// cell-to-cell differences: the caller still scales by `inv_feature_size`
/// to get true spatial derivatives.
pub struct FieldDeltas<R> {
    dfy_dx: R,
    dfz_dx: R,
    dfx_dy: R,
    dfz_dy: R,
    dfx_dz: R,
    dfy_dz: R,
}
impl<R: Real> FieldDeltas<R> {
    /// evaluate the curl from the stored differences:
    /// ```tex
    /// $\nabla x F = [ dF_z/dy - dF_y/dz,\ dF_x/dz - dF_z/dx,\ dF_y/dx - dF_x/dy ]$
    /// ```
    /// (unscaled: multiply by the inverse feature size to get the physical curl)
    pub fn nabla(self) -> Vec3<R> {
        Vec3::new(
            self.dfz_dy - self.dfy_dz,
            self.dfx_dz - self.dfz_dx,
            self.dfy_dx - self.dfx_dy,
        )
    }
}

View File

@@ -124,23 +124,29 @@ impl<R: Real> Vec2<R> {
self.mag_sq().sqrt() self.mag_sq().sqrt()
} }
pub fn with_mag(&self, new_mag: R) -> Self { pub fn with_mag(&self, new_mag: R) -> Option<Self> {
if new_mag.is_zero() { if new_mag.is_zero() {
// avoid div-by-zero if self.mag() == 0 and new_mag == 0 // avoid div-by-zero if self.mag() == 0 and new_mag == 0
Vec2::new(R::zero(), R::zero()) Some(Self::zero())
} else { } else {
let scale = new_mag / self.mag(); let old_mag = self.mag();
*self * scale if old_mag.is_zero() {
None
} else {
Some(*self * (new_mag / old_mag))
}
} }
} }
// /// Returns the angle of this point, (-pi, pi] /// returns the angle of this point, (-pi, pi]
// pub fn arg(&self) -> R { /// requires std feature
// // self.y.atan2(self.x) #[cfg(feature = "std")]
// self.y.to_f64().atan2(self.x.to_f64()).cast() pub fn arg(&self) -> R {
// } // self.y.atan2(self.x)
// TODO: add `Real::atan2` and remove these casts
self.y.to_f64().atan2(self.x.to_f64()).cast()
}
// TODO test
pub fn rotate(&self, angle: R) -> Self { pub fn rotate(&self, angle: R) -> Self {
let (sin, cos) = angle.sin_cos(); let (sin, cos) = angle.sin_cos();
let map_1x_0y = Vec2::new(cos, sin); let map_1x_0y = Vec2::new(cos, sin);
@@ -250,6 +256,22 @@ impl<R: Real> Vec3<R> {
pub fn with_xy(&self, xy: Vec2<R>) -> Self { pub fn with_xy(&self, xy: Vec2<R>) -> Self {
Self::new(xy.x(), xy.y(), self.z()) Self::new(xy.x(), xy.y(), self.z())
} }
pub fn xz(&self) -> Vec2<R> {
Vec2::new(self.x(), self.z())
}
pub fn with_xz(&self, xz: Vec2<R>) -> Self {
// NB: the `Vec2` type calls these coordinates `x` and `y`
// even though they represent `x` and `z` in this context.
Self::new(xz.x(), self.y(), xz.y())
}
pub fn yz(&self) -> Vec2<R> {
Vec2::new(self.y(), self.z())
}
pub fn with_yz(&self, yz: Vec2<R>) -> Self {
// NB: the `Vec2` type calls these coordinates `x` and `y`
// even though they represent `y` and `z` in this context.
Self::new(self.x(), yz.x(), yz.y())
}
pub fn distance(&self, other: Self) -> R { pub fn distance(&self, other: Self) -> R {
(*self - other).mag() (*self - other).mag()
@@ -329,13 +351,19 @@ impl<R: Real> Vec3<R> {
Self::new(self.x().exp(), self.y().exp(), self.z().exp()) Self::new(self.x().exp(), self.y().exp(), self.z().exp())
} }
pub fn with_mag(&self, new_mag: R) -> Self { /// the only condition upon which this returns `None` is if the current magnitude is zero
/// and the new magnitude and NON-zero.
pub fn with_mag(&self, new_mag: R) -> Option<Self> {
if new_mag.is_zero() { if new_mag.is_zero() {
// avoid div-by-zero if self.mag() == 0 and new_mag == 0 // avoid div-by-zero if self.mag() == 0 and new_mag == 0
Self::zero() Some(Self::zero())
} else { } else {
let scale = new_mag / self.mag(); let old_mag = self.mag();
*self * scale if old_mag.is_zero() {
None
} else {
Some(*self * (new_mag / old_mag))
}
} }
} }
@@ -344,7 +372,7 @@ impl<R: Real> Vec3<R> {
if *self == Self::zero() { if *self == Self::zero() {
*self *self
} else { } else {
self.with_mag(R::one()) self.with_mag(R::one()).unwrap()
} }
} }
pub fn round(&self) -> Self { pub fn round(&self) -> Self {
@@ -356,6 +384,25 @@ impl<R: Real> Vec3<R> {
pub fn floor(&self) -> Self { pub fn floor(&self) -> Self {
Self::new(self.x().floor(), self.y().floor(), self.z().floor()) Self::new(self.x().floor(), self.y().floor(), self.z().floor())
} }
/// rotate in the xy plane
pub fn rotate_xy(&self, angle: R) -> Self {
self.with_xy(
self.xy().rotate(angle)
)
}
/// rotate in the xz plane
pub fn rotate_xz(&self, angle: R) -> Self {
self.with_xz(
self.xz().rotate(angle)
)
}
/// rotate in the yz plane
pub fn rotate_yz(&self, angle: R) -> Self {
self.with_yz(
self.yz().rotate(angle)
)
}
} }
impl<R> Into<(R, R, R)> for Vec3<R> { impl<R> Into<(R, R, R)> for Vec3<R> {
@@ -482,3 +529,41 @@ impl<R: Real + fmt::Display> fmt::Display for Vec3<R> {
fmt::Display::fmt(")", f) fmt::Display::fmt(")", f)
} }
} }
#[cfg(test)]
mod test {
use super::*;
use float_eq::assert_float_eq;
fn assert_vec2(got: Vec2<f32>, want: Vec2<f32>) {
assert_float_eq!(got.x(), want.x(), abs <= 1e-6);
assert_float_eq!(got.y(), want.y(), abs <= 1e-6);
}
#[test]
fn vec2_rotate_trivial() {
// no-op rotate
assert_vec2(Vec2::new(1.0, 0.0).rotate(0.0), Vec2::new(1.0, 0.0));
assert_vec2(Vec2::new(1.0, 1.0).rotate(f32::two_pi()), Vec2::new(1.0, 1.0));
assert_vec2(Vec2::new(-2.0, 3.0).rotate(-f32::two_pi()), Vec2::new(-2.0, 3.0));
}
#[test]
fn vec2_rotate_quarter_turns() {
assert_vec2(Vec2::new(1.0, 0.0).rotate(f32::pi()), Vec2::new(-1.0, 0.0));
assert_vec2(Vec2::new(1.0, -1.0).rotate(f32::pi()), Vec2::new(-1.0, 1.0));
assert_vec2(Vec2::new(1.0, 0.0).rotate(0.5*f32::pi()), Vec2::new(0.0, 1.0));
assert_vec2(Vec2::new(1.0, -1.0).rotate(0.5*f32::pi()), Vec2::new(1.0, 1.0));
assert_vec2(Vec2::new(-1.0, 0.0).rotate(-0.5*f32::pi()), Vec2::new(0.0, 1.0));
assert_vec2(Vec2::new(-1.0, -1.0).rotate(-0.5*f32::pi()), Vec2::new(-1.0, 1.0));
}
#[test]
fn vec2_rotate_zero() {
assert_vec2(Vec2::new(0.0, 0.0).rotate(1.0), Vec2::new(0.0, 0.0));
assert_vec2(Vec2::new(0.0, 0.0).rotate(1e9), Vec2::new(0.0, 0.0));
}
// TODO: lots more Vec, Vec2 tests need backfilling
}

View File

@@ -51,6 +51,10 @@ impl Vec3u {
pub fn product_sum(&self) -> u32 { pub fn product_sum(&self) -> u32 {
self.x * self.y * self.z self.x * self.y * self.z
} }
pub fn product_sum_usize(&self) -> usize {
self.x as usize * self.y as usize * self.z as usize
}
} }
impl From<(u32, u32, u32)> for Vec3u { impl From<(u32, u32, u32)> for Vec3u {
@@ -59,6 +63,12 @@ impl From<(u32, u32, u32)> for Vec3u {
} }
} }
impl Into<(u32, u32, u32)> for Vec3u {
fn into(self) -> (u32, u32, u32) {
(self.x, self.y, self.z)
}
}
impl<R: Real> From<Vec3<R>> for Vec3u { impl<R: Real> From<Vec3<R>> for Vec3u {
fn from(v: Vec3<R>) -> Self { fn from(v: Vec3<R>) -> Self {
Self::new(v.x().to_f64() as _, v.y().to_f64() as _, v.z().to_f64() as _) Self::new(v.x().to_f64() as _, v.y().to_f64() as _, v.z().to_f64() as _)

View File

@@ -1,4 +1,5 @@
// use crate::{Loader, LoaderCache}; //! extracts Measurements from rendered .bc files and dumps them into a CSV
use coremem_post::{Loader, LoaderCache}; use coremem_post::{Loader, LoaderCache};
use std::path::PathBuf; use std::path::PathBuf;
use structopt::StructOpt; use structopt::StructOpt;
@@ -15,17 +16,13 @@ fn main() {
let mut frame = cache.load_first(); let mut frame = cache.load_first();
for meas in frame.measurements() { for meas in frame.measurements() {
for key in meas.key_value(&**frame).keys() { print!("\"{}\",", meas.name());
print!("\"{}\",", key);
}
} }
println!(""); println!("");
loop { loop {
for meas in frame.measurements() { for meas in frame.measurements() {
for value in meas.key_value(&**frame).values() { print!("\"{}\",", meas.machine_readable().replace(",", "\\,"));
print!("\"{}\",", value);
}
} }
println!(""); println!("");

View File

@@ -1,4 +1,5 @@
// use crate::Loader; //! "decimates" the binary rendered form of a simulation
//! by deleting all but every N rendered files
use coremem_post::Loader; use coremem_post::Loader;
use std::fs; use std::fs;
use std::path::PathBuf; use std::path::PathBuf;

View File

@@ -1,8 +1,15 @@
//! interactive CLI viewer for .bc files.
//! navigate through the simulation over time and space,
//! and toggle views to see more detail over material, electric, or magnetic changes.
use coremem_post::{Loader, Viewer}; use coremem_post::{Loader, Viewer};
use std::path::PathBuf; use std::path::PathBuf;
use std::io::Write as _;
use std::time::Duration; use std::time::Duration;
use structopt::StructOpt; use structopt::StructOpt;
use crossterm::{cursor, QueueableCommand as _};
use crossterm::event::{Event, KeyCode}; use crossterm::event::{Event, KeyCode};
use crossterm::style::{style, PrintStyledContent};
#[derive(Debug, StructOpt)] #[derive(Debug, StructOpt)]
struct Opt { struct Opt {
@@ -33,6 +40,12 @@ fn event_loop(mut viewer: Viewer) {
} }
} }
viewer.navigate(time_steps, z_steps); viewer.navigate(time_steps, z_steps);
let mut stdout = std::io::stdout();
stdout.queue(cursor::MoveToColumn(30)).unwrap();
stdout.queue(PrintStyledContent(style("wasd=appearance; arrows,PgUp/PgDown=navigate; q=quit"))).unwrap();
stdout.flush().unwrap();
let _ = crossterm::event::poll(Duration::from_millis(33)).unwrap(); let _ = crossterm::event::poll(Duration::from_millis(33)).unwrap();
} }
} }

View File

@@ -1,15 +1,14 @@
//! Post-processing tools //! Post-processing tools
use coremem::meas::AbstractMeasurement; use coremem::meas;
use coremem::render::{ColorTermRenderer, Renderer as _, RenderConfig, SerializedFrame}; use coremem::render::{ColorTermRenderer, Renderer as _, RenderConfig, SerializedFrame};
use coremem::sim::{SimState, StaticSim}; use coremem::sim::{AbstractSim, GenericSim};
use itertools::Itertools as _; use itertools::Itertools as _;
use lru::LruCache; use lru::LruCache;
use rayon::{ThreadPool, ThreadPoolBuilder}; use rayon::{ThreadPool, ThreadPoolBuilder};
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::{DirEntry, File, read_dir}; use std::fs::{DirEntry, File, read_dir};
use std::io::{BufReader, Seek as _, SeekFrom}; use std::io::BufReader;
use std::ops::Deref;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::sync::mpsc::{self, Receiver, Sender}; use std::sync::mpsc::{self, Receiver, Sender};
@@ -33,25 +32,21 @@ pub type Result<T> = std::result::Result<T, Error>;
pub struct Frame { pub struct Frame {
path: PathBuf, path: PathBuf,
data: SerializedFrame<StaticSim>, data: SerializedFrame<GenericSim<f32>>,
} }
impl Frame { impl Frame {
pub fn measurements(&self) -> &[Box<dyn AbstractMeasurement>] { pub fn measurements(&self) -> &[meas::Measurement] {
&*self.data.measurements &*self.data.measurements
} }
pub fn sim(&self) -> &GenericSim<f32> {
&self.data.state
}
pub fn path(&self) -> &Path { pub fn path(&self) -> &Path {
&*self.path &*self.path
} }
} }
impl Deref for Frame {
type Target = StaticSim;
fn deref(&self) -> &Self::Target {
&self.data.state
}
}
#[derive(Default)] #[derive(Default)]
pub struct Loader { pub struct Loader {
dir: PathBuf, dir: PathBuf,
@@ -105,20 +100,19 @@ impl Loader {
fn load(&self, path: &Path) -> Result<Frame> { fn load(&self, path: &Path) -> Result<Frame> {
let mut reader = BufReader::new(File::open(path).unwrap()); let mut reader = BufReader::new(File::open(path).unwrap());
// Try to deserialize a couple different types of likely sims. // let data = bincode::deserialize_from(&mut reader).or_else(|_| -> Result<_> {
// TODO: would be good to drop a marker in the file to make sure we don't // reader.seek(SeekFrom::Start(0)).unwrap();
// decode to a valid but incorrect state... // let data: SerializedFrame<SimState<f32>> =
let data = bincode::deserialize_from(&mut reader).or_else(|_| -> Result<_> { // bincode::deserialize_from(&mut reader)?;
reader.seek(SeekFrom::Start(0)).unwrap(); // Ok(SerializedFrame::to_static(data))
let data: SerializedFrame<SimState<f32>> = // }).or_else(|_| -> Result<_> {
bincode::deserialize_from(&mut reader)?; // reader.seek(SeekFrom::Start(0)).unwrap();
Ok(SerializedFrame::to_static(data)) // let data: SerializedFrame<SimState<f64>> =
}).or_else(|_| -> Result<_> { // bincode::deserialize_from(reader)?;
reader.seek(SeekFrom::Start(0)).unwrap(); // Ok(SerializedFrame::to_static(data))
let data: SerializedFrame<SimState<f64>> = // })?;
bincode::deserialize_from(reader)?; // TODO: try to decode a few common sim types (as above) if this fails?
Ok(SerializedFrame::to_static(data)) let data = bincode::deserialize_from(&mut reader)?;
})?;
Ok(Frame { Ok(Frame {
path: path.into(), path: path.into(),
data data
@@ -253,7 +247,7 @@ impl Viewer {
let mut cache = LoaderCache::new(loader, 6, 6); let mut cache = LoaderCache::new(loader, 6, 6);
let viewing = cache.load_first(); let viewing = cache.load_first();
Self { Self {
z: viewing.depth() / 2, z: viewing.sim().depth() / 2,
viewing, viewing,
cache, cache,
renderer: Default::default(), renderer: Default::default(),
@@ -263,7 +257,7 @@ impl Viewer {
} }
pub fn navigate(&mut self, time_steps: isize, z_steps: i32) { pub fn navigate(&mut self, time_steps: isize, z_steps: i32) {
let new_z = (self.z as i32).saturating_add(z_steps); let new_z = (self.z as i32).saturating_add(z_steps);
let new_z = new_z.max(0).min(self.viewing.depth() as i32 - 1) as u32; let new_z = new_z.max(0).min(self.viewing.sim().depth() as i32 - 1) as u32;
if time_steps == 0 && new_z == self.z && self.render_config == self.last_config { if time_steps == 0 && new_z == self.z && self.render_config == self.last_config {
return; return;
} }
@@ -275,7 +269,10 @@ impl Viewer {
} }
pub fn render(&self) { pub fn render(&self) {
self.renderer.render_z_slice( self.renderer.render_z_slice(
&**self.viewing, self.z, &self.viewing.data.measurements, self.render_config self.viewing.sim(),
self.z,
&*meas::as_dyn_measurements(self.viewing.measurements()),
self.render_config,
); );
} }
pub fn render_config(&mut self) -> &mut RenderConfig { pub fn render_config(&mut self) -> &mut RenderConfig {

View File

@@ -8,4 +8,4 @@ crate-type = ["dylib", "lib"]
[dependencies] [dependencies]
spirv-std = { git = "https://github.com/EmbarkStudios/rust-gpu", features = ["glam"] } # MIT or Apache 2.0 spirv-std = { git = "https://github.com/EmbarkStudios/rust-gpu", features = ["glam"] } # MIT or Apache 2.0
coremem_types = { path = "../types" } coremem_cross = { path = "../cross" }

View File

@@ -0,0 +1,53 @@
use spirv_std::RuntimeArray;
use coremem_cross::mat::Material;
use coremem_cross::real::Real;
use coremem_cross::step::{SimMeta, StepEContext, StepHContext};
use coremem_cross::vec::{Vec3, Vec3u};
use crate::support::SizedArray;
/// advance the `h` and `m` fields of the cell at `idx` by one step, viewing each
/// flat GPU buffer as a volume of `meta.dim()` cells.
/// invocations whose `idx` falls outside the volume are no-ops (GPU dispatch may
/// launch more invocations than cells).
pub(crate) fn step_h<R: Real, M: Material<R>>(
    idx: Vec3u,
    meta: &SimMeta<R>,
    stimulus_h: &RuntimeArray<Vec3<R>>,
    material: &RuntimeArray<M>,
    e: &RuntimeArray<Vec3<R>>,
    h: &mut RuntimeArray<Vec3<R>>,
    m: &mut RuntimeArray<Vec3<R>>,
) {
    let dim = meta.dim();
    // bounds check: skip the excess invocations past the volume edge
    if idx.x() < dim.x() && idx.y() < dim.y() && idx.z() < dim.z() {
        let len = dim.product_sum_usize();
        // SAFETY: each RuntimeArray is assumed to be backed by a buffer of exactly
        // `dim.product_sum_usize()` elements, as bound by the host side --
        // TODO(review): confirm the host allocates buffers of this length.
        let stim_h_array = unsafe { SizedArray::new(stimulus_h, len) };
        let mat_array = unsafe { SizedArray::new(material, len) };
        let e_array = unsafe { SizedArray::new(e, len) };
        let mut h_array = unsafe { SizedArray::new(h, len) };
        let mut m_array = unsafe { SizedArray::new(m, len) };
        StepHContext::step_flat_view(*meta, &mat_array, &stim_h_array, &e_array, &mut h_array, &mut m_array, idx);
    }
}
/// advance the `e` field of the cell at `idx` by one step, viewing each flat GPU
/// buffer as a volume of `meta.dim()` cells.
/// invocations whose `idx` falls outside the volume are no-ops (GPU dispatch may
/// launch more invocations than cells).
pub(crate) fn step_e<R: Real, M: Material<R>>(
    idx: Vec3u,
    meta: &SimMeta<R>,
    stimulus_e: &RuntimeArray<Vec3<R>>,
    material: &RuntimeArray<M>,
    e: &mut RuntimeArray<Vec3<R>>,
    h: &RuntimeArray<Vec3<R>>,
) {
    let dim = meta.dim();
    // bounds check: skip the excess invocations past the volume edge
    if idx.x() < dim.x() && idx.y() < dim.y() && idx.z() < dim.z() {
        let len = dim.product_sum_usize();
        // SAFETY: each RuntimeArray is assumed to be backed by a buffer of exactly
        // `dim.product_sum_usize()` elements, as bound by the host side --
        // TODO(review): confirm the host allocates buffers of this length.
        let stim_e_array = unsafe { SizedArray::new(stimulus_e, len) };
        let mat_array = unsafe { SizedArray::new(material, len) };
        let mut e_array = unsafe { SizedArray::new(e, len) };
        let h_array = unsafe { SizedArray::new(h, len) };
        StepEContext::step_flat_view(*meta, &mat_array, &stim_e_array, &mut e_array, &h_array, idx);
    }
}

View File

@@ -8,115 +8,87 @@
extern crate spirv_std; extern crate spirv_std;
pub use spirv_std::glam; use spirv_std::{glam, RuntimeArray};
#[cfg(not(target_arch = "spirv"))] #[cfg(not(target_arch = "spirv"))]
use spirv_std::macros::spirv; use spirv_std::macros::spirv;
pub mod mat; mod adapt;
pub mod sim; mod support;
pub mod support;
pub use sim::{SerializedSimMeta, SerializedStepE, SerializedStepH}; use coremem_cross::mat::{Ferroxcube3R1MH, FullyGenericMaterial, IsoConductorOr};
pub use support::{Optional, UnsizedArray}; use coremem_cross::real::R32;
use coremem_cross::step::SimMeta;
use coremem_cross::vec::{Vec3, Vec3u};
use mat::{IsoConductorOr, FullyGenericMaterial}; type Iso3R1<R> = IsoConductorOr<R, Ferroxcube3R1MH>;
use coremem_types::mat::{Ferroxcube3R1MH, Material};
use coremem_types::vec::{Vec3, Vec3u};
type Iso3R1 = IsoConductorOr<Ferroxcube3R1MH>;
fn glam_vec_to_internal(v: glam::UVec3) -> Vec3u { fn glam_vec_to_internal(v: glam::UVec3) -> Vec3u {
Vec3u::new(v.x, v.y, v.z) Vec3u::new(v.x, v.y, v.z)
} }
fn step_h<M: Material<f32>>( mod private {
id: Vec3u, pub trait Sealed {}
meta: &SerializedSimMeta,
stimulus_h: &UnsizedArray<Vec3<f32>>,
material: &UnsizedArray<M>,
e: &UnsizedArray<Vec3<f32>>,
h: &mut UnsizedArray<Vec3<f32>>,
m: &mut UnsizedArray<Vec3<f32>>,
) {
if id.x() < meta.dim.x() && id.y() < meta.dim.y() && id.z() < meta.dim.z() {
let sim_state = SerializedStepH::new(meta, stimulus_h, material, e, h, m);
let update_state = sim_state.index(id);
update_state.step_h();
}
} }
fn step_e<M: Material<f32>>( pub trait HasEntryPoints<R>: private::Sealed {
id: Vec3u, fn step_h() -> &'static str;
meta: &SerializedSimMeta, fn step_e() -> &'static str;
stimulus_e: &UnsizedArray<Vec3<f32>>,
material: &UnsizedArray<M>,
e: &mut UnsizedArray<Vec3<f32>>,
h: &UnsizedArray<Vec3<f32>>,
) {
if id.x() < meta.dim.x() && id.y() < meta.dim.y() && id.z() < meta.dim.z() {
let sim_state = SerializedStepE::new(meta, stimulus_e, material, e, h);
let update_state = sim_state.index(id);
update_state.step_e();
}
}
/// Return the step_h/step_e entry point names for the provided material
pub fn entry_points<M: 'static>() -> Optional<(&'static str, &'static str)> {
use core::any::TypeId;
let mappings = [
(TypeId::of::<FullyGenericMaterial>(),
("step_h_generic_material", "step_e_generic_material")
),
(TypeId::of::<Iso3R1>(),
("step_h_iso_3r1", "step_e_iso_3r1")
),
];
for (id, names) in mappings {
if id == TypeId::of::<M>() {
return Optional::some(names);
}
}
Optional::none()
} }
macro_rules! steps { macro_rules! steps {
($mat:ty, $step_h:ident, $step_e:ident) => { ($flt:ty, $mat:ty, $step_h:ident, $step_e:ident) => {
impl private::Sealed for $mat { }
impl HasEntryPoints<$flt> for $mat {
fn step_h() -> &'static str {
stringify!($step_h)
}
fn step_e() -> &'static str {
stringify!($step_e)
}
}
// LocalSize/numthreads // LocalSize/numthreads
#[spirv(compute(threads(4, 4, 4)))] #[spirv(compute(threads(4, 4, 4)))]
pub fn $step_h( pub fn $step_h(
#[spirv(global_invocation_id)] id: glam::UVec3, #[spirv(global_invocation_id)] id: glam::UVec3,
#[spirv(storage_buffer, descriptor_set = 0, binding = 0)] meta: &SerializedSimMeta, #[spirv(storage_buffer, descriptor_set = 0, binding = 0)] meta: &SimMeta<$flt>,
// XXX: delete this input? // XXX: delete this input?
#[spirv(storage_buffer, descriptor_set = 0, binding = 1)] _unused_stimulus_e: &UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 1)] _unused_stimulus_e: &RuntimeArray<Vec3<$flt>>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 2)] stimulus_h: &UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 2)] stimulus_h: &RuntimeArray<Vec3<$flt>>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 3)] material: &UnsizedArray<$mat>, #[spirv(storage_buffer, descriptor_set = 0, binding = 3)] material: &RuntimeArray<$mat>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 4)] e: &UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 4)] e: &RuntimeArray<Vec3<$flt>>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 5)] h: &mut UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 5)] h: &mut RuntimeArray<Vec3<$flt>>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 6)] m: &mut UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 6)] m: &mut RuntimeArray<Vec3<$flt>>,
) { ) {
step_h(glam_vec_to_internal(id), meta, stimulus_h, material, e, h, m) adapt::step_h(glam_vec_to_internal(id), meta, stimulus_h, material, e, h, m)
} }
#[spirv(compute(threads(4, 4, 4)))] #[spirv(compute(threads(4, 4, 4)))]
pub fn $step_e( pub fn $step_e(
#[spirv(global_invocation_id)] id: glam::UVec3, #[spirv(global_invocation_id)] id: glam::UVec3,
#[spirv(storage_buffer, descriptor_set = 0, binding = 0)] meta: &SerializedSimMeta, #[spirv(storage_buffer, descriptor_set = 0, binding = 0)] meta: &SimMeta<$flt>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 1)] stimulus_e: &UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 1)] stimulus_e: &RuntimeArray<Vec3<$flt>>,
// XXX: delete this input? // XXX: delete this input?
#[spirv(storage_buffer, descriptor_set = 0, binding = 2)] _unused_stimulus_h: &UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 2)] _unused_stimulus_h: &RuntimeArray<Vec3<$flt>>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 3)] material: &UnsizedArray<$mat>, #[spirv(storage_buffer, descriptor_set = 0, binding = 3)] material: &RuntimeArray<$mat>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 4)] e: &mut UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 4)] e: &mut RuntimeArray<Vec3<$flt>>,
#[spirv(storage_buffer, descriptor_set = 0, binding = 5)] h: &UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 5)] h: &RuntimeArray<Vec3<$flt>>,
// XXX: can/should this m input be deleted? // XXX: can/should this m input be deleted?
#[spirv(storage_buffer, descriptor_set = 0, binding = 6)] _unused_m: &UnsizedArray<Vec3<f32>>, #[spirv(storage_buffer, descriptor_set = 0, binding = 6)] _unused_m: &RuntimeArray<Vec3<$flt>>,
) { ) {
step_e(glam_vec_to_internal(id), meta, stimulus_e, material, e, h) adapt::step_e(glam_vec_to_internal(id), meta, stimulus_e, material, e, h)
} }
}; };
} }
steps!(FullyGenericMaterial, step_h_generic_material, step_e_generic_material); steps!(f32, FullyGenericMaterial<f32>, step_h_generic_material_f32, step_e_generic_material_f32);
steps!(Iso3R1, step_h_iso_3r1, step_e_iso_3r1); steps!(f32, Iso3R1<f32>, step_h_iso_3r1_f32, step_e_iso_3r1_f32);
steps!(R32, FullyGenericMaterial<R32>, step_h_generic_material_r32, step_e_generic_material_r32);
steps!(R32, Iso3R1<R32>, step_h_iso_3r1_r32, step_e_iso_3r1_r32);
// these should work, but require OpCapability Float64
// we disable them for compatibility concerns: use the Cpu if you need f64 or temporarily uncomment
// this and add the capability to the WgpuBackend driver.
// steps!(f64, FullyGenericMaterial<f64>, step_h_generic_material_f64, step_e_generic_material_f64);
// steps!(f64, Iso3R1<f64>, step_h_iso_3r1_f64, step_e_iso_3r1_f64);
// steps!(R64, FullyGenericMaterial<R64>, step_h_generic_material_r64, step_e_generic_material_r64);
// steps!(R64, Iso3R1<R64>, step_h_iso_3r1_r64, step_e_iso_3r1_r64);

View File

@@ -1,29 +0,0 @@
use crate::support::Optional;
use coremem_types::mat::{Material, MBPgram, MHPgram};
use coremem_types::vec::Vec3;
/// Material whose behavior is configured by runtime data rather than by a
/// dedicated type: a per-axis conductivity plus optional magnetization curves.
#[derive(Copy, Clone, Default, PartialEq)]
pub struct FullyGenericMaterial {
    // Electrical conductivity per axis.
    pub conductivity: Vec3<f32>,
    // Magnetization curve parameterized over B; checked first by `move_b_vec`.
    pub m_b_curve: Optional<MBPgram<f32>>,
    // Magnetization curve parameterized over H; used only when no B curve is set.
    pub m_h_curve: Optional<MHPgram<f32>>,
}
impl Material<f32> for FullyGenericMaterial {
    /// Per-axis electrical conductivity, as configured on the struct.
    fn conductivity(&self) -> Vec3<f32> {
        self.conductivity
    }
    /// Step the magnetization `m` toward `target_b`, delegating to whichever
    /// curve is configured: the B curve takes precedence, then the H curve.
    /// With neither set, falls back to `Default::default()` — presumably the
    /// zero vector, i.e. no magnetization; TODO confirm `Vec3` default.
    fn move_b_vec(&self, m: Vec3<f32>, target_b: Vec3<f32>) -> Vec3<f32> {
        // NOTE: `Optional` is the crate's own type, not `core::Option`, hence
        // the explicit is_some()/unwrap() pairs instead of `if let`.
        if self.m_b_curve.is_some() {
            self.m_b_curve.unwrap().move_b_vec(m, target_b)
        } else if self.m_h_curve.is_some() {
            self.m_h_curve.unwrap().move_b_vec(m, target_b)
        } else {
            Default::default()
        }
    }
}
/// `IsoConductorOr` with its float parameter pinned to `f32` for this crate.
pub type IsoConductorOr<M> = coremem_types::mat::IsoConductorOr<f32, M>;

Some files were not shown because too many files have changed in this diff Show More