diff --git a/.gitignore b/.gitignore
index 423c8ef..034bc94 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,4 @@ Manifest.toml
.vscode
.julia
**/.ipynb_checkpoints/
+*.bkp
diff --git a/Project.toml b/Project.toml
index a15ad52..f1957b2 100644
--- a/Project.toml
+++ b/Project.toml
@@ -12,6 +12,7 @@ JuliaFormatter = "98e50ef6-434e-11e9-1051-2b60c6c9e899"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
NumaAllocators = "21436f30-1b4a-4f08-87af-e26101bb5379"
QEDbase = "10e22c08-3ccb-4172-bfcf-7d7aa3d04d93"
+QEDprocesses = "46de9c38-1bb3-4547-a1ec-da24d767fdad"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Roots = "f2b01f46-fcfa-551c-844a-d8ac1e96c665"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
diff --git a/README.md b/README.md
index f09438a..ee74050 100644
--- a/README.md
+++ b/README.md
@@ -50,8 +50,8 @@ Problems:
For graphs AB->AB^n:
- Number of Sums should always be 1
-- Number of ComputeTaskS2 should always be (n+1)!
-- Number of ComputeTaskU should always be (n+3)
+- Number of ComputeTaskABC_S2 should always be (n+1)!
+- Number of ComputeTaskABC_U should always be (n+3)
Times are from my home machine: AMD Ryzen 7900X3D, 64GB DDR5 RAM @ 6000MHz (not necessarily up to date, check Jupyter Notebooks in `notebooks/` instead)
@@ -59,9 +59,9 @@ Times are from my home machine: AMD Ryzen 7900X3D, 64GB DDR5 RAM @ 6000MHz (not
$ julia --project examples/import_bench.jl
AB->AB:
Graph:
- Nodes: Total: 34, DataTask: 19, ComputeTaskP: 4,
- ComputeTaskS2: 2, ComputeTaskV: 4, ComputeTaskU: 4,
- ComputeTaskSum: 1
+ Nodes: Total: 34, DataTask: 19, ComputeTaskABC_P: 4,
+ ComputeTaskABC_S2: 2, ComputeTaskABC_V: 4, ComputeTaskABC_U: 4,
+ ComputeTaskABC_Sum: 1
Edges: 37
Total Compute Effort: 185
Total Data Transfer: 102
@@ -71,9 +71,9 @@ Graph:
AB->ABBB:
Graph:
- Nodes: Total: 280, DataTask: 143, ComputeTaskP: 6,
- ComputeTaskS2: 24, ComputeTaskV: 64, ComputeTaskU: 6,
- ComputeTaskSum: 1, ComputeTaskS1: 36
+ Nodes: Total: 280, DataTask: 143, ComputeTaskABC_P: 6,
+ ComputeTaskABC_S2: 24, ComputeTaskABC_V: 64, ComputeTaskABC_U: 6,
+ ComputeTaskABC_Sum: 1, ComputeTaskABC_S1: 36
Edges: 385
Total Compute Effort: 2007
Total Data Transfer: 828
@@ -83,9 +83,9 @@ Graph:
AB->ABBBBB:
Graph:
- Nodes: Total: 7854, DataTask: 3931, ComputeTaskP: 8,
- ComputeTaskS2: 720, ComputeTaskV: 1956, ComputeTaskU: 8,
- ComputeTaskSum: 1, ComputeTaskS1: 1230
+ Nodes: Total: 7854, DataTask: 3931, ComputeTaskABC_P: 8,
+ ComputeTaskABC_S2: 720, ComputeTaskABC_V: 1956, ComputeTaskABC_U: 8,
+ ComputeTaskABC_Sum: 1, ComputeTaskABC_S1: 1230
Edges: 11241
Total Compute Effort: 58789
Total Data Transfer: 23244
@@ -95,9 +95,9 @@ Graph:
AB->ABBBBBBB:
Graph:
- Nodes: Total: 438436, DataTask: 219223, ComputeTaskP: 10,
- ComputeTaskS2: 40320, ComputeTaskV: 109600, ComputeTaskU: 10,
- ComputeTaskSum: 1, ComputeTaskS1: 69272
+ Nodes: Total: 438436, DataTask: 219223, ComputeTaskABC_P: 10,
+ ComputeTaskABC_S2: 40320, ComputeTaskABC_V: 109600, ComputeTaskABC_U: 10,
+ ComputeTaskABC_Sum: 1, ComputeTaskABC_S1: 69272
Edges: 628665
Total Compute Effort: 3288131
Total Data Transfer: 1297700
@@ -107,7 +107,7 @@ Graph:
AB->ABBBBBBBBB:
Graph:
- Nodes: Total: 39456442, DataTask: 19728227, ComputeTaskS1: 6235290, ComputeTaskP: 12, ComputeTaskU: 12, ComputeTaskV: 9864100, ComputeTaskS2: 3628800, ComputeTaskSum: 1
+ Nodes: Total: 39456442, DataTask: 19728227, ComputeTaskABC_S1: 6235290, ComputeTaskABC_P: 12, ComputeTaskABC_U: 12, ComputeTaskABC_V: 9864100, ComputeTaskABC_S2: 3628800, ComputeTaskABC_Sum: 1
Edges: 56578129
Total Compute Effort: 295923153
Total Data Transfer: 175407750
@@ -116,9 +116,9 @@ Graph:
ABAB->ABAB:
Graph:
- Nodes: Total: 3218, DataTask: 1613, ComputeTaskP: 8,
- ComputeTaskS2: 288, ComputeTaskV: 796, ComputeTaskU: 8,
- ComputeTaskSum: 1, ComputeTaskS1: 504
+ Nodes: Total: 3218, DataTask: 1613, ComputeTaskABC_P: 8,
+ ComputeTaskABC_S2: 288, ComputeTaskABC_V: 796, ComputeTaskABC_U: 8,
+ ComputeTaskABC_Sum: 1, ComputeTaskABC_S1: 504
Edges: 4581
Total Compute Effort: 24009
Total Data Transfer: 9494
@@ -128,9 +128,9 @@ Graph:
ABAB->ABC:
Graph:
- Nodes: Total: 817, DataTask: 412, ComputeTaskP: 7,
- ComputeTaskS2: 72, ComputeTaskV: 198, ComputeTaskU: 7,
- ComputeTaskSum: 1, ComputeTaskS1: 120
+ Nodes: Total: 817, DataTask: 412, ComputeTaskABC_P: 7,
+ ComputeTaskABC_S2: 72, ComputeTaskABC_V: 198, ComputeTaskABC_U: 7,
+ ComputeTaskABC_Sum: 1, ComputeTaskABC_S1: 120
Edges: 1151
Total Compute Effort: 6028
Total Data Transfer: 2411
diff --git a/docs/src/QED-Bhabha-DAG.drawio b/docs/src/QED-Bhabha-DAG.drawio
new file mode 100644
index 0000000..0fbd5e4
--- /dev/null
+++ b/docs/src/QED-Bhabha-DAG.drawio
@@ -0,0 +1,259 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/src/QED-Compton-DAG.drawio b/docs/src/QED-Compton-DAG.drawio
new file mode 100644
index 0000000..dd51f9d
--- /dev/null
+++ b/docs/src/QED-Compton-DAG.drawio
@@ -0,0 +1,259 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/src/lib/internals/models.md b/docs/src/lib/internals/models.md
index a258ce5..89cd63b 100644
--- a/docs/src/lib/internals/models.md
+++ b/docs/src/lib/internals/models.md
@@ -69,4 +69,58 @@ Order = [:function]
## QED-Model
-*To be added*
+### Feynman Diagrams
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/diagrams.jl"]
+Order = [:type, :function, :constant]
+```
+
+### Types
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/types.jl"]
+Order = [:type, :constant]
+```
+
+### Particle
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/particle.jl"]
+Order = [:type, :constant, :function]
+```
+
+### Parse
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/parse.jl"]
+Order = [:function]
+```
+
+### Properties
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/properties.jl"]
+Order = [:function]
+```
+
+### Create
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/create.jl"]
+Order = [:function]
+```
+
+### Compute
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/compute.jl"]
+Order = [:function]
+```
+
+### Print
+```@autodocs
+Modules = [MetagraphOptimization]
+Pages = ["models/qed/print.jl"]
+Order = [:function]
+```
diff --git a/docs/src/structure_qed.drawio b/docs/src/structure_qed.drawio
new file mode 100644
index 0000000..0a04132
--- /dev/null
+++ b/docs/src/structure_qed.drawio
@@ -0,0 +1,122 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/notebooks/abc_model_large.ipynb b/notebooks/abc_model_large.ipynb
index 8189d04..653a9b1 100644
--- a/notebooks/abc_model_large.ipynb
+++ b/notebooks/abc_model_large.ipynb
@@ -11,27 +11,18 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 11,
"metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Found 1 NUMA nodes\n",
- "CUDA is non-functional\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"# Get machine and set dictionary caching strategy\n",
"machine = get_machine_info()\n",
- "MetagraphOptimization.set_cache_strategy(machine.devices[1], MetagraphOptimization.Dictionary())"
+ "MetagraphOptimization.set_cache_strategy(machine.devices[1], MetagraphOptimization.LocalVariables())"
]
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 12,
"metadata": {},
"outputs": [
{
@@ -39,9 +30,9 @@
"output_type": "stream",
"text": [
"Graph:\n",
- " Nodes: Total: 7854, ComputeTaskP: 8, ComputeTaskS2: 720, \n",
- " ComputeTaskU: 8, ComputeTaskSum: 1, ComputeTaskS1: 1230, \n",
- " ComputeTaskV: 1956, DataTask: 3931\n",
+ " Nodes: Total: 7854, DataTask: 3931, ComputeTaskABC_S1: 1230, \n",
+ " ComputeTaskABC_Sum: 1, ComputeTaskABC_U: 8, ComputeTaskABC_P: 8, \n",
+ " ComputeTaskABC_V: 1956, ComputeTaskABC_S2: 720\n",
" Edges: 11241\n",
" Total Compute Effort: 33915.0\n",
" Total Data Transfer: 322464.0\n",
@@ -59,18 +50,17 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 13,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "compute__ae7097a4_7bfc_11ee_2cec_190d7ced64f1 (generic function with 1 method)"
+ "compute__8bced4be_8f2e_11ee_37d9_3f851690d249 (generic function with 1 method)"
]
},
- "execution_count": 7,
"metadata": {},
- "output_type": "execute_result"
+ "output_type": "display_data"
}
],
"source": [
@@ -79,22 +69,22 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- " 0.140021 seconds (791.41 k allocations: 30.317 MiB, 9.74% gc time)\n",
+ " 0.184484 seconds (2.75 M allocations: 153.561 MiB, 15.46% gc time)\n",
"Graph:\n",
- " Nodes: Total: 4998, ComputeTaskP: 8, ComputeTaskS2: 720, \n",
- " ComputeTaskU: 8, ComputeTaskSum: 1, ComputeTaskS1: 516, \n",
- " ComputeTaskV: 1242, DataTask: 2503\n",
+ " Nodes: Total: 4998, DataTask: 2503, ComputeTaskABC_S1: 516, \n",
+ " ComputeTaskABC_Sum: 1, ComputeTaskABC_U: 8, ComputeTaskABC_P: 8, \n",
+ " ComputeTaskABC_V: 1242, ComputeTaskABC_S2: 720\n",
" Edges: 7671\n",
" Total Compute Effort: 21777.0\n",
- " Total Data Transfer: 219648.0\n",
- " Total Compute Intensity: 0.09914499562937062\n"
+ " Total Data Transfer: 253920.0\n",
+ " Total Compute Intensity: 0.0857632325141777\n"
]
}
],
@@ -105,25 +95,24 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- " 3.626740 seconds (1.52 M allocations: 114.358 MiB, 0.84% gc time)\n"
+ " 0.822702 seconds (574.85 k allocations: 48.098 MiB, 0.90% gc time)\n"
]
},
{
"data": {
"text/plain": [
- "compute__bad8f2ac_7bfc_11ee_176b_b72dc8919aad (generic function with 1 method)"
+ "compute__8dffb17a_8f2e_11ee_2d70_13a063f6b2e1 (generic function with 1 method)"
]
},
- "execution_count": 9,
"metadata": {},
- "output_type": "execute_result"
+ "output_type": "display_data"
}
],
"source": [
@@ -132,14 +121,14 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
- " 2.130952 seconds (4.31 M allocations: 276.129 MiB, 4.50% gc time, 99.02% compilation time)\n"
+ " 0.054193 seconds (108.22 k allocations: 6.222 MiB, 92.26% compilation time)\n"
]
},
{
@@ -148,309 +137,236 @@
"1000-element Vector{ABCProcessInput}:\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [4.694213004647641, 0.0, 0.0, 4.58646222408983]\n",
- " B: [4.694213004647641, 0.0, 0.0, -4.58646222408983]\n",
+ " A: [5.53824935935883, 0.0, 0.0, 5.447220021849539]\n",
+ " B: [5.53824935935883, 0.0, 0.0, -5.447220021849539]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.1989656045893697, -0.40235742161696864, 0.06512533692021122, 0.5209469423550988]\n",
- " B: [-1.2555060342925868, 0.3685683194051901, 0.4785890883121294, -0.4597882997907804]\n",
- " B: [-2.189083660521547, 0.31663070338411387, 0.1742479621961443, -1.9134967776579581]\n",
- " B: [-1.0637129314000269, -0.2948512505337184, 0.0500740340487307, -0.2050378784528044]\n",
- " B: [-1.6149410305664367, 1.0344652685816964, -0.406159957064284, 0.6106965118475143]\n",
- " B: [-2.0662167479253144, -1.0224556192203134, -0.3618764644129321, 1.4466795016989296]\n",
+ " A: [-1.3103925957044282, 0.7331872395687581, 0.24174619498761993, 0.34802873993327305]\n",
+ " B: [-1.7235347423723115, -0.9221216475500805, -0.5368654338299067, 0.9121618174658171]\n",
+ " B: [-3.2983236636246445, -1.4122494078132704, -0.264394674616116, -2.7954581120438933]\n",
+ " B: [-1.4663199369248787, -0.21617929792622487, -0.41022326537895987, 0.9669940750145931]\n",
+ " B: [-1.1596695896410607, 0.40971989086421784, 0.1871290088754596, -0.3767570864705371]\n",
+ " B: [-2.118258190450336, 1.4076432228565998, 0.7826081699619032, 0.945030566100747]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [5.621657834589244, 0.0, 0.0, 5.532001157736559]\n",
- " B: [5.621657834589244, 0.0, 0.0, -5.532001157736559]\n",
+ " A: [6.406766539805908, 0.0, 0.0, 6.328242844232241]\n",
+ " B: [6.406766539805908, 0.0, 0.0, -6.328242844232241]\n",
" 6 Outgoing Particles:\n",
- " A: [-2.058801595505931, 0.7220299456693885, 0.22719930902793095, 1.6327024349806234]\n",
- " B: [-1.1826215869997767, 0.04638669502532437, -0.553508153090363, -0.30011800516629]\n",
- " B: [-2.3776830758041227, -0.8637209881441633, -0.22710813067439403, 1.9636152272240621]\n",
- " B: [-1.9086249240920268, 0.02598092498567318, -1.087715954825374, -1.2079106316365085]\n",
- " B: [-2.6526208210236426, 0.3117066248738638, 1.6178469805428013, -1.8225826038033035]\n",
- " B: [-1.0629636657529868, -0.24238320241008685, 0.023285949019398133, -0.2657064215985837]\n",
+ " A: [-1.6009185206411505, -0.5320720115654639, 1.09590848570997, -0.2807562558330809]\n",
+ " B: [-3.146359037361951, -0.17028519968266745, 1.7773008494544373, -2.389933018577465]\n",
+ " B: [-1.010135923448664, 0.06427364329577855, -0.1146419285663243, -0.05568402673627389]\n",
+ " B: [-3.6289281421436512, 0.6465018878980286, -0.8216898266580996, 3.328059584585744]\n",
+ " B: [-1.3592677632187082, 0.8038563415980269, -0.35192233894694247, -0.27852199472993183]\n",
+ " B: [-2.06792369279769, -0.8122746615437029, -1.5849552409930403, -0.323164288708993]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.176284774018432, 0.0, 0.0, 6.094792335245879]\n",
- " B: [6.176284774018432, 0.0, 0.0, -6.094792335245879]\n",
+ " A: [4.592675400586894, 0.0, 0.0, 4.482484504731276]\n",
+ " B: [4.592675400586894, 0.0, 0.0, -4.482484504731276]\n",
" 6 Outgoing Particles:\n",
- " A: [-3.2943110238771185, 1.9799744259594443, 2.3805040294128346, 0.5151572192390796]\n",
- " B: [-1.0255775134941767, 0.18009906891836583, -0.12779691496180498, 0.05514988745120904]\n",
- " B: [-1.7854209452644407, -0.56381615584479, -0.9572322565407875, 0.9764966468120639]\n",
- " B: [-3.3312939695760786, -0.5949754252793171, -2.9420979921841868, -1.0428725518649993]\n",
- " B: [-1.6551651824618003, -0.8748451354288965, 0.9749427327758187, -0.1539624566503731]\n",
- " B: [-1.260800913363249, -0.12643677832480643, 0.6716804014981268, -0.34996874498697933]\n",
+ " A: [-1.1473149674649585, -0.35076892712815855, -0.170139004859497, -0.4053955023873595]\n",
+ " B: [-2.058220554606089, -0.8121547455466859, -1.4272449393744948, 0.7346076529133699]\n",
+ " B: [-2.0024960896606476, 1.3172479417787402, 0.7582221815549833, -0.8366286944540325]\n",
+ " B: [-1.0179814720237987, 0.162899519872391, -0.09860388948222289, -0.0052246328160273445]\n",
+ " B: [-1.834456765054589, -0.0990687609983643, 1.3606293642672649, 0.7100033355854413]\n",
+ " B: [-1.1248809523637056, -0.2181550279779225, -0.42286371210603335, -0.19736215884139197]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [4.747497785190141, 0.0, 0.0, 4.640984294348053]\n",
- " B: [4.747497785190141, 0.0, 0.0, -4.640984294348053]\n",
+ " A: [4.037101162257922, 0.0, 0.0, 3.9112895308714055]\n",
+ " B: [4.037101162257922, 0.0, 0.0, -3.9112895308714055]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.3704329562088802, 0.8292801285050307, 0.2251475790952209, 0.3737506167990253]\n",
- " B: [-1.352958681672649, 0.11120507604905326, 0.6088733084867489, -0.6688825902852584]\n",
- " B: [-1.4224569379606473, -0.25277059018918374, -0.4925475402927904, -0.84669220478242]\n",
- " B: [-2.4534584066229996, -0.23638988525842838, -1.4120549440785204, 1.7232756047945383]\n",
- " B: [-1.4378719974624208, 0.5461758322111039, 0.8131489669135029, -0.3285674953530594]\n",
- " B: [-1.457816590452685, -0.9975005613175758, 0.257432629875838, -0.25288393117282576]\n",
+ " A: [-1.7053110482506162, -0.23947337333507246, -1.2744970749813946, 0.47581034101100217]\n",
+ " B: [-1.3631569288619594, 0.7221467297219651, 0.42638713494656166, -0.3935669251960867]\n",
+ " B: [-1.0326521624735496, -0.11131042747240362, 0.20341304874809626, 0.11226579619908084]\n",
+ " B: [-1.195196392865049, -0.5445059949974184, -0.16637078706558947, 0.32299907142385453]\n",
+ " B: [-1.1830550739590457, 0.24824882865433953, -0.423307203181585, -0.39850073880304915]\n",
+ " B: [-1.5948307181056223, -0.07510576257141027, 1.2343748815339113, -0.11900754463480165]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.148648417619223, 0.0, 0.0, 6.066784763240853]\n",
- " B: [6.148648417619223, 0.0, 0.0, -6.066784763240853]\n",
+ " A: [7.636716907339512, 0.0, 0.0, 7.57096064729207]\n",
+ " B: [7.636716907339512, 0.0, 0.0, -7.57096064729207]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.5381168736188293, 0.5769721565317305, 1.0069443436143835, 0.13773066601554382]\n",
- " B: [-1.3178580311796126, 0.27781510267038506, -0.8083323925420551, 0.07853217328003184]\n",
- " B: [-1.5330954954905804, 0.4994081736550063, -1.0290017953406905, 0.20525247761163526]\n",
- " B: [-3.083592979398096, -2.1497728433794587, -1.2247634566690573, -1.5449844205264607]\n",
- " B: [-3.1391572693216845, 0.49043306139044257, 2.931865230552653, 0.13397777318202247]\n",
- " B: [-1.6854761862296446, 0.30514434913189475, -0.876711929615233, 0.989491330437227]\n",
+ " A: [-1.8228350224036067, -0.22313230508453247, 0.05829362440621317, -1.5064997001932685]\n",
+ " B: [-2.467409891320565, 1.6506915327402656, -0.771321444516658, 1.3298091083892047]\n",
+ " B: [-3.7191367050304223, 1.01401048234514, -0.8448690579747132, -3.3301586819963456]\n",
+ " B: [-1.086062092991359, 0.018065163049532738, 0.4218324659828878, 0.035523096142663795]\n",
+ " B: [-3.708627500490809, -3.0248517041401413, 1.3840072581447456, 1.2995052961646025]\n",
+ " B: [-2.4693626024422626, 0.5652168310897357, -0.24794284604247502, 2.171820881493144]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [7.422637433466136, 0.0, 0.0, 7.35496746890785]\n",
- " B: [7.422637433466136, 0.0, 0.0, -7.35496746890785]\n",
+ " A: [4.844757462595395, 0.0, 0.0, 4.740429819264681]\n",
+ " B: [4.844757462595395, 0.0, 0.0, -4.740429819264681]\n",
" 6 Outgoing Particles:\n",
- " A: [-3.3788591199517355, 2.3069724486616927, -0.5016400230094518, 2.2006645271171985]\n",
- " B: [-2.193241133599192, -1.652465184572841, -0.691853387986234, -0.7752447184070871]\n",
- " B: [-2.295315825041209, 0.334376552772819, 0.5374003175214306, 1.966689593293318]\n",
- " B: [-2.3721558149969235, -2.0813404180022568, 0.4923496733367945, 0.22964554029865022]\n",
- " B: [-1.5367714331999278, 0.9008878309070798, 0.1482895506792473, -0.7266895920420517]\n",
- " B: [-3.068931540143284, 0.1915687702335065, 0.015453869458212284, -2.8950653502600274]\n",
+ " A: [-1.3377157678137663, -0.44312783214029056, -0.34462836811169034, -0.6887325226333468]\n",
+ " B: [-1.0287552354600262, 0.10884372468923921, -0.0798214909694111, 0.20029704855940197]\n",
+ " B: [-1.237602042094568, -0.1707812371296387, -0.708500409075891, -0.02279811352743621]\n",
+ " B: [-1.2285767946957649, -0.45314793159826366, 0.5376309116329622, -0.12251895938933055]\n",
+ " B: [-2.3944375695065316, 0.5631279933752329, -1.4234056115727505, 1.5460060162511446]\n",
+ " B: [-2.4624275156201336, 0.3950852828037212, 2.0187249680967807, -0.9122534692604332]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.716486802754837, 0.0, 0.0, 6.64162592830851]\n",
- " B: [6.716486802754837, 0.0, 0.0, -6.64162592830851]\n",
+ " A: [6.914095647194839, 0.0, 0.0, 6.841397417089481]\n",
+ " B: [6.914095647194839, 0.0, 0.0, -6.841397417089481]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.3263331205917814, -0.5023870926274977, 0.418137178911541, 0.5761319775467438]\n",
- " B: [-2.1603199304697136, -1.202627416523187, 1.024176720111292, -1.0824654936733602]\n",
- " B: [-1.1665818595303201, 0.5747508534091106, 0.05041215840441908, 0.16743149576984034]\n",
- " B: [-1.829760754209137, 0.5127529745920416, -0.17835468593467171, -1.4329334983509001]\n",
- " B: [-2.891550940379351, -2.652621236308268, 0.3953841214715819, 0.41029113320086874]\n",
- " B: [-4.05842700032937, 3.2701319174577996, -1.7097554929641623, 1.3615443855068068]\n",
+ " A: [-1.8747539146164607, -1.15195487912761, 1.0796978964166692, -0.14817101368775237]\n",
+ " B: [-2.0219963752169967, -0.8963094934108238, -1.380862038576808, 0.6150761447412909]\n",
+ " B: [-2.4839643051342004, -0.5463241040770312, 0.28470426735854887, -2.1887329948244236]\n",
+ " B: [-1.0870998264481033, 0.03306160941873628, 0.20168848226668348, -0.3741854069403313]\n",
+ " B: [-2.4584897964753116, 0.9082805780526032, -1.8726214974559325, -0.844089567623928]\n",
+ " B: [-3.9018870764986056, 1.6532462891441266, 1.6873928899908393, 2.9401028383351444]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [7.700331598721008, 0.0, 0.0, 7.635123229539995]\n",
- " B: [7.700331598721008, 0.0, 0.0, -7.635123229539995]\n",
+ " A: [4.882838018892802, 0.0, 0.0, 4.77934170349275]\n",
+ " B: [4.882838018892802, 0.0, 0.0, -4.77934170349275]\n",
" 6 Outgoing Particles:\n",
- " A: [-2.382743739041896, -1.410381415274026, 1.0613871843128353, 1.2496996576655786]\n",
- " B: [-3.021630369232257, 0.25595209564405125, -2.8389223073732714, 0.07251720968504605]\n",
- " B: [-2.7262381500229256, 1.0736489469437192, 2.293577756890956, 0.13839603484966886]\n",
- " B: [-2.222260574660266, 1.5432031708495264, -0.7055857379280247, 1.0291330339668954]\n",
- " B: [-1.650055097318715, -1.062833285640475, -0.34598865120359784, 0.6880109623839291]\n",
- " B: [-3.397735267165956, -0.3995895125227963, 0.5355317553011019, -3.1777568985511193]\n",
+ " A: [-1.3368922715636002, -0.024254114235374817, -0.17993280734873465, 0.8685141729118435]\n",
+ " B: [-1.336032053759296, 0.44580739433740213, 0.4009862518446777, -0.6522633223307408]\n",
+ " B: [-1.1917158881102905, 0.11587748600254362, 0.21032579337862262, -0.6020981870524788]\n",
+ " B: [-1.8590179700604674, -0.4659878149612763, 1.4629321849562218, 0.3140582613697155]\n",
+ " B: [-1.2740128533657533, -0.3900331968801154, 0.6651639498517544, 0.16893719451393388]\n",
+ " B: [-2.7680050009261956, 0.3185902457368207, -2.559475372682542, -0.09714811941227354]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [4.9341647451125334, 0.0, 0.0, 4.8317679716550375]\n",
- " B: [4.9341647451125334, 0.0, 0.0, -4.8317679716550375]\n",
+ " A: [4.215107110349817, 0.0, 0.0, 4.094768363622244]\n",
+ " B: [4.215107110349817, 0.0, 0.0, -4.094768363622244]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.834221818900379, 0.1070495973399568, 1.2695354794210922, 0.860923766155068]\n",
- " B: [-1.5116322118250454, 0.39753882899610743, -0.756426277560466, -0.7448584495617266]\n",
- " B: [-1.6588475476725886, 0.06712527283179799, 0.6875031760830096, -1.1289857249063835]\n",
- " B: [-1.5718164783029667, 0.4294130824657117, -0.6215317131811225, -0.9486357444151968]\n",
- " B: [-1.7838526603309615, -0.5732435925039472, -0.9425541080554634, 0.9824020820472578]\n",
- " B: [-1.5079587731931232, -0.4278831891296266, 0.36347344329295106, 0.979154070680981]\n",
+ " A: [-1.3241447475687065, 0.7510738166043768, -0.3909856211208319, 0.19072933335458914]\n",
+ " B: [-1.7731907344857587, 0.036019000265901324, 1.4622797510086056, -0.06816114931690141]\n",
+ " B: [-1.019387957593508, 0.014655316462798782, 0.19300767940790514, -0.04104954903058491]\n",
+ " B: [-1.6169881803397028, 0.04956396056952302, -1.0323879934365006, -0.7391679242087841]\n",
+ " B: [-1.6537900060652204, -1.1032956801849205, -0.08849835738509954, 0.7140924778952892]\n",
+ " B: [-1.0427125946467377, 0.2519835862823207, -0.14341545847407883, -0.056443188693607704]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [7.099667747066588, 0.0, 0.0, 7.028889109862067]\n",
- " B: [7.099667747066588, 0.0, 0.0, -7.028889109862067]\n",
+ " A: [7.2720657357811564, 0.0, 0.0, 7.202981331748843]\n",
+ " B: [7.2720657357811564, 0.0, 0.0, -7.202981331748843]\n",
" 6 Outgoing Particles:\n",
- " A: [-3.851129225519823, 2.5555470019017212, -2.502060728335724, 1.019837214678957]\n",
- " B: [-2.3860288930086897, 0.6059782347076652, 0.6711053982516709, 1.9686395814801452]\n",
- " B: [-1.9543999030878276, -1.5857282951514855, 0.5255033921941499, -0.17026726032362857]\n",
- " B: [-1.5523812781985644, -1.154244859738803, 0.03484928145183679, -0.2763909626783212]\n",
- " B: [-3.2795110937910716, -1.0290377989842119, 1.3607888704851536, -2.616204860580336]\n",
- " B: [-1.175885100527199, 0.6074857172651138, -0.09018621404708665, 0.07438628742318319]\n",
- "\n",
- " Input for ABC Process: 'AB->ABBBBB':\n",
- " 2 Incoming particles:\n",
- " A: [6.3653048194550985, 0.0, 0.0, 6.286263233796236]\n",
- " B: [6.3653048194550985, 0.0, 0.0, -6.286263233796236]\n",
- " 6 Outgoing Particles:\n",
- " A: [-3.274142279992413, -2.62046758782023, -1.339558866223036, 1.028950598785383]\n",
- " B: [-1.8502190446152251, -1.1967169760014287, 0.8476370040459147, 0.5221977611776395]\n",
- " B: [-1.3090919645484567, 0.8304076910302604, -0.132118345313184, 0.08178985973111547]\n",
- " B: [-1.7699077332157842, 0.8156249668276708, -0.2891156025546255, 1.1763254081859622]\n",
- " B: [-1.6671330761442815, 1.2573648831500233, 0.2190135291489001, -0.3878135096217862]\n",
- " B: [-2.8601155403940384, 0.913787022813704, 0.6941422808960306, -2.421450118258315]\n",
- "\n",
- " Input for ABC Process: 'AB->ABBBBB':\n",
- " 2 Incoming particles:\n",
- " A: [5.2620105860572215, 0.0, 0.0, 5.166116085395126]\n",
- " B: [5.2620105860572215, 0.0, 0.0, -5.166116085395126]\n",
- " 6 Outgoing Particles:\n",
- " A: [-1.9479176369516882, 0.8861257045164052, 1.1018829783040076, 0.8916379636750793]\n",
- " B: [-1.2433791528628988, 0.41365857789168176, 0.544699730060495, -0.27960776595565956]\n",
- " B: [-1.074755543453127, 0.3002469943380598, 0.01041159782849033, 0.25464253219924826]\n",
- " B: [-1.7453891507499704, 1.1576089006622574, 0.03134512003430503, -0.8398466551182168]\n",
- " B: [-1.5208938996272057, 0.008686514238768405, -1.1440782944999142, -0.06424682441800389]\n",
- " B: [-2.991685788469555, -2.7663266916471727, -0.544261131727384, 0.03742074961755215]\n",
- "\n",
- " Input for ABC Process: 'AB->ABBBBB':\n",
- " 2 Incoming particles:\n",
- " A: [4.439668869119513, 0.0, 0.0, 4.325582003318043]\n",
- " B: [4.439668869119513, 0.0, 0.0, -4.325582003318043]\n",
- " 6 Outgoing Particles:\n",
- " A: [-1.1969832203303146, 0.48265768801558717, -0.02482335564392214, 0.4463117598342591]\n",
- " B: [-1.7251727113760817, -1.0744400415092346, 0.6322269398265393, 0.6496834443295479]\n",
- " B: [-1.419669052608684, -0.4173084301546306, -0.44626125418717505, -0.8013518491074973]\n",
- " B: [-1.331289111993432, -0.7645577006899625, -0.3423664341778722, 0.2656453402118452]\n",
- " B: [-1.5156451020746182, 0.6491857388484042, 0.8955487542892042, -0.2715333876518423]\n",
- " B: [-1.6905785398558963, 1.1244627454898357, -0.7143246501067739, -0.2887553076163127]\n",
+ " A: [-1.110939233644008, -0.268184416567738, 0.24360224044987097, 0.3208131044822848]\n",
+ " B: [-2.6388927199644003, 0.8314814079287018, -0.21777668284358856, 2.2858186218857472]\n",
+ " B: [-3.473898607870094, 2.051862236379928, 2.4003392500206266, -1.046997796315806]\n",
+ " B: [-3.152819934613197, -1.9424358511984305, -2.028267056813039, -1.0263280422556738]\n",
+ " B: [-2.275152937944009, -1.7654922583464505, 0.7703768739716074, -0.6825521583027478]\n",
+ " B: [-1.8924280375266047, 1.0927688818039885, -1.1682746247854774, 0.14924627050619674]\n",
"\n",
" ⋮\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [5.750717080737416, 0.0, 0.0, 5.663104002460582]\n",
- " B: [5.750717080737416, 0.0, 0.0, -5.663104002460582]\n",
+ " A: [6.22966038636724, 0.0, 0.0, 6.148875387375584]\n",
+ " B: [6.22966038636724, 0.0, 0.0, -6.148875387375584]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.0362067302993534, 0.23737037129807034, 0.1316212944823847, 0.007451817649030921]\n",
- " B: [-3.597917991072113, -1.5787159301449987, 0.28387609057144564, 3.0613860010767477]\n",
- " B: [-1.0798303035395174, -0.06880694215947386, -0.2669312876106363, -0.3000779512850572]\n",
- " B: [-1.3394551212059678, -0.7053379424304421, 0.44160810884651497, -0.3187799976376953]\n",
- " B: [-3.270241523195321, 1.927780354010675, 0.003047457202140131, -2.4450221348130854]\n",
- " B: [-1.1777824921625586, 0.1877100894261692, -0.5932216634918489, -0.004957734989940532]\n",
+ " A: [-1.4304429070664482, -0.33884344128192095, 0.8653360836289696, -0.42725343187224885]\n",
+ " B: [-1.9749814666096197, 1.3609392980219706, -0.9441991051819204, -0.39608593805462516]\n",
+ " B: [-2.2715747343865793, 1.2408591011012648, 1.6172984936557957, 0.06830847338590983]\n",
+ " B: [-1.661609068228756, -0.4012681871023404, -1.1964016761233542, 0.4105503221395213]\n",
+ " B: [-1.746963024762814, 1.345279186098992, -0.06451410595930414, 0.48779263162695097]\n",
+ " B: [-3.373749571680263, -3.2069659568379674, -0.2775196900201868, -0.1433120572255088]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.84577391627276, 0.0, 0.0, 6.772342320993563]\n",
- " B: [6.84577391627276, 0.0, 0.0, -6.772342320993563]\n",
+ " A: [4.358722688789774, 0.0, 0.0, 4.242459602373458]\n",
+ " B: [4.358722688789774, 0.0, 0.0, -4.242459602373458]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.0594956991232163, -0.09579189209396338, 0.21296650876679918, 0.2607687021353065]\n",
- " B: [-1.8300488673592041, 0.8497425690197566, -0.8227483588311224, 0.9747315329664396]\n",
- " B: [-2.860723394379955, 0.6743651794772785, 0.1320397309862766, 2.5906631300310776]\n",
- " B: [-2.557528905485892, -1.3508678766931497, 1.2829278224554168, -1.4388211440218013]\n",
- " B: [-3.790115184858299, 0.47588521284738383, -1.0334447791446917, -3.474262262286086]\n",
- " B: [-1.5936357813389537, -0.553333192557306, 0.2282590757673212, 1.086920041175065]\n",
+ " A: [-1.0452779390743625, -0.2727572224505045, -0.0754336299872278, 0.11188938726967125]\n",
+ " B: [-1.7048247824379945, 0.4983084694471347, 0.872827621048126, 0.9467249611304639]\n",
+ " B: [-1.2899467751023526, 0.29644307338358544, -0.46128198344041976, -0.602746313628815]\n",
+ " B: [-2.1244189851466975, -1.8139000349895653, -0.4266469607437963, -0.20222526648433034]\n",
+ " B: [-1.4709803178987078, 1.0687795622551313, -0.1466043527374882, 0.0007118353293400601]\n",
+ " B: [-1.0819965779194327, 0.22312615235421782, 0.23713930586080637, -0.25435460361632983]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.25909007687458, 0.0, 0.0, 6.178689876537731]\n",
- " B: [6.25909007687458, 0.0, 0.0, -6.178689876537731]\n",
+ " A: [4.946953336826144, 0.0, 0.0, 4.844826861378569]\n",
+ " B: [4.946953336826144, 0.0, 0.0, -4.844826861378569]\n",
" 6 Outgoing Particles:\n",
- " A: [-2.15208406752572, -0.27987613820502405, 0.20983197963180572, -1.873260718983155]\n",
- " B: [-3.1436326945514232, -2.0821664144960677, -1.9679549582157083, 0.8210741885063981]\n",
- " B: [-2.206056617746511, 1.7689323832663284, -0.4273996865759156, -0.7449117612507478]\n",
- " B: [-1.8709609004510535, 0.5332842722412897, 1.48760475220818, -0.055988188078690854]\n",
- " B: [-1.0916331546903268, 0.018218872767661307, 0.4300802089857822, 0.07976234031782706]\n",
- " B: [-2.0538127187841235, 0.04160702442581186, 0.2678377039658561, 1.7733241394883685]\n",
+ " A: [-1.0798321354813016, -0.05701177676898147, 0.3748038410417432, -0.1493625751924078]\n",
+ " B: [-2.535607459805834, 0.2786802518140389, -2.1413493157456154, 0.8753659894167939]\n",
+ " B: [-1.1465622434125131, 0.048325266102822936, -0.30303094935893476, 0.46951239643469417]\n",
+ " B: [-1.0565850692648957, -0.15422821749644713, -0.2946016814579471, -0.0761282786060691]\n",
+ " B: [-1.3897397103611828, 0.8757386144485694, 0.40183039146109456, 0.054687093694094344]\n",
+ " B: [-2.6855800553265587, -0.9915041381000028, 1.96234771405966, -1.1740746257471053]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.8752382625158255, 0.0, 0.0, 6.802124753807565]\n",
- " B: [6.8752382625158255, 0.0, 0.0, -6.802124753807565]\n",
+ " A: [5.263219273050624, 0.0, 0.0, 5.1673472029864165]\n",
+ " B: [5.263219273050624, 0.0, 0.0, -5.1673472029864165]\n",
" 6 Outgoing Particles:\n",
- " A: [-3.815955448364548, 1.7284392485789066, 3.22998101457395, -0.37581430702794955]\n",
- " B: [-3.705003390432734, 0.8773209536576554, -3.1633610279519866, -1.3966048382509024]\n",
- " B: [-1.4798429985544235, -0.876885056483666, -0.05155962504198175, 0.6467994303891397]\n",
- " B: [-1.196598159149068, -0.6492448407423084, 0.0066213036625077295, -0.10141227532326653]\n",
- " B: [-1.307725757451199, -0.47623875265044, -0.08939192779758245, -0.6894580410872709]\n",
- " B: [-2.2453507710796776, -0.6033915523601473, 0.06771026255509205, 1.91649003130025]\n",
+ " A: [-2.399019535788919, -1.2110047848361276, -1.812263889139395, -0.06679625979229631]\n",
+ " B: [-2.017935306086244, -0.3374680394916718, 1.6282821358219384, 0.5539634536990483]\n",
+ " B: [-1.6695031594114513, 0.8270762338660977, -0.06260699981442713, 1.0484589005931164]\n",
+ " B: [-2.2597097606741916, 0.7611180237287621, 0.18055687193684328, -1.869327893238054]\n",
+ " B: [-1.073204850363539, -0.22248377596385552, 0.3188604064962904, -0.024447115284049005]\n",
+ " B: [-1.1070659337769053, 0.18276234269679548, -0.25282852530124955, 0.3581489140222342]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.591382068439754, 0.0, 0.0, 6.515083849970707]\n",
- " B: [6.591382068439754, 0.0, 0.0, -6.515083849970707]\n",
+ " A: [4.459941032222146, 0.0, 0.0, 4.346386316343583]\n",
+ " B: [4.459941032222146, 0.0, 0.0, -4.346386316343583]\n",
" 6 Outgoing Particles:\n",
- " A: [-2.166341377746586, 0.738656605699622, 1.1097711420427974, -1.3841348908550482]\n",
- " B: [-1.9136122405957643, -1.3687809690739081, -0.8052302154690981, 0.37410528752561706]\n",
- " B: [-1.020282522629639, 0.01566959851558055, -0.04103060943002397, -0.1976040959992001]\n",
- " B: [-3.3680104240574718, -0.44221430614525714, -3.1855463435158966, -0.015336796039828009]\n",
- " B: [-1.1380460439601876, 0.33787512483866744, -0.3053034033656307, 0.2962752606648943]\n",
- " B: [-3.576471527889859, 0.7187939461652956, 3.227339429737853, 0.9266952347035636]\n",
+ " A: [-1.9579957774892203, 0.01711251988645602, -0.9941971785148113, 1.3583175610150744]\n",
+ " B: [-2.2086526478827153, 0.26811947256465357, -0.29730202477347406, -1.9281778894844153]\n",
+ " B: [-1.1393295497986875, -0.09576318262839165, 0.3418914140864091, 0.4147426875441645]\n",
+ " B: [-1.5437833884502452, -0.2526758526831343, 1.1436052762387854, 0.10765238541055888]\n",
+ " B: [-1.029324601398587, -0.04086809209820055, -0.11666716588470447, -0.21030384327692128]\n",
+ " B: [-1.040796099424839, 0.10407513495861721, -0.07733032115220424, 0.25776909879153836]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [7.366791305680796, 0.0, 0.0, 7.298603574756898]\n",
- " B: [7.366791305680796, 0.0, 0.0, -7.298603574756898]\n",
+ " A: [5.6127229037846575, 0.0, 0.0, 5.522921183094041]\n",
+ " B: [5.6127229037846575, 0.0, 0.0, -5.522921183094041]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.1161936134323496, 0.1815174250263101, -0.30155987378038246, 0.34928677273057857]\n",
- " B: [-1.1768168637671912, -0.488638136596838, -0.0387546058981897, 0.38030091090042567]\n",
- " B: [-3.8756829146246745, -0.22123631639903027, -3.6727532274395425, -0.694878606198396]\n",
- " B: [-1.4161987387916468, -0.42653096897021076, -0.26480462532703347, -0.8680833546784509]\n",
- " B: [-3.4638938410201177, 2.8217659294852746, 1.2824429941168167, 1.179634497585545]\n",
- " B: [-3.6847966397256138, -1.8668779325455054, 2.995429338328331, -0.346260220339702]\n",
+ " A: [-1.3401191006255044, 0.07455340773270878, 0.8329539127008466, 0.3107229836576332]\n",
+ " B: [-2.2407608326391446, 1.9616328357565815, 0.2748188274329855, 0.3122184153114968]\n",
+ " B: [-1.9353505325144305, 0.5041718248979296, 0.4986811623094062, -1.4975678792765024]\n",
+ " B: [-1.1665291383852119, -0.5919830552573446, -0.0003589073718047799, 0.10171609595055851]\n",
+ " B: [-1.3532183234755, -0.2764818233423043, 0.8493370095656062, 0.18271364627008788]\n",
+ " B: [-3.1894678799295257, -1.671893189787572, -2.45543200463704, 0.5901967380867258]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [4.762032860651893, 0.0, 0.0, 4.655851905497903]\n",
- " B: [4.762032860651893, 0.0, 0.0, -4.655851905497903]\n",
+ " A: [4.8915558702989275, 0.0, 0.0, 4.788247991933574]\n",
+ " B: [4.8915558702989275, 0.0, 0.0, -4.788247991933574]\n",
" 6 Outgoing Particles:\n",
- " A: [-2.656166654414924, 2.017338594394486, -1.384735065574992, 0.2609120345236529]\n",
- " B: [-1.031990140619295, -0.035004877965791346, -0.20112979442869375, 0.15272561883031827]\n",
- " B: [-1.7319386082994335, -1.0359644740176492, 0.8025718625008718, -0.5312883934487891]\n",
- " B: [-1.7450617894727098, -0.49163856285061436, 1.1666756465784553, 0.6651316473275205]\n",
- " B: [-1.0945973465763637, -0.42438631366397905, -0.017047995524507212, 0.1332252744613839]\n",
- " B: [-1.2643111819210613, -0.030344365896452122, -0.3663346535511349, -0.6807061816940867]\n",
+ " A: [-1.7166600698631052, -0.6792891539923208, 0.6748994636717233, 1.0148885429772172]\n",
+ " B: [-2.5106233942424825, -0.7525848308448442, -1.9630692909736174, 0.9397897950798489]\n",
+ " B: [-1.0591214238384126, 0.22224342472975844, 0.26723772059994233, -0.030496742226701214]\n",
+ " B: [-2.107615205886531, 1.2019506202258687, 1.111787687227206, -0.8725163042331971]\n",
+ " B: [-1.1276654384352531, 0.3419112314983172, -0.15371273194576066, -0.3620751950278375]\n",
+ " B: [-1.2614262083320695, -0.33423129161677956, 0.06285715142050609, -0.689590096569332]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [6.12211537837656, 0.0, 0.0, 6.039892110473065]\n",
- " B: [6.12211537837656, 0.0, 0.0, -6.039892110473065]\n",
+ " A: [7.730105975946025, 0.0, 0.0, 7.665150905191394]\n",
+ " B: [7.730105975946025, 0.0, 0.0, -7.665150905191394]\n",
" 6 Outgoing Particles:\n",
- " A: [-2.09449973649211, -1.247911941781509, -0.776547530016726, 1.1075282684200622]\n",
- " B: [-2.857971140758051, 1.4507115887866229, 2.2078617054725442, 0.43449006556414854]\n",
- " B: [-2.068918524386865, -0.43350532192333185, 1.7407499017717505, -0.24957318745593]\n",
- " B: [-1.0503370840395667, 0.28162676024293815, -0.11219953076948735, 0.10632790470480236]\n",
- " B: [-1.6648953051752136, 0.3171875953909028, -1.2925202016854087, 0.025689195388605857]\n",
- " B: [-2.5076089659013125, -0.36810868071562286, -1.7673443447726724, -1.4244622466216894]\n",
+ " A: [-1.5069861693238755, -0.14569717271308374, -1.0624243147247645, 0.3478997325070473]\n",
+ " B: [-1.3943234172777221, -0.04432112759455558, 0.08353004942916775, 0.9670554071303941]\n",
+ " B: [-2.959534510858716, -2.3414048211285614, 1.2349523309699664, 0.8669260203682391]\n",
+ " B: [-3.9504084752062516, -1.3395798731389539, -0.8585843373250325, -3.4747785282176675]\n",
+ " B: [-3.4956434330579116, 2.5236614743308494, -0.431975773525167, 2.1596418001942994]\n",
+ " B: [-2.153315946167574, 1.3473415202443053, 1.0345020451758309, -0.8667444319823133]\n",
"\n",
" Input for ABC Process: 'AB->ABBBBB':\n",
" 2 Incoming particles:\n",
- " A: [7.431058837653249, 0.0, 0.0, 7.363466265874004]\n",
- " B: [7.431058837653249, 0.0, 0.0, -7.363466265874004]\n",
+ " A: [5.140973354732315, 0.0, 0.0, 5.042777710158126]\n",
+ " B: [5.140973354732315, 0.0, 0.0, -5.042777710158126]\n",
" 6 Outgoing Particles:\n",
- " A: [-1.4340725727125623, 0.9525417282027518, 0.38239995291064965, -0.05476016666222433]\n",
- " B: [-3.5734117962040854, 2.3267511116139916, 2.49915109639257, -0.33127771922267657]\n",
- " B: [-2.3529075757582945, 1.185265706342765, -1.375530715171772, 1.1132091075119688]\n",
- " B: [-2.710381815585542, -2.1195780947035594, -1.2974231675570782, -0.4126153305389483]\n",
- " B: [-2.374272199256637, -1.2400410368129877, 1.6839473809113144, -0.5136028830766439]\n",
- " B: [-2.4170717157893766, -1.104939414642962, -1.8925445474856835, 0.1990469919885247]\n",
- "\n",
- " Input for ABC Process: 'AB->ABBBBB':\n",
- " 2 Incoming particles:\n",
- " A: [4.370360958267613, 0.0, 0.0, 4.254415930013168]\n",
- " B: [4.370360958267613, 0.0, 0.0, -4.254415930013168]\n",
- " 6 Outgoing Particles:\n",
- " A: [-1.0037967551530176, -0.04979456910726583, -0.007092097585518878, 0.07126098999442977]\n",
- " B: [-2.2427356029926337, 0.4432886498747459, -1.2315068062419472, -1.522087101319342]\n",
- " B: [-1.576810353663218, -0.08400160217698217, 1.025238316808337, 0.6543401378482231]\n",
- " B: [-1.1878570602356244, 0.3852696171578499, -0.47734716319323317, 0.18630996601909597]\n",
- " B: [-1.6436772930583505, -1.0018521094453126, 0.4216069097815019, 0.7212593210074284]\n",
- " B: [-1.0858448514323804, 0.3070900136969648, 0.26910084043086047, -0.11108331354983517]\n",
- "\n",
- " Input for ABC Process: 'AB->ABBBBB':\n",
- " 2 Incoming particles:\n",
- " A: [5.940760429560125, 0.0, 0.0, 5.855991332082674]\n",
- " B: [5.940760429560125, 0.0, 0.0, -5.855991332082674]\n",
- " 6 Outgoing Particles:\n",
- " A: [-2.5515863925730233, 0.0574036477190863, 1.9321385747234918, 1.3319678930281418]\n",
- " B: [-3.2707523737124977, -2.710802011299676, -1.41016923110446, -0.6006632045712658]\n",
- " B: [-1.6965910302662786, 0.9846458960035911, 0.9504416414719069, -0.07452697242920955]\n",
- " B: [-1.0283520810617242, 0.1620200166783027, 0.15874691422324994, -0.07782630689000514]\n",
- " B: [-1.277724475991329, 0.26836143674120055, -0.33222621981983513, -0.6709602929248032]\n",
- " B: [-2.0565145055153993, 1.2383710141574962, -1.298931679494354, 0.09200888378714224]\n",
- "\n",
- " Input for ABC Process: 'AB->ABBBBB':\n",
- " 2 Incoming particles:\n",
- " A: [6.732994664701373, 0.0, 0.0, 6.65831939417877]\n",
- " B: [6.732994664701373, 0.0, 0.0, -6.65831939417877]\n",
- " 6 Outgoing Particles:\n",
- " A: [-1.602557260532173, -0.06659157948757613, 0.9308846463293637, -0.8349904850080558]\n",
- " B: [-1.3205375883536927, 0.7078592481114431, -0.05631226213188625, -0.48947291677035515]\n",
- " B: [-1.7625153098951976, 0.12706601232750347, 0.34097061443470383, 1.405010137407617]\n",
- " B: [-2.7792473938949334, 1.6510422215054068, 1.7155538904747691, -1.0272051928194055]\n",
- " B: [-2.722083339444658, -0.5204063912580275, -2.061236049180356, -1.3748530264647703]\n",
- " B: [-3.279048437282091, -1.89896951119875, -0.8698608399265956, 2.3215114836549695]\n"
+ " A: [-2.1212395395513415, 0.5721186152245487, -1.464674439391297, 1.013442776314144]\n",
+ " B: [-1.4152359585953729, 0.6568206137784666, 0.5137348552056548, -0.5545773150462135]\n",
+ " B: [-1.6621060291271548, -0.07490000906447869, -1.013680695206552, 0.8540713605247167]\n",
+ " B: [-1.602034710373159, -1.201656230753467, -0.11487974312683813, 0.3306662379967043]\n",
+ " B: [-1.6826459861655199, -0.324056691191041, 0.7444127790391002, -1.082651555236741]\n",
+ " B: [-1.7986844856520843, 0.3716737020059716, 1.3350872434799315, -0.5609515045526104]\n"
]
},
- "execution_count": 10,
"metadata": {},
- "output_type": "execute_result"
+ "output_type": "display_data"
}
],
"source": [
@@ -459,149 +375,26 @@
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 17,
"metadata": {},
"outputs": [
{
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Internal error: stack overflow in type inference of materialize(Base.Broadcast.Broadcasted{Base.Broadcast.DefaultArrayStyle{1}, Nothing, typeof(MetagraphOptimization.compute__bad8f2ac_7bfc_11ee_176b_b72dc8919aad), Tuple{Array{MetagraphOptimization.ABCProcessInput, 1}}}).\n",
- "This might be caused by recursion over very long tuples or argument lists.\n"
- ]
- },
- {
- "ename": "LoadError",
- "evalue": "StackOverflowError:",
- "output_type": "error",
- "traceback": [
- "StackOverflowError:",
- "",
- "Stacktrace:",
- " [1] argtypes_to_type",
- " @ ./compiler/typeutils.jl:71 [inlined]",
- " [2] abstract_call_known(interp::Core.Compiler.NativeInterpreter, f::Any, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1948",
- " [3] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2020",
- " [4] abstract_apply(interp::Core.Compiler.NativeInterpreter, argtypes::Vector{Any}, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1566",
- " [5] abstract_call_known(interp::Core.Compiler.NativeInterpreter, f::Any, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1855",
- " [6] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Nothing)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2020",
- " [7] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1999",
- " [8] abstract_eval_statement_expr(interp::Core.Compiler.NativeInterpreter, e::Expr, vtypes::Vector{Core.Compiler.VarState}, sv::Core.Compiler.InferenceState, mi::Nothing)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2183",
- " [9] abstract_eval_statement(interp::Core.Compiler.NativeInterpreter, e::Any, vtypes::Vector{Core.Compiler.VarState}, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2396",
- " [10] abstract_eval_basic_statement(interp::Core.Compiler.NativeInterpreter, stmt::Any, pc_vartable::Vector{Core.Compiler.VarState}, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2682",
- " [11] typeinf_local(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2867",
- " [12] typeinf_nocycle(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2955",
- " [13] _typeinf(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:246",
- " [14] typeinf(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:216",
- " [15] typeinf_edge(interp::Core.Compiler.NativeInterpreter, method::Method, atype::Any, sparams::Core.SimpleVector, caller::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:932",
- " [16] abstract_call_method(interp::Core.Compiler.NativeInterpreter, method::Method, sig::Any, sparams::Core.SimpleVector, hardlimit::Bool, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:611",
- " [17] abstract_call_gf_by_type(interp::Core.Compiler.NativeInterpreter, f::Any, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, atype::Any, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:152",
- " [18] abstract_call_known(interp::Core.Compiler.NativeInterpreter, f::Any, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1949",
- "--- the last 16 lines are repeated 413 more times ---",
- " [6627] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2020",
- " [6628] abstract_apply(interp::Core.Compiler.NativeInterpreter, argtypes::Vector{Any}, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1566",
- " [6629] abstract_call_known(interp::Core.Compiler.NativeInterpreter, f::Any, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1855",
- " [6630] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Nothing)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2020",
- " [6631] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1999",
- " [6632] abstract_eval_statement_expr(interp::Core.Compiler.NativeInterpreter, e::Expr, vtypes::Vector{Core.Compiler.VarState}, sv::Core.Compiler.InferenceState, mi::Nothing)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2183",
- " [6633] abstract_eval_statement(interp::Core.Compiler.NativeInterpreter, e::Any, vtypes::Vector{Core.Compiler.VarState}, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2396",
- " [6634] abstract_eval_basic_statement(interp::Core.Compiler.NativeInterpreter, stmt::Any, pc_vartable::Vector{Core.Compiler.VarState}, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2658",
- " [6635] typeinf_local(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2867",
- " [6636] typeinf_nocycle(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2955",
- " [6637] _typeinf(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:246",
- " [6638] typeinf(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:216",
- " [6639] typeinf_edge(interp::Core.Compiler.NativeInterpreter, method::Method, atype::Any, sparams::Core.SimpleVector, caller::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:932",
- " [6640] abstract_call_method(interp::Core.Compiler.NativeInterpreter, method::Method, sig::Any, sparams::Core.SimpleVector, hardlimit::Bool, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:611",
- " [6641] abstract_call_gf_by_type(interp::Core.Compiler.NativeInterpreter, f::Any, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, atype::Any, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:152",
- " [6642] abstract_call_known(interp::Core.Compiler.NativeInterpreter, f::Any, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Int64)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1949",
- " [6643] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState, max_methods::Nothing)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2020",
- " [6644] abstract_call(interp::Core.Compiler.NativeInterpreter, arginfo::Core.Compiler.ArgInfo, si::Core.Compiler.StmtInfo, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:1999",
- " [6645] abstract_eval_statement_expr(interp::Core.Compiler.NativeInterpreter, e::Expr, vtypes::Vector{Core.Compiler.VarState}, sv::Core.Compiler.InferenceState, mi::Nothing)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2183",
- " [6646] abstract_eval_statement(interp::Core.Compiler.NativeInterpreter, e::Any, vtypes::Vector{Core.Compiler.VarState}, sv::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2396",
- " [6647] abstract_eval_basic_statement(interp::Core.Compiler.NativeInterpreter, stmt::Any, pc_vartable::Vector{Core.Compiler.VarState}, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2682",
- " [6648] typeinf_local(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2867",
- " [6649] typeinf_nocycle(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/abstractinterpretation.jl:2955",
- " [6650] _typeinf(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:246",
- " [6651] typeinf(interp::Core.Compiler.NativeInterpreter, frame::Core.Compiler.InferenceState)",
- " @ Core.Compiler ./compiler/typeinfer.jl:216",
- " [6652] typeinf",
- " @ ./compiler/typeinfer.jl:12 [inlined]",
- " [6653] typeinf_type(interp::Core.Compiler.NativeInterpreter, method::Method, atype::Any, sparams::Core.SimpleVector)",
- " @ Core.Compiler ./compiler/typeinfer.jl:1079",
- " [6654] return_type(interp::Core.Compiler.NativeInterpreter, t::DataType)",
- " @ Core.Compiler ./compiler/typeinfer.jl:1140",
- " [6655] return_type(f::Any, t::DataType)",
- " @ Core.Compiler ./compiler/typeinfer.jl:1112",
- " [6656] combine_eltypes(f::Function, args::Tuple{Vector{ABCProcessInput}})",
- " @ Base.Broadcast ./broadcast.jl:730",
- " [6657] copy(bc::Base.Broadcast.Broadcasted{Style}) where Style",
- " @ Base.Broadcast ./broadcast.jl:895",
- " [6658] materialize(bc::Base.Broadcast.Broadcasted)",
- " @ Base.Broadcast ./broadcast.jl:873",
- " [6659] var\"##core#302\"()",
- " @ Main ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:489",
- " [6660] var\"##sample#303\"(::Tuple{}, __params::BenchmarkTools.Parameters)",
- " @ Main ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:495",
- " [6661] _run(b::BenchmarkTools.Benchmark, p::BenchmarkTools.Parameters; verbose::Bool, pad::String, kwargs::Base.Pairs{Symbol, Integer, NTuple{4, Symbol}, NamedTuple{(:samples, :evals, :gctrial, :gcsample), Tuple{Int64, Int64, Bool, Bool}}})",
- " @ BenchmarkTools ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:99",
- " [6662] #invokelatest#2",
- " @ ./essentials.jl:821 [inlined]",
- " [6663] invokelatest",
- " @ ./essentials.jl:816 [inlined]",
- " [6664] #run_result#45",
- " @ ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:34 [inlined]",
- " [6665] run_result",
- " @ ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:34 [inlined]",
- " [6666] run(b::BenchmarkTools.Benchmark, p::BenchmarkTools.Parameters; progressid::Nothing, nleaves::Float64, ndone::Float64, kwargs::Base.Pairs{Symbol, Integer, NTuple{5, Symbol}, NamedTuple{(:verbose, :samples, :evals, :gctrial, :gcsample), Tuple{Bool, Int64, Int64, Bool, Bool}}})",
- " @ BenchmarkTools ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:117",
- " [6667] run (repeats 2 times)",
- " @ ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:117 [inlined]",
- " [6668] #warmup#54",
- " @ ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:169 [inlined]",
- " [6669] warmup(item::BenchmarkTools.Benchmark)",
- " @ BenchmarkTools ~/.julia/packages/BenchmarkTools/0owsb/src/execution.jl:168"
- ]
+ "data": {
+ "text/plain": [
+ "BenchmarkTools.Trial: 231 samples with 1 evaluation.\n",
+ " Range \u001b[90m(\u001b[39m\u001b[36m\u001b[1mmin\u001b[22m\u001b[39m … \u001b[35mmax\u001b[39m\u001b[90m): \u001b[39m\u001b[36m\u001b[1m18.197 ms\u001b[22m\u001b[39m … \u001b[35m27.498 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmin … max\u001b[90m): \u001b[39m0.00% … 8.36%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[34m\u001b[1mmedian\u001b[22m\u001b[39m\u001b[90m): \u001b[39m\u001b[34m\u001b[1m21.868 ms \u001b[22m\u001b[39m\u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmedian\u001b[90m): \u001b[39m0.00%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[32m\u001b[1mmean\u001b[22m\u001b[39m ± \u001b[32mσ\u001b[39m\u001b[90m): \u001b[39m\u001b[32m\u001b[1m21.644 ms\u001b[22m\u001b[39m ± \u001b[32m 1.609 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmean ± σ\u001b[90m): \u001b[39m1.21% ± 2.71%\n",
+ "\n",
+ " \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[32m \u001b[39m\u001b[39m▃\u001b[34m█\u001b[39m\u001b[39m▁\u001b[39m \u001b[39m▅\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \n",
+ " \u001b[39m▃\u001b[39m▃\u001b[39m▁\u001b[39m▅\u001b[39m▇\u001b[39m▃\u001b[39m▅\u001b[39m▅\u001b[39m▅\u001b[39m▃\u001b[39m▄\u001b[39m▃\u001b[39m▃\u001b[39m▅\u001b[39m▄\u001b[39m▅\u001b[39m▃\u001b[39m▅\u001b[39m▄\u001b[39m▃\u001b[39m▅\u001b[39m▄\u001b[39m▃\u001b[39m▅\u001b[39m▇\u001b[39m▅\u001b[39m▅\u001b[32m▆\u001b[39m\u001b[39m█\u001b[34m█\u001b[39m\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▇\u001b[39m▆\u001b[39m▇\u001b[39m▄\u001b[39m▅\u001b[39m▄\u001b[39m▅\u001b[39m▅\u001b[39m▄\u001b[39m▃\u001b[39m▅\u001b[39m▃\u001b[39m▁\u001b[39m▁\u001b[39m▃\u001b[39m▄\u001b[39m▁\u001b[39m▄\u001b[39m▁\u001b[39m▃\u001b[39m▃\u001b[39m▁\u001b[39m▃\u001b[39m▁\u001b[39m▁\u001b[39m▃\u001b[39m \u001b[39m▃\n",
+ " 18.2 ms\u001b[90m Histogram: frequency by time\u001b[39m 25.6 ms \u001b[0m\u001b[1m<\u001b[22m\n",
+ "\n",
+ " Memory estimate\u001b[90m: \u001b[39m\u001b[33m6.78 MiB\u001b[39m, allocs estimate\u001b[90m: \u001b[39m\u001b[33m17003\u001b[39m."
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
}
],
"source": [
@@ -612,7 +405,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 9,
"metadata": {},
"outputs": [],
"source": []
@@ -620,7 +413,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Julia 1.9.3",
+ "display_name": "Julia 1.9.4",
"language": "julia",
"name": "julia-1.9"
},
@@ -628,7 +421,7 @@
"file_extension": ".jl",
"mimetype": "application/julia",
"name": "julia",
- "version": "1.9.3"
+ "version": "1.9.4"
}
},
"nbformat": 4,
diff --git a/notebooks/abc_model_showcase.ipynb b/notebooks/abc_model_showcase.ipynb
index 9cef189..771c26b 100644
--- a/notebooks/abc_model_showcase.ipynb
+++ b/notebooks/abc_model_showcase.ipynb
@@ -97,7 +97,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "Total: 280, ComputeTaskP"
+ "Total: 280, ComputeTaskABC_P"
]
},
{
@@ -119,9 +119,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
- ": 6, ComputeTaskU: 6, \n",
- " ComputeTaskV: 64, ComputeTaskSum: 1, ComputeTaskS2: 24, \n",
- " ComputeTaskS1: 36, DataTask: 143"
+ ": 6, ComputeTaskABC_U: 6, \n",
+ " ComputeTaskABC_V: 64, ComputeTaskABC_Sum: 1, ComputeTaskABC_S2: 24, \n",
+ " ComputeTaskABC_S1: 36, DataTask: 143"
]
}
],
diff --git a/notebooks/diagram_gen.ipynb b/notebooks/diagram_gen.ipynb
new file mode 100644
index 0000000..290fc4b
--- /dev/null
+++ b/notebooks/diagram_gen.ipynb
@@ -0,0 +1,451 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "using Revise; using QEDbase; using QEDprocesses; using MetagraphOptimization; using BenchmarkTools; using DataStructures\n",
+ "import MetagraphOptimization.gen_diagrams"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Diagram 1: Initial Particles: [k_i_1, e_i_1, k_o_1, e_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_i_1 -> e_i_2, k_o_1 + e_o_1 -> e_o_2]\n",
+ " Tie: e_i_2 -- e_o_2\n",
+ "\n",
+ "Diagram 2: Initial Particles: [k_i_1, e_i_1, k_o_1, e_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_o_1 -> e_o_2, e_i_1 + k_o_1 -> e_i_2]\n",
+ " Tie: e_o_2 -- e_i_2\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Compton Scattering\n",
+ "fd = FeynmanDiagram(parse_process(\"ke->ke\", QEDModel()))\n",
+ "\n",
+ "diagrams = gen_diagrams(fd)\n",
+ "\n",
+ "c = 1\n",
+ "for d in diagrams\n",
+ " println(\"Diagram $c: $d\")\n",
+ " c += 1\n",
+ "end"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "BenchmarkTools.Trial: 6044 samples with 1 evaluation.\n",
+ " Range \u001b[90m(\u001b[39m\u001b[36m\u001b[1mmin\u001b[22m\u001b[39m … \u001b[35mmax\u001b[39m\u001b[90m): \u001b[39m\u001b[36m\u001b[1m490.857 μs\u001b[22m\u001b[39m … \u001b[35m 3.657 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmin … max\u001b[90m): \u001b[39m0.00% … 77.38%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[34m\u001b[1mmedian\u001b[22m\u001b[39m\u001b[90m): \u001b[39m\u001b[34m\u001b[1m800.314 μs \u001b[22m\u001b[39m\u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmedian\u001b[90m): \u001b[39m0.00%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[32m\u001b[1mmean\u001b[22m\u001b[39m ± \u001b[32mσ\u001b[39m\u001b[90m): \u001b[39m\u001b[32m\u001b[1m825.263 μs\u001b[22m\u001b[39m ± \u001b[32m208.306 μs\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmean ± σ\u001b[90m): \u001b[39m1.62% ± 5.53%\n",
+ "\n",
+ " \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▃\u001b[39m█\u001b[39m▂\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▁\u001b[39m \u001b[39m▂\u001b[39m▃\u001b[39m▃\u001b[39m▂\u001b[39m▃\u001b[39m▃\u001b[39m▄\u001b[39m▅\u001b[34m▅\u001b[39m\u001b[39m▅\u001b[39m▃\u001b[32m▂\u001b[39m\u001b[39m▁\u001b[39m \u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▃\u001b[39m▆\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \n",
+ " \u001b[39m▂\u001b[39m▂\u001b[39m▁\u001b[39m▂\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▂\u001b[39m▂\u001b[39m▁\u001b[39m▁\u001b[39m▃\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▇\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[34m█\u001b[39m\u001b[39m█\u001b[39m█\u001b[32m█\u001b[39m\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▇\u001b[39m▆\u001b[39m▆\u001b[39m▅\u001b[39m▅\u001b[39m▄\u001b[39m▄\u001b[39m▄\u001b[39m▅\u001b[39m▇\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▅\u001b[39m▄\u001b[39m▃\u001b[39m \u001b[39m▅\n",
+ " 491 μs\u001b[90m Histogram: frequency by time\u001b[39m 1.04 ms \u001b[0m\u001b[1m<\u001b[22m\n",
+ "\n",
+ " Memory estimate\u001b[90m: \u001b[39m\u001b[33m280.03 KiB\u001b[39m, allocs estimate\u001b[90m: \u001b[39m\u001b[33m2709\u001b[39m."
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 6 Diagrams for 2-Photon Compton\n",
+ "Diagram 1: Initial Particles: [k_i_1, k_i_2, e_i_1, k_o_1, e_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_i_1 -> e_i_2, k_i_2 + e_o_1 -> e_o_2]\n",
+ " Virtuality Level 2 Vertices: [k_o_1 + e_i_2 -> e_i_3]\n",
+ " Tie: e_o_2 -- e_i_3\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# 2-Photon Compton Scattering\n",
+ "two_k_compton = FeynmanDiagram(parse_process(\"kke->ke\", QEDModel()))\n",
+ "\n",
+ "display(@benchmark gen_diagrams(two_k_compton))\n",
+ "diagrams = gen_diagrams(two_k_compton)\n",
+ "\n",
+ "println(\"Found $(length(diagrams)) Diagrams for 2-Photon Compton\")\n",
+ "println(\"Diagram 1: $(first(diagrams))\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "BenchmarkTools.Trial: 1167 samples with 1 evaluation.\n",
+ " Range \u001b[90m(\u001b[39m\u001b[36m\u001b[1mmin\u001b[22m\u001b[39m … \u001b[35mmax\u001b[39m\u001b[90m): \u001b[39m\u001b[36m\u001b[1m2.581 ms\u001b[22m\u001b[39m … \u001b[35m 7.394 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmin … max\u001b[90m): \u001b[39m0.00% … 38.39%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[34m\u001b[1mmedian\u001b[22m\u001b[39m\u001b[90m): \u001b[39m\u001b[34m\u001b[1m4.278 ms \u001b[22m\u001b[39m\u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmedian\u001b[90m): \u001b[39m0.00%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[32m\u001b[1mmean\u001b[22m\u001b[39m ± \u001b[32mσ\u001b[39m\u001b[90m): \u001b[39m\u001b[32m\u001b[1m4.284 ms\u001b[22m\u001b[39m ± \u001b[32m550.104 μs\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmean ± σ\u001b[90m): \u001b[39m1.84% ± 6.28%\n",
+ "\n",
+ " \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▃\u001b[39m▃\u001b[39m▅\u001b[39m▅\u001b[34m▃\u001b[39m\u001b[39m▃\u001b[39m▇\u001b[39m█\u001b[39m▄\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \n",
+ " \u001b[39m▂\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▂\u001b[39m▂\u001b[39m▁\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▄\u001b[39m█\u001b[39m▄\u001b[39m▄\u001b[39m▄\u001b[39m▃\u001b[39m▃\u001b[39m▄\u001b[39m▆\u001b[39m▇\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[34m█\u001b[39m\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▆\u001b[39m▄\u001b[39m▃\u001b[39m▃\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▂\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▂\u001b[39m▃\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m▂\u001b[39m \u001b[39m▃\n",
+ " 2.58 ms\u001b[90m Histogram: frequency by time\u001b[39m 6.46 ms \u001b[0m\u001b[1m<\u001b[22m\n",
+ "\n",
+ " Memory estimate\u001b[90m: \u001b[39m\u001b[33m1.71 MiB\u001b[39m, allocs estimate\u001b[90m: \u001b[39m\u001b[33m15410\u001b[39m."
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 24 Diagrams for 3-Photon Compton\n",
+ "Diagram 1: Initial Particles: [k_i_1, k_i_2, k_i_3, e_i_1, k_o_1, e_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_2 + e_o_1 -> e_o_2, k_i_3 + e_i_1 -> e_i_2]\n",
+ " Virtuality Level 2 Vertices: [k_i_1 + e_o_2 -> e_o_3, k_o_1 + e_i_2 -> e_i_3]\n",
+ " Tie: e_o_3 -- e_i_3\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# 3-Photon Compton Scattering\n",
+ "three_k_compton = FeynmanDiagram(parse_process(\"kkke->ke\", QEDModel()))\n",
+ "\n",
+ "display(@benchmark gen_diagrams(three_k_compton))\n",
+ "diagrams = gen_diagrams(three_k_compton)\n",
+ "\n",
+ "println(\"Found $(length(diagrams)) Diagrams for 3-Photon Compton\")\n",
+ "println(\"Diagram 1: $(first(diagrams))\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "BenchmarkTools.Trial: 141 samples with 1 evaluation.\n",
+ " Range \u001b[90m(\u001b[39m\u001b[36m\u001b[1mmin\u001b[22m\u001b[39m … \u001b[35mmax\u001b[39m\u001b[90m): \u001b[39m\u001b[36m\u001b[1m31.255 ms\u001b[22m\u001b[39m … \u001b[35m42.658 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmin … max\u001b[90m): \u001b[39m0.00% … 4.92%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[34m\u001b[1mmedian\u001b[22m\u001b[39m\u001b[90m): \u001b[39m\u001b[34m\u001b[1m35.749 ms \u001b[22m\u001b[39m\u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmedian\u001b[90m): \u001b[39m4.34%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[32m\u001b[1mmean\u001b[22m\u001b[39m ± \u001b[32mσ\u001b[39m\u001b[90m): \u001b[39m\u001b[32m\u001b[1m35.690 ms\u001b[22m\u001b[39m ± \u001b[32m 2.009 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmean ± σ\u001b[90m): \u001b[39m3.04% ± 2.83%\n",
+ "\n",
+ " \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▆\u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▃\u001b[39m▁\u001b[39m▁\u001b[39m \u001b[39m \u001b[39m▁\u001b[39m \u001b[39m▃\u001b[39m▁\u001b[39m▃\u001b[39m▁\u001b[39m \u001b[39m█\u001b[34m▆\u001b[39m\u001b[39m▁\u001b[39m▁\u001b[39m▆\u001b[39m▁\u001b[39m▁\u001b[39m▃\u001b[39m \u001b[39m▁\u001b[39m \u001b[39m▃\u001b[39m▆\u001b[39m▁\u001b[39m▆\u001b[39m█\u001b[39m \u001b[39m \u001b[39m \u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \n",
+ " \u001b[39m▇\u001b[39m▄\u001b[39m▄\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▄\u001b[39m▇\u001b[39m▇\u001b[39m▄\u001b[39m▄\u001b[39m▄\u001b[39m▇\u001b[39m▄\u001b[39m█\u001b[39m█\u001b[39m▇\u001b[39m▄\u001b[39m▇\u001b[39m▇\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▇\u001b[39m▁\u001b[39m█\u001b[39m▄\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▇\u001b[39m█\u001b[34m█\u001b[39m\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▄\u001b[39m█\u001b[39m▇\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m█\u001b[39m▇\u001b[39m▇\u001b[39m▁\u001b[39m█\u001b[39m▄\u001b[39m▁\u001b[39m▄\u001b[39m▇\u001b[39m█\u001b[39m▇\u001b[39m▄\u001b[39m \u001b[39m▄\n",
+ " 31.3 ms\u001b[90m Histogram: frequency by time\u001b[39m 39.2 ms \u001b[0m\u001b[1m<\u001b[22m\n",
+ "\n",
+ " Memory estimate\u001b[90m: \u001b[39m\u001b[33m23.29 MiB\u001b[39m, allocs estimate\u001b[90m: \u001b[39m\u001b[33m171048\u001b[39m."
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 120 Diagrams for 4-Photon Compton\n",
+ "Diagram 1: Initial Particles: [k_i_1, k_i_2, k_i_3, k_i_4, e_i_1, k_o_1, e_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_o_1 -> e_o_2, e_i_1 + k_o_1 -> e_i_2]\n",
+ " Virtuality Level 2 Vertices: [k_i_3 + e_o_2 -> e_o_3, k_i_2 + e_i_2 -> e_i_3]\n",
+ " Virtuality Level 3 Vertices: [k_i_4 + e_o_3 -> e_o_4]\n",
+ " Tie: e_i_3 -- e_o_4\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# 4-Photon Compton Scattering\n",
+ "four_k_compton = FeynmanDiagram(parse_process(\"kkkke->ke\", QEDModel()))\n",
+ "\n",
+ "display(@benchmark gen_diagrams(four_k_compton))\n",
+ "diagrams = gen_diagrams(four_k_compton)\n",
+ "\n",
+ "println(\"Found $(length(diagrams)) Diagrams for 4-Photon Compton\")\n",
+ "println(\"Diagram 1: $(first(diagrams))\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "BenchmarkTools.Trial: 10 samples with 1 evaluation.\n",
+ " Range \u001b[90m(\u001b[39m\u001b[36m\u001b[1mmin\u001b[22m\u001b[39m … \u001b[35mmax\u001b[39m\u001b[90m): \u001b[39m\u001b[36m\u001b[1m471.789 ms\u001b[22m\u001b[39m … \u001b[35m527.196 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmin … max\u001b[90m): \u001b[39m6.00% … 7.35%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[34m\u001b[1mmedian\u001b[22m\u001b[39m\u001b[90m): \u001b[39m\u001b[34m\u001b[1m499.068 ms \u001b[22m\u001b[39m\u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmedian\u001b[90m): \u001b[39m6.98%\n",
+ " Time \u001b[90m(\u001b[39m\u001b[32m\u001b[1mmean\u001b[22m\u001b[39m ± \u001b[32mσ\u001b[39m\u001b[90m): \u001b[39m\u001b[32m\u001b[1m502.132 ms\u001b[22m\u001b[39m ± \u001b[32m 17.383 ms\u001b[39m \u001b[90m┊\u001b[39m GC \u001b[90m(\u001b[39mmean ± σ\u001b[90m): \u001b[39m6.79% ± 0.77%\n",
+ "\n",
+ " \u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m█\u001b[39m▁\u001b[39m \u001b[34m▁\u001b[39m\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[32m \u001b[39m\u001b[39m \u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▁\u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m \u001b[39m▁\u001b[39m▁\u001b[39m \u001b[39m \n",
+ " \u001b[39m█\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m█\u001b[39m█\u001b[39m▁\u001b[34m█\u001b[39m\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[32m▁\u001b[39m\u001b[39m▁\u001b[39m█\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m█\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m█\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m▁\u001b[39m█\u001b[39m█\u001b[39m \u001b[39m▁\n",
+ " 472 ms\u001b[90m Histogram: frequency by time\u001b[39m 527 ms \u001b[0m\u001b[1m<\u001b[22m\n",
+ "\n",
+ " Memory estimate\u001b[90m: \u001b[39m\u001b[33m627.12 MiB\u001b[39m, allocs estimate\u001b[90m: \u001b[39m\u001b[33m3747679\u001b[39m."
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 720 Diagrams for 5-Photon Compton\n",
+ "Diagram 1: Initial Particles: [k_i_1, k_i_2, k_i_3, k_i_4, k_i_5, e_i_1, k_o_1, e_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_i_1 -> e_i_2, k_i_4 + e_o_1 -> e_o_2]\n",
+ " Virtuality Level 2 Vertices: [k_i_3 + e_i_2 -> e_i_3, k_i_5 + e_o_2 -> e_o_3]\n",
+ " Virtuality Level 3 Vertices: [k_i_2 + e_i_3 -> e_i_4, k_o_1 + e_o_3 -> e_o_4]\n",
+ " Tie: e_i_4 -- e_o_4\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# 5-Photon Compton Scattering\n",
+ "five_k_compton = FeynmanDiagram(parse_process(\"kkkkke->ke\", QEDModel()))\n",
+ "\n",
+ "display(@benchmark gen_diagrams(five_k_compton))\n",
+ "diagrams = gen_diagrams(five_k_compton)\n",
+ "\n",
+ "println(\"Found $(length(diagrams)) Diagrams for 5-Photon Compton\")\n",
+ "println(\"Diagram 1: $(first(diagrams))\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Diagram 1: Initial Particles: [p_i_1, e_i_1, e_o_1, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [p_i_1 + e_i_1 -> k_o_2, e_o_1 + p_o_1 -> k_o_1]\n",
+ " Tie: k_o_2 -- k_o_1\n",
+ "\n",
+ "Diagram 2: Initial Particles: [p_i_1, e_i_1, e_o_1, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [p_i_1 + p_o_1 -> k_o_1, e_i_1 + e_o_1 -> k_o_2]\n",
+ " Tie: k_o_1 -- k_o_2\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Bhabha Scattering\n",
+ "fd = FeynmanDiagram(parse_process(\"ep->ep\", QEDModel()))\n",
+ "\n",
+ "diagrams = gen_diagrams(fd)\n",
+ "\n",
+ "c = 1\n",
+ "for d in diagrams\n",
+ " println(\"Diagram $c: $d\")\n",
+ " c += 1\n",
+ "end"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Diagram 1: Initial Particles: [e_i_1, e_i_2, e_o_1, e_o_2]\n",
+ " Virtuality Level 1 Vertices: [e_i_2 + e_o_2 -> k_o_2, e_i_1 + e_o_1 -> k_o_1]\n",
+ " Tie: k_o_2 -- k_o_1\n",
+ "\n",
+ "Diagram 2: Initial Particles: [e_i_1, e_i_2, e_o_1, e_o_2]\n",
+ " Virtuality Level 1 Vertices: [e_i_1 + e_o_2 -> k_o_1, e_i_2 + e_o_1 -> k_o_2]\n",
+ " Tie: k_o_1 -- k_o_2\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Moller Scattering\n",
+ "fd = FeynmanDiagram(parse_process(\"ee->ee\", QEDModel()))\n",
+ "\n",
+ "diagrams = gen_diagrams(fd)\n",
+ "\n",
+ "c = 1\n",
+ "for d in diagrams\n",
+ " println(\"Diagram $c: $d\")\n",
+ " c += 1\n",
+ "end"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Diagram 1: Initial Particles: [p_i_1, e_i_1, k_o_1, k_o_2]\n",
+ " Virtuality Level 1 Vertices: [e_i_1 + k_o_2 -> e_i_2, p_i_1 + k_o_1 -> e_o_1]\n",
+ " Tie: e_i_2 -- e_o_1\n",
+ "\n",
+ "Diagram 2: Initial Particles: [p_i_1, e_i_1, k_o_1, k_o_2]\n",
+ " Virtuality Level 1 Vertices: [e_i_1 + k_o_1 -> e_i_2, p_i_1 + k_o_2 -> e_o_1]\n",
+ " Tie: e_i_2 -- e_o_1\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Pair annihilation\n",
+ "fd = FeynmanDiagram(parse_process(\"ep->kk\", QEDModel()))\n",
+ "\n",
+ "diagrams = gen_diagrams(fd)\n",
+ "\n",
+ "c = 1\n",
+ "for d in diagrams\n",
+ " println(\"Diagram $c: $d\")\n",
+ " c += 1\n",
+ "end"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Diagram 1: Initial Particles: [k_i_1, k_i_2, e_o_1, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + p_o_1 -> e_i_1, k_i_2 + e_o_1 -> e_o_2]\n",
+ " Tie: e_i_1 -- e_o_2\n",
+ "\n",
+ "Diagram 2: Initial Particles: [k_i_1, k_i_2, e_o_1, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_o_1 -> e_o_2, k_i_2 + p_o_1 -> e_i_1]\n",
+ " Tie: e_o_2 -- e_i_1\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Pair production\n",
+ "fd = FeynmanDiagram(parse_process(\"kk->pe\", QEDModel()))\n",
+ "\n",
+ "diagrams = gen_diagrams(fd)\n",
+ "\n",
+ "c = 1\n",
+ "for d in diagrams\n",
+ " println(\"Diagram $c: $d\")\n",
+ " c += 1\n",
+ "end"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 8 diagrams:\n",
+ "Diagram 1: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_o_1 -> e_o_3, e_i_1 + e_o_2 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [p_o_1 + k_o_1 -> e_i_2]\n",
+ " Tie: e_o_3 -- e_i_2\n",
+ "\n",
+ "Diagram 2: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + p_o_1 -> e_i_2, e_i_1 + e_o_2 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [e_o_1 + e_i_2 -> k_o_2]\n",
+ " Tie: k_o_1 -- k_o_2\n",
+ "\n",
+ "Diagram 3: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_o_2 -> e_o_3, e_i_1 + e_o_1 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [p_o_1 + e_o_3 -> k_o_2]\n",
+ " Tie: k_o_1 -- k_o_2\n",
+ "\n",
+ "Diagram 4: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_i_1 -> e_i_2, e_o_2 + p_o_1 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [e_o_1 + e_i_2 -> k_o_2]\n",
+ " Tie: k_o_1 -- k_o_2\n",
+ "\n",
+ "Diagram 5: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_o_1 -> e_o_3, e_o_2 + p_o_1 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [e_i_1 + k_o_1 -> e_i_2]\n",
+ " Tie: e_o_3 -- e_i_2\n",
+ "\n",
+ "Diagram 6: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_o_2 -> e_o_3, e_o_1 + p_o_1 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [e_i_1 + e_o_3 -> k_o_2]\n",
+ " Tie: k_o_1 -- k_o_2\n",
+ "\n",
+ "Diagram 7: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + p_o_1 -> e_i_2, e_i_1 + e_o_1 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [e_o_2 + k_o_1 -> e_o_3]\n",
+ " Tie: e_i_2 -- e_o_3\n",
+ "\n",
+ "Diagram 8: Initial Particles: [k_i_1, e_i_1, e_o_1, e_o_2, p_o_1]\n",
+ " Virtuality Level 1 Vertices: [k_i_1 + e_i_1 -> e_i_2, e_o_1 + p_o_1 -> k_o_1]\n",
+ " Virtuality Level 2 Vertices: [e_o_2 + k_o_1 -> e_o_3]\n",
+ " Tie: e_i_2 -- e_o_3\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Trident\n",
+ "fd = FeynmanDiagram(parse_process(\"ke->epe\", QEDModel()))\n",
+ "\n",
+ "diagrams = gen_diagrams(fd)\n",
+ "\n",
+ "println(\"Found $(length(diagrams)) diagrams:\")\n",
+ "c = 1\n",
+ "for d in diagrams\n",
+ " println(\"Diagram $c: $d\")\n",
+ " c += 1\n",
+ "end"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Julia 1.9.4",
+ "language": "julia",
+ "name": "julia-1.9"
+ },
+ "language_info": {
+ "file_extension": ".jl",
+ "mimetype": "application/julia",
+ "name": "julia",
+ "version": "1.9.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebooks/diagram_gen_profiling.ipynb b/notebooks/diagram_gen_profiling.ipynb
new file mode 100644
index 0000000..ef7d1bc
--- /dev/null
+++ b/notebooks/diagram_gen_profiling.ipynb
@@ -0,0 +1,111 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "595a07c5-0ecc-4f3e-8cbe-63fc64b456da",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\u001b[36m\u001b[1m[ \u001b[22m\u001b[39m\u001b[36m\u001b[1mInfo: \u001b[22m\u001b[39mPrecompiling MetagraphOptimization [3e869610-d48d-4942-ba70-c1b702a33ca4]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "1"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "using BenchmarkTools; using Profile; using PProf; using Revise; using MetagraphOptimization;\n",
+ "Threads.nthreads()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "163f84be-1e2e-480e-9944-1fa4e0eedf3b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Found 1 NUMA nodes\n",
+ "CUDA is non-functional\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "QED Process: 'ke->kkkkke'"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "machine = get_machine_info()\n",
+ "model = QEDModel()\n",
+ "process = parse_process(\"ke->kkkkke\", model)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "6c2eef40-5df0-4396-8e62-5204c4de61f3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "\"profile.pb.gz\""
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Main binary filename not available.\n",
+ "Serving web UI on http://localhost:57599\n"
+ ]
+ }
+ ],
+ "source": [
+ "gen_graph(parse_process(\"ke->kke\", model))\n",
+ "Profile.clear()\n",
+ "@profile gen_graph(process)\n",
+ "pprof()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Julia 1.9.4",
+ "language": "julia",
+ "name": "julia-1.9"
+ },
+ "language_info": {
+ "file_extension": ".jl",
+ "mimetype": "application/julia",
+ "name": "julia",
+ "version": "1.9.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/large_compton.ipynb b/notebooks/large_compton.ipynb
new file mode 100644
index 0000000..7845ece
--- /dev/null
+++ b/notebooks/large_compton.ipynb
@@ -0,0 +1,129 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "12"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "using MetagraphOptimization\n",
+ "using BenchmarkTools\n",
+ "\n",
+ "Threads.nthreads()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Graph:\n",
+ " Nodes: Total: 131069, DataTask: 65539, ComputeTaskQED_Sum: 1, \n",
+ " ComputeTaskQED_V: 35280, ComputeTaskQED_S2: 5040, ComputeTaskQED_U: 9, \n",
+ " ComputeTaskQED_S1: 25200\n",
+ " Edges: 176419\n",
+ " Total Compute Effort: 549370.0\n",
+ " Total Data Transfer: 1.0645344e7\n",
+ " Total Compute Intensity: 0.05160659909158408\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "machine = get_machine_info()\n",
+ "model = QEDModel()\n",
+ "process = parse_process(\"ke->kkkkkke\", model)\n",
+ "\n",
+ "inputs = [gen_process_input(process) for _ in 1:1e3];\n",
+ "graph = gen_graph(process)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Graph:\n",
+ " Nodes: Total: 14783, DataTask: 7396, ComputeTaskQED_Sum: 1, \n",
+ " ComputeTaskQED_V: 1819, ComputeTaskQED_S2: 5040, ComputeTaskQED_U: 9, \n",
+ " ComputeTaskQED_S1: 518\n",
+ " Edges: 26672\n",
+ " Total Compute Effort: 77102.0\n",
+ " Total Data Transfer: 5.063616e6\n",
+ " Total Compute Intensity: 0.015226668056977465\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "optimizer = ReductionOptimizer()\n",
+ "\n",
+ "optimize_to_fixpoint!(optimizer, graph)\n",
+ "graph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Calculated 15537.0 results/s, 1295.0 results/s per thread for QED Process: 'ke->kkkkkke' (12 threads)\n"
+ ]
+ }
+ ],
+ "source": [
+ "compute_compton_reduced = get_compute_function(graph, process, machine)\n",
+ "outputs = [zero(ComplexF64) for _ in 1:1e6]\n",
+ "\n",
+ "bench_result = @benchmark begin\n",
+ " Threads.@threads :static for i in eachindex(inputs)\n",
+ " outputs[i] = compute_compton_reduced(inputs[i])\n",
+ " end\n",
+ "end\n",
+ "\n",
+ "rate = length(inputs) / (mean(bench_result.times) / 1.0e9)\n",
+ "rate_per_thread = rate / Threads.nthreads()\n",
+ "println(\"Calculated $(round(rate)) results/s, $(round(rate_per_thread)) results/s per thread for $(process) ($(Threads.nthreads()) threads)\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Julia 1.9.4",
+ "language": "julia",
+ "name": "julia-1.9"
+ },
+ "language_info": {
+ "file_extension": ".jl",
+ "mimetype": "application/julia",
+ "name": "julia",
+ "version": "1.9.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/notebooks/profiling.ipynb b/notebooks/profiling.ipynb
index f4032ee..e0be63d 100644
--- a/notebooks/profiling.ipynb
+++ b/notebooks/profiling.ipynb
@@ -35,11 +35,11 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {},
"outputs": [],
"source": [
- "@ProfileView.profview comp_func = get_compute_function(graph, process)"
+ "@ProfileView.profview comp_func = get_compute_function(graph, process, get_machine_info())"
]
},
{
@@ -52,7 +52,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Julia 1.9.3",
+ "display_name": "Julia 1.9.4",
"language": "julia",
"name": "julia-1.9"
},
@@ -60,7 +60,7 @@
"file_extension": ".jl",
"mimetype": "application/julia",
"name": "julia",
- "version": "1.9.3"
+ "version": "1.9.4"
},
"orig_nbformat": 4
},
diff --git a/results/FWKHIP8999 b/results/FWKHIP8999
index 001731f..a115cab 100644
--- a/results/FWKHIP8999
+++ b/results/FWKHIP8999
@@ -4,8 +4,8 @@ Run with 32 Threads
AB->AB:
Graph:
- Nodes: Total: 34, ComputeTaskS2: 2, ComputeTaskU: 4,
- ComputeTaskSum: 1, ComputeTaskV: 4, ComputeTaskP: 4,
+ Nodes: Total: 34, ComputeTaskABC_S2: 2, ComputeTaskABC_U: 4,
+ ComputeTaskABC_Sum: 1, ComputeTaskABC_V: 4, ComputeTaskABC_P: 4,
DataTask: 19
Edges: 37
Total Compute Effort: 185
@@ -27,9 +27,9 @@ Waiting...
AB->ABBB:
Graph:
- Nodes: Total: 280, ComputeTaskS2: 24, ComputeTaskU: 6,
- ComputeTaskV: 64, ComputeTaskSum: 1, ComputeTaskP: 6,
- ComputeTaskS1: 36, DataTask: 143
+ Nodes: Total: 280, ComputeTaskABC_S2: 24, ComputeTaskABC_U: 6,
+ ComputeTaskABC_V: 64, ComputeTaskABC_Sum: 1, ComputeTaskABC_P: 6,
+ ComputeTaskABC_S1: 36, DataTask: 143
Edges: 385
Total Compute Effort: 2007
Total Data Transfer: 1176
@@ -50,9 +50,9 @@ Waiting...
AB->ABBBBB:
Graph:
- Nodes: Total: 7854, ComputeTaskS2: 720, ComputeTaskU: 8,
- ComputeTaskV: 1956, ComputeTaskSum: 1, ComputeTaskP: 8,
- ComputeTaskS1: 1230, DataTask: 3931
+ Nodes: Total: 7854, ComputeTaskABC_S2: 720, ComputeTaskABC_U: 8,
+ ComputeTaskABC_V: 1956, ComputeTaskABC_Sum: 1, ComputeTaskABC_P: 8,
+ ComputeTaskABC_S1: 1230, DataTask: 3931
Edges: 11241
Total Compute Effort: 58789
Total Data Transfer: 34826
@@ -73,9 +73,9 @@ Waiting...
AB->ABBBBBBB:
Graph:
- Nodes: Total: 438436, ComputeTaskS2: 40320, ComputeTaskU: 10,
- ComputeTaskV: 109600, ComputeTaskSum: 1, ComputeTaskP: 10,
- ComputeTaskS1: 69272, DataTask: 219223
+ Nodes: Total: 438436, ComputeTaskABC_S2: 40320, ComputeTaskABC_U: 10,
+ ComputeTaskABC_V: 109600, ComputeTaskABC_Sum: 1, ComputeTaskABC_P: 10,
+ ComputeTaskABC_S1: 69272, DataTask: 219223
Edges: 628665
Total Compute Effort: 3288131
Total Data Transfer: 1949004
@@ -96,9 +96,9 @@ Waiting...
AB->ABBBBBBBBB:
Graph:
- Nodes: Total: 39456442, ComputeTaskS2: 3628800, ComputeTaskU: 12,
- ComputeTaskV: 9864100, ComputeTaskSum: 1, ComputeTaskP: 12,
- ComputeTaskS1: 6235290, DataTask: 19728227
+ Nodes: Total: 39456442, ComputeTaskABC_S2: 3628800, ComputeTaskABC_U: 12,
+ ComputeTaskABC_V: 9864100, ComputeTaskABC_Sum: 1, ComputeTaskABC_P: 12,
+ ComputeTaskABC_S1: 6235290, DataTask: 19728227
Edges: 56578129
Total Compute Effort: 295923153
Total Data Transfer: 175407750
@@ -119,9 +119,9 @@ Waiting...
ABAB->ABAB:
Graph:
- Nodes: Total: 3218, ComputeTaskS2: 288, ComputeTaskU: 8,
- ComputeTaskV: 796, ComputeTaskSum: 1, ComputeTaskP: 8,
- ComputeTaskS1: 504, DataTask: 1613
+ Nodes: Total: 3218, ComputeTaskABC_S2: 288, ComputeTaskABC_U: 8,
+ ComputeTaskABC_V: 796, ComputeTaskABC_Sum: 1, ComputeTaskABC_P: 8,
+ ComputeTaskABC_S1: 504, DataTask: 1613
Edges: 4581
Total Compute Effort: 24009
Total Data Transfer: 14144
@@ -142,9 +142,9 @@ Waiting...
ABAB->ABC:
Graph:
- Nodes: Total: 817, ComputeTaskS2: 72, ComputeTaskU: 7,
- ComputeTaskV: 198, ComputeTaskSum: 1, ComputeTaskP: 7,
- ComputeTaskS1: 120, DataTask: 412
+ Nodes: Total: 817, ComputeTaskABC_S2: 72, ComputeTaskABC_U: 7,
+ ComputeTaskABC_V: 198, ComputeTaskABC_Sum: 1, ComputeTaskABC_P: 7,
+ ComputeTaskABC_S1: 120, DataTask: 412
Edges: 1151
Total Compute Effort: 6028
Total Data Transfer: 3538
diff --git a/src/MetagraphOptimization.jl b/src/MetagraphOptimization.jl
index 385b8fb..d996413 100644
--- a/src/MetagraphOptimization.jl
+++ b/src/MetagraphOptimization.jl
@@ -5,6 +5,8 @@ A module containing tools to work on DAGs.
"""
module MetagraphOptimization
+using QEDbase
+
# graph types
export DAG
export Node
@@ -52,13 +54,25 @@ export get_operations
# ABC model
export ParticleValue
export ParticleA, ParticleB, ParticleC
-export ABCProcessDescription, ABCProcessInput, ABCModel
-export ComputeTaskP
-export ComputeTaskS1
-export ComputeTaskS2
-export ComputeTaskV
-export ComputeTaskU
-export ComputeTaskSum
+export ABCParticle, ABCProcessDescription, ABCProcessInput, ABCModel
+export ComputeTaskABC_P
+export ComputeTaskABC_S1
+export ComputeTaskABC_S2
+export ComputeTaskABC_V
+export ComputeTaskABC_U
+export ComputeTaskABC_Sum
+
+# QED model
+export FeynmanDiagram, FeynmanVertex, FeynmanTie, FeynmanParticle
+export PhotonStateful, FermionStateful, AntiFermionStateful
+export QEDParticle, QEDProcessDescription, QEDProcessInput, QEDModel
+export ComputeTaskQED_P
+export ComputeTaskQED_S1
+export ComputeTaskQED_S2
+export ComputeTaskQED_V
+export ComputeTaskQED_U
+export ComputeTaskQED_Sum
+export gen_graph
# code generation related
export execute
@@ -83,6 +97,9 @@ export ==, in, show, isempty, delete!, length
export bytes_to_human_readable
+# TODO: this is probably not good
+import QEDprocesses.compute
+
import Base.length
import Base.show
import Base.==
@@ -160,6 +177,15 @@ include("models/abc/properties.jl")
include("models/abc/parse.jl")
include("models/abc/print.jl")
+include("models/qed/types.jl")
+include("models/qed/particle.jl")
+include("models/qed/diagrams.jl")
+include("models/qed/compute.jl")
+include("models/qed/create.jl")
+include("models/qed/properties.jl")
+include("models/qed/parse.jl")
+include("models/qed/print.jl")
+
include("devices/measure.jl")
include("devices/detect.jl")
include("devices/impl.jl")
diff --git a/src/code_gen/main.jl b/src/code_gen/main.jl
index 944ccb6..3b8b24b 100644
--- a/src/code_gen/main.jl
+++ b/src/code_gen/main.jl
@@ -61,13 +61,11 @@ function gen_input_assignment_code(
assignInputs = Vector{Expr}()
for (name, symbols) in inputSymbols
- type = type_from_name(name)
- index = parse(Int, name[2:end])
-
+ (type, index) = type_index_from_name(model(processDescription), name)
p = nothing
- if (index > in_particles(processDescription)[type])
- index -= in_particles(processDescription)[type]
+ if (index > get(in_particles(processDescription), type, 0))
+ index -= get(in_particles(processDescription), type, 0)
@assert index <= out_particles(processDescription)[type] "Too few particles of type $type in input particles for this process"
p = "filter(x -> typeof(x) <: $type, out_particles($(processInputSymbol)))[$(index)]"
@@ -76,10 +74,9 @@ function gen_input_assignment_code(
end
for symbol in symbols
- # TODO: how to get the "default" cpu device?
device = entry_device(machine)
evalExpr = eval(gen_access_expr(device, symbol))
- push!(assignInputs, Meta.parse("$(evalExpr)::ParticleValue{$type} = ParticleValue($p, 1.0)"))
+ push!(assignInputs, Meta.parse("$(evalExpr)::ParticleValue{$type} = ParticleValue($p, one(ComplexF64))"))
end
end
diff --git a/src/graph/mute.jl b/src/graph/mute.jl
index 4c6e0af..d5ad22c 100644
--- a/src/graph/mute.jl
+++ b/src/graph/mute.jl
@@ -123,7 +123,6 @@ function remove_edge!(graph::DAG, node1::Node, node2::Node; track = true, invali
pre_length1 = length(node1.parents)
pre_length2 = length(node2.children)
- #TODO: filter is very slow
for i in eachindex(node1.parents)
if (node1.parents[i] == node2)
splice!(node1.parents, i)
@@ -252,7 +251,6 @@ function invalidate_caches!(graph::DAG, operation::NodeFusion)
delete!(graph.possibleOperations, operation)
# delete the operation from all caches of nodes involved in the operation
- # TODO: filter is very slow
for n in [1, 3]
for i in eachindex(operation.input[n].nodeFusions)
if operation == operation.input[n].nodeFusions[i]
diff --git a/src/graph/print.jl b/src/graph/print.jl
index e452749..650cc27 100644
--- a/src/graph/print.jl
+++ b/src/graph/print.jl
@@ -41,7 +41,7 @@ function show(io::IO, graph::DAG)
if length(graph.nodes) <= 20
show_nodes(io, graph)
else
- print("Total: ", length(graph.nodes), ", ")
+ print(io, "Total: ", length(graph.nodes), ", ")
first = true
i = 0
for (type, number) in zip(keys(nodeDict), values(nodeDict))
@@ -49,12 +49,12 @@ function show(io::IO, graph::DAG)
if first
first = false
else
- print(", ")
+ print(io, ", ")
end
if (i % 3 == 0)
- print("\n ")
+ print(io, "\n ")
end
- print(type, ": ", number)
+ print(io, type, ": ", number)
end
end
println(io)
diff --git a/src/models/abc/compute.jl b/src/models/abc/compute.jl
index d5f6056..6f1857d 100644
--- a/src/models/abc/compute.jl
+++ b/src/models/abc/compute.jl
@@ -1,46 +1,46 @@
using AccurateArithmetic
"""
- compute(::ComputeTaskP, data::ParticleValue)
+ compute(::ComputeTaskABC_P, data::ABCParticleValue)
Return the particle and value as is.
0 FLOP.
"""
-function compute(::ComputeTaskP, data::ParticleValue{P})::ParticleValue{P} where {P <: ABCParticle}
+function compute(::ComputeTaskABC_P, data::ABCParticleValue{P})::ABCParticleValue{P} where {P <: ABCParticle}
return data
end
"""
- compute(::ComputeTaskU, data::ParticleValue)
+ compute(::ComputeTaskABC_U, data::ABCParticleValue)
-Compute an outer edge. Return the particle value with the same particle and the value multiplied by an outer_edge factor.
+Compute an outer edge. Return the particle value with the same particle and the value multiplied by an ABC_outer_edge factor.
1 FLOP.
"""
-function compute(::ComputeTaskU, data::ParticleValue{P})::ParticleValue{P} where {P <: ABCParticle}
- return ParticleValue(data.p, data.v * outer_edge(data.p))
+function compute(::ComputeTaskABC_U, data::ABCParticleValue{P})::ABCParticleValue{P} where {P <: ABCParticle}
+ return ABCParticleValue{P}(data.p, data.v * ABC_outer_edge(data.p))
end
"""
- compute(::ComputeTaskV, data1::ParticleValue, data2::ParticleValue)
+ compute(::ComputeTaskABC_V, data1::ABCParticleValue, data2::ABCParticleValue)
Compute a vertex. Preserve momentum and particle types (AB->C etc.) to create resulting particle, multiply values together and times a vertex factor.
6 FLOP.
"""
function compute(
- ::ComputeTaskV,
- data1::ParticleValue{P1},
- data2::ParticleValue{P2},
-)::ParticleValue where {P1 <: ABCParticle, P2 <: ABCParticle}
- p3 = preserve_momentum(data1.p, data2.p)
- dataOut = ParticleValue(p3, data1.v * vertex() * data2.v)
+ ::ComputeTaskABC_V,
+ data1::ABCParticleValue{P1},
+ data2::ABCParticleValue{P2},
+)::ABCParticleValue where {P1 <: ABCParticle, P2 <: ABCParticle}
+ p3 = ABC_conserve_momentum(data1.p, data2.p)
+ dataOut = ABCParticleValue{typeof(p3)}(p3, data1.v * ABC_vertex() * data2.v)
return dataOut
end
"""
- compute(::ComputeTaskS2, data1::ParticleValue, data2::ParticleValue)
+ compute(::ComputeTaskABC_S2, data1::ABCParticleValue, data2::ABCParticleValue)
Compute a final inner edge (2 input particles, no output particle).
@@ -48,112 +48,116 @@ For valid inputs, both input particles should have the same momenta at this poin
12 FLOP.
"""
-function compute(::ComputeTaskS2, data1::ParticleValue{P}, data2::ParticleValue{P})::Float64 where {P <: ABCParticle}
+function compute(
+ ::ComputeTaskABC_S2,
+ data1::ParticleValue{P},
+ data2::ParticleValue{P},
+)::Float64 where {P <: ABCParticle}
#=
@assert isapprox(abs(data1.p.momentum.E), abs(data2.p.momentum.E), rtol = 0.001, atol = sqrt(eps())) "E: $(data1.p.momentum.E) vs. $(data2.p.momentum.E)"
@assert isapprox(data1.p.momentum.px, -data2.p.momentum.px, rtol = 0.001, atol = sqrt(eps())) "px: $(data1.p.momentum.px) vs. $(data2.p.momentum.px)"
@assert isapprox(data1.p.momentum.py, -data2.p.momentum.py, rtol = 0.001, atol = sqrt(eps())) "py: $(data1.p.momentum.py) vs. $(data2.p.momentum.py)"
@assert isapprox(data1.p.momentum.pz, -data2.p.momentum.pz, rtol = 0.001, atol = sqrt(eps())) "pz: $(data1.p.momentum.pz) vs. $(data2.p.momentum.pz)"
=#
- inner = inner_edge(data1.p)
+ inner = ABC_inner_edge(data1.p)
return data1.v * inner * data2.v
end
"""
- compute(::ComputeTaskS1, data::ParticleValue)
+ compute(::ComputeTaskABC_S1, data::ABCParticleValue)
Compute inner edge (1 input particle, 1 output particle).
11 FLOP.
"""
-function compute(::ComputeTaskS1, data::ParticleValue{P})::ParticleValue{P} where {P <: ABCParticle}
- return ParticleValue(data.p, data.v * inner_edge(data.p))
+function compute(::ComputeTaskABC_S1, data::ABCParticleValue{P})::ABCParticleValue{P} where {P <: ABCParticle}
+ return ABCParticleValue{P}(data.p, data.v * ABC_inner_edge(data.p))
end
"""
- compute(::ComputeTaskSum, data::Vector{Float64})
+ compute(::ComputeTaskABC_Sum, data::Vector{Float64})
Compute a sum over the vector. Use an algorithm that accounts for accumulated errors in long sums with potentially large differences in magnitude of the summands.
Linearly many FLOP with growing data.
"""
-function compute(::ComputeTaskSum, data::Vector{Float64})::Float64
+function compute(::ComputeTaskABC_Sum, data::Vector{Float64})::Float64
return sum_kbn(data)
end
"""
- get_expression(::ComputeTaskP, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+ get_expression(::ComputeTaskABC_P, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
-Generate and return code evaluating [`ComputeTaskP`](@ref) on `inSyms`, providing the output on `outSym`.
+Generate and return code evaluating [`ComputeTaskABC_P`](@ref) on `inSyms`, providing the output on `outSym`.
"""
-function get_expression(::ComputeTaskP, device::AbstractDevice, inExprs::Vector, outExpr)
+function get_expression(::ComputeTaskABC_P, device::AbstractDevice, inExprs::Vector, outExpr)
in = [eval(inExprs[1])]
out = eval(outExpr)
- return Meta.parse("$out = compute(ComputeTaskP(), $(in[1]))")
+ return Meta.parse("$out = compute(ComputeTaskABC_P(), $(in[1]))")
end
"""
- get_expression(::ComputeTaskU, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+ get_expression(::ComputeTaskABC_U, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
-Generate code evaluating [`ComputeTaskU`](@ref) on `inSyms`, providing the output on `outSym`.
-`inSyms` should be of type [`ParticleValue`](@ref), `outSym` will be of type [`ParticleValue`](@ref).
+Generate code evaluating [`ComputeTaskABC_U`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSyms` should be of type [`ABCParticleValue`](@ref), `outSym` will be of type [`ABCParticleValue`](@ref).
"""
-function get_expression(::ComputeTaskU, device::AbstractDevice, inExprs::Vector, outExpr)
+function get_expression(::ComputeTaskABC_U, device::AbstractDevice, inExprs::Vector, outExpr)
in = [eval(inExprs[1])]
out = eval(outExpr)
- return Meta.parse("$out = compute(ComputeTaskU(), $(in[1]))")
+ return Meta.parse("$out = compute(ComputeTaskABC_U(), $(in[1]))")
end
"""
- get_expression(::ComputeTaskV, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+ get_expression(::ComputeTaskABC_V, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
-Generate code evaluating [`ComputeTaskV`](@ref) on `inSyms`, providing the output on `outSym`.
-`inSym[1]` and `inSym[2]` should be of type [`ParticleValue`](@ref), `outSym` will be of type [`ParticleValue`](@ref).
+Generate code evaluating [`ComputeTaskABC_V`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSym[1]` and `inSym[2]` should be of type [`ABCParticleValue`](@ref), `outSym` will be of type [`ABCParticleValue`](@ref).
"""
-function get_expression(::ComputeTaskV, device::AbstractDevice, inExprs::Vector, outExpr)
+function get_expression(::ComputeTaskABC_V, device::AbstractDevice, inExprs::Vector, outExpr)
in = [eval(inExprs[1]), eval(inExprs[2])]
out = eval(outExpr)
- return Meta.parse("$out = compute(ComputeTaskV(), $(in[1]), $(in[2]))")
+ return Meta.parse("$out = compute(ComputeTaskABC_V(), $(in[1]), $(in[2]))")
end
"""
- get_expression(::ComputeTaskS2, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+ get_expression(::ComputeTaskABC_S2, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
-Generate code evaluating [`ComputeTaskS2`](@ref) on `inSyms`, providing the output on `outSym`.
-`inSyms[1]` and `inSyms[2]` should be of type [`ParticleValue`](@ref), `outSym` will be of type `Float64`.
+Generate code evaluating [`ComputeTaskABC_S2`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSyms[1]` and `inSyms[2]` should be of type [`ABCParticleValue`](@ref), `outSym` will be of type `Float64`.
"""
-function get_expression(::ComputeTaskS2, device::AbstractDevice, inExprs::Vector, outExpr)
+function get_expression(::ComputeTaskABC_S2, device::AbstractDevice, inExprs::Vector, outExpr)
in = [eval(inExprs[1]), eval(inExprs[2])]
out = eval(outExpr)
- return Meta.parse("$out = compute(ComputeTaskS2(), $(in[1]), $(in[2]))")
+ return Meta.parse("$out = compute(ComputeTaskABC_S2(), $(in[1]), $(in[2]))")
end
"""
- get_expression(::ComputeTaskS1, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+ get_expression(::ComputeTaskABC_S1, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
-Generate code evaluating [`ComputeTaskS1`](@ref) on `inSyms`, providing the output on `outSym`.
-`inSyms` should be of type [`ParticleValue`](@ref), `outSym` will be of type [`ParticleValue`](@ref).
+Generate code evaluating [`ComputeTaskABC_S1`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSyms` should be of type [`ABCParticleValue`](@ref), `outSym` will be of type [`ABCParticleValue`](@ref).
"""
-function get_expression(::ComputeTaskS1, device::AbstractDevice, inExprs::Vector, outExpr)
+function get_expression(::ComputeTaskABC_S1, device::AbstractDevice, inExprs::Vector, outExpr)
in = [eval(inExprs[1])]
out = eval(outExpr)
- return Meta.parse("$out = compute(ComputeTaskS1(), $(in[1]))")
+ return Meta.parse("$out = compute(ComputeTaskABC_S1(), $(in[1]))")
end
"""
- get_expression(::ComputeTaskSum, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+ get_expression(::ComputeTaskABC_Sum, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
-Generate code evaluating [`ComputeTaskSum`](@ref) on `inSyms`, providing the output on `outSym`.
+Generate code evaluating [`ComputeTaskABC_Sum`](@ref) on `inSyms`, providing the output on `outSym`.
`inSyms` should be of type [`Float64`], `outSym` will be of type [`Float64`].
"""
-function get_expression(::ComputeTaskSum, device::AbstractDevice, inExprs::Vector, outExpr)
+function get_expression(::ComputeTaskABC_Sum, device::AbstractDevice, inExprs::Vector, outExpr)
in = eval.(inExprs)
out = eval(outExpr)
- return Meta.parse("$out = compute(ComputeTaskSum(), [$(unroll_symbol_vector(in))])")
+ return Meta.parse("$out = compute(ComputeTaskABC_Sum(), [$(unroll_symbol_vector(in))])")
end
diff --git a/src/models/abc/create.jl b/src/models/abc/create.jl
index d33ec4a..0d5f844 100644
--- a/src/models/abc/create.jl
+++ b/src/models/abc/create.jl
@@ -3,7 +3,7 @@ using Random
using Roots
using ForwardDiff
-ComputeTaskSum() = ComputeTaskSum(0)
+ComputeTaskABC_Sum() = ComputeTaskABC_Sum(0)
"""
gen_process_input(processDescription::ABCProcessDescription)
@@ -62,137 +62,3 @@ function gen_process_input(processDescription::ABCProcessDescription)
return return processInput
end
-
-####################
-# CODE FROM HERE BORROWED FROM SOURCE: https://codebase.helmholtz.cloud/qedsandbox/QEDphasespaces.jl/
-# use qedphasespaces directly once released
-#
-# quick and dirty implementation of the RAMBO algorithm
-#
-# reference:
-# * https://cds.cern.ch/record/164736/files/198601282.pdf
-# * https://www.sciencedirect.com/science/article/pii/0010465586901190
-####################
-
-function generate_initial_moms(ss, masses)
- E1 = (ss^2 + masses[1]^2 - masses[2]^2) / (2 * ss)
- E2 = (ss^2 + masses[2]^2 - masses[1]^2) / (2 * ss)
-
- rho1 = sqrt(E1^2 - masses[1]^2)
- rho2 = sqrt(E2^2 - masses[2]^2)
-
- return [SFourMomentum(E1, 0, 0, rho1), SFourMomentum(E2, 0, 0, -rho2)]
-end
-
-
-Random.rand(rng::AbstractRNG, ::Random.SamplerType{SFourMomentum}) = SFourMomentum(rand(rng, 4))
-Random.rand(rng::AbstractRNG, ::Random.SamplerType{NTuple{N, Float64}}) where {N} = Tuple(rand(rng, N))
-
-
-function _transform_uni_to_mom(u1, u2, u3, u4)
- cth = 2 * u1 - 1
- sth = sqrt(1 - cth^2)
- phi = 2 * pi * u2
- q0 = -log(u3 * u4)
- qx = q0 * sth * cos(phi)
- qy = q0 * sth * sin(phi)
- qz = q0 * cth
-
- return SFourMomentum(q0, qx, qy, qz)
-end
-
-function _transform_uni_to_mom!(uni_mom, dest)
- u1, u2, u3, u4 = Tuple(uni_mom)
- cth = 2 * u1 - 1
- sth = sqrt(1 - cth^2)
- phi = 2 * pi * u2
- q0 = -log(u3 * u4)
- qx = q0 * sth * cos(phi)
- qy = q0 * sth * sin(phi)
- qz = q0 * cth
-
- return dest = SFourMomentum(q0, qx, qy, qz)
-end
-
-_transform_uni_to_mom(u1234::Tuple) = _transform_uni_to_mom(u1234...)
-_transform_uni_to_mom(u1234::SFourMomentum) = _transform_uni_to_mom(Tuple(u1234))
-
-function generate_massless_moms(rng, n::Int)
- a = Vector{SFourMomentum}(undef, n)
- rand!(rng, a)
- return map(_transform_uni_to_mom, a)
-end
-
-function generate_physical_massless_moms(rng, ss, n)
- r_moms = generate_massless_moms(rng, n)
- Q = sum(r_moms)
- M = sqrt(Q * Q)
- fac = -1 / M
- Qx = getX(Q)
- Qy = getY(Q)
- Qz = getZ(Q)
- bx = fac * Qx
- by = fac * Qy
- bz = fac * Qz
- gamma = getT(Q) / M
- a = 1 / (1 + gamma)
- x = ss / M
-
- i = 1
- while i <= n
- mom = r_moms[i]
- mom0 = getT(mom)
- mom1 = getX(mom)
- mom2 = getY(mom)
- mom3 = getZ(mom)
-
- bq = bx * mom1 + by * mom2 + bz * mom3
-
- p0 = x * (gamma * mom0 + bq)
- px = x * (mom1 + bx * mom0 + a * bq * bx)
- py = x * (mom2 + by * mom0 + a * bq * by)
- pz = x * (mom3 + bz * mom0 + a * bq * bz)
-
- r_moms[i] = SFourMomentum(p0, px, py, pz)
- i += 1
- end
- return r_moms
-end
-
-function _to_be_solved(xi, masses, p0s, ss)
- sum = 0.0
- for (i, E) in enumerate(p0s)
- sum += sqrt(masses[i]^2 + xi^2 * E^2)
- end
- return sum - ss
-end
-
-function _build_massive_momenta(xi, masses, massless_moms)
- vec = SFourMomentum[]
- i = 1
- while i <= length(massless_moms)
- massless_mom = massless_moms[i]
- k0 = sqrt(getT(massless_mom)^2 * xi^2 + masses[i]^2)
-
- kx = xi * getX(massless_mom)
- ky = xi * getY(massless_mom)
- kz = xi * getZ(massless_mom)
-
- push!(vec, SFourMomentum(k0, kx, ky, kz))
-
- i += 1
- end
- return vec
-end
-
-first_derivative(func) = x -> ForwardDiff.derivative(func, float(x))
-
-
-function generate_physical_massive_moms(rng, ss, masses; x0 = 0.1)
- n = length(masses)
- massless_moms = generate_physical_massless_moms(rng, ss, n)
- energies = getT.(massless_moms)
- f = x -> _to_be_solved(x, masses, energies, ss)
- xi = find_zero((f, first_derivative(f)), x0, Roots.Newton())
- return _build_massive_momenta(xi, masses, massless_moms)
-end
diff --git a/src/models/abc/parse.jl b/src/models/abc/parse.jl
index 05d2855..c16554c 100644
--- a/src/models/abc/parse.jl
+++ b/src/models/abc/parse.jl
@@ -63,7 +63,7 @@ function parse_dag(filename::AbstractString, model::ABCModel, verbose::Bool = fa
end
sizehint!(graph.nodes, estimate_no_nodes)
- sum_node = insert_node!(graph, make_node(ComputeTaskSum(0)), track = false, invalidate_cache = false)
+ sum_node = insert_node!(graph, make_node(ComputeTaskABC_Sum(0)), track = false, invalidate_cache = false)
global_data_out = insert_node!(graph, make_node(DataTask(FLOAT_SIZE)), track = false, invalidate_cache = false)
insert_edge!(graph, sum_node, global_data_out, track = false, invalidate_cache = false)
@@ -92,12 +92,12 @@ function parse_dag(filename::AbstractString, model::ABCModel, verbose::Bool = fa
track = false,
invalidate_cache = false,
) # read particle data node
- compute_P = insert_node!(graph, make_node(ComputeTaskP()), track = false, invalidate_cache = false) # compute P node
+ compute_P = insert_node!(graph, make_node(ComputeTaskABC_P()), track = false, invalidate_cache = false) # compute P node
data_Pu =
- insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false) # transfer data from P to u (one ParticleValue object)
- compute_u = insert_node!(graph, make_node(ComputeTaskU()), track = false, invalidate_cache = false) # compute U node
+ insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false) # transfer data from P to u (one ABCParticleValue object)
+ compute_u = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false, invalidate_cache = false) # compute U node
data_out =
- insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false) # transfer data out from u (one ParticleValue object)
+ insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false) # transfer data out from u (one ABCParticleValue object)
insert_edge!(graph, data_in, compute_P, track = false, invalidate_cache = false)
insert_edge!(graph, compute_P, data_Pu, track = false, invalidate_cache = false)
@@ -112,13 +112,13 @@ function parse_dag(filename::AbstractString, model::ABCModel, verbose::Bool = fa
in1 = capt.captures[1]
in2 = capt.captures[2]
- compute_v = insert_node!(graph, make_node(ComputeTaskV()), track = false, invalidate_cache = false)
+ compute_v = insert_node!(graph, make_node(ComputeTaskABC_V()), track = false, invalidate_cache = false)
data_out =
insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false)
if (occursin(regex_c, in1))
# put an S node after this input
- compute_S = insert_node!(graph, make_node(ComputeTaskS1()), track = false, invalidate_cache = false)
+ compute_S = insert_node!(graph, make_node(ComputeTaskABC_S1()), track = false, invalidate_cache = false)
data_S_v = insert_node!(
graph,
make_node(DataTask(PARTICLE_VALUE_SIZE)),
@@ -137,7 +137,7 @@ function parse_dag(filename::AbstractString, model::ABCModel, verbose::Bool = fa
if (occursin(regex_c, in2))
# i think the current generator only puts the combined particles in the first space, so this case might never be entered
# put an S node after this input
- compute_S = insert_node!(graph, make_node(ComputeTaskS1()), track = false, invalidate_cache = false)
+ compute_S = insert_node!(graph, make_node(ComputeTaskABC_S1()), track = false, invalidate_cache = false)
data_S_v = insert_node!(
graph,
make_node(DataTask(PARTICLE_VALUE_SIZE)),
@@ -164,7 +164,7 @@ function parse_dag(filename::AbstractString, model::ABCModel, verbose::Bool = fa
in3 = capt.captures[3]
# in2 + in3 with a v
- compute_v = insert_node!(graph, make_node(ComputeTaskV()), track = false, invalidate_cache = false)
+ compute_v = insert_node!(graph, make_node(ComputeTaskABC_V()), track = false, invalidate_cache = false)
data_v =
insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false)
@@ -173,7 +173,7 @@ function parse_dag(filename::AbstractString, model::ABCModel, verbose::Bool = fa
insert_edge!(graph, compute_v, data_v, track = false, invalidate_cache = false)
# combine with the v of the combined other input
- compute_S2 = insert_node!(graph, make_node(ComputeTaskS2()), track = false, invalidate_cache = false)
+ compute_S2 = insert_node!(graph, make_node(ComputeTaskABC_S2()), track = false, invalidate_cache = false)
data_out = insert_node!(graph, make_node(DataTask(FLOAT_SIZE)), track = false, invalidate_cache = false) # output of a S2 task is only a float
insert_edge!(graph, data_v, compute_S2, track = false, invalidate_cache = false)
diff --git a/src/models/abc/particle.jl b/src/models/abc/particle.jl
index 6db9a1c..8769eaf 100644
--- a/src/models/abc/particle.jl
+++ b/src/models/abc/particle.jl
@@ -1,5 +1,3 @@
-using QEDbase
-
import QEDbase.mass
"""
@@ -68,6 +66,8 @@ struct ABCProcessInput <: AbstractProcessInput
outParticles::Vector{ABCParticle}
end
+ABCParticleValue{ParticleType <: ABCParticle} = ParticleValue{ParticleType, ComplexF64}
+
"""
PARTICLE_MASSES
@@ -119,65 +119,63 @@ function square(p::ABCParticle)
end
"""
- inner_edge(p::ABCParticle)
+ ABC_inner_edge(p::ABCParticle)
Return the factor of the inner edge with the given (virtual) particle.
Takes 10 effective FLOP. (3 here + 7 in square(p))
"""
-function inner_edge(p::ABCParticle)
+function ABC_inner_edge(p::ABCParticle)
return 1.0 / (square(p) - mass(typeof(p)) * mass(typeof(p)))
end
"""
- outer_edge(p::ABCParticle)
+ ABC_outer_edge(p::ABCParticle)
Return the factor of the outer edge with the given (real) particle.
Takes 0 effective FLOP.
"""
-function outer_edge(p::ABCParticle)
+function ABC_outer_edge(p::ABCParticle)
return 1.0
end
"""
- vertex()
+ ABC_vertex()
Return the factor of a vertex.
Takes 0 effective FLOP since it's constant.
"""
-function vertex()
+function ABC_vertex()
i = 1.0
lambda = 1.0 / 137.0
return i * lambda
end
"""
- preserve_momentum(p1::ABCParticle, p2::ABCParticle)
+ ABC_conserve_momentum(p1::ABCParticle, p2::ABCParticle)
Calculate and return a new particle from two given interacting ones at a vertex.
Takes 4 effective FLOP.
"""
-function preserve_momentum(p1::ABCParticle, p2::ABCParticle)
+function ABC_conserve_momentum(p1::ABCParticle, p2::ABCParticle)
t3 = interaction_result(typeof(p1), typeof(p2))
p3 = t3(p1.momentum + p2.momentum)
return p3
end
-"""
- type_from_name(name::String)
+model(::ABCProcessDescription) = ABCModel()
+model(::ABCProcessInput) = ABCModel()
-For a name of a particle, return the particle's [`Type`].
-"""
-function type_from_name(name::String)
+function type_index_from_name(::ABCModel, name::String)
if startswith(name, "A")
- return ParticleA
+ return (ParticleA, parse(Int, name[2:end]))
elseif startswith(name, "B")
- return ParticleB
+ return (ParticleB, parse(Int, name[2:end]))
elseif startswith(name, "C")
- return ParticleC
+ return (ParticleC, parse(Int, name[2:end]))
else
throw("Invalid name for a particle in the ABC model")
end
diff --git a/src/models/abc/properties.jl b/src/models/abc/properties.jl
index ca9bdda..538e187 100644
--- a/src/models/abc/properties.jl
+++ b/src/models/abc/properties.jl
@@ -1,166 +1,134 @@
"""
- compute_effort(t::ComputeTaskS1)
+ compute_effort(t::ComputeTaskABC_S1)
Return the compute effort of an S1 task.
"""
-compute_effort(t::ComputeTaskS1)::Float64 = 11.0
+compute_effort(t::ComputeTaskABC_S1)::Float64 = 11.0
"""
- compute_effort(t::ComputeTaskS2)
+ compute_effort(t::ComputeTaskABC_S2)
Return the compute effort of an S2 task.
"""
-compute_effort(t::ComputeTaskS2)::Float64 = 12.0
+compute_effort(t::ComputeTaskABC_S2)::Float64 = 12.0
"""
- compute_effort(t::ComputeTaskU)
+ compute_effort(t::ComputeTaskABC_U)
Return the compute effort of a U task.
"""
-compute_effort(t::ComputeTaskU)::Float64 = 1.0
+compute_effort(t::ComputeTaskABC_U)::Float64 = 1.0
"""
- compute_effort(t::ComputeTaskV)
+ compute_effort(t::ComputeTaskABC_V)
Return the compute effort of a V task.
"""
-compute_effort(t::ComputeTaskV)::Float64 = 6.0
+compute_effort(t::ComputeTaskABC_V)::Float64 = 6.0
"""
- compute_effort(t::ComputeTaskP)
+ compute_effort(t::ComputeTaskABC_P)
Return the compute effort of a P task.
"""
-compute_effort(t::ComputeTaskP)::Float64 = 0.0
+compute_effort(t::ComputeTaskABC_P)::Float64 = 0.0
"""
- compute_effort(t::ComputeTaskSum)
+ compute_effort(t::ComputeTaskABC_Sum)
Return the compute effort of a Sum task.
Note: This is a constant compute effort, even though sum scales with the number of its inputs. Since there is only ever a single sum node in a graph generated from the ABC-Model,
this doesn't matter.
"""
-compute_effort(t::ComputeTaskSum)::Float64 = 1.0
+compute_effort(t::ComputeTaskABC_Sum)::Float64 = 1.0
"""
- show(io::IO, t::DataTask)
-
-Print the data task to io.
-"""
-function show(io::IO, t::DataTask)
- return print(io, "Data", t.data)
-end
-
-"""
- show(io::IO, t::ComputeTaskS1)
+ show(io::IO, t::ComputeTaskABC_S1)
Print the S1 task to io.
"""
-show(io::IO, t::ComputeTaskS1) = print(io, "ComputeS1")
+show(io::IO, t::ComputeTaskABC_S1) = print(io, "ComputeS1")
"""
- show(io::IO, t::ComputeTaskS2)
+ show(io::IO, t::ComputeTaskABC_S2)
Print the S2 task to io.
"""
-show(io::IO, t::ComputeTaskS2) = print(io, "ComputeS2")
+show(io::IO, t::ComputeTaskABC_S2) = print(io, "ComputeS2")
"""
- show(io::IO, t::ComputeTaskP)
+ show(io::IO, t::ComputeTaskABC_P)
Print the P task to io.
"""
-show(io::IO, t::ComputeTaskP) = print(io, "ComputeP")
+show(io::IO, t::ComputeTaskABC_P) = print(io, "ComputeP")
"""
- show(io::IO, t::ComputeTaskU)
+ show(io::IO, t::ComputeTaskABC_U)
Print the U task to io.
"""
-show(io::IO, t::ComputeTaskU) = print(io, "ComputeU")
+show(io::IO, t::ComputeTaskABC_U) = print(io, "ComputeU")
"""
- show(io::IO, t::ComputeTaskV)
+ show(io::IO, t::ComputeTaskABC_V)
Print the V task to io.
"""
-show(io::IO, t::ComputeTaskV) = print(io, "ComputeV")
+show(io::IO, t::ComputeTaskABC_V) = print(io, "ComputeV")
"""
- show(io::IO, t::ComputeTaskSum)
+ show(io::IO, t::ComputeTaskABC_Sum)
Print the sum task to io.
"""
-show(io::IO, t::ComputeTaskSum) = print(io, "ComputeSum")
+show(io::IO, t::ComputeTaskABC_Sum) = print(io, "ComputeSum")
"""
- copy(t::DataTask)
+ children(::ComputeTaskABC_S1)
-Copy the data task and return it.
+Return the number of children of a ComputeTaskABC_S1 (always 1).
"""
-copy(t::DataTask) = DataTask(t.data)
+children(::ComputeTaskABC_S1) = 1
"""
- children(::DataTask)
+ children(::ComputeTaskABC_S2)
-Return the number of children of a data task (always 1).
+Return the number of children of a ComputeTaskABC_S2 (always 2).
"""
-children(::DataTask) = 1
+children(::ComputeTaskABC_S2) = 2
"""
- children(::ComputeTaskS1)
+ children(::ComputeTaskABC_P)
-Return the number of children of a ComputeTaskS1 (always 1).
+Return the number of children of a ComputeTaskABC_P (always 1).
"""
-children(::ComputeTaskS1) = 1
+children(::ComputeTaskABC_P) = 1
"""
- children(::ComputeTaskS2)
+ children(::ComputeTaskABC_U)
-Return the number of children of a ComputeTaskS2 (always 2).
+Return the number of children of a ComputeTaskABC_U (always 1).
"""
-children(::ComputeTaskS2) = 2
+children(::ComputeTaskABC_U) = 1
"""
- children(::ComputeTaskP)
+ children(::ComputeTaskABC_V)
-Return the number of children of a ComputeTaskP (always 1).
+Return the number of children of a ComputeTaskABC_V (always 2).
"""
-children(::ComputeTaskP) = 1
-
-"""
- children(::ComputeTaskU)
-
-Return the number of children of a ComputeTaskU (always 1).
-"""
-children(::ComputeTaskU) = 1
-
-"""
- children(::ComputeTaskV)
-
-Return the number of children of a ComputeTaskV (always 2).
-"""
-children(::ComputeTaskV) = 2
+children(::ComputeTaskABC_V) = 2
"""
- children(::ComputeTaskSum)
+ children(::ComputeTaskABC_Sum)
-Return the number of children of a ComputeTaskSum.
+Return the number of children of a ComputeTaskABC_Sum.
"""
-children(t::ComputeTaskSum) = t.children_number
+children(t::ComputeTaskABC_Sum) = t.children_number
-"""
- children(t::FusedComputeTask)
-
-Return the number of children of a FusedComputeTask.
-"""
-function children(t::FusedComputeTask)
- return length(union(Set(t.t1_inputs), Set(t.t2_inputs)))
-end
-
-function add_child!(t::ComputeTaskSum)
+function add_child!(t::ComputeTaskABC_Sum)
t.children_number += 1
return nothing
end
diff --git a/src/models/abc/types.jl b/src/models/abc/types.jl
index 319ee78..cff7c72 100644
--- a/src/models/abc/types.jl
+++ b/src/models/abc/types.jl
@@ -1,53 +1,44 @@
"""
- DataTask <: AbstractDataTask
-
-Task representing a specific data transfer in the ABC Model.
-"""
-struct DataTask <: AbstractDataTask
- data::Float64
-end
-
-"""
- ComputeTaskS1 <: AbstractComputeTask
+ ComputeTaskABC_S1 <: AbstractComputeTask
S task with a single child.
"""
-struct ComputeTaskS1 <: AbstractComputeTask end
+struct ComputeTaskABC_S1 <: AbstractComputeTask end
"""
- ComputeTaskS2 <: AbstractComputeTask
+ ComputeTaskABC_S2 <: AbstractComputeTask
S task with two children.
"""
-struct ComputeTaskS2 <: AbstractComputeTask end
+struct ComputeTaskABC_S2 <: AbstractComputeTask end
"""
- ComputeTaskP <: AbstractComputeTask
+ ComputeTaskABC_P <: AbstractComputeTask
P task with no children.
"""
-struct ComputeTaskP <: AbstractComputeTask end
+struct ComputeTaskABC_P <: AbstractComputeTask end
"""
- ComputeTaskV <: AbstractComputeTask
+ ComputeTaskABC_V <: AbstractComputeTask
v task with two children.
"""
-struct ComputeTaskV <: AbstractComputeTask end
+struct ComputeTaskABC_V <: AbstractComputeTask end
"""
- ComputeTaskU <: AbstractComputeTask
+ ComputeTaskABC_U <: AbstractComputeTask
u task with a single child.
"""
-struct ComputeTaskU <: AbstractComputeTask end
+struct ComputeTaskABC_U <: AbstractComputeTask end
"""
- ComputeTaskSum <: AbstractComputeTask
+ ComputeTaskABC_Sum <: AbstractComputeTask
Task that sums all its inputs, n children.
"""
-mutable struct ComputeTaskSum <: AbstractComputeTask
+mutable struct ComputeTaskABC_Sum <: AbstractComputeTask
children_number::Int
end
@@ -56,4 +47,5 @@ end
Constant vector of all tasks of the ABC-Model.
"""
-ABC_TASKS = [DataTask, ComputeTaskS1, ComputeTaskS2, ComputeTaskP, ComputeTaskV, ComputeTaskU, ComputeTaskSum]
+ABC_TASKS =
+ [ComputeTaskABC_S1, ComputeTaskABC_S2, ComputeTaskABC_P, ComputeTaskABC_V, ComputeTaskABC_U, ComputeTaskABC_Sum]
diff --git a/src/models/interface.jl b/src/models/interface.jl
index dfb2c9f..ca30152 100644
--- a/src/models/interface.jl
+++ b/src/models/interface.jl
@@ -1,3 +1,5 @@
+import QEDbase.mass
+import QEDbase.AbstractParticle
"""
AbstractPhysicsModel
@@ -6,23 +8,16 @@ Base type for a model, e.g. ABC-Model or QED. This is used to dispatch many func
"""
abstract type AbstractPhysicsModel end
-"""
- AbstractParticle
-
-Base type for particles belonging to a certain [`AbstractPhysicsModel`](@ref).
-"""
-abstract type AbstractParticle end
-
"""
ParticleValue{ParticleType <: AbstractParticle}
-A struct describing a particle during a calculation of a Feynman Diagram, together with the value that's being calculated.
+A struct describing a particle during a calculation of a Feynman Diagram, together with the value that's being calculated. `AbstractParticle` is the type from the QEDbase package.
`sizeof(ParticleValue())` = 48 Byte
"""
-struct ParticleValue{ParticleType <: AbstractParticle}
+struct ParticleValue{ParticleType <: AbstractParticle, ValueType}
p::ParticleType
- v::Float64
+ v::ValueType
end
"""
@@ -43,13 +38,6 @@ See also: [`gen_process_input`](@ref)
"""
abstract type AbstractProcessInput end
-"""
- mass(t::Type{T}) where {T <: AbstractParticle}
-
-Interface function that must be implemented for every subtype of [`AbstractParticle`](@ref), returning the particles mass at rest.
-"""
-function mass end
-
"""
interaction_result(t1::Type{T1}, t2::Type{T2}) where {T1 <: AbstractParticle, T2 <: AbstractParticle}
@@ -107,3 +95,18 @@ Interface function that must be implemented for every specific [`AbstractProcess
Returns a randomly generated and valid corresponding `ProcessInput`.
"""
function gen_process_input end
+
+"""
+ model(::AbstractProcessDescription)
+    model(::AbstractProcessInput)
+
+Return the model of this process description or input.
+"""
+function model end
+
+"""
+    type_index_from_name(model::AbstractPhysicsModel, name::String)
+
+For the name of a particle in the given [`AbstractPhysicsModel`](@ref), return the particle's [`Type`] and index as a tuple. The input string is expected to be of the form produced by `String(::FeynmanParticle)`.
+"""
+function type_index_from_name end
diff --git a/src/models/qed/compute.jl b/src/models/qed/compute.jl
new file mode 100644
index 0000000..1225160
--- /dev/null
+++ b/src/models/qed/compute.jl
@@ -0,0 +1,198 @@
+
+"""
+ compute(::ComputeTaskQED_P, data::QEDParticleValue)
+
+Return the particle as is and initialize the Value.
+"""
+function compute(::ComputeTaskQED_P, data::QEDParticleValue{P})::QEDParticleValue{P} where {P <: QEDParticle}
+ # TODO do we actually need this for anything?
+ return QEDParticleValue{P}(data.p, one(DiracMatrix))
+end
+
+"""
+ compute(::ComputeTaskQED_U, data::QEDParticleValue)
+
+Compute an outer edge. Return the particle value with the same particle and the value multiplied by an outer_edge factor.
+"""
+function compute(::ComputeTaskQED_U, data::PV) where {P <: QEDParticle, PV <: QEDParticleValue{P}}
+ state = base_state(particle(data.p), direction(data.p), momentum(data.p), spin_or_pol(data.p))
+ return ParticleValue{P, typeof(state)}(
+ data.p,
+ state, # will return a SLorentzVector{ComplexF64}, BiSpinor or AdjointBiSpinor
+ )
+end
+
+"""
+ compute(::ComputeTaskQED_V, data1::QEDParticleValue, data2::QEDParticleValue)
+
+Compute a vertex. Preserve momentum and particle types (e + gamma->p etc.) to create resulting particle, multiply values together and times a vertex factor.
+"""
+function compute(
+ ::ComputeTaskQED_V,
+ data1::PV1,
+ data2::PV2,
+) where {P1 <: QEDParticle, P2 <: QEDParticle, PV1 <: QEDParticleValue{P1}, PV2 <: QEDParticleValue{P2}}
+ p3 = QED_conserve_momentum(data1.p, data2.p)
+ P3 = interaction_result(P1, P2)
+
+ state = QED_vertex()
+ if (typeof(data1.v) <: AdjointBiSpinor)
+ state = data1.v * state
+ else
+ state = state * data1.v
+ end
+ if (typeof(data2.v) <: AdjointBiSpinor)
+ state = data2.v * state
+ else
+ state = state * data2.v
+ end
+
+ dataOut = ParticleValue{P3, typeof(state)}(P3(p3), state)
+ return dataOut
+end
+
+"""
+ compute(::ComputeTaskQED_S2, data1::QEDParticleValue, data2::QEDParticleValue)
+
+Compute a final inner edge (2 input particles, no output particle).
+
+For valid inputs, both input particles should have the same momenta at this point.
+
+12 FLOP.
+"""
+function compute(
+ ::ComputeTaskQED_S2,
+ data1::ParticleValue{P1},
+ data2::ParticleValue{P2},
+)::ComplexF64 where {
+ P1 <: Union{AntiFermionStateful, FermionStateful},
+ P2 <: Union{AntiFermionStateful, FermionStateful},
+}
+ @assert isapprox(data1.p.momentum, data2.p.momentum, rtol = sqrt(eps()), atol = sqrt(eps())) "$(data1.p.momentum) vs. $(data2.p.momentum)"
+
+ inner = QED_inner_edge(propagation_result(P1)(data1.p))
+
+ # inner edge is just a "scalar", data1 and data2 are bispinor/adjointbispinnor, need to keep correct order
+ if typeof(data1.v) <: BiSpinor
+ return data2.v * inner * data1.v
+ else
+ return data1.v * inner * data2.v
+ end
+end
+
+# TODO: S2 when the particles are photons?
+function compute(
+ ::ComputeTaskQED_S2,
+ data1::ParticleValue{P1},
+ data2::ParticleValue{P2},
+)::ComplexF64 where {P1 <: PhotonStateful, P2 <: PhotonStateful}
+ # TODO: assert that data1 and data2 are opposites
+ inner = QED_inner_edge(data1.p)
+ # inner edge is just a scalar, data1 and data2 are photon states that are just Complex numbers here
+ return data1.v * inner * data2.v
+end
+
+"""
+ compute(::ComputeTaskQED_S1, data::QEDParticleValue)
+
+Compute inner edge (1 input particle, 1 output particle).
+"""
+function compute(::ComputeTaskQED_S1, data::QEDParticleValue{P})::QEDParticleValue where {P <: QEDParticle}
+ newP = propagation_result(P)
+ new_p = newP(data.p)
+ # inner edge is just a scalar, can multiply from either side
+ if typeof(data.v) <: BiSpinor
+ return ParticleValue(new_p, QED_inner_edge(new_p) * data.v)
+ else
+ return ParticleValue(new_p, data.v * QED_inner_edge(new_p))
+ end
+end
+
+"""
+ compute(::ComputeTaskQED_Sum, data::Vector{ComplexF64})
+
+Compute a sum over the vector. Use an algorithm that accounts for accumulated errors in long sums with potentially large differences in magnitude of the summands.
+
+Linearly many FLOP with growing data.
+"""
+function compute(::ComputeTaskQED_Sum, data::Vector{ComplexF64})::ComplexF64
+ # TODO: want to use sum_kbn here but it doesn't seem to support ComplexF64, do it element-wise?
+ return sum(data)
+end
+
+"""
+ get_expression(::ComputeTaskQED_P, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+
+Generate and return code evaluating [`ComputeTaskQED_P`](@ref) on `inSyms`, providing the output on `outSym`.
+"""
+function get_expression(::ComputeTaskQED_P, device::AbstractDevice, inExprs::Vector, outExpr)
+ in = [eval(inExprs[1])]
+ out = eval(outExpr)
+
+ return Meta.parse("$out = compute(ComputeTaskQED_P(), $(in[1]))")
+end
+
+"""
+ get_expression(::ComputeTaskQED_U, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+
+Generate code evaluating [`ComputeTaskQED_U`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSyms` should be of type [`QEDParticleValue`](@ref), `outSym` will be of type [`QEDParticleValue`](@ref).
+"""
+function get_expression(::ComputeTaskQED_U, device::AbstractDevice, inExprs::Vector, outExpr)
+ in = [eval(inExprs[1])]
+ out = eval(outExpr)
+
+ return Meta.parse("$out = compute(ComputeTaskQED_U(), $(in[1]))")
+end
+
+"""
+ get_expression(::ComputeTaskQED_V, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+
+Generate code evaluating [`ComputeTaskQED_V`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSym[1]` and `inSym[2]` should be of type [`QEDParticleValue`](@ref), `outSym` will be of type [`QEDParticleValue`](@ref).
+"""
+function get_expression(::ComputeTaskQED_V, device::AbstractDevice, inExprs::Vector, outExpr)
+ in = [eval(inExprs[1]), eval(inExprs[2])]
+ out = eval(outExpr)
+
+ return Meta.parse("$out = compute(ComputeTaskQED_V(), $(in[1]), $(in[2]))")
+end
+
+"""
+ get_expression(::ComputeTaskQED_S2, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+
+Generate code evaluating [`ComputeTaskQED_S2`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSyms[1]` and `inSyms[2]` should be of type [`QEDParticleValue`](@ref), `outSym` will be of type `Float64`.
+"""
+function get_expression(::ComputeTaskQED_S2, device::AbstractDevice, inExprs::Vector, outExpr)
+ in = [eval(inExprs[1]), eval(inExprs[2])]
+ out = eval(outExpr)
+
+ return Meta.parse("$out = compute(ComputeTaskQED_S2(), $(in[1]), $(in[2]))")
+end
+
+"""
+ get_expression(::ComputeTaskQED_S1, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+
+Generate code evaluating [`ComputeTaskQED_S1`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSyms` should be of type [`QEDParticleValue`](@ref), `outSym` will be of type [`QEDParticleValue`](@ref).
+"""
+function get_expression(::ComputeTaskQED_S1, device::AbstractDevice, inExprs::Vector, outExpr)
+ in = [eval(inExprs[1])]
+ out = eval(outExpr)
+
+ return Meta.parse("$out = compute(ComputeTaskQED_S1(), $(in[1]))")
+end
+
+"""
+ get_expression(::ComputeTaskQED_Sum, device::AbstractDevice, inExprs::Vector{Expr}, outExpr::Expr)
+
+Generate code evaluating [`ComputeTaskQED_Sum`](@ref) on `inSyms`, providing the output on `outSym`.
+`inSyms` should be of type [`Float64`], `outSym` will be of type [`Float64`].
+"""
+function get_expression(::ComputeTaskQED_Sum, device::AbstractDevice, inExprs::Vector, outExpr)
+ in = eval.(inExprs)
+ out = eval(outExpr)
+
+ return Meta.parse("$out = compute(ComputeTaskQED_Sum(), [$(unroll_symbol_vector(in))])")
+end
diff --git a/src/models/qed/create.jl b/src/models/qed/create.jl
new file mode 100644
index 0000000..baf73f2
--- /dev/null
+++ b/src/models/qed/create.jl
@@ -0,0 +1,172 @@
+
+ComputeTaskQED_Sum() = ComputeTaskQED_Sum(0)
+
+"""
+ gen_process_input(processDescription::QEDProcessDescription)
+
+Return a ProcessInput of randomly generated [`QEDParticle`](@ref)s from a [`QEDProcessDescription`](@ref). The process description can be created manually or parsed from a string using [`parse_process`](@ref).
+
+Note: This uses RAMBO to create a valid process with conservation of momentum and energy.
+"""
+function gen_process_input(processDescription::QEDProcessDescription)
+ massSum = 0
+ inputMasses = Vector{Float64}()
+ for (particle, n) in processDescription.inParticles
+ for _ in 1:n
+ massSum += mass(particle)
+ push!(inputMasses, mass(particle))
+ end
+ end
+ outputMasses = Vector{Float64}()
+ for (particle, n) in processDescription.outParticles
+ for _ in 1:n
+ massSum += mass(particle)
+ push!(outputMasses, mass(particle))
+ end
+ end
+
+ # add some extra random mass to allow for some momentum
+ massSum += rand(rng[threadid()]) * (length(inputMasses) + length(outputMasses))
+
+
+ inputParticles = Vector{QEDParticle}()
+ initialMomenta = generate_initial_moms(massSum, inputMasses)
+ index = 1
+ for (particle, n) in processDescription.inParticles
+ for _ in 1:n
+ mom = initialMomenta[index]
+ push!(inputParticles, particle(mom))
+ index += 1
+ end
+ end
+
+ outputParticles = Vector{QEDParticle}()
+ final_momenta = generate_physical_massive_moms(rng[threadid()], massSum, outputMasses)
+ index = 1
+ for (particle, n) in processDescription.outParticles
+ for _ in 1:n
+ push!(outputParticles, particle(final_momenta[index]))
+ index += 1
+ end
+ end
+
+ processInput = QEDProcessInput(processDescription, inputParticles, outputParticles)
+
+ return return processInput
+end
+
+"""
+ gen_graph(process_description::QEDProcessDescription)
+
+For a given [`QEDProcessDescription`](@ref), return the [`DAG`](@ref) that computes it.
+"""
+function gen_graph(process_description::QEDProcessDescription)
+ initial_diagram = FeynmanDiagram(process_description)
+ diagrams = gen_diagrams(initial_diagram)
+
+ graph = DAG()
+
+ COMPLEX_SIZE = sizeof(ComplexF64)
+ PARTICLE_VALUE_SIZE = 96.0
+
+ # TODO: Not all diagram outputs should always be summed at the end, if they differ by fermion exchange they need to be diffed
+ # Should not matter for n-Photon Compton processes though
+ sum_node = insert_node!(graph, make_node(ComputeTaskQED_Sum(0)), track = false, invalidate_cache = false)
+ global_data_out = insert_node!(graph, make_node(DataTask(COMPLEX_SIZE)), track = false, invalidate_cache = false)
+ insert_edge!(graph, sum_node, global_data_out, track = false, invalidate_cache = false)
+
+ # remember the data out nodes for connection
+ dataOutNodes = Dict()
+
+ for particle in initial_diagram.particles
+ # generate data in and U tasks
+ data_in = insert_node!(
+ graph,
+ make_node(DataTask(PARTICLE_VALUE_SIZE), String(particle)),
+ track = false,
+ invalidate_cache = false,
+ ) # read particle data node
+ compute_u = insert_node!(graph, make_node(ComputeTaskQED_U()), track = false, invalidate_cache = false) # compute U node
+ data_out =
+            insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false) # transfer data out from u (one QEDParticleValue object)
+
+ insert_edge!(graph, data_in, compute_u, track = false, invalidate_cache = false)
+ insert_edge!(graph, compute_u, data_out, track = false, invalidate_cache = false)
+
+ # remember the data_out node for future edges
+ dataOutNodes[String(particle)] = data_out
+ end
+
+ #dataOutBackup = copy(dataOutNodes)
+
+ for diagram in diagrams
+        # the intermediate (virtual) particles change across diagrams
+ #dataOutNodes = copy(dataOutBackup)
+
+ tie = diagram.tie[]
+
+ # handle the vertices
+ for vertices in diagram.vertices
+ for vertex in vertices
+ data_in1 = dataOutNodes[String(vertex.in1)]
+ data_in2 = dataOutNodes[String(vertex.in2)]
+
+ compute_V = insert_node!(graph, make_node(ComputeTaskQED_V()), track = false, invalidate_cache = false) # compute vertex
+
+ insert_edge!(graph, data_in1, compute_V, track = false, invalidate_cache = false)
+ insert_edge!(graph, data_in2, compute_V, track = false, invalidate_cache = false)
+
+ data_V_out = insert_node!(
+ graph,
+ make_node(DataTask(PARTICLE_VALUE_SIZE)),
+ track = false,
+ invalidate_cache = false,
+ )
+
+ insert_edge!(graph, compute_V, data_V_out, track = false, invalidate_cache = false)
+
+ if (vertex.out == tie.in1 || vertex.out == tie.in2)
+ # out particle is part of the tie -> there will be an S2 task with it later, don't make S1 task
+ dataOutNodes[String(vertex.out)] = data_V_out
+ continue
+ end
+
+ # otherwise, add S1 task
+ compute_S1 =
+ insert_node!(graph, make_node(ComputeTaskQED_S1()), track = false, invalidate_cache = false) # compute propagator
+
+ insert_edge!(graph, data_V_out, compute_S1, track = false, invalidate_cache = false)
+
+ data_S1_out = insert_node!(
+ graph,
+ make_node(DataTask(PARTICLE_VALUE_SIZE)),
+ track = false,
+ invalidate_cache = false,
+ )
+
+ insert_edge!(graph, compute_S1, data_S1_out, track = false, invalidate_cache = false)
+
+ # overrides potentially different nodes from previous diagrams, which is intentional
+ dataOutNodes[String(vertex.out)] = data_S1_out
+ end
+ end
+
+ # handle the tie
+ data_in1 = dataOutNodes[String(tie.in1)]
+ data_in2 = dataOutNodes[String(tie.in2)]
+
+ compute_S2 = insert_node!(graph, make_node(ComputeTaskQED_S2()), track = false, invalidate_cache = false)
+
+ data_S2 = insert_node!(graph, make_node(DataTask(PARTICLE_VALUE_SIZE)), track = false, invalidate_cache = false)
+
+ insert_edge!(graph, data_in1, compute_S2, track = false, invalidate_cache = false)
+ insert_edge!(graph, data_in2, compute_S2, track = false, invalidate_cache = false)
+
+ insert_edge!(graph, compute_S2, data_S2, track = false, invalidate_cache = false)
+
+ insert_edge!(graph, data_S2, sum_node, track = false, invalidate_cache = false)
+ add_child!(task(sum_node))
+ end
+
+ return graph
+end
diff --git a/src/models/qed/diagrams.jl b/src/models/qed/diagrams.jl
new file mode 100644
index 0000000..eb207a4
--- /dev/null
+++ b/src/models/qed/diagrams.jl
@@ -0,0 +1,484 @@
+
+import Base.copy
+import Base.hash
+import Base.==
+import Base.show
+
+"""
+ FeynmanParticle
+
+Representation of a particle for use in [`FeynmanDiagram`](@ref)s. Consists of the [`QEDParticle`](@ref) type and an id.
+"""
+struct FeynmanParticle
+ particle::Type{<:QEDParticle}
+ id::Int
+end
+
+"""
+ FeynmanVertex
+
+Representation of a vertex in a [`FeynmanDiagram`](@ref). Stores two input [`FeynmanParticle`](@ref)s and one output.
+"""
+struct FeynmanVertex
+ in1::FeynmanParticle
+ in2::FeynmanParticle
+ out::FeynmanParticle
+end
+
+"""
+ FeynmanTie
+
+Representation of a "tie" in a [`FeynmanDiagram`](@ref). A tie ties two virtual particles in a diagram together and thus represents an inner line of the diagram. Not all inner lines are [`FeynmanTie`](@ref)s; in fact, a connected diagram only ever has exactly one tie.
+"""
+struct FeynmanTie
+ in1::FeynmanParticle
+ in2::FeynmanParticle
+end
+
+"""
+ FeynmanDiagram
+
+Representation of a Feynman diagram. It consists of its initial input/output particles, and a vector of sets of [`FeynmanVertex`](@ref)s. The vertices are to be applied level by level.
+A [`FeynmanVertex`](@ref) will always be at the lowest level possible, i.e. the lowest level at which all input particles for it exist.
+The [`FeynmanTie`](@ref) represents the final inner edge of the diagram.
+"""
+struct FeynmanDiagram
+ vertices::Vector{Set{FeynmanVertex}}
+ tie::Ref{Union{FeynmanTie, Missing}}
+ particles::Vector{FeynmanParticle}
+ type_ids::Dict{Type, Int64} # lut for number of used ids for a particle type
+end
+
+"""
+ FeynmanDiagram(pd::QEDProcessDescription)
+
+Create an initial [`FeynmanDiagram`](@ref) with only its initial particles set and no vertices or ties.
+
+Use [`gen_diagrams`](@ref) to generate all possible diagrams from this one.
+"""
+function FeynmanDiagram(pd::QEDProcessDescription)
+ parts = Vector{FeynmanParticle}()
+ for (type, n) in pd.inParticles
+ for i in 1:n
+ push!(parts, FeynmanParticle(type, i))
+ end
+ end
+ for (type, n) in pd.outParticles
+ for i in 1:n
+ push!(parts, FeynmanParticle(type, i))
+ end
+ end
+ ids = Dict{Type, Int64}()
+ for t in types(QEDModel())
+ if (isincoming(t))
+ ids[t] = get(pd.inParticles, t, 0)
+ else
+ ids[t] = get(pd.outParticles, t, 0)
+ end
+ end
+
+ return FeynmanDiagram([], missing, parts, ids)
+end
+
+function particle_after_tie(p::FeynmanParticle, t::FeynmanTie)
+ if p == t.in1 || p == t.in2
+ return FeynmanParticle(FermionStateful{Incoming}, -1) # placeholder particle and id for tied particles
+ end
+ return p
+end
+
+function vertex_after_tie(v::FeynmanVertex, t::FeynmanTie)
+ return FeynmanVertex(particle_after_tie(v.in1, t), particle_after_tie(v.in2, t), particle_after_tie(v.out, t))
+end
+
+function vertex_after_tie(v::FeynmanVertex, t::Missing)
+ return v
+end
+
+function vertex_set_after_tie(vs::Set{FeynmanVertex}, t::FeynmanTie)
+ return Set{FeynmanVertex}(vertex_after_tie(v, t) for v in vs)
+end
+
+function vertex_set_after_tie(vs::Set{FeynmanVertex}, t::Missing)
+ return vs
+end
+
+function vertex_set_after_tie(vs::Set{FeynmanVertex}, t1::Union{FeynmanTie, Missing}, t2::Union{FeynmanTie, Missing})
+ return Set{FeynmanVertex}(vertex_after_tie(vertex_after_tie(v, t1), t2) for v in vs)
+end
+
+"""
+ String(p::FeynmanParticle)
+
+Return a string representation of the [`FeynmanParticle`](@ref) in a format that is readable by [`type_index_from_name`](@ref).
+"""
+function String(p::FeynmanParticle)
+ return "$(String(p.particle))$(String(direction(p.particle)))$(p.id)"
+end
+
+function hash(v::FeynmanVertex)
+ return hash(v.in1) * hash(v.in2)
+end
+
+function hash(t::FeynmanTie)
+ return hash(t.in1) * hash(t.in2)
+end
+
+function hash(d::FeynmanDiagram)
+ return hash((d.vertices, d.particles))
+end
+
+function ==(v1::FeynmanVertex, v2::FeynmanVertex)
+ return (v1.in1 == v2.in1 && v1.in2 == v2.in1) || (v1.in2 == v2.in1 && v1.in1 == v2.in2)
+end
+
+function ==(t1::FeynmanTie, t2::FeynmanTie)
+ return (t1.in1 == t2.in1 && t1.in2 == t2.in1) || (t1.in2 == t2.in1 && t1.in1 == t2.in2)
+end
+
+function ==(d1::FeynmanDiagram, d2::FeynmanDiagram)
+ if (!ismissing(d1.tie[]) && ismissing(d2.tie[])) || (ismissing(d1.tie[]) && !ismissing(d2.tie[]))
+ return false
+ end
+ if d1.particles != d2.particles
+ return false
+ end
+ if length(d1.vertices) != length(d2.vertices)
+ return false
+ end
+
+ # TODO can i prove that this works?
+ for (v1, v2) in zip(d1.vertices, d2.vertices)
+ if vertex_set_after_tie(v1, d1.tie[], d2.tie[]) != vertex_set_after_tie(v2, d1.tie[], d2.tie[])
+ return false
+ end
+ end
+ return true
+
+ #=return isequal.(
+ vertex_set_after_tie(d1.vertices, d1.tie, d2.tie),
+ vertex_set_after_tie(d2.vertices, d1.tie, d2.tie),
+ )=#
+end
+
+copy(fd::FeynmanDiagram) =
+ FeynmanDiagram(deepcopy(fd.vertices), copy(fd.tie[]), deepcopy(fd.particles), copy(fd.type_ids))
+
+"""
+ id_for_type(d::FeynmanDiagram, t::Type{<:QEDParticle})
+
+Return the highest id of any particle of the given type in the diagram + 1.
+"""
+function id_for_type(d::FeynmanDiagram, t::Type{<:QEDParticle})
+ return d.type_ids[t] + 1
+end
+
+"""
+ can_apply_vertex(particles::Vector{FeynmanParticle}, vertex::FeynmanVertex)
+
+Return true if the given [`FeynmanVertex`](@ref) can be applied to the given particles, i.e. both input particles of the vertex are in the vector and the output particle is not.
+"""
+function can_apply_vertex(particles::Vector{FeynmanParticle}, vertex::FeynmanVertex)
+ return vertex.in1 in particles && vertex.in2 in particles && !(vertex.out in particles)
+end
+
+"""
+ apply_vertex!(particles::Vector{FeynmanParticle}, vertex::FeynmanVertex)
+
+Apply a [`FeynmanVertex`](@ref) to the given vector of [`FeynmanParticle`](@ref)s.
+"""
+function apply_vertex!(particles::Vector{FeynmanParticle}, vertex::FeynmanVertex)
+ #@assert can_apply_vertex(particles, vertex)
+ length_before = length(particles)
+ filter!(x -> x != vertex.in1 && x != vertex.in2, particles)
+ push!(particles, vertex.out)
+ #@assert length(particles) == length_before - 1
+ return nothing
+end
+
+"""
+ can_apply_tie(particles::Vector{FeynmanParticle}, tie::FeynmanTie)
+
+Return true if the given [`FeynmanTie`](@ref) can be applied to the given particles, i.e. both input particles of the tie are in the vector.
+"""
+function can_apply_tie(particles::Vector{FeynmanParticle}, tie::FeynmanTie)
+ return tie.in1 in particles && tie.in2 in particles
+end
+
+"""
+ apply_tie!(particles::Vector{FeynmanParticle}, tie::FeynmanTie)
+
+Apply a [`FeynmanTie`](@ref) to the given vector of [`FeynmanParticle`](@ref)s.
+"""
+function apply_tie!(particles::Vector{FeynmanParticle}, tie::FeynmanTie)
+ @assert length(particles) == 2
+ @assert can_apply_tie(particles, tie)
+ @assert can_tie(tie.in1.particle, tie.in2.particle)
+ empty!(particles)
+ @assert length(particles) == 0
+ return nothing
+end
+
+function apply_tie!(::Vector{FeynmanParticle}, ::Missing)
+ return nothing
+end
+
+"""
+ get_particles(fd::FeynmanDiagram, level::Int)
+
+Return a vector of the particles after applying the vertices and tie of the diagram up to the given level. If no level is given, apply all. The tie comes last and is its own "level".
+"""
+function get_particles(fd::FeynmanDiagram, level::Int = -1)
+ if level == -1
+ level = length(fd.vertices) + 1
+ end
+
+ working_particles = copy(fd.particles)
+ for l in 1:length(fd.vertices)
+ if l > level
+ break
+ end
+ for v in fd.vertices[l]
+ apply_vertex!(working_particles, v)
+ end
+ end
+
+ if (level > length(fd.vertices))
+ apply_tie!(working_particles, fd.tie[])
+ end
+
+ return working_particles
+end
+
+"""
+ add_vertex!(fd::FeynmanDiagram, vertex::FeynmanVertex)
+
+Add the given vertex to the diagram, at the earliest level possible.
+"""
+function add_vertex!(fd::FeynmanDiagram, vertex::FeynmanVertex)
+ for i in eachindex(fd.vertices)
+ if (can_apply_vertex(get_particles(fd, i - 1), vertex))
+ push!(fd.vertices[i], vertex)
+ fd.type_ids[vertex.out.particle] += 1
+ return nothing
+ end
+ end
+
+ if !can_apply_vertex(get_particles(fd), vertex)
+ #@assert false "Can't add vertex $vertex to diagram"
+ end
+
+ push!(fd.vertices, Set{FeynmanVertex}())
+ push!(fd.vertices[end], vertex)
+ fd.type_ids[vertex.out.particle] += 1
+
+ return nothing
+end
+
+"""
+ add_vertex(fd::FeynmanDiagram, vertex::FeynmanVertex)
+
+Add the given vertex to the diagram, at the earliest level possible. Return the new diagram without mutating the given one.
+"""
+function add_vertex(fd::FeynmanDiagram, vertex::FeynmanVertex)
+ newfd = copy(fd)
+ add_vertex!(newfd, vertex)
+ return newfd
+end
+
+"""
+ add_tie!(fd::FeynmanDiagram, tie::FeynmanTie)
+
+Add the given tie to the diagram, always at the last level.
+"""
+function add_tie!(fd::FeynmanDiagram, tie::FeynmanTie)
+ if !can_apply_tie(get_particles(fd), tie)
+ @assert false "Can't add tie $tie to diagram"
+ end
+
+ fd.tie[] = tie
+ #=
+ @assert length(fd.vertices) >= 2
+ #if the last vertex is involved in the tie and alone, lower it one level down
+ if (length(fd.vertices[end]) != 1)
+ return nothing
+ end
+
+ vert = fd.vertices[end][1]
+ if (vert != vertex_after_tie(vert, tie))
+ return nothing
+ end
+
+ pop!(fd.vertices)
+ push!(fd.vertices[end], vert)
+ =#
+ return nothing
+end
+
+"""
+ add_tie(fd::FeynmanDiagram, tie::FeynmanTie)
+
+Add the given tie to the diagram, always at the last level. Return the new diagram without mutating the given one.
+"""
+function add_tie(fd::FeynmanDiagram, tie::FeynmanTie)
+ newfd = copy(fd)
+ add_tie!(newfd, tie)
+ return newfd
+end
+
+"""
+ isvalid(fd::FeynmanDiagram)
+
+Return whether the given diagram is valid. A diagram is valid iff the following are true:
+- After applying all vertices and the tie, there are no more particles left
+- The diagram is connected
+"""
+function isvalid(fd::FeynmanDiagram)
+ if ismissing(fd.tie[])
+ # diagram is connected iff there is one tie
+ return false
+ end
+
+ if get_particles(fd) != []
+ return false
+ end
+
+ return true
+end
+
+"""
+ possible_vertices(fd::FeynmanDiagram)
+
+Return a vector of all possible vertices that can be applied to the diagram at its current state.
+"""
+function possible_vertices(fd::FeynmanDiagram)
+ possibilities = Vector{FeynmanVertex}()
+ fully_generated_particles = get_particles(fd)
+
+ min_level = max(0, length(fd.vertices) - 1)
+ for l in min_level:length(fd.vertices)
+ particles = get_particles(fd, l)
+ for i in 1:length(particles)
+ for j in (i + 1):length(particles)
+ p1 = particles[i]
+ p2 = particles[j]
+ if (caninteract(p1.particle, p2.particle))
+ interaction_res = propagation_result(interaction_result(p1.particle, p2.particle))
+ v = FeynmanVertex(p1, p2, FeynmanParticle(interaction_res, id_for_type(fd, interaction_res)))
+ #@assert !(v.out in particles) "$v is in $fd"
+ if !can_apply_vertex(fully_generated_particles, v)
+ continue
+ end
+ push!(possibilities, v)
+ end
+ end
+ end
+ if (!isempty(possibilities))
+ return possibilities
+ end
+ end
+ return possibilities
+end
+
+"""
+ can_tie(p1::Type, p2::Type)
+
+For two given [`QEDParticle`](@ref) types, return whether they can be tied together.
+
+They can be tied iff one is the [`propagation_result`](@ref) of the other, or if both are photons, in which case their direction does not matter.
+"""
+function can_tie(p1::Type, p2::Type)
+ if p1 == propagation_result(p2)
+ return true
+ end
+ if (p1 <: PhotonStateful && p2 <: PhotonStateful)
+ return true
+ end
+ return false
+end
+
+"""
+ possible_tie(fd::FeynmanDiagram)
+
+Return a possible tie or `missing` for the diagram at its current state.
+"""
+function possible_tie(fd::FeynmanDiagram)
+ particles = get_particles(fd)
+ if (length(particles) != 2)
+ return missing
+ end
+
+ if (particles[1] in fd.particles || particles[2] in fd.particles)
+ return missing
+ end
+
+ tie = FeynmanTie(particles[1], particles[2])
+ if (can_apply_tie(particles, tie))
+ return tie
+ end
+ return missing
+end
+
+function remove_duplicates(compare_set::Set{FeynmanDiagram})
+ result = Set()
+
+ while !isempty(compare_set)
+ x = pop!(compare_set)
+ # we know there will only be one duplicate if any, so search for that and delete it
+ for y in compare_set
+ if x == y
+ delete!(compare_set, y)
+ break
+ end
+ end
+ push!(result, x)
+ end
+
+ return result
+end
+
+"""
+ gen_diagrams(fd::FeynmanDiagram)
+
+From a given Feynman diagram in its initial state, e.g. when created through the [`FeynmanDiagram(pd::QEDProcessDescription)`](@ref) constructor, generate and return all possible [`FeynmanDiagram`](@ref)s that describe that process.
+"""
+function gen_diagrams(fd::FeynmanDiagram)
+ working = Set{FeynmanDiagram}()
+ results = Set{FeynmanDiagram}()
+
+ push!(working, fd)
+
+ # we know there will be particle_number - 2 vertices, followed by 1 tie
+ n_particles = length(fd.particles)
+ n_vertices = n_particles - 2
+
+ # doing this in iterations should reduce the intermediate number of diagrams by hash collisions
+ for _ in 1:n_vertices
+ next_iter_set = Set{FeynmanDiagram}()
+
+ while !isempty(working)
+ d = pop!(working)
+
+ possibilities = possible_vertices(d)
+ for v in possibilities
+ push!(next_iter_set, add_vertex(d, v))
+ end
+ end
+
+ working = next_iter_set
+ end
+
+ # add the tie
+ for d in working
+ tie = possible_tie(d)
+ if ismissing(tie)
+ continue
+ end
+ add_tie!(d, tie)
+ if isvalid(d)
+ push!(results, d)
+ end
+ end
+
+ return remove_duplicates(results)
+end
diff --git a/src/models/qed/parse.jl b/src/models/qed/parse.jl
new file mode 100644
index 0000000..ffcac74
--- /dev/null
+++ b/src/models/qed/parse.jl
@@ -0,0 +1,44 @@
+
+"""
+ parse_process(string::AbstractString, model::QEDModel)
+
+Parse a string representation of a process, such as "ke->ke" into the corresponding [`QEDProcessDescription`](@ref).
+"""
+function parse_process(str::AbstractString, model::QEDModel)
+ inParticles = Dict{Type, Int}()
+ outParticles = Dict{Type, Int}()
+
+ if !(contains(str, "->"))
+ throw("Did not find -> while parsing process \"$str\"")
+ end
+
+ (inStr, outStr) = split(str, "->")
+
+ if (isempty(inStr) || isempty(outStr))
+ throw("Process (\"$str\") input or output part is empty!")
+ end
+
+ for t in types(model)
+ if (isincoming(t))
+ inCount = count(x -> x == String(t)[1], inStr)
+
+ if inCount != 0
+ inParticles[t] = inCount
+ end
+ end
+ if (isoutgoing(t))
+ outCount = count(x -> x == String(t)[1], outStr)
+ if outCount != 0
+ outParticles[t] = outCount
+ end
+ end
+ end
+
+ if length(inStr) != sum(values(inParticles))
+ throw("Encountered unknown characters in the input part of process \"$str\"")
+ elseif length(outStr) != sum(values(outParticles))
+ throw("Encountered unknown characters in the output part of process \"$str\"")
+ end
+
+ return QEDProcessDescription(inParticles, outParticles)
+end
diff --git a/src/models/qed/particle.jl b/src/models/qed/particle.jl
new file mode 100644
index 0000000..208a5ae
--- /dev/null
+++ b/src/models/qed/particle.jl
@@ -0,0 +1,348 @@
+using QEDprocesses
+import QEDbase.mass
+
+# TODO check
+const e = sqrt(4π / 137)
+
+"""
+ QEDModel <: AbstractPhysicsModel
+
+Singleton definition for identification of the QED-Model.
+"""
+struct QEDModel <: AbstractPhysicsModel end
+
+"""
+ QEDParticle
+
+Base type for all particles in the [`QEDModel`](@ref).
+
+Its template parameter specifies the particle's direction.
+
+The concrete types contain singletons of the types that they are, like `Photon` and `Electron` from QEDbase, and their state descriptions.
+"""
+abstract type QEDParticle{Direction <: ParticleDirection} <: AbstractParticle end
+
+"""
+ QEDProcessDescription <: AbstractProcessDescription
+
+A description of a process in the QED-Model. Contains the input and output particles.
+
+See also: [`in_particles`](@ref), [`out_particles`](@ref), [`parse_process`](@ref)
+"""
+struct QEDProcessDescription <: AbstractProcessDescription
+ inParticles::Dict{Type{<:QEDParticle{Incoming}}, Int}
+ outParticles::Dict{Type{<:QEDParticle{Outgoing}}, Int}
+end
+
+"""
+ QEDProcessInput <: AbstractProcessInput
+
+Input for a QED Process. Contains the [`QEDProcessDescription`](@ref) of the process it is an input for, and the values of the in and out particles.
+
+See also: [`gen_process_input`](@ref)
+"""
+struct QEDProcessInput <: AbstractProcessInput
+ process::QEDProcessDescription
+ inParticles::Vector{QEDParticle}
+ outParticles::Vector{QEDParticle}
+end
+
+QEDParticleValue{ParticleType <: QEDParticle} = Union{
+ ParticleValue{ParticleType, BiSpinor},
+ ParticleValue{ParticleType, AdjointBiSpinor},
+ ParticleValue{ParticleType, DiracMatrix},
+ ParticleValue{ParticleType, SLorentzVector{Float64}},
+ ParticleValue{ParticleType, ComplexF64},
+}
+
+"""
+ PhotonStateful <: QEDParticle
+
+A photon of the [`QEDModel`](@ref) with its state.
+"""
+struct PhotonStateful{Direction <: ParticleDirection} <: QEDParticle{Direction}
+ momentum::SFourMomentum
+    # TODO: this may change to the full polarization vector — check whether both representations are needed
+ polarization::AbstractDefinitePolarization
+end
+
+PhotonStateful{Direction}(mom::SFourMomentum) where {Direction <: ParticleDirection} =
+ PhotonStateful{Direction}(mom, PolX()) # TODO: make allpol possible
+
+PhotonStateful{Dir1}(ph::PhotonStateful{Dir2}) where {Dir1 <: ParticleDirection, Dir2 <: ParticleDirection} =
+ PhotonStateful{Dir1}(ph.momentum, ph.polarization)
+
+"""
+ FermionStateful <: QEDParticle
+
+A fermion of the [`QEDModel`](@ref) with its state.
+"""
+struct FermionStateful{Direction <: ParticleDirection} <: QEDParticle{Direction}
+ momentum::SFourMomentum
+ spin::AbstractDefiniteSpin
+ # TODO: mass for electron/muon/tauon representation?
+end
+
+FermionStateful{Direction}(mom::SFourMomentum) where {Direction <: ParticleDirection} =
+ FermionStateful{Direction}(mom, SpinUp()) # TODO: make allspin possible
+
+FermionStateful{Dir1}(f::FermionStateful{Dir2}) where {Dir1 <: ParticleDirection, Dir2 <: ParticleDirection} =
+ FermionStateful{Dir1}(f.momentum, f.spin)
+
+"""
+ AntiFermionStateful <: QEDParticle
+
+An anti-fermion of the [`QEDModel`](@ref) with its state.
+"""
+struct AntiFermionStateful{Direction <: ParticleDirection} <: QEDParticle{Direction}
+ momentum::SFourMomentum
+ spin::AbstractDefiniteSpin
+ # TODO: mass for electron/muon/tauon representation?
+end
+
+AntiFermionStateful{Direction}(mom::SFourMomentum) where {Direction <: ParticleDirection} =
+ AntiFermionStateful{Direction}(mom, SpinUp()) # TODO: make allspin possible
+
+AntiFermionStateful{Dir1}(f::AntiFermionStateful{Dir2}) where {Dir1 <: ParticleDirection, Dir2 <: ParticleDirection} =
+ AntiFermionStateful{Dir1}(f.momentum, f.spin)
+
+"""
+ interaction_result(t1::Type{T1}, t2::Type{T2}) where {T1 <: QEDParticle, T2 <: QEDParticle}
+
+For two given particle types that can interact, return the third.
+"""
+function interaction_result(t1::Type{T1}, t2::Type{T2}) where {T1 <: QEDParticle, T2 <: QEDParticle}
+ @assert false "Invalid interaction between particles of types $t1 and $t2"
+end
+
+interaction_result(::Type{FermionStateful{Incoming}}, ::Type{FermionStateful{Outgoing}}) = PhotonStateful{Incoming}
+interaction_result(::Type{FermionStateful{Incoming}}, ::Type{AntiFermionStateful{Incoming}}) = PhotonStateful{Incoming}
+interaction_result(::Type{FermionStateful{Incoming}}, ::Type{<:PhotonStateful}) = FermionStateful{Outgoing}
+
+interaction_result(::Type{FermionStateful{Outgoing}}, ::Type{FermionStateful{Incoming}}) = PhotonStateful{Incoming}
+interaction_result(::Type{FermionStateful{Outgoing}}, ::Type{AntiFermionStateful{Outgoing}}) = PhotonStateful{Incoming}
+interaction_result(::Type{FermionStateful{Outgoing}}, ::Type{<:PhotonStateful}) = FermionStateful{Incoming}
+
+# antifermion mirror
+interaction_result(::Type{AntiFermionStateful{Incoming}}, t2::Type{<:QEDParticle}) =
+ interaction_result(FermionStateful{Outgoing}, t2)
+interaction_result(::Type{AntiFermionStateful{Outgoing}}, t2::Type{<:QEDParticle}) =
+ interaction_result(FermionStateful{Incoming}, t2)
+
+# photon commutativity
+interaction_result(t1::Type{<:PhotonStateful}, t2::Type{<:QEDParticle}) = interaction_result(t2, t1)
+
+# but prevent stack overflow
+function interaction_result(t1::Type{<:PhotonStateful}, t2::Type{<:PhotonStateful})
+ @assert false "Invalid interaction between particles of types $t1 and $t2"
+end
+
+"""
+ propagation_result(t1::Type{T}) where {T <: QEDParticle}
+
+Return the type of the same particle with inverted direction, e.g. `propagation_result(FermionStateful{Incoming})` gives `FermionStateful{Outgoing}`.
+"""
+propagation_result(::Type{FermionStateful{Incoming}}) = FermionStateful{Outgoing}
+propagation_result(::Type{FermionStateful{Outgoing}}) = FermionStateful{Incoming}
+propagation_result(::Type{AntiFermionStateful{Incoming}}) = AntiFermionStateful{Outgoing}
+propagation_result(::Type{AntiFermionStateful{Outgoing}}) = AntiFermionStateful{Incoming}
+propagation_result(::Type{PhotonStateful{Incoming}}) = PhotonStateful{Outgoing}
+propagation_result(::Type{PhotonStateful{Outgoing}}) = PhotonStateful{Incoming}
+
+"""
+ types(::QEDModel)
+
+Return a Vector of the possible types of particle in the [`QEDModel`](@ref).
+"""
+function types(::QEDModel)
+ return [
+ PhotonStateful{Incoming},
+ PhotonStateful{Outgoing},
+ FermionStateful{Incoming},
+ FermionStateful{Outgoing},
+ AntiFermionStateful{Incoming},
+ AntiFermionStateful{Outgoing},
+ ]
+end
+
+# NOTE(review): extending `String` for types not owned here (e.g. `Incoming`/`Outgoing`) is type piracy — consider a dedicated local function instead
+String(::Type{Incoming}) = "Incoming"
+String(::Type{Outgoing}) = "Outgoing"
+
+String(::Incoming) = "i"
+String(::Outgoing) = "o"
+
+function String(::Type{<:PhotonStateful})
+ return "k"
+end
+function String(::Type{<:FermionStateful})
+ return "e"
+end
+function String(::Type{<:AntiFermionStateful})
+ return "p"
+end
+
+@inline particle(::PhotonStateful) = Photon()
+@inline particle(::FermionStateful) = Electron()
+@inline particle(::AntiFermionStateful) = Positron()
+
+@inline momentum(p::PhotonStateful)::SFourMomentum = p.momentum
+@inline momentum(p::FermionStateful)::SFourMomentum = p.momentum
+@inline momentum(p::AntiFermionStateful)::SFourMomentum = p.momentum
+
+@inline spin_or_pol(p::PhotonStateful)::AbstractPolarization = p.polarization
+@inline spin_or_pol(p::FermionStateful)::AbstractSpin = p.spin
+@inline spin_or_pol(p::AntiFermionStateful)::AbstractSpin = p.spin
+
+@inline direction(::PhotonStateful{Dir}) where {Dir <: ParticleDirection} = Dir()
+@inline direction(::FermionStateful{Dir}) where {Dir <: ParticleDirection} = Dir()
+@inline direction(::AntiFermionStateful{Dir}) where {Dir <: ParticleDirection} = Dir()
+
+@inline direction(::Type{PhotonStateful{Dir}}) where {Dir <: ParticleDirection} = Dir()
+@inline direction(::Type{FermionStateful{Dir}}) where {Dir <: ParticleDirection} = Dir()
+@inline direction(::Type{AntiFermionStateful{Dir}}) where {Dir <: ParticleDirection} = Dir()
+
+@inline isincoming(::QEDParticle{Incoming}) = true
+@inline isincoming(::QEDParticle{Outgoing}) = false
+@inline isoutgoing(::QEDParticle{Incoming}) = false
+@inline isoutgoing(::QEDParticle{Outgoing}) = true
+
+@inline isincoming(::Type{<:QEDParticle{Incoming}}) = true
+@inline isincoming(::Type{<:QEDParticle{Outgoing}}) = false
+@inline isoutgoing(::Type{<:QEDParticle{Incoming}}) = false
+@inline isoutgoing(::Type{<:QEDParticle{Outgoing}}) = true
+
+@inline mass(::Type{<:FermionStateful}) = 1.0
+@inline mass(::Type{<:AntiFermionStateful}) = 1.0
+@inline mass(::Type{<:PhotonStateful}) = 0.0
+
+@inline invert_momentum(p::FermionStateful{Dir}) where {Dir <: ParticleDirection} =
+ FermionStateful{Dir}(-p.momentum, p.spin)
+@inline invert_momentum(p::AntiFermionStateful{Dir}) where {Dir <: ParticleDirection} =
+ AntiFermionStateful{Dir}(-p.momentum, p.spin)
+@inline invert_momentum(k::PhotonStateful{Dir}) where {Dir <: ParticleDirection} =
+ PhotonStateful{Dir}(-k.momentum, k.polarization)
+
+
+"""
+ caninteract(T1::Type{<:QEDParticle}, T2::Type{<:QEDParticle})
+
+For two given [`QEDParticle`](@ref) types, return whether they can interact at a vertex. This is equivalent to `!issame(T1, T2)`.
+
+See also: [`issame`](@ref) and [`interaction_result`](@ref)
+"""
+function caninteract(T1::Type{<:QEDParticle}, T2::Type{<:QEDParticle})
+ if (T1 == T2)
+ return false
+ end
+ if (T1 <: PhotonStateful && T2 <: PhotonStateful)
+ return false
+ end
+
+ for (P1, P2) in [(T1, T2), (T2, T1)]
+ if (P1 == FermionStateful{Incoming} && P2 == AntiFermionStateful{Outgoing})
+ return false
+ end
+ if (P1 == FermionStateful{Outgoing} && P2 == AntiFermionStateful{Incoming})
+ return false
+ end
+ end
+
+ return true
+end
+
+function type_index_from_name(::QEDModel, name::String)
+ if startswith(name, "ki")
+ return (PhotonStateful{Incoming}, parse(Int, name[3:end]))
+ elseif startswith(name, "ko")
+ return (PhotonStateful{Outgoing}, parse(Int, name[3:end]))
+ elseif startswith(name, "ei")
+ return (FermionStateful{Incoming}, parse(Int, name[3:end]))
+ elseif startswith(name, "eo")
+ return (FermionStateful{Outgoing}, parse(Int, name[3:end]))
+ elseif startswith(name, "pi")
+ return (AntiFermionStateful{Incoming}, parse(Int, name[3:end]))
+ elseif startswith(name, "po")
+ return (AntiFermionStateful{Outgoing}, parse(Int, name[3:end]))
+ else
+ throw("Invalid name for a particle in the QED model")
+ end
+end
+
+"""
+ issame(T1::Type{<:QEDParticle}, T2::Type{<:QEDParticle})
+
+For two given [`QEDParticle`](@ref) types, return whether they are equivalent for the purpose of a Feynman Diagram. That means e.g. an `Incoming` `AntiFermion` is the same as an `Outgoing` `Fermion`. This is equivalent to `!caninteract(T1, T2)`.
+
+See also: [`caninteract`](@ref) and [`interaction_result`](@ref)
+"""
+function issame(T1::Type{<:QEDParticle}, T2::Type{<:QEDParticle})
+ return !caninteract(T1, T2)
+end
+
+"""
+ QED_vertex()
+
+Return the factor of a vertex in a QED Feynman diagram.
+"""
+@inline function QED_vertex()::SLorentzVector{DiracMatrix}
+ # Peskin-Schroeder notation
+ return -1im * e * gamma()
+end
+
+@inline function QED_inner_edge(p::QEDParticle)
+ pos_mom = p.momentum
+ return propagator(particle(p), pos_mom)
+end
+
+"""
+ QED_conserve_momentum(p1::QEDParticle, p2::QEDParticle)
+
+Calculate and return a new particle from two given interacting ones at a vertex.
+"""
+function QED_conserve_momentum(p1::QEDParticle, p2::QEDParticle)
+ #println("Conserving momentum of \n$(direction(p1)) $(p1)\n and \n$(direction(p2)) $(p2)")
+ T3 = interaction_result(typeof(p1), typeof(p2))
+ # TODO: probably also need to do something about the spin/pol
+ p1_mom = p1.momentum
+ if (typeof(direction(p1)) <: Outgoing)
+ p1_mom *= -1
+ end
+ p2_mom = p2.momentum
+ if (typeof(direction(p2)) <: Outgoing)
+ p2_mom *= -1
+ end
+
+ p3_mom = p1_mom + p2_mom
+ if (typeof(direction(T3)) <: Incoming)
+ return T3(-p3_mom)
+ end
+ return T3(p3_mom)
+end
+
+"""
+ model(::AbstractProcessDescription)
+
+Return the model of this process description.
+"""
+model(::QEDProcessDescription) = QEDModel()
+model(::QEDProcessInput) = QEDModel()
+
+==(p1::QEDProcessDescription, p2::QEDProcessDescription) =
+ p1.inParticles == p2.inParticles && p1.outParticles == p2.outParticles
+
+function in_particles(process::QEDProcessDescription)
+ return process.inParticles
+end
+
+function in_particles(input::QEDProcessInput)
+ return input.inParticles
+end
+
+function out_particles(process::QEDProcessDescription)
+ return process.outParticles
+end
+
+function out_particles(input::QEDProcessInput)
+ return input.outParticles
+end
diff --git a/src/models/qed/print.jl b/src/models/qed/print.jl
new file mode 100644
index 0000000..76f2166
--- /dev/null
+++ b/src/models/qed/print.jl
@@ -0,0 +1,115 @@
+
+"""
+ show(io::IO, process::QEDProcessDescription)
+
+Pretty print a [`QEDProcessDescription`](@ref) (no newlines).
+
+```jldoctest
+julia> using MetagraphOptimization
+
+julia> print(parse_process("ke->ke", QEDModel()))
+QED Process: 'ke->ke'
+
+julia> print(parse_process("kk->ep", QEDModel()))
+QED Process: 'kk->ep'
+```
+"""
+function show(io::IO, process::QEDProcessDescription)
+ # types() gives the types in order (QED) instead of random like keys() would
+ print(io, "QED Process: \'")
+ for type in types(QEDModel())
+ for _ in 1:get(process.inParticles, type, 0)
+ print(io, String(type))
+ end
+ end
+ print(io, "->")
+ for type in types(QEDModel())
+ for _ in 1:get(process.outParticles, type, 0)
+ print(io, String(type))
+ end
+ end
+ print(io, "'")
+ return nothing
+end
+
+"""
+ show(io::IO, processInput::QEDProcessInput)
+
+Pretty print a [`QEDProcessInput`](@ref) (with newlines).
+"""
+function show(io::IO, processInput::QEDProcessInput)
+ println(io, "Input for $(processInput.process):")
+ println(io, " $(length(processInput.inParticles)) Incoming particles:")
+ for particle in processInput.inParticles
+ println(io, " $particle")
+ end
+ println(io, " $(length(processInput.outParticles)) Outgoing Particles:")
+ for particle in processInput.outParticles
+ println(io, " $particle")
+ end
+ return nothing
+end
+
+"""
+ show(io::IO, particle::T) where {T <: QEDParticle}
+
+Pretty print a [`QEDParticle`](@ref) (no newlines).
+"""
+function show(io::IO, particle::T) where {T <: QEDParticle}
+ print(io, "$(String(typeof(particle))): $(particle.momentum)")
+ return nothing
+end
+
+"""
+ show(io::IO, particle::FeynmanParticle)
+
+Pretty print a [`FeynmanParticle`](@ref) (no newlines).
+"""
+show(io::IO, p::FeynmanParticle) = print(io, "$(String(p.particle))_$(String(direction(p.particle)))_$(p.id)")
+
+"""
+ show(io::IO, particle::FeynmanVertex)
+
+Pretty print a [`FeynmanVertex`](@ref) (no newlines).
+"""
+show(io::IO, v::FeynmanVertex) = print(io, "$(v.in1) + $(v.in2) -> $(v.out)")
+
+"""
+ show(io::IO, particle::FeynmanTie)
+
+Pretty print a [`FeynmanTie`](@ref) (no newlines).
+"""
+show(io::IO, t::FeynmanTie) = print(io, "$(t.in1) -- $(t.in2)")
+
+"""
+ show(io::IO, particle::FeynmanDiagram)
+
+Pretty print a [`FeynmanDiagram`](@ref) (with newlines).
+"""
+function show(io::IO, d::FeynmanDiagram)
+ print(io, "Initial Particles: [")
+ first = true
+ for p in d.particles
+ if first
+ first = false
+ print(io, "$p")
+ else
+ print(io, ", $p")
+ end
+ end
+ print(io, "]\n")
+ for l in eachindex(d.vertices)
+ print(io, " Virtuality Level $l Vertices: [")
+ first = true
+ for v in d.vertices[l]
+ if first
+ first = false
+ print(io, "$v")
+ else
+ print(io, ", $v")
+ end
+ end
+ print(io, "]\n")
+ end
+ return print(io, " Tie: $(d.tie[])\n")
+end
diff --git a/src/models/qed/properties.jl b/src/models/qed/properties.jl
new file mode 100644
index 0000000..995e69c
--- /dev/null
+++ b/src/models/qed/properties.jl
@@ -0,0 +1,135 @@
+# TODO use correct numbers
+
+"""
+ compute_effort(t::ComputeTaskQED_S1)
+
+Return the compute effort of an S1 task.
+"""
+compute_effort(t::ComputeTaskQED_S1)::Float64 = 11.0
+
+"""
+ compute_effort(t::ComputeTaskQED_S2)
+
+Return the compute effort of an S2 task.
+"""
+compute_effort(t::ComputeTaskQED_S2)::Float64 = 12.0
+
+"""
+ compute_effort(t::ComputeTaskQED_U)
+
+Return the compute effort of a U task.
+"""
+compute_effort(t::ComputeTaskQED_U)::Float64 = 1.0
+
+"""
+ compute_effort(t::ComputeTaskQED_V)
+
+Return the compute effort of a V task.
+"""
+compute_effort(t::ComputeTaskQED_V)::Float64 = 6.0
+
+"""
+ compute_effort(t::ComputeTaskQED_P)
+
+Return the compute effort of a P task.
+"""
+compute_effort(t::ComputeTaskQED_P)::Float64 = 0.0
+
+"""
+ compute_effort(t::ComputeTaskQED_Sum)
+
+Return the compute effort of a Sum task.
+
+Note: This is a constant compute effort, even though sum scales with the number of its inputs. Since there is only ever a single sum node in a graph generated from the QED-Model,
+this doesn't matter.
+"""
+compute_effort(t::ComputeTaskQED_Sum)::Float64 = 1.0
+
+"""
+ show(io::IO, t::ComputeTaskQED_S1)
+
+Print the S1 task to io.
+"""
+show(io::IO, t::ComputeTaskQED_S1) = print(io, "ComputeS1")
+
+"""
+ show(io::IO, t::ComputeTaskQED_S2)
+
+Print the S2 task to io.
+"""
+show(io::IO, t::ComputeTaskQED_S2) = print(io, "ComputeS2")
+
+"""
+ show(io::IO, t::ComputeTaskQED_P)
+
+Print the P task to io.
+"""
+show(io::IO, t::ComputeTaskQED_P) = print(io, "ComputeP")
+
+"""
+ show(io::IO, t::ComputeTaskQED_U)
+
+Print the U task to io.
+"""
+show(io::IO, t::ComputeTaskQED_U) = print(io, "ComputeU")
+
+"""
+ show(io::IO, t::ComputeTaskQED_V)
+
+Print the V task to io.
+"""
+show(io::IO, t::ComputeTaskQED_V) = print(io, "ComputeV")
+
+"""
+ show(io::IO, t::ComputeTaskQED_Sum)
+
+Print the sum task to io.
+"""
+show(io::IO, t::ComputeTaskQED_Sum) = print(io, "ComputeSum")
+
+"""
+ children(::ComputeTaskQED_S1)
+
+Return the number of children of a ComputeTaskQED_S1 (always 1).
+"""
+children(::ComputeTaskQED_S1) = 1
+
+"""
+ children(::ComputeTaskQED_S2)
+
+Return the number of children of a ComputeTaskQED_S2 (always 2).
+"""
+children(::ComputeTaskQED_S2) = 2
+
+"""
+ children(::ComputeTaskQED_P)
+
+Return the number of children of a ComputeTaskQED_P (always 1).
+"""
+children(::ComputeTaskQED_P) = 1
+
+"""
+ children(::ComputeTaskQED_U)
+
+Return the number of children of a ComputeTaskQED_U (always 1).
+"""
+children(::ComputeTaskQED_U) = 1
+
+"""
+ children(::ComputeTaskQED_V)
+
+Return the number of children of a ComputeTaskQED_V (always 2).
+"""
+children(::ComputeTaskQED_V) = 2
+
+"""
+ children(::ComputeTaskQED_Sum)
+
+Return the number of children of a ComputeTaskQED_Sum.
+"""
+children(t::ComputeTaskQED_Sum) = t.children_number
+
+function add_child!(t::ComputeTaskQED_Sum)
+ t.children_number += 1
+ return nothing
+end
diff --git a/src/models/qed/types.jl b/src/models/qed/types.jl
new file mode 100644
index 0000000..9923014
--- /dev/null
+++ b/src/models/qed/types.jl
@@ -0,0 +1,51 @@
+"""
+ ComputeTaskQED_S1 <: AbstractComputeTask
+
+S task with a single child.
+"""
+struct ComputeTaskQED_S1 <: AbstractComputeTask end
+
+"""
+ ComputeTaskQED_S2 <: AbstractComputeTask
+
+S task with two children.
+"""
+struct ComputeTaskQED_S2 <: AbstractComputeTask end
+
+"""
+ ComputeTaskQED_P <: AbstractComputeTask
+
+P task with no children.
+"""
+struct ComputeTaskQED_P <: AbstractComputeTask end
+
+"""
+ ComputeTaskQED_V <: AbstractComputeTask
+
+V task with two children.
+"""
+struct ComputeTaskQED_V <: AbstractComputeTask end
+
+"""
+ ComputeTaskQED_U <: AbstractComputeTask
+
+U task with a single child.
+"""
+struct ComputeTaskQED_U <: AbstractComputeTask end
+
+"""
+ ComputeTaskQED_Sum <: AbstractComputeTask
+
+Task that sums all its inputs, n children.
+"""
+mutable struct ComputeTaskQED_Sum <: AbstractComputeTask
+ children_number::Int
+end
+
+"""
+ QED_TASKS
+
+Constant vector of all tasks of the QED-Model.
+"""
+QED_TASKS =
+ [ComputeTaskQED_S1, ComputeTaskQED_S2, ComputeTaskQED_P, ComputeTaskQED_V, ComputeTaskQED_U, ComputeTaskQED_Sum]
diff --git a/src/task/print.jl b/src/task/print.jl
index 5dcd9ac..2613ba1 100644
--- a/src/task/print.jl
+++ b/src/task/print.jl
@@ -6,3 +6,12 @@ Print a string representation of the fused compute task to io.
function show(io::IO, t::FusedComputeTask)
return print(io, "ComputeFuse($(t.first_task), $(t.second_task))")
end
+
+"""
+ show(io::IO, t::DataTask)
+
+Print the data task to io.
+"""
+function show(io::IO, t::DataTask)
+ return print(io, "Data", t.data)
+end
diff --git a/src/task/properties.jl b/src/task/properties.jl
index 68f7cd6..53eef11 100644
--- a/src/task/properties.jl
+++ b/src/task/properties.jl
@@ -58,6 +58,29 @@ Return the data of a data task. Given by the task's `.data` field.
"""
data(t::AbstractDataTask)::Float64 = getfield(t, :data)
+"""
+ copy(t::DataTask)
+
+Copy the data task and return it.
+"""
+copy(t::DataTask) = DataTask(t.data)
+
+"""
+ children(::DataTask)
+
+Return the number of children of a data task (always 1).
+"""
+children(::DataTask) = 1
+
+"""
+ children(t::FusedComputeTask)
+
+Return the number of children of a FusedComputeTask.
+"""
+function children(t::FusedComputeTask)
+ return length(union(Set(t.t1_inputs), Set(t.t2_inputs)))
+end
+
"""
data(t::AbstractComputeTask)
diff --git a/src/task/type.jl b/src/task/type.jl
index 0f5bf22..fb12b5b 100644
--- a/src/task/type.jl
+++ b/src/task/type.jl
@@ -19,6 +19,15 @@ The shared base type for any data task.
"""
abstract type AbstractDataTask <: AbstractTask end
+"""
+ DataTask <: AbstractDataTask
+
+Task representing a specific data transfer.
+"""
+struct DataTask <: AbstractDataTask
+ data::Float64
+end
+
"""
FusedComputeTask{T1 <: AbstractComputeTask, T2 <: AbstractComputeTask} <: AbstractComputeTask
diff --git a/src/utility.jl b/src/utility.jl
index 38328d1..55e6df3 100644
--- a/src/utility.jl
+++ b/src/utility.jl
@@ -103,3 +103,139 @@ function unroll_symbol_vector(vec::Vector)
end
return result
end
+
+
+
+####################
+# CODE FROM HERE BORROWED FROM SOURCE: https://codebase.helmholtz.cloud/qedsandbox/QEDphasespaces.jl/
+# use qedphasespaces directly once released
+#
+# quick and dirty implementation of the RAMBO algorithm
+#
+# reference:
+# * https://cds.cern.ch/record/164736/files/198601282.pdf
+# * https://www.sciencedirect.com/science/article/pii/0010465586901190
+####################
+
+function generate_initial_moms(ss, masses)
+ E1 = (ss^2 + masses[1]^2 - masses[2]^2) / (2 * ss)
+ E2 = (ss^2 + masses[2]^2 - masses[1]^2) / (2 * ss)
+
+ rho1 = sqrt(E1^2 - masses[1]^2)
+ rho2 = sqrt(E2^2 - masses[2]^2)
+
+ return [SFourMomentum(E1, 0, 0, rho1), SFourMomentum(E2, 0, 0, -rho2)]
+end
+
+
+Random.rand(rng::AbstractRNG, ::Random.SamplerType{SFourMomentum}) = SFourMomentum(rand(rng, 4))
+Random.rand(rng::AbstractRNG, ::Random.SamplerType{NTuple{N, Float64}}) where {N} = Tuple(rand(rng, N))
+
+
+function _transform_uni_to_mom(u1, u2, u3, u4)
+ cth = 2 * u1 - 1
+ sth = sqrt(1 - cth^2)
+ phi = 2 * pi * u2
+ q0 = -log(u3 * u4)
+ qx = q0 * sth * cos(phi)
+ qy = q0 * sth * sin(phi)
+ qz = q0 * cth
+
+ return SFourMomentum(q0, qx, qy, qz)
+end
+
+function _transform_uni_to_mom!(uni_mom, dest)
+ u1, u2, u3, u4 = Tuple(uni_mom)
+ cth = 2 * u1 - 1
+ sth = sqrt(1 - cth^2)
+ phi = 2 * pi * u2
+ q0 = -log(u3 * u4)
+ qx = q0 * sth * cos(phi)
+ qy = q0 * sth * sin(phi)
+ qz = q0 * cth
+
+ return dest = SFourMomentum(q0, qx, qy, qz)
+end
+
+_transform_uni_to_mom(u1234::Tuple) = _transform_uni_to_mom(u1234...)
+_transform_uni_to_mom(u1234::SFourMomentum) = _transform_uni_to_mom(Tuple(u1234))
+
+function generate_massless_moms(rng, n::Int)
+ a = Vector{SFourMomentum}(undef, n)
+ rand!(rng, a)
+ return map(_transform_uni_to_mom, a)
+end
+
+function generate_physical_massless_moms(rng, ss, n)
+ r_moms = generate_massless_moms(rng, n)
+ Q = sum(r_moms)
+ M = sqrt(Q * Q)
+ fac = -1 / M
+ Qx = getX(Q)
+ Qy = getY(Q)
+ Qz = getZ(Q)
+ bx = fac * Qx
+ by = fac * Qy
+ bz = fac * Qz
+ gamma = getT(Q) / M
+ a = 1 / (1 + gamma)
+ x = ss / M
+
+ i = 1
+ while i <= n
+ mom = r_moms[i]
+ mom0 = getT(mom)
+ mom1 = getX(mom)
+ mom2 = getY(mom)
+ mom3 = getZ(mom)
+
+ bq = bx * mom1 + by * mom2 + bz * mom3
+
+ p0 = x * (gamma * mom0 + bq)
+ px = x * (mom1 + bx * mom0 + a * bq * bx)
+ py = x * (mom2 + by * mom0 + a * bq * by)
+ pz = x * (mom3 + bz * mom0 + a * bq * bz)
+
+ r_moms[i] = SFourMomentum(p0, px, py, pz)
+ i += 1
+ end
+ return r_moms
+end
+
+function _to_be_solved(xi, masses, p0s, ss)
+ sum = 0.0
+ for (i, E) in enumerate(p0s)
+ sum += sqrt(masses[i]^2 + xi^2 * E^2)
+ end
+ return sum - ss
+end
+
+function _build_massive_momenta(xi, masses, massless_moms)
+ vec = SFourMomentum[]
+ i = 1
+ while i <= length(massless_moms)
+ massless_mom = massless_moms[i]
+ k0 = sqrt(getT(massless_mom)^2 * xi^2 + masses[i]^2)
+
+ kx = xi * getX(massless_mom)
+ ky = xi * getY(massless_mom)
+ kz = xi * getZ(massless_mom)
+
+ push!(vec, SFourMomentum(k0, kx, ky, kz))
+
+ i += 1
+ end
+ return vec
+end
+
+first_derivative(func) = x -> ForwardDiff.derivative(func, float(x))
+
+
+function generate_physical_massive_moms(rng, ss, masses; x0 = 0.1)
+ n = length(masses)
+ massless_moms = generate_physical_massless_moms(rng, ss, n)
+ energies = getT.(massless_moms)
+ f = x -> _to_be_solved(x, masses, energies, ss)
+ xi = find_zero((f, first_derivative(f)), x0, Roots.Newton())
+ return _build_massive_momenta(xi, masses, massless_moms)
+end
diff --git a/test/Project.toml b/test/Project.toml
index 666301a..5c459da 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,6 +1,8 @@
[deps]
AccurateArithmetic = "22286c92-06ac-501d-9306-4abd417d9753"
QEDbase = "10e22c08-3ccb-4172-bfcf-7d7aa3d04d93"
+QEDprocesses = "46de9c38-1bb3-4547-a1ec-da24d767fdad"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
+StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/test/node_reduction.jl b/test/node_reduction.jl
index c507840..bb14e9b 100644
--- a/test/node_reduction.jl
+++ b/test/node_reduction.jl
@@ -8,23 +8,23 @@ graph = MetagraphOptimization.DAG()
d_exit = insert_node!(graph, make_node(DataTask(10)), track = false)
-s0 = insert_node!(graph, make_node(ComputeTaskS2()), track = false)
+s0 = insert_node!(graph, make_node(ComputeTaskABC_S2()), track = false)
ED = insert_node!(graph, make_node(DataTask(3)), track = false)
FD = insert_node!(graph, make_node(DataTask(3)), track = false)
-EC = insert_node!(graph, make_node(ComputeTaskV()), track = false)
-FC = insert_node!(graph, make_node(ComputeTaskV()), track = false)
+EC = insert_node!(graph, make_node(ComputeTaskABC_V()), track = false)
+FC = insert_node!(graph, make_node(ComputeTaskABC_V()), track = false)
A1D = insert_node!(graph, make_node(DataTask(4)), track = false)
B1D_1 = insert_node!(graph, make_node(DataTask(4)), track = false)
B1D_2 = insert_node!(graph, make_node(DataTask(4)), track = false)
C1D = insert_node!(graph, make_node(DataTask(4)), track = false)
-A1C = insert_node!(graph, make_node(ComputeTaskU()), track = false)
-B1C_1 = insert_node!(graph, make_node(ComputeTaskU()), track = false)
-B1C_2 = insert_node!(graph, make_node(ComputeTaskU()), track = false)
-C1C = insert_node!(graph, make_node(ComputeTaskU()), track = false)
+A1C = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
+B1C_1 = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
+B1C_2 = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
+C1C = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
AD = insert_node!(graph, make_node(DataTask(5)), track = false)
BD = insert_node!(graph, make_node(DataTask(5)), track = false)
diff --git a/test/runtests.jl b/test/runtests.jl
index 83c0c8e..39a7cca 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,35 +1,41 @@
using SafeTestsets
-@safetestset "Utility Unit Tests" begin
+@safetestset "Utility Unit Tests " begin
include("unit_tests_utility.jl")
end
-@safetestset "Task Unit Tests" begin
+@safetestset "Task Unit Tests " begin
include("unit_tests_tasks.jl")
end
-@safetestset "Node Unit Tests" begin
+@safetestset "Node Unit Tests " begin
include("unit_tests_nodes.jl")
end
-@safetestset "Properties Unit Tests" begin
+@safetestset "Properties Unit Tests " begin
include("unit_tests_properties.jl")
end
-@safetestset "Estimation Unit Tests" begin
+@safetestset "Estimation Unit Tests " begin
include("unit_tests_estimator.jl")
end
-@safetestset "ABC-Model Unit Tests" begin
+@safetestset "ABC-Model Unit Tests " begin
include("unit_tests_abcmodel.jl")
end
-@safetestset "Node Reduction Unit Tests" begin
+@safetestset "QED Feynman Diagram Generation Tests" begin
+ include("unit_tests_qed_diagrams.jl")
+end
+@safetestset "QED-Model Unit Tests " begin
+ include("unit_tests_qedmodel.jl")
+end
+@safetestset "Node Reduction Unit Tests " begin
include("node_reduction.jl")
end
-@safetestset "Graph Unit Tests" begin
+@safetestset "Graph Unit Tests " begin
include("unit_tests_graph.jl")
end
-@safetestset "Execution Unit Tests" begin
+@safetestset "Execution Unit Tests " begin
include("unit_tests_execution.jl")
end
-@safetestset "Optimization Unit Tests" begin
+@safetestset "Optimization Unit Tests " begin
include("unit_tests_optimization.jl")
end
-@safetestset "Known Graph Tests" begin
+@safetestset "Known Graph Tests " begin
include("known_graphs.jl")
end
diff --git a/test/unit_tests_abcmodel.jl b/test/unit_tests_abcmodel.jl
index 0814ba0..cff7ecc 100644
--- a/test/unit_tests_abcmodel.jl
+++ b/test/unit_tests_abcmodel.jl
@@ -19,5 +19,5 @@ testparticles = [ParticleA(def_momentum), ParticleB(def_momentum), ParticleC(def
end
@testset "Vertex" begin
- @test isapprox(MetagraphOptimization.vertex(), 1 / 137.0)
+ @test isapprox(MetagraphOptimization.ABC_vertex(), 1 / 137.0)
end
diff --git a/test/unit_tests_execution.jl b/test/unit_tests_execution.jl
index b44bdfc..dea4072 100644
--- a/test/unit_tests_execution.jl
+++ b/test/unit_tests_execution.jl
@@ -38,8 +38,8 @@ function ground_truth_graph_result(input::ABCProcessInput)
@test isapprox(getMass2(diagram1_C.momentum), getMass2(diagram1_Cp.momentum))
@test isapprox(getMass2(diagram2_C.momentum), getMass2(diagram2_Cp.momentum))
- inner1 = MetagraphOptimization.inner_edge(diagram1_C)
- inner2 = MetagraphOptimization.inner_edge(diagram2_C)
+ inner1 = MetagraphOptimization.ABC_inner_edge(diagram1_C)
+ inner2 = MetagraphOptimization.ABC_inner_edge(diagram2_C)
diagram1_result = inner1 * constant
diagram2_result = inner2 * constant
@@ -122,95 +122,101 @@ end
end
end
-@testset "AB->AB large sum fusion" for _ in 1:20
- graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
+@testset "AB->AB large sum fusion" begin
+ for _ in 1:20
+ graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
- # push a fusion with the sum node
- ops = get_operations(graph)
- for fusion in ops.nodeFusions
- if isa(fusion.input[3].task, ComputeTaskSum)
- push_operation!(graph, fusion)
- break
- end
- end
-
- # push two more fusions with the fused node
- for _ in 1:15
+ # push a fusion with the sum node
ops = get_operations(graph)
for fusion in ops.nodeFusions
- if isa(fusion.input[3].task, FusedComputeTask)
+ if isa(fusion.input[3].task, ComputeTaskABC_Sum)
push_operation!(graph, fusion)
break
end
end
- end
- # try execute
- @test is_valid(graph)
- expected_result = ground_truth_graph_result(particles_2_2)
- @test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+        # push up to 15 more fusions with the fused node
+ for _ in 1:15
+ ops = get_operations(graph)
+ for fusion in ops.nodeFusions
+ if isa(fusion.input[3].task, FusedComputeTask)
+ push_operation!(graph, fusion)
+ break
+ end
+ end
+ end
+
+ # try execute
+ @test is_valid(graph)
+ expected_result = ground_truth_graph_result(particles_2_2)
+ @test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+ end
end
-@testset "AB->AB large sum fusion" for _ in 1:20
- graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
+@testset "AB->AB large sum fusion" begin
+ for _ in 1:20
+ graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
- # push a fusion with the sum node
- ops = get_operations(graph)
- for fusion in ops.nodeFusions
- if isa(fusion.input[3].task, ComputeTaskSum)
- push_operation!(graph, fusion)
- break
- end
- end
-
- # push two more fusions with the fused node
- for _ in 1:15
+ # push a fusion with the sum node
ops = get_operations(graph)
for fusion in ops.nodeFusions
- if isa(fusion.input[3].task, FusedComputeTask)
+ if isa(fusion.input[3].task, ComputeTaskABC_Sum)
push_operation!(graph, fusion)
break
end
end
- end
- # try execute
- @test is_valid(graph)
- expected_result = ground_truth_graph_result(particles_2_2)
- @test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+        # push up to 15 more fusions with the fused node
+ for _ in 1:15
+ ops = get_operations(graph)
+ for fusion in ops.nodeFusions
+ if isa(fusion.input[3].task, FusedComputeTask)
+ push_operation!(graph, fusion)
+ break
+ end
+ end
+ end
+
+ # try execute
+ @test is_valid(graph)
+ expected_result = ground_truth_graph_result(particles_2_2)
+ @test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+ end
end
-@testset "AB->AB fusion edge case" for _ in 1:20
- graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
+@testset "AB->AB fusion edge case" begin
+ for _ in 1:20
+ graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
- # push two fusions with ComputeTaskV
- for _ in 1:2
- ops = get_operations(graph)
- for fusion in ops.nodeFusions
- if isa(fusion.input[1].task, ComputeTaskV)
- push_operation!(graph, fusion)
- break
+ # push two fusions with ComputeTaskABC_V
+ for _ in 1:2
+ ops = get_operations(graph)
+ for fusion in ops.nodeFusions
+ if isa(fusion.input[1].task, ComputeTaskABC_V)
+ push_operation!(graph, fusion)
+ break
+ end
end
end
- end
- # push fusions until the end
- cont = true
- while cont
- cont = false
- ops = get_operations(graph)
- for fusion in ops.nodeFusions
- if isa(fusion.input[1].task, FusedComputeTask)
- push_operation!(graph, fusion)
- cont = true
- break
+ # push fusions until the end
+ cont = true
+ while cont
+ cont = false
+ ops = get_operations(graph)
+ for fusion in ops.nodeFusions
+ if isa(fusion.input[1].task, FusedComputeTask)
+ push_operation!(graph, fusion)
+ cont = true
+ break
+ end
end
end
- end
- # try execute
- @test is_valid(graph)
- expected_result = ground_truth_graph_result(particles_2_2)
- @test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+ # try execute
+ @test is_valid(graph)
+ expected_result = ground_truth_graph_result(particles_2_2)
+ @test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+ end
end
diff --git a/test/unit_tests_graph.jl b/test/unit_tests_graph.jl
index 2b90641..c6e59ff 100644
--- a/test/unit_tests_graph.jl
+++ b/test/unit_tests_graph.jl
@@ -22,7 +22,7 @@ d_exit = insert_node!(graph, make_node(DataTask(10)), track = false)
@test length(graph.dirtyNodes) == 1
# final s compute
-s0 = insert_node!(graph, make_node(ComputeTaskS2()), track = false)
+s0 = insert_node!(graph, make_node(ComputeTaskABC_S2()), track = false)
@test length(graph.nodes) == 2
@test length(graph.dirtyNodes) == 2
@@ -32,8 +32,8 @@ d_v0_s0 = insert_node!(graph, make_node(DataTask(5)), track = false)
d_v1_s0 = insert_node!(graph, make_node(DataTask(5)), track = false)
# v0 and v1 compute
-v0 = insert_node!(graph, make_node(ComputeTaskV()), track = false)
-v1 = insert_node!(graph, make_node(ComputeTaskV()), track = false)
+v0 = insert_node!(graph, make_node(ComputeTaskABC_V()), track = false)
+v1 = insert_node!(graph, make_node(ComputeTaskABC_V()), track = false)
# data from uB, uA, uBp and uAp to v0 and v1
d_uB_v0 = insert_node!(graph, make_node(DataTask(3)), track = false)
@@ -42,10 +42,10 @@ d_uBp_v1 = insert_node!(graph, make_node(DataTask(3)), track = false)
d_uAp_v1 = insert_node!(graph, make_node(DataTask(3)), track = false)
# uB, uA, uBp and uAp computes
-uB = insert_node!(graph, make_node(ComputeTaskU()), track = false)
-uA = insert_node!(graph, make_node(ComputeTaskU()), track = false)
-uBp = insert_node!(graph, make_node(ComputeTaskU()), track = false)
-uAp = insert_node!(graph, make_node(ComputeTaskU()), track = false)
+uB = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
+uA = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
+uBp = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
+uAp = insert_node!(graph, make_node(ComputeTaskABC_U()), track = false)
# data from PB, PA, PBp and PAp to uB, uA, uBp and uAp
d_PB_uB = insert_node!(graph, make_node(DataTask(6)), track = false)
@@ -54,10 +54,10 @@ d_PBp_uBp = insert_node!(graph, make_node(DataTask(6)), track = false)
d_PAp_uAp = insert_node!(graph, make_node(DataTask(6)), track = false)
# P computes PB, PA, PBp and PAp
-PB = insert_node!(graph, make_node(ComputeTaskP()), track = false)
-PA = insert_node!(graph, make_node(ComputeTaskP()), track = false)
-PBp = insert_node!(graph, make_node(ComputeTaskP()), track = false)
-PAp = insert_node!(graph, make_node(ComputeTaskP()), track = false)
+PB = insert_node!(graph, make_node(ComputeTaskABC_P()), track = false)
+PA = insert_node!(graph, make_node(ComputeTaskABC_P()), track = false)
+PBp = insert_node!(graph, make_node(ComputeTaskABC_P()), track = false)
+PAp = insert_node!(graph, make_node(ComputeTaskABC_P()), track = false)
# entry nodes getting data for P computes
d_PB = insert_node!(graph, make_node(DataTask(4)), track = false)
diff --git a/test/unit_tests_nodes.jl b/test/unit_tests_nodes.jl
index 37a419c..d46dfd9 100644
--- a/test/unit_tests_nodes.jl
+++ b/test/unit_tests_nodes.jl
@@ -1,9 +1,9 @@
using MetagraphOptimization
-nC1 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskU())
-nC2 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskV())
-nC3 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskP())
-nC4 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskSum())
+nC1 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskABC_U())
+nC2 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskABC_V())
+nC3 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskABC_P())
+nC4 = MetagraphOptimization.make_node(MetagraphOptimization.ComputeTaskABC_Sum())
nD1 = MetagraphOptimization.make_node(MetagraphOptimization.DataTask(10))
nD2 = MetagraphOptimization.make_node(MetagraphOptimization.DataTask(20))
diff --git a/test/unit_tests_qed_diagrams.jl b/test/unit_tests_qed_diagrams.jl
new file mode 100644
index 0000000..802fb50
--- /dev/null
+++ b/test/unit_tests_qed_diagrams.jl
@@ -0,0 +1,47 @@
+using MetagraphOptimization
+
+import MetagraphOptimization.gen_diagrams
+import MetagraphOptimization.isincoming
+import MetagraphOptimization.types
+
+
+model = QEDModel()
+compton = ("Compton Scattering", parse_process("ke->ke", model), 2)
+compton_3 = ("3-Photon Compton Scattering", parse_process("kkke->ke", QEDModel()), 24)
+compton_4 = ("4-Photon Compton Scattering", parse_process("kkkke->ke", QEDModel()), 120)
+bhabha = ("Bhabha Scattering", parse_process("ep->ep", model), 2)
+moller = ("Møller Scattering", parse_process("ee->ee", model), 2)
+pair_production = ("Pair production", parse_process("kk->ep", model), 2)
+pair_annihilation = ("Pair annihilation", parse_process("ep->kk", model), 2)
+trident = ("Trident", parse_process("ke->epe", model), 8)
+
+@testset "Known Processes" begin
+ @testset "$name" for (name, process, n) in
+ [compton, bhabha, moller, pair_production, pair_annihilation, trident, compton_3, compton_4]
+ initial_diagram = FeynmanDiagram(process)
+
+ n_particles = 0
+ for type in types(model)
+ if (isincoming(type))
+ n_particles += get(process.inParticles, type, 0)
+ else
+ n_particles += get(process.outParticles, type, 0)
+ end
+ end
+ @test n_particles == length(initial_diagram.particles)
+ @test ismissing(initial_diagram.tie[])
+ @test isempty(initial_diagram.vertices)
+
+ result_diagrams = gen_diagrams(initial_diagram)
+ @test length(result_diagrams) == n
+
+ for d in result_diagrams
+ n_vertices = 0
+ for vs in d.vertices
+ n_vertices += length(vs)
+ end
+ @test n_vertices == n_particles - 2
+ @test !ismissing(d.tie[])
+ end
+ end
+end
diff --git a/test/unit_tests_qedmodel.jl b/test/unit_tests_qedmodel.jl
new file mode 100644
index 0000000..af63fdf
--- /dev/null
+++ b/test/unit_tests_qedmodel.jl
@@ -0,0 +1,291 @@
+using MetagraphOptimization
+using QEDbase
+using QEDprocesses
+using StatsBase # for countmap
+using Random
+
+import MetagraphOptimization.caninteract
+import MetagraphOptimization.issame
+import MetagraphOptimization.interaction_result
+import MetagraphOptimization.propagation_result
+import MetagraphOptimization.direction
+import MetagraphOptimization.spin_or_pol
+import MetagraphOptimization.QED_vertex
+
+def_momentum = SFourMomentum(1.0, 0.0, 0.0, 0.0)
+
+RNG = Random.default_rng()
+
+testparticleTypes = [
+ PhotonStateful{Incoming},
+ PhotonStateful{Outgoing},
+ FermionStateful{Incoming},
+ FermionStateful{Outgoing},
+ AntiFermionStateful{Incoming},
+ AntiFermionStateful{Outgoing},
+]
+
+testparticleTypesPropagated = [
+ PhotonStateful{Outgoing},
+ PhotonStateful{Incoming},
+ FermionStateful{Outgoing},
+ FermionStateful{Incoming},
+ AntiFermionStateful{Outgoing},
+ AntiFermionStateful{Incoming},
+]
+
+function compton_groundtruth(input::QEDProcessInput)
+ # p1k1 -> p2k2
+ # formula: −(ie)^2 (u(p2) slashed(ε1) S(p2 − k1) slashed(ε2) u(p1) + u(p2) slashed(ε2) S(p1 + k1) slashed(ε1) u(p1))
+
+ p1 = input.inParticles[findfirst(x -> typeof(x) <: FermionStateful, input.inParticles)]
+ p2 = input.outParticles[findfirst(x -> typeof(x) <: FermionStateful, input.outParticles)]
+
+ k1 = input.inParticles[findfirst(x -> typeof(x) <: PhotonStateful, input.inParticles)]
+ k2 = input.outParticles[findfirst(x -> typeof(x) <: PhotonStateful, input.outParticles)]
+
+ u_p1 = base_state(Electron(), Incoming(), p1.momentum, spin_or_pol(p1))
+ u_p2 = base_state(Electron(), Outgoing(), p2.momentum, spin_or_pol(p2))
+
+ eps_1 = base_state(Photon(), Incoming(), k1.momentum, spin_or_pol(k1))
+ eps_2 = base_state(Photon(), Outgoing(), k2.momentum, spin_or_pol(k2))
+
+ virt1_mom = p2.momentum - k1.momentum
+ @test isapprox(p1.momentum - k2.momentum, virt1_mom)
+
+ virt2_mom = p1.momentum + k1.momentum
+ @test isapprox(p2.momentum + k2.momentum, virt2_mom)
+
+ s_p2_k1 = propagator(Electron(), virt1_mom)
+ s_p1_k1 = propagator(Electron(), virt2_mom)
+
+ diagram1 = u_p2 * (eps_1 * QED_vertex()) * s_p2_k1 * (eps_2 * QED_vertex()) * u_p1
+ diagram2 = u_p2 * (eps_2 * QED_vertex()) * s_p1_k1 * (eps_1 * QED_vertex()) * u_p1
+
+ return diagram1 + diagram2
+end
+
+
+@testset "Interaction Result" begin
+ import MetagraphOptimization.QED_conserve_momentum
+
+ for p1 in testparticleTypes, p2 in testparticleTypes
+ if !caninteract(p1, p2)
+ @test_throws AssertionError interaction_result(p1, p2)
+ continue
+ end
+
+ @test interaction_result(p1, p2) in setdiff(testparticleTypes, [p1, p2])
+ @test issame(interaction_result(p1, p2), interaction_result(p2, p1))
+
+ testParticle1 = p1(rand(RNG, SFourMomentum))
+ testParticle2 = p2(rand(RNG, SFourMomentum))
+ p3 = interaction_result(p1, p2)
+
+ resultParticle = QED_conserve_momentum(testParticle1, testParticle2)
+
+ @test issame(typeof(resultParticle), interaction_result(p1, p2))
+
+ totalMom = zero(SFourMomentum)
+ for (p, mom) in [(p1, testParticle1.momentum), (p2, testParticle2.momentum), (p3, resultParticle.momentum)]
+ if (typeof(direction(p)) <: Incoming)
+ totalMom += mom
+ else
+ totalMom -= mom
+ end
+ end
+
+ @test isapprox(totalMom, zero(SFourMomentum); atol = sqrt(eps()))
+ end
+end
+
+@testset "Propagation Result" begin
+ for (p, propResult) in zip(testparticleTypes, testparticleTypesPropagated)
+ @test issame(propagation_result(p), propResult)
+ @test direction(propagation_result(p)(def_momentum)) != direction(p(def_momentum))
+ end
+end
+
+@testset "Parse Process" begin
+ @testset "Order invariance" begin
+ @test parse_process("ke->ke", QEDModel()) == parse_process("ek->ke", QEDModel())
+ @test parse_process("ke->ke", QEDModel()) == parse_process("ek->ek", QEDModel())
+ @test parse_process("ke->ke", QEDModel()) == parse_process("ke->ek", QEDModel())
+
+ @test parse_process("kkke->eep", QEDModel()) == parse_process("kkek->epe", QEDModel())
+ end
+
+ @testset "Known processes" begin
+ compton_process = QEDProcessDescription(
+ Dict{Type, Int}(PhotonStateful{Incoming} => 1, FermionStateful{Incoming} => 1),
+ Dict{Type, Int}(PhotonStateful{Outgoing} => 1, FermionStateful{Outgoing} => 1),
+ )
+
+ @test parse_process("ke->ke", QEDModel()) == compton_process
+
+ positron_compton_process = QEDProcessDescription(
+ Dict{Type, Int}(PhotonStateful{Incoming} => 1, AntiFermionStateful{Incoming} => 1),
+ Dict{Type, Int}(PhotonStateful{Outgoing} => 1, AntiFermionStateful{Outgoing} => 1),
+ )
+
+ @test parse_process("kp->kp", QEDModel()) == positron_compton_process
+
+ trident_process = QEDProcessDescription(
+ Dict{Type, Int}(PhotonStateful{Incoming} => 1, FermionStateful{Incoming} => 1),
+ Dict{Type, Int}(FermionStateful{Outgoing} => 2, AntiFermionStateful{Outgoing} => 1),
+ )
+
+ @test parse_process("ke->eep", QEDModel()) == trident_process
+
+ pair_production_process = QEDProcessDescription(
+ Dict{Type, Int}(PhotonStateful{Incoming} => 2),
+ Dict{Type, Int}(FermionStateful{Outgoing} => 1, AntiFermionStateful{Outgoing} => 1),
+ )
+
+ @test parse_process("kk->pe", QEDModel()) == pair_production_process
+
+ pair_annihilation_process = QEDProcessDescription(
+ Dict{Type, Int}(FermionStateful{Incoming} => 1, AntiFermionStateful{Incoming} => 1),
+ Dict{Type, Int}(PhotonStateful{Outgoing} => 2),
+ )
+
+ @test parse_process("pe->kk", QEDModel()) == pair_annihilation_process
+ end
+end
+
+@testset "Generate Process Inputs" begin
+ @testset "Process $proc_str" for proc_str in ["ke->ke", "kp->kp", "kk->ep", "ep->kk"]
+ # currently can only generate for 2->2 processes
+ process = parse_process(proc_str, QEDModel())
+
+ for i in 1:100
+ input = gen_process_input(process)
+ @test countmap(typeof.(input.inParticles)) == process.inParticles
+ @test countmap(typeof.(input.outParticles)) == process.outParticles
+
+ @test isapprox(
+ sum(getfield.(input.inParticles, :momentum)),
+ sum(getfield.(input.outParticles, :momentum));
+ atol = sqrt(eps()),
+ )
+ end
+ end
+end
+
+@testset "Compton" begin
+ import MetagraphOptimization.insert_node!
+ import MetagraphOptimization.insert_edge!
+ import MetagraphOptimization.make_node
+
+ model = QEDModel()
+ process = parse_process("ke->ke", model)
+ machine = get_machine_info()
+
+ graph = MetagraphOptimization.DAG()
+
+ # manually build a graph for compton
+ graph = DAG()
+
+ # s to output (exit node)
+ d_exit = insert_node!(graph, make_node(DataTask(16)), track = false)
+
+ sum_node = insert_node!(graph, make_node(ComputeTaskQED_Sum(2)), track = false)
+
+ d_s0_sum = insert_node!(graph, make_node(DataTask(16)), track = false)
+ d_s1_sum = insert_node!(graph, make_node(DataTask(16)), track = false)
+
+ # final s compute
+ s0 = insert_node!(graph, make_node(ComputeTaskQED_S2()), track = false)
+ s1 = insert_node!(graph, make_node(ComputeTaskQED_S2()), track = false)
+
+    # data from v0/v1 into s0 and from v2/v3 into s1
+ d_v0_s0 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_v1_s0 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_v2_s1 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_v3_s1 = insert_node!(graph, make_node(DataTask(96)), track = false)
+
+    # v0 through v3 compute
+ v0 = insert_node!(graph, make_node(ComputeTaskQED_V()), track = false)
+ v1 = insert_node!(graph, make_node(ComputeTaskQED_V()), track = false)
+ v2 = insert_node!(graph, make_node(ComputeTaskQED_V()), track = false)
+ v3 = insert_node!(graph, make_node(ComputeTaskQED_V()), track = false)
+
+ # data from uPhIn, uPhOut, uElIn, uElOut to v0 and v1
+ d_uPhIn_v0 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_uElIn_v0 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_uPhOut_v1 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_uElOut_v1 = insert_node!(graph, make_node(DataTask(96)), track = false)
+
+ # data from uPhIn, uPhOut, uElIn, uElOut to v2 and v3
+ d_uPhOut_v2 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_uElIn_v2 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_uPhIn_v3 = insert_node!(graph, make_node(DataTask(96)), track = false)
+ d_uElOut_v3 = insert_node!(graph, make_node(DataTask(96)), track = false)
+
+ # uPhIn, uPhOut, uElIn and uElOut computes
+ uPhIn = insert_node!(graph, make_node(ComputeTaskQED_U()), track = false)
+ uPhOut = insert_node!(graph, make_node(ComputeTaskQED_U()), track = false)
+ uElIn = insert_node!(graph, make_node(ComputeTaskQED_U()), track = false)
+ uElOut = insert_node!(graph, make_node(ComputeTaskQED_U()), track = false)
+
+ # data into U
+ d_uPhIn = insert_node!(graph, make_node(DataTask(16), "ki1"), track = false)
+ d_uPhOut = insert_node!(graph, make_node(DataTask(16), "ko1"), track = false)
+ d_uElIn = insert_node!(graph, make_node(DataTask(16), "ei1"), track = false)
+ d_uElOut = insert_node!(graph, make_node(DataTask(16), "eo1"), track = false)
+
+ # now for all the edges
+ insert_edge!(graph, d_uPhIn, uPhIn, track = false)
+ insert_edge!(graph, d_uPhOut, uPhOut, track = false)
+ insert_edge!(graph, d_uElIn, uElIn, track = false)
+ insert_edge!(graph, d_uElOut, uElOut, track = false)
+
+ insert_edge!(graph, uPhIn, d_uPhIn_v0, track = false)
+ insert_edge!(graph, uPhOut, d_uPhOut_v1, track = false)
+ insert_edge!(graph, uElIn, d_uElIn_v0, track = false)
+ insert_edge!(graph, uElOut, d_uElOut_v1, track = false)
+
+ insert_edge!(graph, uPhIn, d_uPhIn_v3, track = false)
+ insert_edge!(graph, uPhOut, d_uPhOut_v2, track = false)
+ insert_edge!(graph, uElIn, d_uElIn_v2, track = false)
+ insert_edge!(graph, uElOut, d_uElOut_v3, track = false)
+
+ insert_edge!(graph, d_uPhIn_v0, v0, track = false)
+ insert_edge!(graph, d_uPhOut_v1, v1, track = false)
+ insert_edge!(graph, d_uElIn_v0, v0, track = false)
+ insert_edge!(graph, d_uElOut_v1, v1, track = false)
+
+ insert_edge!(graph, d_uPhIn_v3, v3, track = false)
+ insert_edge!(graph, d_uPhOut_v2, v2, track = false)
+ insert_edge!(graph, d_uElIn_v2, v2, track = false)
+ insert_edge!(graph, d_uElOut_v3, v3, track = false)
+
+ insert_edge!(graph, v0, d_v0_s0, track = false)
+ insert_edge!(graph, v1, d_v1_s0, track = false)
+ insert_edge!(graph, v2, d_v2_s1, track = false)
+ insert_edge!(graph, v3, d_v3_s1, track = false)
+
+ insert_edge!(graph, d_v0_s0, s0, track = false)
+ insert_edge!(graph, d_v1_s0, s0, track = false)
+
+ insert_edge!(graph, d_v2_s1, s1, track = false)
+ insert_edge!(graph, d_v3_s1, s1, track = false)
+
+ insert_edge!(graph, s0, d_s0_sum, track = false)
+ insert_edge!(graph, s1, d_s1_sum, track = false)
+
+ insert_edge!(graph, d_s0_sum, sum_node, track = false)
+ insert_edge!(graph, d_s1_sum, sum_node, track = false)
+
+ insert_edge!(graph, sum_node, d_exit, track = false)
+
+ input = [gen_process_input(process) for _ in 1:1000]
+
+ compton_function = get_compute_function(graph, process, machine)
+ @test isapprox(compton_function.(input), compton_groundtruth.(input))
+
+ graph_generated = gen_graph(process)
+
+ compton_function = get_compute_function(graph_generated, process, machine)
+ @test isapprox(compton_function.(input), compton_groundtruth.(input))
+end
diff --git a/test/unit_tests_tasks.jl b/test/unit_tests_tasks.jl
index 4bcdacb..6df7804 100644
--- a/test/unit_tests_tasks.jl
+++ b/test/unit_tests_tasks.jl
@@ -1,11 +1,11 @@
using MetagraphOptimization
-S1 = MetagraphOptimization.ComputeTaskS1()
-S2 = MetagraphOptimization.ComputeTaskS2()
-U = MetagraphOptimization.ComputeTaskU()
-V = MetagraphOptimization.ComputeTaskV()
-P = MetagraphOptimization.ComputeTaskP()
-Sum = MetagraphOptimization.ComputeTaskSum()
+S1 = MetagraphOptimization.ComputeTaskABC_S1()
+S2 = MetagraphOptimization.ComputeTaskABC_S2()
+U = MetagraphOptimization.ComputeTaskABC_U()
+V = MetagraphOptimization.ComputeTaskABC_V()
+P = MetagraphOptimization.ComputeTaskABC_P()
+Sum = MetagraphOptimization.ComputeTaskABC_Sum()
Data10 = MetagraphOptimization.DataTask(10)
Data20 = MetagraphOptimization.DataTask(20)
@@ -46,4 +46,4 @@ Data10_3 = copy(Data10)
S1_2 = copy(S1)
@test S1_2 == S1
-@test S1 == MetagraphOptimization.ComputeTaskS1()
+@test S1 == MetagraphOptimization.ComputeTaskABC_S1()