24 Commits

SHA1 Message Date
6a09ecf33d Improve gen_access_expr with dispatch 2023-10-12 15:58:39 +02:00
4dcb616606 Use the scheduling information in the execution 2023-10-12 15:15:36 +02:00
9b28601f18 Add device info to nodes during scheduling 2023-10-12 00:29:48 +02:00
3267daadfd Actually fix the rare execution error this time 2023-10-10 21:49:31 +02:00
140a954d01 Add scheduler interface 2023-10-10 14:12:42 +02:00
a86901e425 Fix occasional execution error 2023-10-04 17:32:23 +02:00
0f50b59933 input/AB->ABBBBBBBBB.txt: convert to Git LFS 2023-10-04 11:48:55 +02:00
cbfed20b82 WIP 2023-10-04 11:05:49 +02:00
f9e60a7b5e Update docs 2023-10-03 18:00:25 +02:00
314330f00f Add cache strategy information to devices 2023-10-03 17:13:53 +02:00
dd01a5e691 Reimplement same code generation through new cache strategy interface 2023-10-03 16:47:14 +02:00
37d645cb4e WIP Adding machine/device info and caching strategies 2023-09-29 18:02:57 +02:00
afb6af44ca Add more notebooks 2023-09-29 01:12:43 +02:00
bef017130b Add notebook abc model showcase, add some pretty print functions 2023-09-28 19:42:19 +02:00
7dd9fedf2e Refactor model into an interface and remove any ABC Model specific code from src/code_gen/. Also generate functions instead of direct code evaluation in execute() 2023-09-28 17:59:17 +02:00
a69dd6018e WIP 2023-09-28 00:48:57 +02:00
4b44eb5286 Add number of children information to sum tasks 2023-09-27 16:16:33 +02:00
24ade323f0 Add tests for AB->ABBB execution and fix errors 2023-09-26 18:30:37 +02:00
95f92f080c Fix execution with fusion 2023-09-26 16:52:50 +02:00
cc05cae1cd Fix abc test value 2023-09-26 10:23:30 +02:00
c88898a502 WIP 2023-09-25 18:49:44 +02:00
0d8d824540 Fix Format check 2023-09-25 16:40:01 +02:00
c428613c80 Make FusedComputeTasks usable in execution 2023-09-25 16:11:15 +02:00
f8a591991c Start adding device and machine info 2023-09-17 23:06:14 +02:00
20 changed files with 521 additions and 878 deletions

View File

@@ -8,7 +8,7 @@ env:
jobs:
prepare:
-runs-on: ubuntu-22.04
+runs-on: arch-latest
steps:
- name: Checkout repository
@@ -65,7 +65,7 @@ jobs:
test:
needs: prepare
-runs-on: ubuntu-22.04
+runs-on: arch-latest
steps:
- name: Checkout repository
@@ -127,7 +127,7 @@ jobs:
docs:
needs: prepare
-runs-on: ubuntu-22.04
+runs-on: arch-latest
steps:
- name: Checkout repository

View File

@@ -1,21 +0,0 @@
# Models
## Interface
The interface that has to be implemented for an estimator.
```@autodocs
Modules = [MetagraphOptimization]
Pages = ["estimator/interafce.jl"]
Order = [:type, :constant, :function]
```
## Global Metric Estimator
Implementation of a global metric estimator. It uses the graph properties compute effort, data transfer, and compute intensity.
```@autodocs
Modules = [MetagraphOptimization]
Pages = ["estimator/global_metric.jl"]
Order = [:type, :function]
```

View File

@@ -1,34 +0,0 @@
using MetagraphOptimization
using BenchmarkTools
println("Getting machine info")
@time machine = get_machine_info()
println("Making model")
@time model = ABCModel()
println("Making process")
process_str = "AB->ABBBBB"
@time process = parse_process(process_str, model)
println("Parsing DAG")
@time graph = parse_dag("input/$process_str.txt", model)
println("Generating input data")
@time input_data = [gen_process_input(process) for _ in 1:1000]
include("profiling_utilities.jl")
println("Reducing graph")
@time reduce_all!(graph)
println("Generating compute function")
@time compute_func = get_compute_function(graph, process, machine)
println("First run, single argument")
@time compute_func(input_data[1])
println("\nBenchmarking function, 1 input")
display(@benchmark compute_func($(input_data[1])))
println("\nBenchmarking function, 1000 inputs")
display(@benchmark compute_func.($input_data))

View File

@@ -1,34 +0,0 @@
using MetagraphOptimization
using BenchmarkTools
println("Getting machine info")
@time machine = get_machine_info()
println("Making model")
@time model = ABCModel()
println("Making process")
process_str = "AB->ABBBBBBB"
@time process = parse_process(process_str, model)
println("Parsing DAG")
@time graph = parse_dag("input/$process_str.txt", model)
println("Generating input data")
@time input_data = [gen_process_input(process) for _ in 1:1000]
include("profiling_utilities.jl")
println("Reducing graph")
@time reduce_all!(graph)
println("Generating compute function")
@time compute_func = get_compute_function(graph, process, machine)
println("First run, single argument")
@time compute_func(input_data[1])
println("\nBenchmarking function, 1 input")
display(@benchmark compute_func($(input_data[1])))
println("\nBenchmarking function, 1000 inputs")
display(@benchmark compute_func.($input_data))

File diff suppressed because it is too large.

View File

@@ -5,7 +5,6 @@ A module containing tools to work on DAGs.
"""
module MetagraphOptimization
# graph types
export DAG
export Node
export Edge
@@ -19,7 +18,6 @@ export FusedComputeTask
export PossibleOperations
export GraphProperties
# graph functions
export make_node
export make_edge
export insert_node
@@ -29,13 +27,10 @@ export is_exit_node
export parents
export children
export compute
export data
export compute_effort
export get_properties
export get_exit_node
export is_valid, is_scheduled
# graph operation related
export Operation
export AppliedOperation
export NodeFusion
@@ -47,10 +42,6 @@ export can_pop
export reset_graph!
export get_operations
# ABC model
export ParticleValue
export ParticleA, ParticleB, ParticleC
export ABCProcessDescription, ABCProcessInput, ABCModel
export ComputeTaskP
export ComputeTaskS1
export ComputeTaskS2
@@ -58,17 +49,14 @@ export ComputeTaskV
export ComputeTaskU
export ComputeTaskSum
# code generation related
export execute
export parse_dag, parse_process
export gen_process_input
export get_compute_function
export ParticleValue
export ParticleA, ParticleB, ParticleC
export ABCProcessDescription, ABCProcessInput, ABCModel
# estimator
export cost_type, graph_cost, operation_effect
export GlobalMetricEstimator, CDCost
# machine info
export Machine
export get_machine_info
@@ -133,9 +121,6 @@ include("task/compute.jl")
include("task/print.jl")
include("task/properties.jl")
-include("estimator/interface.jl")
-include("estimator/global_metric.jl")
include("models/interface.jl")
include("models/print.jl")

View File

@@ -79,7 +79,7 @@ function gen_input_assignment_code(
# TODO: how to get the "default" cpu device?
device = entry_device(machine)
evalExpr = eval(gen_access_expr(device, symbol))
-push!(assignInputs, Meta.parse("$(evalExpr)::ParticleValue{$type} = ParticleValue($p, 1.0)"))
+push!(assignInputs, Meta.parse("$(evalExpr) = ParticleValue($p, 1.0)"))
end
end
@@ -102,7 +102,6 @@ function get_compute_function(graph::DAG, process::AbstractProcessDescription, m
expr = Meta.parse(
"function compute_$(functionId)(input::AbstractProcessInput) $initCaches; $assignInputs; $code; return $resSym; end",
)
func = eval(expr)
return func
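
The hunk above shows the core code-generation mechanism: the function body is assembled as a string, turned into an `Expr` with `Meta.parse`, and materialized with `eval`. A minimal standalone sketch of that same pattern (the `make_adder` name is illustrative, not part of the package):

```julia
# Sketch of the Meta.parse/eval pattern used by get_compute_function.
# `make_adder` is a hypothetical example, not the package's API.
function make_adder(function_id::Int, increment::Number)
    expr = Meta.parse("function add_$(function_id)(x) return x + $increment end")
    return eval(expr)  # evaluates in the enclosing module's global scope
end

add_3 = make_adder(3, 5)
# Functions defined via eval live in a newer world age; Base.invokelatest
# makes the call safe regardless of where it happens.
@assert Base.invokelatest(add_3, 2) == 7
```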

View File

@@ -1,65 +0,0 @@
"""
CDCost
Representation of a [`DAG`](@ref)'s cost as estimated by the [`GlobalMetricEstimator`](@ref).
# Fields:
`.data`: The total data transfer.\\
`.computeEffort`: The total compute effort.\\
`.computeIntensity`: The compute intensity, will always equal `.computeEffort / .data`.
!!! note
Note that the `computeIntensity` doesn't necessarily make sense in the context of only operation costs.
For example, for node fusions this will always be 0, since the computeEffort is zero.
It will still work as intended when adding/subtracting to/from a `graph_cost` estimate.
"""
const CDCost = NamedTuple{(:data, :computeEffort, :computeIntensity), Tuple{Float64, Float64, Float64}}
function +(cost1::CDCost, cost2::CDCost)::CDCost
d = cost1.data + cost2.data
ce = computeEffort = cost1.computeEffort + cost2.computeEffort
return (data = d, computeEffort = ce, computeIntensity = ce / d)::CDCost
end
function -(cost1::CDCost, cost2::CDCost)::CDCost
d = cost1.data - cost2.data
ce = computeEffort = cost1.computeEffort - cost2.computeEffort
return (data = d, computeEffort = ce, computeIntensity = ce / d)::CDCost
end
struct GlobalMetricEstimator <: AbstractEstimator end
function cost_type(estimator::GlobalMetricEstimator)
return CDCost
end
function graph_cost(estimator::GlobalMetricEstimator, graph::DAG)
properties = get_properties(graph)
return (
data = properties.data,
computeEffort = properties.computeEffort,
computeIntensity = properties.computeIntensity,
)::CDCost
end
function operation_effect(estimator::GlobalMetricEstimator, graph::DAG, operation::NodeFusion)
return (data = -data(operation.input[2].task), computeEffort = 0.0, computeIntensity = 0.0)::CDCost
end
function operation_effect(estimator::GlobalMetricEstimator, graph::DAG, operation::NodeReduction)
s = length(operation.input) - 1
return (
data = s * -data(operation.input[1].task),
computeEffort = s * -compute_effort(operation.input[1].task),
computeIntensity = typeof(operation.input) <: DataTaskNode ? 0.0 : Inf,
)::CDCost
end
function operation_effect(estimator::GlobalMetricEstimator, graph::DAG, operation::NodeSplit)
s = length(operation.input.parents) - 1
d = s * data(operation.input.task)
ce = s * compute_effort(operation.input.task)
return (data = d, computeEffort = ce, computeIntensity = ce / d)::CDCost
end
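
Since `CDCost` is a plain `NamedTuple`, its arithmetic is easy to check by hand. A small standalone sketch mirroring the `+` definition above (the first cost matches the AB->AB values used in the estimator tests below; the second cost's numbers are made up for illustration):

```julia
# Standalone check of the CDCost invariant: computeIntensity == computeEffort / data.
const CDCost = NamedTuple{(:data, :computeEffort, :computeIntensity), Tuple{Float64, Float64, Float64}}

c1 = (data = 976.0, computeEffort = 53.0, computeIntensity = 53.0 / 976.0)::CDCost
c2 = (data = 64.0, computeEffort = 12.0, computeIntensity = 12.0 / 64.0)::CDCost  # made-up values

# Addition sums data and compute effort; intensity is re-derived from the sums.
d = c1.data + c2.data
ce = c1.computeEffort + c2.computeEffort
total = (data = d, computeEffort = ce, computeIntensity = ce / d)::CDCost

@assert total.computeIntensity ≈ total.computeEffort / total.data
```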

View File

@@ -1,44 +0,0 @@
"""
AbstractEstimator
Abstract base type for an estimator. An estimator estimates the cost of a graph or the difference an operation applied to a graph will make to its cost.
Interface functions are
- [`graph_cost`](@ref)
- [`operation_effect`](@ref)
"""
abstract type AbstractEstimator end
"""
cost_type(estimator::AbstractEstimator)
Interface function returning a specific estimator's cost type, i.e., the type returned by its implementation of [`graph_cost`](@ref) and [`operation_effect`](@ref).
"""
function cost_type end
"""
graph_cost(estimator::AbstractEstimator, graph::DAG)
Get the total estimated cost of the graph. The cost's data type can be chosen by the implementation, but should have usable comparison operators (<, <=, >, >=, ==) and basic math operators (+, -, *, /).
"""
function graph_cost end
"""
operation_effect(estimator::AbstractEstimator, graph::DAG, operation::Operation)
Get the estimated effect on the cost of the graph, such that `graph_cost(estimator, graph) + operation_effect(estimator, graph, operation) ~= graph_cost(estimator, graph_with_operation_applied)`. There is no hard requirement for this, but the better the estimate, the better an optimization algorithm will be.
!!! note
There is a default implementation of this function, applying the operation, calling [`graph_cost`](@ref), then popping the operation again.
It can be much faster to overload this function for a specific estimator and directly compute the effects from the operation if possible.
"""
function operation_effect(estimator::AbstractEstimator, graph::DAG, operation::Operation)
# This is currently not stably working, see issue #16
cost = graph_cost(estimator, graph)
push_operation!(graph, operation)
cost_after = graph_cost(estimator, graph)
pop_operation!(graph)
return cost_after - cost
end
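
To illustrate the three interface functions, here is a hedged sketch of a custom estimator; `NodeCountEstimator` and the `graph.nodes` field are assumptions for illustration, not part of the documented API:

```julia
# A toy estimator: graph cost = number of nodes. Assumes AbstractEstimator
# and DAG are in scope (e.g. with the package loaded).
struct NodeCountEstimator <: AbstractEstimator end

cost_type(::NodeCountEstimator) = Int64                       # costs are plain integers
graph_cost(::NodeCountEstimator, graph::DAG) = length(graph.nodes)

# operation_effect is inherited from the default above: push the operation,
# re-estimate with graph_cost, pop, and return the difference.
```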

View File

@@ -7,7 +7,7 @@ Return the particle and value as is.
0 FLOP.
"""
-function compute(::ComputeTaskP, data::ParticleValue{P})::ParticleValue{P} where {P <: ABCParticle}
+function compute(::ComputeTaskP, data::ParticleValue)
return data
end
@@ -18,7 +18,7 @@ Compute an outer edge. Return the particle value with the same particle and the
1 FLOP.
"""
-function compute(::ComputeTaskU, data::ParticleValue{P})::ParticleValue{P} where {P <: ABCParticle}
+function compute(::ComputeTaskU, data::ParticleValue)
return ParticleValue(data.p, data.v * outer_edge(data.p))
end
@@ -29,11 +29,7 @@ Compute a vertex. Preserve momentum and particle types (AB->C etc.) to create re
6 FLOP.
"""
-function compute(
-::ComputeTaskV,
-data1::ParticleValue{P1},
-data2::ParticleValue{P2},
-)::ParticleValue where {P1 <: ABCParticle, P2 <: ABCParticle}
+function compute(::ComputeTaskV, data1::ParticleValue, data2::ParticleValue)
p3 = preserve_momentum(data1.p, data2.p)
dataOut = ParticleValue(p3, data1.v * vertex() * data2.v)
return dataOut
@@ -48,15 +44,14 @@ For valid inputs, both input particles should have the same momenta at this poin
12 FLOP.
"""
-function compute(::ComputeTaskS2, data1::ParticleValue{P}, data2::ParticleValue{P})::Float64 where {P <: ABCParticle}
+function compute(::ComputeTaskS2, data1::ParticleValue, data2::ParticleValue)
#=
@assert isapprox(abs(data1.p.momentum.E), abs(data2.p.momentum.E), rtol = 0.001, atol = sqrt(eps())) "E: $(data1.p.momentum.E) vs. $(data2.p.momentum.E)"
@assert isapprox(data1.p.momentum.px, -data2.p.momentum.px, rtol = 0.001, atol = sqrt(eps())) "px: $(data1.p.momentum.px) vs. $(data2.p.momentum.px)"
@assert isapprox(data1.p.momentum.py, -data2.p.momentum.py, rtol = 0.001, atol = sqrt(eps())) "py: $(data1.p.momentum.py) vs. $(data2.p.momentum.py)"
@assert isapprox(data1.p.momentum.pz, -data2.p.momentum.pz, rtol = 0.001, atol = sqrt(eps())) "pz: $(data1.p.momentum.pz) vs. $(data2.p.momentum.pz)"
=#
-inner = inner_edge(data1.p)
-return data1.v * inner * data2.v
+return data1.v * inner_edge(data1.p) * data2.v
end
"""
@@ -66,7 +61,7 @@ Compute inner edge (1 input particle, 1 output particle).
11 FLOP.
"""
-function compute(::ComputeTaskS1, data::ParticleValue{P})::ParticleValue{P} where {P <: ABCParticle}
+function compute(::ComputeTaskS1, data::ParticleValue)
return ParticleValue(data.p, data.v * inner_edge(data.p))
end
@@ -77,7 +72,7 @@ Compute a sum over the vector. Use an algorithm that accounts for accumulated er
Linearly many FLOP with growing data.
"""
-function compute(::ComputeTaskSum, data::Vector{Float64})::Float64
+function compute(::ComputeTaskSum, data::Vector{Float64})
return sum_kbn(data)
end
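
The compute functions above compose in the same P -> U -> V -> S chain that the generated code executes. A hedged usage sketch, assuming the package's exported types are in scope and the compute-task types are argument-less structs:

```julia
# Manually chaining ABC-model compute tasks (illustrative; constructors
# assumed from the exports shown earlier in this diff).
pA = ParticleValue(ParticleA(SFourMomentum(1.0, 0.0, 0.0, 0.0)), 1.0)
pB = ParticleValue(ParticleB(SFourMomentum(1.0, 0.0, 0.0, 0.0)), 1.0)

uA = compute(ComputeTaskU(), compute(ComputeTaskP(), pA))  # outer edges
uB = compute(ComputeTaskU(), compute(ComputeTaskP(), pB))
vC = compute(ComputeTaskV(), uA, uB)  # vertex producing a C-particle value

# An S2 task would then combine two such inner values into a Float64,
# and ComputeTaskSum reduces all diagram results with sum_kbn.
```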

View File

@@ -1,7 +1,5 @@
-using QEDbase
-import QEDbase.mass
"""
ABCModel <: AbstractPhysicsModel
@@ -89,9 +87,9 @@ For 2 given (non-equal) particle types, return the third of ABC.
"""
function interaction_result(t1::Type{T1}, t2::Type{T2}) where {T1 <: ABCParticle, T2 <: ABCParticle}
@assert t1 != t2
-if t1 != ParticleA && t2 != ParticleA
+if t1 != Type{ParticleA} && t2 != Type{ParticleA}
return ParticleA
-elseif t1 != ParticleB && t2 != ParticleB
+elseif t1 != Type{ParticleB} && t2 != Type{ParticleB}
return ParticleB
else
return ParticleC
@@ -163,6 +161,7 @@ Takes 4 effective FLOP.
function preserve_momentum(p1::ABCParticle, p2::ABCParticle)
t3 = interaction_result(typeof(p1), typeof(p2))
p3 = t3(p1.momentum + p2.momentum)
return p3
end
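
The contract documented for `interaction_result` — return the third of the ABC particle types — is also what the unit tests below verify via `setdiff`. A quick illustration (package types assumed in scope):

```julia
@assert interaction_result(ParticleA, ParticleB) == ParticleC
@assert interaction_result(ParticleB, ParticleC) == ParticleA
# interaction_result(ParticleA, ParticleA)  # throws: equal types fail the @assert
```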

View File

@@ -3,35 +3,35 @@
Return the compute effort of an S1 task.
"""
-compute_effort(t::ComputeTaskS1) = 11.0
+compute_effort(t::ComputeTaskS1) = 11
"""
compute_effort(t::ComputeTaskS2)
Return the compute effort of an S2 task.
"""
-compute_effort(t::ComputeTaskS2) = 12.0
+compute_effort(t::ComputeTaskS2) = 12
"""
compute_effort(t::ComputeTaskU)
Return the compute effort of a U task.
"""
-compute_effort(t::ComputeTaskU) = 1.0
+compute_effort(t::ComputeTaskU) = 1
"""
compute_effort(t::ComputeTaskV)
Return the compute effort of a V task.
"""
-compute_effort(t::ComputeTaskV) = 6.0
+compute_effort(t::ComputeTaskV) = 6
"""
compute_effort(t::ComputeTaskP)
Return the compute effort of a P task.
"""
-compute_effort(t::ComputeTaskP) = 0.0
+compute_effort(t::ComputeTaskP) = 0
"""
compute_effort(t::ComputeTaskSum)
@@ -41,7 +41,7 @@ Return the compute effort of a Sum task.
Note: This is a constant compute effort, even though sum scales with the number of its inputs. Since there is only ever a single sum node in a graph generated from the ABC-Model,
this doesn't matter.
"""
-compute_effort(t::ComputeTaskSum) = 1.0
+compute_effort(t::ComputeTaskSum) = 1
"""
show(io::IO, t::DataTask)

View File

@@ -4,7 +4,7 @@
Task representing a specific data transfer in the ABC Model.
"""
struct DataTask <: AbstractDataTask
-data::Float64
+data::UInt64
end
"""

View File

@@ -1,8 +0,0 @@
"""
GreedyOptimizer
An implementation of the greedy optimization algorithm, simply choosing the best next option evaluated with the given estimator.
"""
struct GreedyOptimizer
estimator::AbstractEstimator
end
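
The docstring describes the greedy strategy; a hedged sketch of a single greedy step built on the estimator interface above (the loop body is illustrative, not the package's optimizer, and assumes the estimator's cost type supports `<` as the interface requires):

```julia
# One greedy step: apply the operation with the lowest estimated cost effect.
# Field names nodeFusions/nodeReductions/nodeSplits follow the tests below.
function greedy_step!(estimator::AbstractEstimator, graph::DAG)
    ops = get_operations(graph)
    best_op, best_effect = nothing, nothing
    for op in Iterators.flatten((ops.nodeFusions, ops.nodeReductions, ops.nodeSplits))
        effect = operation_effect(estimator, graph, op)
        if best_effect === nothing || effect < best_effect
            best_op, best_effect = op, effect
        end
    end
    best_op !== nothing && push_operation!(graph, best_op)
    return best_op
end
```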

View File

@@ -49,7 +49,7 @@ end
Return the compute effort of a data task, always zero, regardless of the specific task.
"""
-compute_effort(t::AbstractDataTask) = 0.0
+compute_effort(t::AbstractDataTask) = 0
"""
data(t::AbstractDataTask)
@@ -63,7 +63,7 @@ data(t::AbstractDataTask) = getfield(t, :data)
Return the data of a compute task, always zero, regardless of the specific task.
"""
-data(t::AbstractComputeTask) = 0.0
+data(t::AbstractComputeTask) = 0
"""
compute_effort(t::FusedComputeTask)

View File

@@ -1,5 +1,4 @@
[deps]
-AccurateArithmetic = "22286c92-06ac-501d-9306-4abd417d9753"
QEDbase = "10e22c08-3ccb-4172-bfcf-7d7aa3d04d93"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

View File

@@ -6,8 +6,6 @@ using Test
include("unit_tests_tasks.jl")
include("unit_tests_nodes.jl")
include("unit_tests_properties.jl")
-include("unit_tests_estimator.jl")
-include("unit_tests_abcmodel.jl")
include("node_reduction.jl")
include("unit_tests_graph.jl")
include("unit_tests_execution.jl")

View File

@@ -1,26 +0,0 @@
using MetagraphOptimization
using QEDbase
import MetagraphOptimization.interaction_result
def_momentum = SFourMomentum(1.0, 0.0, 0.0, 0.0)
testparticleTypes = [ParticleA, ParticleB, ParticleC]
testparticles = [ParticleA(def_momentum), ParticleB(def_momentum), ParticleC(def_momentum)]
@testset "Unit Tests ABC-Model" begin
@testset "Interaction Result" begin
for p1 in testparticleTypes, p2 in testparticleTypes
if (p1 == p2)
@test_throws AssertionError interaction_result(p1, p2)
else
@test interaction_result(p1, p2) == setdiff(testparticleTypes, [p1, p2])[1]
end
end
end
@testset "Vertex" begin
@test isapprox(MetagraphOptimization.vertex(), 1 / 137.0)
end
end
println("ABC-Model Unit Tests Complete!")

View File

@@ -1,99 +0,0 @@
function test_op_specific(estimator, graph, nf::NodeFusion)
estimate = operation_effect(estimator, graph, nf)
data_reduce = data(nf.input[2].task)
@test isapprox(estimate.data, -data_reduce)
@test isapprox(estimate.computeEffort, 0; atol = eps(Float64))
@test isapprox(estimate.computeIntensity, 0; atol = eps(Float64))
return nothing
end
function test_op_specific(estimator, graph, nr::NodeReduction)
estimate = operation_effect(estimator, graph, nr)
data_reduce = data(nr.input[1].task) * (length(nr.input) - 1)
compute_effort_reduce = compute_effort(nr.input[1].task) * (length(nr.input) - 1)
@test isapprox(estimate.data, -data_reduce; atol = eps(Float64))
@test isapprox(estimate.computeEffort, -compute_effort_reduce)
@test isapprox(estimate.computeIntensity, compute_effort_reduce / data_reduce)
return nothing
end
function test_op_specific(estimator, graph, ns::NodeSplit)
estimate = operation_effect(estimator, graph, ns)
copies = length(ns.input.parents) - 1
data_increase = data(ns.input.task) * copies
compute_effort_increase = compute_effort(ns.input.task) * copies
@test isapprox(estimate.data, data_increase; atol = eps(Float64))
@test isapprox(estimate.computeEffort, compute_effort_increase)
@test isapprox(estimate.computeIntensity, compute_effort_increase / data_increase)
return nothing
end
function test_op(estimator, graph, op)
#=
See issue #16
estimate_before = graph_cost(estimator, graph)
estimate = operation_effect(estimator, graph, op)
push_operation!(graph, op)
estimate_after_apply = graph_cost(estimator, graph)
reset_graph!(graph)
@test isapprox((estimate_before + estimate).data, estimate_after_apply.data)
@test isapprox((estimate_before + estimate).computeEffort, estimate_after_apply.computeEffort)
@test isapprox((estimate_before + estimate).computeIntensity, estimate_after_apply.computeIntensity)
=#
test_op_specific(estimator, graph, op)
return nothing
end
@testset "Unit Tests Estimator" begin
@testset "Global Metric Estimator" for (graph_string, exp_data, exp_computeEffort) in
zip(["AB->AB", "AB->ABBB"], [976, 10944], [53, 1075])
estimator = GlobalMetricEstimator()
@test cost_type(estimator) == CDCost
graph = parse_dag(joinpath(@__DIR__, "..", "input", "$(graph_string).txt"), ABCModel())
@testset "Graph Cost" begin
estimate = graph_cost(estimator, graph)
@test estimate.data == exp_data
@test estimate.computeEffort == exp_computeEffort
@test isapprox(estimate.computeIntensity, exp_computeEffort / exp_data)
end
@testset "Operation Cost" begin
ops = get_operations(graph)
nfs = copy(ops.nodeFusions)
nrs = copy(ops.nodeReductions)
nss = copy(ops.nodeSplits)
println(
"Testing $(length(ops.nodeFusions))xNF, $(length(ops.nodeReductions))xNR, $(length(ops.nodeSplits))xNS",
)
for nf in nfs
test_op(estimator, graph, nf)
end
for nr in nrs
test_op(estimator, graph, nr)
end
for ns in nss
test_op(estimator, graph, ns)
end
end
end
end
println("Estimator Unit Tests Complete!")

View File

@@ -1,51 +1,9 @@
import MetagraphOptimization.ABCParticle
import MetagraphOptimization.interaction_result
using QEDbase
using AccurateArithmetic
include("../examples/profiling_utilities.jl")
const RTOL = sqrt(eps(Float64))
function check_particle_reverse_moment(p1::SFourMomentum, p2::SFourMomentum)
@test isapprox(abs(p1.E), abs(p2.E))
@test isapprox(p1.px, -p2.px)
@test isapprox(p1.py, -p2.py)
@test isapprox(p1.pz, -p2.pz)
return nothing
end
function ground_truth_graph_result(input::ABCProcessInput)
# formula for one diagram:
# u_Bp * iλ * u_Ap * S_C * u_B * iλ * u_A
# for the second diagram:
# u_B * iλ * u_Ap * S_C * u_Bp * iλ * u_Ap
# the "u"s are all 1, we ignore the i, λ is 1/137.
constant = (1 / 137.0)^2
# calculate particle C in diagram 1
diagram1_C = ParticleC(input.inParticles[1].momentum + input.inParticles[2].momentum)
diagram2_C = ParticleC(input.inParticles[1].momentum + input.outParticles[2].momentum)
diagram1_Cp = ParticleC(input.outParticles[1].momentum + input.outParticles[2].momentum)
diagram2_Cp = ParticleC(input.outParticles[1].momentum + input.inParticles[2].momentum)
check_particle_reverse_moment(diagram1_Cp.momentum, diagram1_C.momentum)
check_particle_reverse_moment(diagram2_Cp.momentum, diagram2_C.momentum)
@test isapprox(getMass2(diagram1_C.momentum), getMass2(diagram1_Cp.momentum))
@test isapprox(getMass2(diagram2_C.momentum), getMass2(diagram2_Cp.momentum))
inner1 = MetagraphOptimization.inner_edge(diagram1_C)
inner2 = MetagraphOptimization.inner_edge(diagram2_C)
diagram1_result = inner1 * constant
diagram2_result = inner2 * constant
return sum_kbn([diagram1_result, diagram2_result])
end
@testset "Unit Tests Execution" begin
machine = get_machine_info()
@@ -65,29 +23,29 @@ end
ParticleB(SFourMomentum(0.823648, 0.835061, 0.474802, -0.277915)),
],
)
-expected_result = ground_truth_graph_result(particles_2_2)
+expected_result = 0.00013916495566048735
@testset "AB->AB no optimization" begin
for _ in 1:10 # test in a loop because graph layout should not change the result
graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
-@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = 0.001)
# graph should be fully scheduled after being executed
@test is_scheduled(graph)
func = get_compute_function(graph, process_2_2, machine)
-@test isapprox(func(particles_2_2), expected_result; rtol = RTOL)
+@test isapprox(func(particles_2_2), expected_result; rtol = 0.001)
end
end
@testset "AB->AB after random walk" begin
-for i in 1:200
+for i in 1:1000
graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), ABCModel())
random_walk!(graph, 50)
@test is_valid(graph)
-@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = 0.001)
# graph should be fully scheduled after being executed
@test is_scheduled(graph)
@@ -105,20 +63,20 @@ end
@testset "AB->ABBB no optimization" begin
for _ in 1:5 # test in a loop because graph layout should not change the result
graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->ABBB.txt"), ABCModel())
-@test isapprox(execute(graph, process_2_4, machine, particles_2_4), expected_result; rtol = RTOL)
+@test isapprox(execute(graph, process_2_4, machine, particles_2_4), expected_result; rtol = 0.001)
func = get_compute_function(graph, process_2_4, machine)
-@test isapprox(func(particles_2_4), expected_result; rtol = RTOL)
+@test isapprox(func(particles_2_4), expected_result; rtol = 0.001)
end
end
@testset "AB->ABBB after random walk" begin
-for i in 1:50
+for i in 1:200
graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->ABBB.txt"), ABCModel())
random_walk!(graph, 100)
@test is_valid(graph)
-@test isapprox(execute(graph, process_2_4, machine, particles_2_4), expected_result; rtol = RTOL)
+@test isapprox(execute(graph, process_2_4, machine, particles_2_4), expected_result; rtol = 0.001)
end
end
@@ -147,8 +105,8 @@ end
# try execute
@test is_valid(graph)
-expected_result = ground_truth_graph_result(particles_2_2)
-@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+expected_result = 0.00013916495566048735
+@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = 0.001)
end
@@ -177,8 +135,8 @@ end
# try execute
@test is_valid(graph)
-expected_result = ground_truth_graph_result(particles_2_2)
-@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+expected_result = 0.00013916495566048735
+@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = 0.001)
end
@testset "AB->AB fusion edge case" for _ in 1:20
@@ -211,8 +169,8 @@ end
# try execute
@test is_valid(graph)
-expected_result = ground_truth_graph_result(particles_2_2)
-@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = RTOL)
+expected_result = 0.00013916495566048735
+@test isapprox(execute(graph, process_2_2, machine, particles_2_2), expected_result; rtol = 0.001)
end
end