<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE testproblem SYSTEM "regressiontest.dtd">

<testproblem>
  <name>Burgers Equation MMS</name>
  <!-- NOTE(review): the original file appears to have had a line here (lost in
       extraction) — most likely an <owner userid="..."/> element, which the
       regressiontest DTD convention places between <name> and <tags>.
       Restore the correct userid before relying on DTD validation. -->
  <tags>burgers adjoint revolve</tags>
  <problem_definition length="medium" nprocs="1">
    <!-- Run the optimality/adjoint driver on three refinement levels (A-C),
         then the forward Burgers MMS problem on five levels (A-E). -->
    <command_line>../../bin/optimality op_A.oml; ../../bin/optimality op_B.oml; ../../bin/optimality op_C.oml; burgers_equation mms_A.bml; burgers_equation mms_B.bml; burgers_equation mms_C.bml; burgers_equation mms_D.bml; burgers_equation mms_E.bml</command_line>
  </problem_definition>
  <variables>
    <variable name="gradient_conv" language="python">
import glob
from fluidity_tools import stat_parser
# Final recorded convergence order of the adjoint gradient error, one entry
# per optimality run (op_A, op_B, op_C), in sorted filename order.
gradient_conv = [stat_parser(x)["time_integral_ad_gradient_error"]["convergence"][-1] for x in sorted(glob.glob("op_?.stat"))]
    </variable>
    <variable name="functional_value_conv" language="python">
import glob
import math
from fluidity_tools import stat_parser
# Absolute error of the final time-integral functional against its exact
# value (10.0), one entry per adjoint MMS run, in sorted filename order.
functional_errors = [abs(stat_parser(x)["time_integral_ad"]["value"][-1] - 10.0) for x in sorted(glob.glob("mms_adjoint_?.stat"))]
# Observed convergence order between consecutive refinement levels
# (log base 2 of the error ratio).
functional_value_conv = [math.log(functional_errors[i]/functional_errors[i+1], 2) for i in range(0, len(functional_errors)-1)]
    </variable>
  </variables>
  <pass_tests>
    <test name="functional_convergence" language="python">
assert min(functional_value_conv) > 1.9
    </test>
    <test name="gradient_convergence" language="python">
assert min(gradient_conv) > 1.8 # more tolerant because it's stochastic
    </test>
  </pass_tests>
</testproblem>