LinearBlockJac#
LinearBlockJac uses the block Jacobi method to solve the linear system. The method is similar to that used by the LinearBlockGS solver, except that it propagates the derivatives from outputs to inputs only once per iteration. When to choose this solver over the other ones is an advanced topic.
LinearBlockJac is a block solver, so you can specify different linear solvers in the subsystems and they will be utilized to solve the subsystem linear problem.
Here, we calculate the total derivatives across the Sellar system.
SellarDis1withDerivatives
class definition
class SellarDis1withDerivatives(SellarDis1):
    """
    Component containing Discipline 1 -- derivatives version.
    """

    def setup_partials(self):
        # Declare analytic partials of every output w.r.t. every input;
        # the values themselves are filled in by compute_partials.
        self.declare_partials(of='*', wrt='*')

    def compute_partials(self, inputs, partials):
        """
        Jacobian for Sellar discipline 1.
        """
        # The residual is linear in x and y2, so those partials are constant;
        # only the z partial depends on the current point.
        partials['y1', 'x'] = 1.0
        partials['y1', 'y2'] = -0.2
        partials['y1', 'z'] = np.array([[2.0 * inputs['z'][0], 1.0]])
SellarDis2withDerivatives
class definition
class SellarDis2withDerivatives(SellarDis2):
    """
    Component containing Discipline 2 -- derivatives version.
    """

    def setup_partials(self):
        # Analytic Derivs
        self.declare_partials(of='*', wrt='*')

    def compute_partials(self, inputs, J):
        """
        Jacobian for Sellar discipline 2.

        Parameters
        ----------
        inputs : Vector
            Input variable vector; 'y1' and 'z' are read here.
        J : dict-like
            Sub-jacobians keyed on (output, input) name pairs.
        """
        # Copy so the sign/magnitude guards below cannot mutate the
        # inputs vector in place: 'y1 *= -1' on the raw array would
        # silently corrupt the framework's input storage.
        y1 = inputs['y1'].copy()

        # Guard the sqrt derivative against negative and near-zero y1.
        if y1.real < 0.0:
            y1 *= -1
        if y1.real < 1e-8:
            y1 = 1e-8

        # d(sqrt(y1))/dy1 = 0.5 * y1**-0.5; z partials are constant.
        J['y2', 'y1'] = .5 * y1 ** -.5
        J['y2', 'z'] = np.array([[1.0, 1.0]])
import numpy as np
import openmdao.api as om
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
# Build the Sellar MDA model and compute total derivatives with LinearBlockJac.
prob = om.Problem()
model = prob.model

# Coupled disciplines, promoted so they share x, z, y1 and y2.
model.add_subsystem('d1', SellarDis1withDerivatives(),
                    promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(),
                    promotes=['z', 'y1', 'y2'])

# Objective and constraint components.
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                           z=np.array([0.0, 0.0]), x=0.0),
                    promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'),
                    promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'),
                    promotes=['con2', 'y2'])

# Block Gauss-Seidel drives the nonlinear solve; block Jacobi the linear solve.
model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.LinearBlockJac()

prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()

# Total derivatives of the objective with respect to z.
J = prob.compute_totals(of=['obj'], wrt=['z'], return_format='flat_dict')
NL: NLBGS Converged in 8 iterations
LN: LNBJSolver 'LN: LNBJ' on system '' failed to converge in 10 iterations.
# d(obj)/d(z0) and d(obj)/d(z1) from the flat-dict total jacobian.
print(J['obj', 'z'][0][0])
print(J['obj', 'z'][0][1])
9.610009089277705
1.7844852158189335
LinearBlockJac Options#
Option | Default | Acceptable Values | Acceptable Types | Description |
---|---|---|---|---|
assemble_jac | False | [True, False] | ['bool'] | Activates use of assembled jacobian by this solver. |
atol | 1e-10 | N/A | N/A | absolute error tolerance |
err_on_non_converge | False | [True, False] | ['bool'] | When True, AnalysisError will be raised if we don't converge. |
iprint | 1 | N/A | ['int'] | whether to print output |
maxiter | 10 | N/A | ['int'] | maximum number of iterations |
rtol | 1e-10 | N/A | N/A | relative error tolerance |
LinearBlockJac Constructor#
The call signature for the LinearBlockJac
constructor is:
- LinearBlockJac.__init__(**kwargs)
Initialize attributes.
LinearBlockJac Option Examples#
maxiter
This lets you specify the maximum number of block Jacobi iterations to apply. In this example, we cut it back from the default, ten, down to five, so that it terminates a few iterations earlier and doesn't reach the specified absolute or relative tolerance. Note that due to the delayed transfer of information, this solver takes more iterations to converge than the LinearBlockGS solver.
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
# Same Sellar model, but cap the linear solver at 5 iterations via 'maxiter'.
prob = om.Problem()
model = prob.model

# Coupled disciplines, promoted so they share x, z, y1 and y2.
model.add_subsystem('d1', SellarDis1withDerivatives(),
                    promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(),
                    promotes=['z', 'y1', 'y2'])

# Objective and constraint components.
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                           z=np.array([0.0, 0.0]), x=0.0),
                    promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'),
                    promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'),
                    promotes=['con2', 'y2'])

model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.LinearBlockJac()
# Stop the linear solve after 5 block Jacobi iterations (default is 10).
model.linear_solver.options['maxiter'] = 5

prob.setup()
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()

# Total derivatives of the objective with respect to z.
J = prob.compute_totals(of=['obj'], wrt=['z'], return_format='flat_dict')
NL: NLBGS Converged in 8 iterations
LN: LNBJSolver 'LN: LNBJ' on system '' failed to converge in 5 iterations.
# d(obj)/d(z0) and d(obj)/d(z1) from the flat-dict total jacobian.
print(J['obj', 'z'][0][0])
print(J['obj', 'z'][0][1])
9.602301066797569
1.7802249941460035
atol
Here, we set the absolute tolerance to a looser value that will trigger an earlier termination. After each iteration, the norm of the linear residuals is calculated by calling `apply_linear`. If this norm value is lower than the absolute tolerance `atol`, the iteration will terminate.
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
# Same Sellar model, with a looser absolute tolerance on the linear solver.
prob = om.Problem()
model = prob.model

# Coupled disciplines, promoted so they share x, z, y1 and y2.
model.add_subsystem('d1', SellarDis1withDerivatives(),
                    promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(),
                    promotes=['z', 'y1', 'y2'])

# Objective and constraint components.
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                           z=np.array([0.0, 0.0]), x=0.0),
                    promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'),
                    promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'),
                    promotes=['con2', 'y2'])

model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.LinearBlockJac()
# Terminate once the linear residual norm drops below 1e-3.
model.linear_solver.options['atol'] = 1.0e-3

prob.setup(mode='rev')
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()

# Total derivatives of the objective with respect to z.
J = prob.compute_totals(of=['obj'], wrt=['z'], return_format='flat_dict')
NL: NLBGS Converged in 8 iterations
LN: LNBJ Converged in 7 iterations
# d(obj)/d(z0) and d(obj)/d(z1) from the flat-dict total jacobian.
print(J['obj', 'z'][0][0])
print(J['obj', 'z'][0][1])
9.610162963993043
1.784569557267524
rtol
Here, we set the relative tolerance to a looser value that will trigger an earlier termination. After each iteration, the norm of the linear residuals is calculated by calling `apply_linear`. If the ratio of the currently calculated norm to the initial residual norm is lower than the relative tolerance `rtol`, the iteration will terminate.
from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives
# Same Sellar model, with a looser relative tolerance on the linear solver.
prob = om.Problem()
model = prob.model

# Coupled disciplines, promoted so they share x, z, y1 and y2.
model.add_subsystem('d1', SellarDis1withDerivatives(),
                    promotes=['x', 'z', 'y1', 'y2'])
model.add_subsystem('d2', SellarDis2withDerivatives(),
                    promotes=['z', 'y1', 'y2'])

# Objective and constraint components.
model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                           z=np.array([0.0, 0.0]), x=0.0),
                    promotes=['obj', 'x', 'z', 'y1', 'y2'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'),
                    promotes=['con1', 'y1'])
model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'),
                    promotes=['con2', 'y2'])

model.nonlinear_solver = om.NonlinearBlockGS()
model.linear_solver = om.LinearBlockJac()
# Terminate once the residual norm falls below 1e-3 of its initial value.
model.linear_solver.options['rtol'] = 1.0e-3

prob.setup(mode='rev')
prob.set_val('x', 1.)
prob.set_val('z', np.array([5.0, 2.0]))
prob.run_model()

# Total derivatives of the objective with respect to z.
J = prob.compute_totals(of=['obj'], wrt=['z'], return_format='flat_dict')
NL: NLBGS Converged in 8 iterations
LN: LNBJ Converged in 7 iterations
# d(obj)/d(z0) and d(obj)/d(z1) from the flat-dict total jacobian.
print(J['obj', 'z'][0][0])
print(J['obj', 'z'][0][1])
9.610162963993043
1.784569557267524