# LinearUserDefined

LinearUserDefined is a solver that lets you define a custom method for performing a linear solve on a component. The default method is named “solve_linear”, but you can give it any name by passing in the function or method handle to the “solve_function” attribute.

The function needs to have the following signature:

def my_solve_function(d_outputs, d_residuals, mode):
    r"""
    Apply the inverse-Jacobian product for one linear solve.

    The model is assumed to be in an unscaled state when this is called.

    Parameters
    ----------
    d_outputs : Vector
        Unscaled, dimensional quantities read via d_outputs[key].
    d_residuals : Vector
        Unscaled, dimensional quantities read via d_residuals[key].
    mode : str
        Either 'fwd' or 'rev'.

    Returns
    -------
    None or bool or (bool, float, float)
        The bool is the failure flag; the two floats are the absolute
        and relative error.
    """


Here is a rather contrived example where an identity preconditioner is used by giving the component’s “mysolve” method to a LinearUserDefined solver.

import numpy as np

from openmdao.api import Problem, ImplicitComponent, IndepVarComp, LinearRunOnce, PETScKrylov, PETScVector, LinearUserDefined
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI

class CustomSolveImplicit(ImplicitComponent):
    """
    Distributed implicit component that supplies its own preconditioner.

    The component declares a distributed 'states' output plus a scalar
    'out_var' that equals the global sum of the states. Its 'mysolve'
    method is handed to a LinearUserDefined solver as an identity
    preconditioner for the PETScKrylov linear solver.
    """

    def setup(self):
        # NOTE(review): the extracted source dropped the variable
        # declarations; restored so that the i['a'], o['states'] and
        # o['out_var'] accesses in the other methods resolve.
        self.add_input('a', val=10., units='m')

        rank = self.comm.rank
        GLOBAL_SIZE = 15
        sizes, offsets = evenly_distrib_idxs(self.comm.size, GLOBAL_SIZE)

        # Each rank owns a slice of the 15 global states.
        self.add_output('states', shape=int(sizes[rank]))
        self.add_output('out_var', shape=1)
        self.local_size = sizes[rank]

        # Krylov solver preconditioned by this component's own solve.
        self.linear_solver = PETScKrylov()
        self.linear_solver.precon = LinearUserDefined(solve_function=self.mysolve)

    def solve_nonlinear(self, i, o):
        # States simply take the value of the input.
        o['states'] = i['a']

        # out_var = global sum of all states across ranks.
        # NOTE(review): the Allreduce was lost in extraction — without it,
        # tmp stayed all-zero and out_var was always 0.
        local_sum = np.zeros(1)
        local_sum[0] = np.sum(o['states'])
        tmp = np.zeros(1)
        self.comm.Allreduce(local_sum, tmp, op=MPI.SUM)

        o['out_var'] = tmp[0]

    def apply_nonlinear(self, i, o, r):
        # Residual of the distributed states.
        r['states'] = o['states'] - i['a']

        # Residual of out_var against the global sum of the states.
        # NOTE(review): restored the Allreduce; the extracted source
        # referenced an undefined 'tmp' and left global_sum unused.
        local_sum = np.zeros(1)
        local_sum[0] = np.sum(o['states'])
        global_sum = np.zeros(1)
        self.comm.Allreduce(local_sum, global_sum, op=MPI.SUM)

        r['out_var'] = o['out_var'] - global_sum[0]

    def apply_linear(self, i, o, d_i, d_o, d_r, mode):
        """Matrix-free Jacobian-vector products in both directions."""
        if mode == 'fwd':
            if 'states' in d_o:
                d_r['states'] += d_o['states']

                # out_var residual couples to the global sum of d_states.
                local_sum = np.array([np.sum(d_o['states'])])
                global_sum = np.zeros(1)
                self.comm.Allreduce(local_sum, global_sum, op=MPI.SUM)
                d_r['out_var'] -= global_sum

            if 'out_var' in d_o:
                d_r['out_var'] += d_o['out_var']

            if 'a' in d_i:
                d_r['states'] -= d_i['a']

        elif mode == 'rev':
            if 'states' in d_o:
                d_o['states'] += d_r['states']

                # Broadcast rank 0's d_out_var so every rank applies the
                # same coupling term to its slice of the states.
                tmp = np.zeros(1)
                if self.comm.rank == 0:
                    tmp[0] = d_r['out_var'].copy()
                self.comm.Bcast(tmp, root=0)

                d_o['states'] -= tmp

            if 'out_var' in d_o:
                d_o['out_var'] += d_r['out_var']

            if 'a' in d_i:
                d_i['a'] -= np.sum(d_r['states'])

    def mysolve(self, d_outputs, d_residuals, mode):
        r"""
        Apply inverse jac product. The model is assumed to be in an unscaled state.

        If mode is:
            'fwd': d_residuals \|-> d_outputs

            'rev': d_outputs \|-> d_residuals

        Parameters
        ----------
        d_outputs : Vector
            Unscaled, dimensional quantities read via d_outputs[key].
        d_residuals : Vector
            Unscaled, dimensional quantities read via d_residuals[key].
        mode : str
            Either 'fwd' or 'rev'.

        Returns
        -------
        None or bool or (bool, float, float)
            The bool is the failure flag; and the two floats are absolute
            and relative error.
        """
        # Note: we are just preconditioning with Identity as a proof of concept.
        if mode == 'fwd':
            d_outputs.set_vec(d_residuals)
        elif mode == 'rev':
            d_residuals.set_vec(d_outputs)

        return False, 0., 0.

# Build and run the example model.
# NOTE(review): the extracted source never instantiated CustomSolveImplicit
# and printed a hard-coded 15.0; restored the subsystem wiring so the script
# actually computes the derivative it claims to print. The names in
# compute_totals (of=['out_var'], wrt=['a']) imply promoted variables.
prob = Problem()

model = prob.model
model.add_subsystem('p', IndepVarComp('a', 10., units='m'), promotes=['*'])
model.add_subsystem('icomp', CustomSolveImplicit(), promotes=['*'])

# Outer linear solver on the model, with a simple run-once preconditioner.
model.linear_solver = PETScKrylov()
model.linear_solver.precon = LinearRunOnce()

prob.setup(mode='rev', check=False)
prob.run_model()
jac = prob.compute_totals(of=['out_var'], wrt=['a'], return_format='dict')

# d(out_var)/d(a): 15 states, each with derivative 1 w.r.t. 'a' -> 15.0
print(jac['out_var']['a'][0][0])

15.0

## LinearUserDefined Options

| Option | Default | Acceptable Values | Acceptable Types | Description |
| --- | --- | --- | --- | --- |
| assemble_jac | False | N/A | ['bool'] | Activates use of assembled jacobian by this solver. |
| atol | 1e-10 | N/A | N/A | absolute error tolerance |
| err_on_maxiter | False | N/A | ['bool'] | When True, AnalysisError will be raised if we don't converge. |
| iprint | 1 | N/A | ['int'] | whether to print output |
| maxiter | 10 | N/A | ['int'] | maximum number of iterations |
| rtol | 1e-10 | N/A | N/A | relative error tolerance |

Note: Options can be passed as keyword arguments at initialization.

Tags