
integrator

This library provides a set of integrators for simulations. The implementation of the ETDRK methods is adapted from exponax.

ETDRK

torchfsm.integrator.ETDRKIntegrator

Bases: Enum

ETDRK integrator. Provides a unified interface for all ETDRK methods.

Source code in torchfsm/integrator/_etdrk.py
class ETDRKIntegrator(Enum):
    """
    ETDRK Integrator
    Provides a unified interface for all ETDRK methods.
    """
    ETDRK0 = ETDRK0
    ETDRK1 = ETDRK1
    ETDRK2 = ETDRK2
    ETDRK3 = ETDRK3
    ETDRK4 = ETDRK4
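
Each enum member wraps the corresponding integrator class, so a member can be instantiated through its value. A minimal sketch, assuming the enum is importable as torchfsm.integrator.ETDRKIntegrator as shown above (the one-element linear_coef tensor stands in for a full grid of Fourier-space eigenvalues):

import torch
from torchfsm.integrator import ETDRKIntegrator

# The enum value is the integrator class itself; instantiate it directly.
solver = ETDRKIntegrator.ETDRK0.value(dt=0.1, linear_coef=torch.tensor([-1.0]))

u_hat = torch.tensor([1.0 + 0j])  # state in Fourier space
u_hat = solver.step(u_hat)        # one exact linear step: exp(dt * L) * u_hat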

torchfsm.integrator._etdrk._ETDRKBase

Bases: ABC

Solve \(u_t = Lu + N(u)\) using the ETDRK method.

Parameters:

dt (float): Time step. Required.
linear_coef (torch.Tensor): Coefficient of the linear term, i.e., \(L\). Required.
nonlinear_func (Callable[[torch.Tensor], torch.Tensor]): Function that computes the nonlinear term, i.e., \(N(u)\). Required.
num_circle_points (int): Number of points on the unit circle. See [2] for details. Default: 16.
circle_radius (float): Radius of the unit circle. See [2] for details. Default: 1.0.
Reference:

[1] Cox, Steven M., and Paul C. Matthews. "Exponential time differencing for stiff systems." Journal of Computational Physics 176.2 (2002): 430-455.
[2] Kassam, Aly-Khan, and Lloyd N. Trefethen. "Fourth-order time-stepping for stiff PDEs." SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233.

Source code in torchfsm/integrator/_etdrk.py
class _ETDRKBase(ABC):

    """
    Solve
    $$
    u_t=Lu+N(u)
    $$
    using the ETDRK method.

    Args:
        dt(float): Time step.
        linear_coef(torch.Tensor): Coefficient of the linear term, i.e., $L$.
        nonlinear_func(Callable[[torch.Tensor],torch.Tensor]): Function that computes the nonlinear term, i.e., $N(u)$.
        num_circle_points(int): Number of points on the unit circle. See [2] for details.
        circle_radius(float): Radius of the unit circle. See [2] for details.

    Reference:
        [1] Cox, Steven M., and Paul C. Matthews. "Exponential time differencing for stiff systems." Journal of Computational Physics 176.2 (2002): 430-455.
        [2] Kassam, Aly-Khan, and Lloyd N. Trefethen. "Fourth-order time-stepping for stiff PDEs." SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233.
    """

    def __init__(
        self,
        dt: float,
        linear_coef:torch.Tensor,
        nonlinear_func:Callable[[torch.Tensor],torch.Tensor],
        num_circle_points: int = 16,
        circle_radius: float = 1.0,
        ):
        self.dt = dt
        self._nonlinear_func = nonlinear_func
        self._exp_term = torch.exp(self.dt * linear_coef)
        self.LR=(
            circle_radius * roots_of_unity(num_circle_points,device=linear_coef.device,dtype=linear_coef.real.dtype)
            + linear_coef.unsqueeze(-1) * dt
            )

    @abstractmethod
    def step(
        self,
        u_hat,
        ):
        """
        Advance the state in Fourier space.
        """

torchfsm.integrator._etdrk.ETDRK0

Exactly solve a linear PDE in Fourier space

Source code in torchfsm/integrator/_etdrk.py
class ETDRK0():
    """
    Exactly solve a linear PDE in Fourier space
    """
    def __init__(
        self,
        dt: float,
        linear_coef: torch.Tensor,
    ):
        self.dt = dt
        self._exp_term = torch.exp(self.dt * linear_coef)

    def step(
        self,
        u_hat,
    ):
        return self._exp_term * u_hat
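
As a concrete sketch, ETDRK0 integrates the 1D periodic heat equation \(u_t = \nu u_{xx}\) exactly, since its Fourier symbol is \(L = -\nu k^2\) and there is no nonlinear term. The import path is taken from this page; only torch is assumed otherwise:

import torch
from torchfsm.integrator._etdrk import ETDRK0

n, nu, dt = 128, 0.01, 0.1
x = torch.linspace(0, 2 * torch.pi, n + 1)[:-1]  # periodic grid on [0, 2*pi)
k = torch.fft.fftfreq(n, d=1.0 / n)              # integer wavenumbers
linear_coef = -nu * k**2                         # Fourier symbol of nu * d^2/dx^2

solver = ETDRK0(dt, linear_coef)
u_hat = torch.fft.fft(torch.sin(x))
for _ in range(100):
    u_hat = solver.step(u_hat)                   # each mode decays as exp(-nu*k^2*dt)
u = torch.fft.ifft(u_hat).real                   # back to physical space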

torchfsm.integrator._etdrk.ETDRK1

Bases: _ETDRKBase

First-order ETDRK method.

Source code in torchfsm/integrator/_etdrk.py
class ETDRK1(_ETDRKBase):

    """
    First-order ETDRK method.
    """

    def __init__(
        self,
        dt: float,
        linear_coef: torch.Tensor,
        nonlinear_func:Callable[[torch.Tensor],torch.Tensor],
        num_circle_points: int = 16,
        circle_radius: float = 1.0,
        ):
        super().__init__(dt, linear_coef,nonlinear_func,num_circle_points,circle_radius)
        self._coef_1 = dt * torch.mean((torch.exp(self.LR) - 1) / self.LR, axis=-1).real

    def step(
        self,
        u_hat,
        ):
        return self._exp_term * u_hat + self._coef_1 * self._nonlinear_func(u_hat)
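
Because the update acts elementwise on the state tensor, a one-element tensor makes an easy smoke test. A sketch integrating the stiff scalar problem \(u' = -50u + \sin(u)\) with ETDRK1, leaving the contour-integration defaults untouched (import path assumed from this page):

import torch
from torchfsm.integrator._etdrk import ETDRK1

# Stiff scalar test: the linear part -50*u is integrated exactly,
# the nonlinearity sin(u) is handled by the first-order ETDRK update.
dt = 0.01
solver = ETDRK1(dt, linear_coef=torch.tensor([-50.0]), nonlinear_func=torch.sin)

u = torch.tensor([1.0])
for _ in range(500):
    u = solver.step(u)  # relaxes toward the fixed point u = 0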

torchfsm.integrator._etdrk.ETDRK2

Bases: _ETDRKBase

Second-order ETDRK method.

Source code in torchfsm/integrator/_etdrk.py
class ETDRK2(_ETDRKBase):
    """
    Second-order ETDRK method.
    """

    def __init__(
        self,
        dt: float,
        linear_coef: torch.Tensor,
        nonlinear_func:Callable[[torch.Tensor],torch.Tensor],
        num_circle_points: int = 16,
        circle_radius: float = 1.0,
        ):
        super().__init__(dt, linear_coef,nonlinear_func,num_circle_points,circle_radius)
        self._coef_1 = dt * torch.mean((torch.exp(self.LR) - 1) / self.LR, axis=-1).real
        self._coef_2 = dt * torch.mean((torch.exp(self.LR) - 1 - self.LR) / self.LR**2, axis=-1).real

    def step(
        self,
        u_hat,
        ):
        u_nonlin_hat = self._nonlinear_func(u_hat)
        u_stage_1_hat = self._exp_term * u_hat + self._coef_1 * u_nonlin_hat
        u_stage_1_nonlin_hat = self._nonlinear_func(u_stage_1_hat)
        u_next_hat = u_stage_1_hat + self._coef_2 * (
            u_stage_1_nonlin_hat - u_nonlin_hat
            )
        return u_next_hat

torchfsm.integrator._etdrk.ETDRK3

Bases: _ETDRKBase

Third-order ETDRK method.

Source code in torchfsm/integrator/_etdrk.py
class ETDRK3(_ETDRKBase):
    """
    Third-order ETDRK method.
    """
    def __init__(
        self,
        dt: float,
        linear_coef: torch.Tensor,
        nonlinear_func:Callable[[torch.Tensor],torch.Tensor],
        num_circle_points: int = 16,
        circle_radius: float = 1.0,
    ):
        super().__init__(dt, linear_coef,nonlinear_func,num_circle_points,circle_radius)
        self._half_exp_term = torch.exp(0.5 * dt * linear_coef)
        self._coef_1 = dt * torch.mean((torch.exp(self.LR / 2) - 1) / self.LR, axis=-1).real
        self._coef_2 = dt * torch.mean((torch.exp(self.LR) - 1) / self.LR, axis=-1).real
        self._coef_3 = (
            dt
            * torch.mean(
                (-4 - self.LR + torch.exp(self.LR) * (4 - 3 * self.LR + self.LR**2)) / (self.LR**3), axis=-1
            ).real
        )
        self._coef_4 = (
            dt
            * torch.mean(
                (4.0 * (2.0 + self.LR + torch.exp(self.LR) * (-2 + self.LR))) / (self.LR**3), axis=-1
            ).real
        )
        self._coef_5 = (
            dt
            * torch.mean(
                (-4 - 3 * self.LR - self.LR**2 + torch.exp(self.LR) * (4 - self.LR)) / (self.LR**3), axis=-1
            ).real
        )

    def step(
        self,
        u_hat,
        ):
        u_nonlin_hat = self._nonlinear_func(u_hat)
        u_stage_1_hat = self._half_exp_term * u_hat + self._coef_1 * u_nonlin_hat
        u_stage_1_nonlin_hat = self._nonlinear_func(u_stage_1_hat)
        u_stage_2_hat = self._exp_term * u_hat + self._coef_2 * (
            2 * u_stage_1_nonlin_hat - u_nonlin_hat
            )
        u_stage_2_nonlin_hat = self._nonlinear_func(u_stage_2_hat)
        u_next_hat = (
            self._exp_term * u_hat
            + self._coef_3 * u_nonlin_hat
            + self._coef_4 * u_stage_1_nonlin_hat
            + self._coef_5 * u_stage_2_nonlin_hat
            )
        return u_next_hat

torchfsm.integrator._etdrk.ETDRK4

Bases: _ETDRKBase

Fourth-order ETDRK method.

Source code in torchfsm/integrator/_etdrk.py
class ETDRK4(_ETDRKBase):
    """
    Fourth-order ETDRK method.
    """

    def __init__(
        self,
        dt: float,
        linear_coef: torch.Tensor,
        nonlinear_func:Callable[[torch.Tensor],torch.Tensor],
        num_circle_points: int = 16,
        circle_radius: float = 1.0,
    ):
        super().__init__(dt, linear_coef,nonlinear_func,num_circle_points,circle_radius)
        self._half_exp_term = torch.exp(0.5 * dt * linear_coef)
        self._coef_1 = dt * torch.mean((torch.exp(self.LR / 2) - 1) / self.LR, axis=-1).real
        self._coef_2 = self._coef_1
        self._coef_3 = self._coef_1
        self._coef_4 = (
            dt
            * torch.mean(
                (-4 - self.LR + torch.exp(self.LR) * (4 - 3 * self.LR + self.LR**2)) / (self.LR**3), axis=-1
            ).real
        )
        self._coef_5 = (
            dt * torch.mean((2 + self.LR + torch.exp(self.LR) * (-2 + self.LR)) / (self.LR**3), axis=-1).real
        )
        self._coef_6 = (
            dt
            * torch.mean(
                (-4 - 3 * self.LR - self.LR**2 + torch.exp(self.LR) * (4 - self.LR)) / (self.LR**3), axis=-1
            ).real
        )

    def step(
        self,
        u_hat,
        ):
        u_nonlin_hat = self._nonlinear_func(u_hat)
        u_stage_1_hat = self._half_exp_term * u_hat + self._coef_1 * u_nonlin_hat
        u_stage_1_nonlin_hat = self._nonlinear_func(u_stage_1_hat)
        u_stage_2_hat = (
            self._half_exp_term * u_hat + self._coef_2 * u_stage_1_nonlin_hat
            )
        u_stage_2_nonlin_hat = self._nonlinear_func(u_stage_2_hat)
        u_stage_3_hat = self._half_exp_term * u_stage_1_hat + self._coef_3 * (
            2 * u_stage_2_nonlin_hat - u_nonlin_hat
            )
        u_stage_3_nonlin_hat = self._nonlinear_func(u_stage_3_hat)
        u_next_hat = (
            self._exp_term * u_hat
            + self._coef_4 * u_nonlin_hat
            + self._coef_5 * 2 * (u_stage_1_nonlin_hat + u_stage_2_nonlin_hat)
            + self._coef_6 * u_stage_3_nonlin_hat
            )
        return u_next_hat
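
A fuller sketch: the 1D viscous Burgers equation \(u_t = -u u_x + \nu u_{xx}\) on \([0, 2\pi)\), with diffusion carried by the linear term and advection supplied pseudo-spectrally as \(N(u)\). Note that nonlinear_func receives and returns Fourier-space tensors (import path assumed from this page):

import torch
from torchfsm.integrator._etdrk import ETDRK4

n, nu, dt = 256, 0.05, 1e-3
x = torch.linspace(0, 2 * torch.pi, n + 1)[:-1]
k = torch.fft.fftfreq(n, d=1.0 / n)  # integer wavenumbers
ik = 1j * k                          # spectral derivative operator
linear_coef = -nu * k**2             # diffusion, treated exactly

def nonlinear_func(u_hat):
    # N(u) = -u * u_x, evaluated pseudo-spectrally in physical space
    u = torch.fft.ifft(u_hat).real
    u_x = torch.fft.ifft(ik * u_hat).real
    return -torch.fft.fft(u * u_x)

solver = ETDRK4(dt, linear_coef, nonlinear_func)
u_hat = torch.fft.fft(torch.sin(x))
for _ in range(1000):
    u_hat = solver.step(u_hat)
u = torch.fft.ifft(u_hat).real       # steepened, viscously smoothed profile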

RK

torchfsm.integrator.RKIntegrator

Bases: Enum

Enum class for Runge-Kutta integrators. This class provides a set of predefined Runge-Kutta integrators for solving ordinary differential equations (ODEs). Each integrator is represented as a member of the enum and can be used to create an instance of the corresponding integrator class. The integrators include:

- Euler: First-order Euler method.
- Midpoint: Second-order midpoint method.
- Heun12: Second-order Heun method.
- Ralston12: Second-order Ralston method.
- BogackiShampine23: Third-order Bogacki-Shampine method.
- RK4: Fourth-order Runge-Kutta method.
- RK4_38Rule: Fourth-order Runge-Kutta method with the 3/8 rule.
- Dorpi45: Fifth-order Dormand-Prince method.
- Fehlberg45: Fifth-order Fehlberg method.
- CashKarp45: Fifth-order Cash-Karp method.

Source code in torchfsm/integrator/_rk.py
class RKIntegrator(Enum):
    """
    Enum class for Runge-Kutta integrators.
    This class provides a set of predefined Runge-Kutta integrators for solving ordinary differential equations (ODEs).
    Each integrator is represented as a member of the enum, and can be used to create an instance of the corresponding integrator class.
    The integrators include:
        - Euler: First-order Euler method.
        - Midpoint: Second-order Midpoint method.
        - Heun12: Second-order Heun method.
        - Ralston12: Second-order Ralston method.
        - BogackiShampine23: Third-order Bogacki-Shampine method.
        - RK4: Fourth-order Runge-Kutta method.
        - RK4_38Rule: Fourth-order Runge-Kutta method with 3/8 rule.
        - Dorpi45: Fifth-order Dormand-Prince method.
        - Fehlberg45: Fifth-order Fehlberg method.
        - CashKarp45: Fifth-order Cash-Karp method.
    """
    Euler = Euler
    Midpoint = Midpoint
    Heun12 = Heun12
    Ralston12 = Ralston12
    BogackiShampine23 = BogackiShampine23
    RK4 = RK4
    RK4_38Rule = RK4_38Rule
    Dorpi45 = Dorpi45
    Fehlberg45 = Fehlberg45
    CashKarp45 = CashKarp45
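
As with ETDRKIntegrator, each member's value is the integrator class itself. A minimal sketch on the linear test problem dx/dt = -x:

import torch
from torchfsm.integrator import RKIntegrator

solver = RKIntegrator.RK4.value()  # the enum value is the class; instantiate it

f = lambda x: -x                   # right-hand side of dx/dt = -x
x, dt = torch.tensor([1.0]), 0.1
for _ in range(10):
    x = solver.step(f, x, dt)      # ten fixed steps from t = 0 to t = 1
# x is now close to the exact value exp(-1) ≈ 0.36788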

torchfsm.integrator._rk._RKBase

Base class for Runge-Kutta integrators. This class implements the Runge-Kutta method for solving ordinary differential equations (ODEs). The Runge-Kutta method is a numerical technique used to solve ODEs by approximating the solution at discrete time steps. The class provides a flexible interface for defining different Runge-Kutta methods by specifying the coefficients and weights. The class also supports adaptive step size control, allowing for dynamic adjustment of the time step based on the estimated error.

Parameters:

ca (Sequence[float]): Coefficients for the Runge-Kutta method. Required.
b (Sequence[float]): Weights for the Runge-Kutta method. Required.
b_star (Optional[Sequence]): Optional coefficients for error estimation. Default: None.
adaptive (bool): If True, enables adaptive step size control. Default: False.
atol (float): Absolute tolerance for adaptive step size control. Default: 1e-06.
rtol (float): Relative tolerance for adaptive step size control. Default: 1e-05.
Source code in torchfsm/integrator/_rk.py
class _RKBase:
    """
    Base class for Runge-Kutta integrators.
        This class implements the Runge-Kutta method for solving ordinary differential equations (ODEs).
        The Runge-Kutta method is a numerical technique used to solve ODEs by approximating the solution at discrete time steps.
        The class provides a flexible interface for defining different Runge-Kutta methods by specifying the coefficients and weights.
        The class also supports adaptive step size control, allowing for dynamic adjustment of the time step based on the estimated error.

    Args:
        ca (Sequence[float]): Coefficients for the Runge-Kutta method.
        b (Sequence[float]): Weights for the Runge-Kutta method.
        b_star (Optional[Sequence]): Optional coefficients for error estimation.
        adaptive (bool): If True, enables adaptive step size control.
        atol (float): Absolute tolerance for adaptive step size control.
        rtol (float): Relative tolerance for adaptive step size control.
    """

    def __init__(
        self,
        ca: Sequence[float],
        b: Sequence[float],
        b_star: Optional[Sequence] = None,
        adaptive: bool = False,
        atol: float = 1e-6,
        rtol: float = 1e-5,
    ):
        self.ca = ca
        self.b = b
        self.b_star = b_star
        self.adaptive = adaptive
        self.atol = atol
        self.rtol = rtol
        if self.adaptive and self.b_star is None:
            raise ValueError("adaptive step requires b_star")
        self.step= self._adaptive_step if self.adaptive else self._rk_step

    def _rk_step(
        self,
        f: Callable[[Tensor], Tensor],
        x_t: Tensor,
        dt: float,
        return_error: bool = False,
    ):
        ks = [f(x_t)]
        for ca_i in self.ca:
            ks.append(f(x_t + dt * sum([a_i * k for a_i, k in zip(ca_i[1:], ks)])))
        x_new = x_t + dt * sum([b_i * k for b_i, k in zip(self.b, ks)])
        if self.b_star is not None and return_error:
            b_dif = [b_i - b_star_i for b_i, b_star_i in zip(self.b, self.b_star)]
            error = dt * sum([b_dif_i * k for b_dif_i, k in zip(b_dif, ks)])
            return x_new, error
        return x_new

    def _adaptive_step(
            self,
            f: Callable[[Tensor], Tensor],
            x_t: Tensor,
            dt: float,
        ):
        t = 0.0
        t_1 = dt - t
        while t_1 - t > 0:
            dt = min(dt, abs(t_1 - t))
            if self.adaptive:
                while True:
                    y, error = self._rk_step(f, x_t, dt, return_error=True)
                    tolerance = self.atol + self.rtol * torch.max(abs(x_t), abs(y))
                    error = torch.max(error / tolerance).clip(min=1e-9).item()
                    if error < 1.0:
                        x_t, t = y, t + dt
                        break
                    dt = dt * min(10.0, max(0.1, 0.9 / error ** (1 / 5)))
        return x_t
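
Because the base class takes a Butcher tableau directly, a new method needs little more than its coefficients: each row of ca is [c_i, a_i1, a_i2, ...] and b holds the quadrature weights. For illustration, a sketch of Ralston's third-order method (not part of this library) built on _RKBase:

import torch
from torchfsm.integrator._rk import _RKBase  # internal base class, path from this page

class Ralston3(_RKBase):
    """Ralston's third-order method; illustrative only, not part of torchfsm."""
    def __init__(self):
        super().__init__(
            ca=[
                [1 / 2, 1 / 2],     # c2 = 1/2, a21 = 1/2
                [3 / 4, 0, 3 / 4],  # c3 = 3/4, a31 = 0, a32 = 3/4
            ],
            b=[2 / 9, 1 / 3, 4 / 9],
            adaptive=False,
        )

f = lambda x: -x
x, dt = torch.tensor([1.0]), 0.1
solver = Ralston3()
for _ in range(10):
    x = solver.step(f, x, dt)  # x approaches exp(-1)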

torchfsm.integrator._rk.Euler

Bases: _RKBase

First-order Euler method.

Source code in torchfsm/integrator/_rk.py
class Euler(_RKBase):
    """
    First-order Euler method.
    """
    def __init__(self):
        super().__init__([[1.0]], [1.0], adaptive=False)

torchfsm.integrator._rk.Midpoint

Bases: _RKBase

Midpoint method.

Source code in torchfsm/integrator/_rk.py
class Midpoint(_RKBase):
    """
    Midpoint method.
    """
    def __init__(self):
        super().__init__([[1 / 2, 1 / 2]], [0, 1], adaptive=False)

torchfsm.integrator._rk.Heun12

Bases: _RKBase

Heun's second-order method.

Source code in torchfsm/integrator/_rk.py
class Heun12(_RKBase):
    """
    Heun's second-order method.
    """
    def __init__(self, adaptive: bool = False, atol: float = 1e-6, rtol: float = 1e-5):
        super().__init__(
            [[1, 1]], [1 / 2, 1 / 2], [1, 0], adaptive=adaptive, atol=atol, rtol=rtol
        )

torchfsm.integrator._rk.Ralston12

Bases: _RKBase

Ralston's second-order method.

Source code in torchfsm/integrator/_rk.py
class Ralston12(_RKBase):
    """
    Ralston's second-order method.
    """
    def __init__(self, adaptive: bool = False, atol: float = 1e-6, rtol: float = 1e-5):
        super().__init__(
            [[2 / 3, 2 / 3]],
            [1 / 4, 3 / 4],
            [2 / 3, 1 / 3],
            adaptive=adaptive,
            atol=atol,
            rtol=rtol,
        )

torchfsm.integrator._rk.BogackiShampine23

Bases: _RKBase

Third-order Bogacki-Shampine method.

Source code in torchfsm/integrator/_rk.py
class BogackiShampine23(_RKBase):
    """
    Third-order Bogacki-Shampine method.
    """
    def __init__(self, adaptive: bool = False, atol: float = 1e-6, rtol: float = 1e-5):
        super().__init__(
            ca=[
                [1 / 2, 1 / 2],
                [3 / 4, 0, 3 / 4],
                [1, 2 / 9, 1 / 3, 4 / 9],
            ],
            b=[2 / 9, 1 / 3, 4 / 9, 0],
            b_star=[7 / 24, 1 / 4, 1 / 3, 1 / 8],
            adaptive=adaptive,
            atol=atol,
            rtol=rtol,
        )

torchfsm.integrator._rk.RK4

Bases: _RKBase

Fourth-order Runge-Kutta method.

Source code in torchfsm/integrator/_rk.py
class RK4(_RKBase):
    """
    Fourth-order Runge-Kutta method.
    """
    def __init__(self):
        super().__init__(
            ca=[
                [1 / 2, 1 / 2],
                [1 / 2, 0, 1 / 2],
                [1, 0, 0, 1],
            ],
            b=[1 / 6, 1 / 3, 1 / 3, 1 / 6],
            adaptive=False,
        )

torchfsm.integrator._rk.RK4_38Rule

Bases: _RKBase

Fourth-order Runge-Kutta method with ⅜ rule.

Source code in torchfsm/integrator/_rk.py
class RK4_38Rule(_RKBase):
    """
    Fourth-order Runge-Kutta method with 3/8 rule.
    """
    def __init__(self):
        super().__init__(
            ca=[
                [1 / 3, 1 / 3],
                [2 / 3, -1 / 3, 1],
                [1, -1, 1, 1],
            ],
            b=[1 / 8, 3 / 8, 3 / 8, 1 / 8],
            adaptive=False,
        )

torchfsm.integrator._rk.Dorpi45

Bases: _RKBase

Fifth-order Dormand-Prince method.

Source code in torchfsm/integrator/_rk.py
class Dorpi45(_RKBase):
    """
    Fifth-order Dormand-Prince method.
    """

    def __init__(self, adaptive: bool = False, atol: float = 1e-6, rtol: float = 1e-5):
        super().__init__(
            ca=[
                [1 / 5, 1 / 5],
                [3 / 10, 3 / 40, 9 / 40],
                [4 / 5, 44 / 45, -56 / 15, 32 / 9],
                [8 / 9, 19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729],
                [1, 9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
                [1, 35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
            ],
            b=[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
            b_star=[
                5179 / 57600,
                0,
                7571 / 16695,
                393 / 640,
                -92097 / 339200,
                187 / 2100,
                1 / 40,
            ],
            adaptive=adaptive,
            atol=atol,
            rtol=rtol,
        )
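
With adaptive=True, step treats dt as the whole integration interval and sub-steps it internally, shrinking the trial step whenever the embedded error estimate exceeds the tolerance atol + rtol * max(|x_t|, |y|). A sketch (import path assumed from this page):

import torch
from torchfsm.integrator._rk import Dorpi45

solver = Dorpi45(adaptive=True, atol=1e-8, rtol=1e-6)

f = lambda x: -x               # dx/dt = -x
x = torch.tensor([1.0])
x = solver.step(f, x, dt=1.0)  # one adaptive macro-step over [0, 1]
# x is close to the exact value exp(-1) ≈ 0.36788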

torchfsm.integrator._rk.Fehlberg45

Bases: _RKBase

Fehlberg 4(5) method for adaptive step size control. This embedded Runge-Kutta method provides both a fourth-order and a fifth-order approximation of the solution to an ODE, which allows the local error to be estimated.

Source code in torchfsm/integrator/_rk.py
class Fehlberg45(_RKBase):
    """
    Fehlberg 4(5) method for adaptive step size control.
    This method is a Runge-Kutta method that provides a fourth-order and fifth-order approximation of the solution to an ODE.
    """

    def __init__(self, adaptive: bool = False, atol: float = 1e-6, rtol: float = 1e-5):
        super().__init__(
            ca=[
                [1 / 4, 1 / 4],
                [3 / 8, 3 / 32, 9 / 32],
                [12 / 13, 1932 / 2197, -7200 / 2197, 7296 / 2197],
                [1, 439 / 216, -8, 3680 / 513, -845 / 4104],
                [1 / 2, -8 / 27, 2, -3544 / 2565, 1859 / 4104, -11 / 40],
            ],
            b=[16 / 135, 0, 6656 / 12825, 28561 / 56430, -9 / 50, 2 / 55],
            b_star=[25 / 216, 0, 1408 / 2565, 2197 / 4104, -1 / 5, 0],
            adaptive=adaptive,
            atol=atol,
            rtol=rtol,
        )

torchfsm.integrator._rk.CashKarp45

Bases: _RKBase

Cash-Karp 4(5) method for adaptive step size control. This embedded Runge-Kutta method provides both a fourth-order and a fifth-order approximation of the solution to an ODE, which allows the local error to be estimated.

Source code in torchfsm/integrator/_rk.py
class CashKarp45(_RKBase):
    """
    Cash-Karp 4(5) method for adaptive step size control.
    This method is a Runge-Kutta method that provides a fourth-order and fifth-order approximation of the solution to an ODE.
    """

    def __init__(self, adaptive: bool = False, atol: float = 1e-6, rtol: float = 1e-5):
        super().__init__(
            ca=[
                [1 / 5, 1 / 5],
                [3 / 10, 3 / 40, 9 / 40],
                [3 / 5, 3 / 10, -9 / 10, 6 / 5],
                [1, -11 / 54, 5 / 2, -70 / 27, 35 / 27],
                [
                    7 / 8,
                    1631 / 55296,
                    175 / 512,
                    575 / 13824,
                    44275 / 110592,
                    253 / 4096,
                ],
            ],
            b=[37 / 378, 0, 250 / 621, 125 / 594, 0, 512 / 1771],
            b_star=[2825 / 27648, 0, 18575 / 48384, 13525 / 55296, 277 / 14336, 1 / 4],
            adaptive=adaptive,
            atol=atol,
            rtol=rtol,
        )