From 9ca9e36dde3df5f2359eafbed70340df325f0673 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:21:10 +0100 Subject: [PATCH 01/15] Rename `IntArgType` -> `IntLike` --- .../implementing_a_probnum_method.ipynb | 20 +++++++++---------- .../quadopt_example/_probsolve_qp.py | 4 ++-- .../probabilistic_quadratic_optimizer.py | 6 +++--- .../quadopt_example/stopping_criteria.py | 10 +++++----- src/probnum/diffeq/_odesolution.py | 6 +++--- .../diffeq/odefilter/_odefilter_solution.py | 6 +++--- .../_information_operator.py | 6 +++--- .../information_operators/_ode_residual.py | 6 +++--- .../perturbed/step/_perturbation_functions.py | 6 +++--- src/probnum/diffeq/stepsize/_steprule.py | 8 ++++---- .../filtsmooth/_timeseriesposterior.py | 6 +++--- .../filtsmooth/gaussian/_kalmanposterior.py | 14 ++++++------- .../filtsmooth/particle/_particle_filter.py | 4 ++-- .../zoo/filtsmooth/_filtsmooth_problems.py | 8 ++++---- .../problems/zoo/linalg/_random_spd_matrix.py | 6 +++--- src/probnum/quad/_bayesquad.py | 6 +++--- src/probnum/quad/_integration_measures.py | 14 ++++++------- .../quad/solvers/bayesian_quadrature.py | 6 +++--- .../solvers/stopping_criteria/_max_nevals.py | 4 ++-- src/probnum/randprocs/_random_process.py | 6 +++--- .../kernels/_exponentiated_quadratic.py | 4 ++-- src/probnum/randprocs/kernels/_kernel.py | 4 ++-- src/probnum/randprocs/kernels/_linear.py | 4 ++-- src/probnum/randprocs/kernels/_matern.py | 4 ++-- src/probnum/randprocs/kernels/_polynomial.py | 6 +++--- .../randprocs/kernels/_rational_quadratic.py | 4 ++-- src/probnum/randprocs/kernels/_white_noise.py | 4 ++-- src/probnum/randprocs/markov/_transition.py | 4 ++-- .../markov/continuous/_linear_sde.py | 6 +++--- .../randprocs/markov/continuous/_sde.py | 6 +++--- .../markov/discrete/_linear_gaussian.py | 6 +++--- .../markov/discrete/_nonlinear_gaussian.py | 10 +++++----- .../markov/integrator/convert/_convert.py | 6 +++--- src/probnum/typing.py | 10 ++++++++-- tests/test_quad/util.py | 10 +++++----- 35 files changed, 123 insertions(+), 117 deletions(-) diff --git a/docs/source/development/implementing_a_probnum_method.ipynb b/docs/source/development/implementing_a_probnum_method.ipynb index b55da1249..0ec7d5118 100644 --- a/docs/source/development/implementing_a_probnum_method.ipynb +++ b/docs/source/development/implementing_a_probnum_method.ipynb @@ -67,7 +67,7 @@ "\n", "import probnum as pn\n", "from probnum import randvars, linops\n", - "from probnum.typing import FloatArgType, IntArgType\n", + "from probnum.typing import FloatArgType, IntLike\n", "\n", "rng = np.random.default_rng(seed=123)" ] @@ -87,7 +87,7 @@ " fun_params0: Optional[Union[np.ndarray, randvars.RandomVariable]] = None,\n", " assume_fun: Optional[str] = None,\n", " tol: FloatArgType = 10 ** -5,\n", - " maxiter: IntArgType = 10 ** 4,\n", + " maxiter: IntLike = 10 ** 4,\n", " noise_cov: Optional[Union[np.ndarray, linops.LinearOperator]] = None,\n", " callback: Optional[\n", " Callable[[FloatArgType, FloatArgType, randvars.RandomVariable], None]\n", @@ -333,7 +333,7 @@ " randvars.RandomVariable,\n", "]\n", "QuadOptStoppingCriterionType = Callable[\n", - " [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntArgType],\n", + " [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntLike],\n", " Tuple[bool, Union[str, None]],\n", "]\n", "\n", @@ -430,7 +430,7 @@ " self.stopping_criteria = stopping_criteria\n", "\n", " def has_converged(\n", - " self, fun: Callable[[FloatArgType], FloatArgType], iteration: 
IntArgType\n", + " self, fun: Callable[[FloatArgType], FloatArgType], iteration: IntLike\n", " ) -> Tuple[bool, Union[str, None]]:\n", " \"\"\"Check whether the optimizer has converged.\n", "\n", @@ -584,10 +584,10 @@ "internal representation of those same objects. Canonical examples are different kinds of integer or float types, which might be passed by a user. These are all unified internally.\n", "\n", "```python\n", - "IntArgType = Union[int, numbers.Integral, np.integer]\n", + "IntLike = Union[int, numbers.Integral, np.integer]\n", "FloatArgType = Union[float, numbers.Real, np.floating]\n", "\n", - "ShapeArgType = Union[IntArgType, Iterable[IntArgType]]\n", + "ShapeArgType = Union[IntLike, Iterable[IntLike]]\n", "\"\"\"Type of a public API argument for supplying a shape. Values of this type should\n", "always be converted into :class:`ShapeType` using the function\n", ":func:`probnum.utils.as_shape` before further internal processing.\"\"\"\n", @@ -602,11 +602,11 @@ "metadata": {}, "outputs": [], "source": [ - "from probnum.typing import ShapeType, IntArgType, ShapeArgType\n", + "from probnum.typing import ShapeType, IntLike, ShapeArgType\n", "from probnum.utils import as_shape\n", "\n", "\n", - "def extend_shape(shape: ShapeArgType, extension: IntArgType) -> ShapeType:\n", + "def extend_shape(shape: ShapeArgType, extension: IntLike) -> ShapeType:\n", " return as_shape(shape) + as_shape(extension)" ] }, @@ -823,7 +823,7 @@ "The stopping criteria are also implemented as simple methods, which return a `bool` determining convergence and a string giving the name of the criterion.\n", "```python\n", "QuadOptStoppingCriterionType = Callable[\n", - " [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntArgType],\n", + " [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntLike],\n", " Tuple[bool, Union[str, None]],\n", "]\n", "```\n", @@ -840,7 +840,7 @@ "def parameter_uncertainty(\n", " fun: Callable[[FloatArgType], FloatArgType],\n", " fun_params0: randvars.RandomVariable,\n", - " current_iter: IntArgType,\n", + " current_iter: IntLike,\n", " abstol: FloatArgType,\n", " reltol: FloatArgType,\n", ") -> Tuple[bool, Union[str, None]]:\n", diff --git a/docs/source/development/quadopt_example/_probsolve_qp.py b/docs/source/development/quadopt_example/_probsolve_qp.py index 22b022f78..fbd64a76f 100644 --- a/docs/source/development/quadopt_example/_probsolve_qp.py +++ b/docs/source/development/quadopt_example/_probsolve_qp.py @@ -6,7 +6,7 @@ import probnum as pn import probnum.utils as _utils from probnum import linops, randvars -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike from .belief_updates import gaussian_belief_update from .observation_operators import function_evaluation @@ -21,7 +21,7 @@ def probsolve_qp( fun_params0: Optional[Union[np.ndarray, randvars.RandomVariable]] = None, assume_fun: Optional[str] = None, tol: FloatArgType = 10 ** -5, - maxiter: IntArgType = 10 ** 4, + maxiter: IntLike = 10 ** 4, noise_cov: Optional[Union[np.ndarray, linops.LinearOperator]] = None, callback: Optional[ Callable[[FloatArgType, FloatArgType, randvars.RandomVariable], None] diff --git a/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py b/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py index 60b2e980f..412750940 100644 --- a/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py +++ 
b/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py @@ -7,7 +7,7 @@ import probnum as pn import probnum.utils as _utils from probnum import linops, randvars -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike from .belief_updates import gaussian_belief_update from .observation_operators import function_evaluation @@ -34,7 +34,7 @@ randvars.RandomVariable, ] QuadOptStoppingCriterionType = Callable[ - [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntArgType], + [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntLike], Tuple[bool, Union[str, None]], ] @@ -131,7 +131,7 @@ def __init__( self.stopping_criteria = stopping_criteria def has_converged( - self, fun: Callable[[FloatArgType], FloatArgType], iteration: IntArgType + self, fun: Callable[[FloatArgType], FloatArgType], iteration: IntLike ) -> Tuple[bool, Union[str, None]]: """Check whether the optimizer has converged. diff --git a/docs/source/development/quadopt_example/stopping_criteria.py b/docs/source/development/quadopt_example/stopping_criteria.py index 1fc50af0d..c92176024 100644 --- a/docs/source/development/quadopt_example/stopping_criteria.py +++ b/docs/source/development/quadopt_example/stopping_criteria.py @@ -5,13 +5,13 @@ import numpy as np from probnum import randvars -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike def parameter_uncertainty( fun: Callable[[FloatArgType], FloatArgType], fun_params0: randvars.RandomVariable, - current_iter: IntArgType, + current_iter: IntLike, abstol: FloatArgType, reltol: FloatArgType, ) -> Tuple[bool, Union[str, None]]: @@ -43,8 +43,8 @@ def parameter_uncertainty( def maximum_iterations( fun: Callable[[FloatArgType], FloatArgType], fun_params0: randvars.RandomVariable, - current_iter: IntArgType, - maxiter: IntArgType, + current_iter: IntLike, + maxiter: IntLike, ) -> Tuple[bool, Union[str, None]]: """Termination based on maximum number of iterations. 
@@ -68,7 +68,7 @@ def maximum_iterations( def residual( fun: Callable[[FloatArgType], FloatArgType], fun_params0: randvars.RandomVariable, - current_iter: IntArgType, + current_iter: IntLike, abstol: FloatArgType, reltol: FloatArgType, ) -> Tuple[bool, Union[str, None]]: diff --git a/src/probnum/diffeq/_odesolution.py b/src/probnum/diffeq/_odesolution.py index 0c7e913cf..4200c3327 100644 --- a/src/probnum/diffeq/_odesolution.py +++ b/src/probnum/diffeq/_odesolution.py @@ -11,7 +11,7 @@ from probnum import filtsmooth, randvars from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatArgType, IntArgType, ShapeArgType +from probnum.typing import FloatArgType, IntLike, ShapeArgType class ODESolution(filtsmooth.TimeSeriesPosterior): @@ -44,8 +44,8 @@ def __init__( def interpolate( self, t: FloatArgType, - previous_index: Optional[IntArgType] = None, - next_index: Optional[IntArgType] = None, + previous_index: Optional[IntLike] = None, + next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: raise NotImplementedError("Dense output is not implemented.") diff --git a/src/probnum/diffeq/odefilter/_odefilter_solution.py b/src/probnum/diffeq/odefilter/_odefilter_solution.py index 80c2b24b0..bd186fd24 100644 --- a/src/probnum/diffeq/odefilter/_odefilter_solution.py +++ b/src/probnum/diffeq/odefilter/_odefilter_solution.py @@ -7,7 +7,7 @@ from probnum import filtsmooth, randvars, utils from probnum.diffeq import _odesolution from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatArgType, IntArgType, ShapeArgType +from probnum.typing import FloatArgType, IntLike, ShapeArgType class ODEFilterSolution(_odesolution.ODESolution): @@ -91,8 +91,8 @@ def __init__(self, kalman_posterior: filtsmooth.gaussian.KalmanPosterior): def interpolate( self, t: FloatArgType, - previous_index: Optional[IntArgType] = None, - next_index: Optional[IntArgType] = None, + previous_index: Optional[IntLike] = None, + next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: out_rv = self.kalman_posterior.interpolate( t, previous_index=previous_index, next_index=next_index diff --git a/src/probnum/diffeq/odefilter/information_operators/_information_operator.py b/src/probnum/diffeq/odefilter/information_operators/_information_operator.py index 05cbe74f8..d5da0ee19 100644 --- a/src/probnum/diffeq/odefilter/information_operators/_information_operator.py +++ b/src/probnum/diffeq/odefilter/information_operators/_information_operator.py @@ -6,7 +6,7 @@ import numpy as np from probnum import problems, randprocs -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike __all__ = ["InformationOperator", "ODEInformationOperator"] @@ -36,7 +36,7 @@ class InformationOperator(abc.ABC): Therefore, they are one important component in a probabilistic ODE solver. """ - def __init__(self, input_dim: IntArgType, output_dim: IntArgType): + def __init__(self, input_dim: IntLike, output_dim: IntLike): self.input_dim = input_dim self.output_dim = output_dim @@ -84,7 +84,7 @@ class ODEInformationOperator(InformationOperator): :class:`InitialValueProblem`. Not all information operators that are used in ODE solvers do. 
""" - def __init__(self, input_dim: IntArgType, output_dim: IntArgType): + def __init__(self, input_dim: IntLike, output_dim: IntLike): super().__init__(input_dim=input_dim, output_dim=output_dim) # Initialized once the ODE can be seen diff --git a/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py b/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py index 01f953aba..5875cac1f 100644 --- a/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py +++ b/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py @@ -6,7 +6,7 @@ from probnum import problems, randprocs from probnum.diffeq.odefilter.information_operators import _information_operator -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike __all__ = ["ODEResidual"] @@ -14,7 +14,7 @@ class ODEResidual(_information_operator.ODEInformationOperator): """Information operator that measures the residual of an explicit ODE.""" - def __init__(self, num_prior_derivatives: IntArgType, ode_dimension: IntArgType): + def __init__(self, num_prior_derivatives: IntLike, ode_dimension: IntLike): integrator_dimension = ode_dimension * (num_prior_derivatives + 1) super().__init__(input_dim=integrator_dimension, output_dim=ode_dimension) # Store remaining attributes @@ -47,7 +47,7 @@ def incorporate_ode(self, ode: problems.InitialValueProblem): self._residual, self._residual_jacobian = res, res_jac def _match_residual_and_jacobian_to_ode_order( - self, ode_order: IntArgType + self, ode_order: IntLike ) -> Tuple[Callable, Callable]: """Choose the correct residual (and Jacobian) implementation based on the order of the ODE.""" diff --git a/src/probnum/diffeq/perturbed/step/_perturbation_functions.py b/src/probnum/diffeq/perturbed/step/_perturbation_functions.py index 6544635ac..ce31d3ca2 100644 --- a/src/probnum/diffeq/perturbed/step/_perturbation_functions.py +++ b/src/probnum/diffeq/perturbed/step/_perturbation_functions.py @@ -4,13 +4,13 @@ import numpy as np import scipy -from probnum.typing import FloatArgType, IntArgType, ShapeArgType +from probnum.typing import FloatArgType, IntLike, ShapeArgType def perturb_uniform( rng: np.random.Generator, step: FloatArgType, - solver_order: IntArgType, + solver_order: IntLike, noise_scale: FloatArgType, size: Optional[ShapeArgType] = (), ) -> Union[float, np.ndarray]: @@ -51,7 +51,7 @@ def perturb_uniform( def perturb_lognormal( rng: np.random.Generator, step: FloatArgType, - solver_order: IntArgType, + solver_order: IntLike, noise_scale: FloatArgType, size: Optional[ShapeArgType] = (), ) -> Union[float, np.ndarray]: diff --git a/src/probnum/diffeq/stepsize/_steprule.py b/src/probnum/diffeq/stepsize/_steprule.py index 9a0feb24f..6f770eaca 100644 --- a/src/probnum/diffeq/stepsize/_steprule.py +++ b/src/probnum/diffeq/stepsize/_steprule.py @@ -5,7 +5,7 @@ import numpy as np -from probnum.typing import FloatArgType, IntArgType, ToleranceDiffusionType +from probnum.typing import FloatArgType, IntLike, ToleranceDiffusionType class StepRule(ABC): @@ -19,7 +19,7 @@ def suggest( self, laststep: FloatArgType, scaled_error: FloatArgType, - localconvrate: Optional[IntArgType] = None, + localconvrate: Optional[IntLike] = None, ): """Suggest a new step h_{n+1} given error estimate e_n at step h_n.""" raise NotImplementedError @@ -58,7 +58,7 @@ def suggest( self, laststep: FloatArgType, scaled_error: FloatArgType, - localconvrate: Optional[IntArgType] = None, + localconvrate: Optional[IntLike] = None, ): return self.step @@ 
-112,7 +112,7 @@ def suggest( self, laststep: FloatArgType, scaled_error: FloatArgType, - localconvrate: Optional[IntArgType] = None, + localconvrate: Optional[IntLike] = None, ): small, large = self.limitchange diff --git a/src/probnum/filtsmooth/_timeseriesposterior.py b/src/probnum/filtsmooth/_timeseriesposterior.py index 5fb0ac51c..261f07694 100644 --- a/src/probnum/filtsmooth/_timeseriesposterior.py +++ b/src/probnum/filtsmooth/_timeseriesposterior.py @@ -10,7 +10,7 @@ ArrayLikeGetitemArgType, DenseOutputLocationArgType, FloatArgType, - IntArgType, + IntLike, ShapeArgType, ) @@ -160,8 +160,8 @@ def __call__(self, t: DenseOutputLocationArgType) -> DenseOutputValueType: def interpolate( self, t: FloatArgType, - previous_index: Optional[IntArgType] = None, - next_index: Optional[IntArgType] = None, + previous_index: Optional[IntLike] = None, + next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: """Evaluate the posterior at a measurement-free point. diff --git a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py index 1f41c2132..580019842 100644 --- a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py +++ b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py @@ -15,7 +15,7 @@ from probnum.typing import ( DenseOutputLocationArgType, FloatArgType, - IntArgType, + IntLike, ShapeArgType, ) @@ -61,8 +61,8 @@ def __init__( def interpolate( self, t: FloatArgType, - previous_index: Optional[IntArgType] = None, - next_index: Optional[IntArgType] = None, + previous_index: Optional[IntLike] = None, + next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: raise NotImplementedError @@ -172,8 +172,8 @@ def __init__( def interpolate( self, t: FloatArgType, - previous_index: Optional[IntArgType] = None, - next_index: Optional[IntArgType] = None, + previous_index: Optional[IntLike] = None, + next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: # Assert either previous_location or next_location is not None @@ -365,8 +365,8 @@ class FilteringPosterior(KalmanPosterior): def interpolate( self, t: FloatArgType, - previous_index: Optional[IntArgType] = None, - next_index: Optional[IntArgType] = None, + previous_index: Optional[IntLike] = None, + next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: # Assert either previous_location or next_location is not None diff --git a/src/probnum/filtsmooth/particle/_particle_filter.py b/src/probnum/filtsmooth/particle/_particle_filter.py index 4332dc14e..6d6b3aabe 100644 --- a/src/probnum/filtsmooth/particle/_particle_filter.py +++ b/src/probnum/filtsmooth/particle/_particle_filter.py @@ -10,7 +10,7 @@ _importance_distributions, _particle_filter_posterior, ) -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike # Terribly long variable names, but internal only, so no worries. 
ParticleFilterMeasurementModelArgType = Union[ @@ -65,7 +65,7 @@ def __init__( self, prior_process: randprocs.markov.MarkovProcess, importance_distribution: _importance_distributions.ImportanceDistribution, - num_particles: IntArgType, + num_particles: IntLike, rng: np.random.Generator, with_resampling: bool = True, resampling_percentage_threshold: FloatArgType = 0.1, diff --git a/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py b/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py index 947f9f5d4..6328f7332 100644 --- a/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py +++ b/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py @@ -4,7 +4,7 @@ from probnum import diffeq, filtsmooth, problems, randprocs, randvars from probnum.problems.zoo import diffeq as diffeq_zoo -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike __all__ = [ "benes_daum", @@ -19,7 +19,7 @@ def car_tracking( rng: np.random.Generator, measurement_variance: FloatArgType = 0.5, process_diffusion: FloatArgType = 1.0, - num_prior_derivatives: IntArgType = 1, + num_prior_derivatives: IntLike = 1, timespan: Tuple[FloatArgType, FloatArgType] = (0.0, 20.0), step: FloatArgType = 0.2, initrv: Optional[randvars.RandomVariable] = None, @@ -512,9 +512,9 @@ def logistic_ode( params: Tuple[FloatArgType, FloatArgType] = (6.0, 1.0), initrv: Optional[randvars.RandomVariable] = None, evlvar: Optional[Union[np.ndarray, FloatArgType]] = None, - ek0_or_ek1: IntArgType = 1, + ek0_or_ek1: IntLike = 1, exclude_initial_condition: bool = True, - order: IntArgType = 3, + order: IntLike = 3, forward_implementation: str = "classic", backward_implementation: str = "classic", ): diff --git a/src/probnum/problems/zoo/linalg/_random_spd_matrix.py b/src/probnum/problems/zoo/linalg/_random_spd_matrix.py index 9e103f8b2..327a9d09a 100644 --- a/src/probnum/problems/zoo/linalg/_random_spd_matrix.py +++ b/src/probnum/problems/zoo/linalg/_random_spd_matrix.py @@ -5,12 +5,12 @@ import numpy as np import scipy.stats -from probnum.typing import IntArgType +from probnum.typing import IntLike def random_spd_matrix( rng: np.random.Generator, - dim: IntArgType, + dim: IntLike, spectrum: Sequence = None, ) -> np.ndarray: r"""Random symmetric positive definite matrix. 
@@ -92,7 +92,7 @@ def random_spd_matrix( def random_sparse_spd_matrix( rng: np.random.Generator, - dim: IntArgType, + dim: IntLike, density: float, chol_entry_min: float = 0.1, chol_entry_max: float = 1.0, diff --git a/src/probnum/quad/_bayesquad.py b/src/probnum/quad/_bayesquad.py index f9d8e3863..5516c6c25 100644 --- a/src/probnum/quad/_bayesquad.py +++ b/src/probnum/quad/_bayesquad.py @@ -14,7 +14,7 @@ from probnum.randprocs.kernels import Kernel from probnum.randvars import Normal -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike from ._integration_measures import GaussianMeasure, IntegrationMeasure, LebesgueMeasure from .solvers import BayesianQuadrature @@ -30,10 +30,10 @@ def bayesquad( ] = None, measure: Optional[IntegrationMeasure] = None, policy: Optional[str] = "bmc", - max_evals: Optional[IntArgType] = None, + max_evals: Optional[IntLike] = None, var_tol: Optional[FloatArgType] = None, rel_tol: Optional[FloatArgType] = None, - batch_size: Optional[IntArgType] = 1, + batch_size: Optional[IntLike] = 1, rng: Optional[np.random.Generator] = np.random.default_rng(), ) -> Tuple[Normal, Dict]: r"""Infer the solution of the uni- or multivariate integral :math:`\int_\Omega f(x) d \mu(x)` diff --git a/src/probnum/quad/_integration_measures.py b/src/probnum/quad/_integration_measures.py index 6d66a05dc..bfdc6eec4 100644 --- a/src/probnum/quad/_integration_measures.py +++ b/src/probnum/quad/_integration_measures.py @@ -7,7 +7,7 @@ import scipy.stats from probnum.randvars import Normal -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike class IntegrationMeasure(abc.ABC): @@ -28,7 +28,7 @@ class IntegrationMeasure(abc.ABC): def __init__( self, domain: Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]], - input_dim: IntArgType, + input_dim: IntLike, ) -> None: self._set_dimension_domain(input_dim, domain) @@ -51,7 +51,7 @@ def __call__(self, points: Union[FloatArgType, np.ndarray]) -> np.ndarray: def sample( self, - n_sample: IntArgType, + n_sample: IntLike, rng: Optional[np.random.Generator] = np.random.default_rng(), ) -> np.ndarray: """Sample ``n_sample`` points from the integration measure. @@ -76,7 +76,7 @@ def sample( def _set_dimension_domain( self, - input_dim: IntArgType, + input_dim: IntLike, domain: Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]], ) -> None: """Sets the integration domain and input_dimension. 
@@ -151,7 +151,7 @@ class LebesgueMeasure(IntegrationMeasure): def __init__( self, domain: Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]], - input_dim: Optional[IntArgType] = None, + input_dim: Optional[IntLike] = None, normalized: Optional[bool] = False, ) -> None: super().__init__(input_dim=input_dim, domain=domain) @@ -181,7 +181,7 @@ def __call__(self, points: np.ndarray) -> np.ndarray: def sample( self, - n_sample: IntArgType, + n_sample: IntLike, rng: Optional[np.random.Generator] = np.random.default_rng(), ) -> np.ndarray: return self.random_variable.rvs( @@ -211,7 +211,7 @@ def __init__( self, mean: Union[float, np.floating, np.ndarray], cov: Union[float, np.floating, np.ndarray], - input_dim: Optional[IntArgType] = None, + input_dim: Optional[IntLike] = None, ) -> None: # Extend scalar mean and covariance to higher dimensions if input_dim has been diff --git a/src/probnum/quad/solvers/bayesian_quadrature.py b/src/probnum/quad/solvers/bayesian_quadrature.py index bfe5042d7..ea918e0a8 100644 --- a/src/probnum/quad/solvers/bayesian_quadrature.py +++ b/src/probnum/quad/solvers/bayesian_quadrature.py @@ -13,7 +13,7 @@ ) from probnum.randprocs.kernels import ExpQuad, Kernel from probnum.randvars import Normal -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike from .._integration_measures import IntegrationMeasure, LebesgueMeasure from ..kernel_embeddings import KernelEmbedding @@ -67,10 +67,10 @@ def from_problem( Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]] ] = None, policy: str = "bmc", - max_evals: Optional[IntArgType] = None, + max_evals: Optional[IntLike] = None, var_tol: Optional[FloatArgType] = None, rel_tol: Optional[FloatArgType] = None, - batch_size: IntArgType = 1, + batch_size: IntLike = 1, rng: np.random.Generator = None, ) -> "BayesianQuadrature": diff --git a/src/probnum/quad/solvers/stopping_criteria/_max_nevals.py b/src/probnum/quad/solvers/stopping_criteria/_max_nevals.py index fb40e3f32..59c9a8ce1 100644 --- a/src/probnum/quad/solvers/stopping_criteria/_max_nevals.py +++ b/src/probnum/quad/solvers/stopping_criteria/_max_nevals.py @@ -2,7 +2,7 @@ from probnum.quad.solvers.bq_state import BQState from probnum.quad.solvers.stopping_criteria import BQStoppingCriterion -from probnum.typing import IntArgType +from probnum.typing import IntLike # pylint: disable=too-few-public-methods @@ -16,7 +16,7 @@ class MaxNevals(BQStoppingCriterion): Maximum number of integrand evaluations. 
""" - def __init__(self, max_nevals: IntArgType): + def __init__(self, max_nevals: IntLike): self.max_nevals = max_nevals def __call__(self, bq_state: BQState) -> bool: diff --git a/src/probnum/randprocs/_random_process.py b/src/probnum/randprocs/_random_process.py index 5c9ef0a8e..691a7cf38 100644 --- a/src/probnum/randprocs/_random_process.py +++ b/src/probnum/randprocs/_random_process.py @@ -7,7 +7,7 @@ from probnum import randvars from probnum import utils as _utils -from probnum.typing import DTypeArgType, IntArgType, ShapeArgType +from probnum.typing import DTypeArgType, IntLike, ShapeArgType _InputType = TypeVar("InputType") _OutputType = TypeVar("OutputType") @@ -47,8 +47,8 @@ class RandomProcess(Generic[_InputType, _OutputType], abc.ABC): def __init__( self, - input_dim: IntArgType, - output_dim: Optional[IntArgType], + input_dim: IntLike, + output_dim: Optional[IntLike], dtype: DTypeArgType, ): self._input_dim = np.int_(_utils.as_numpy_scalar(input_dim)) diff --git a/src/probnum/randprocs/kernels/_exponentiated_quadratic.py b/src/probnum/randprocs/kernels/_exponentiated_quadratic.py index a9972d4ed..0d823b9fc 100644 --- a/src/probnum/randprocs/kernels/_exponentiated_quadratic.py +++ b/src/probnum/randprocs/kernels/_exponentiated_quadratic.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntArgType, ScalarArgType +from probnum.typing import IntLike, ScalarArgType from ._kernel import IsotropicMixin, Kernel @@ -46,7 +46,7 @@ class ExpQuad(Kernel, IsotropicMixin): [1.92874985e-22, 3.72665317e-06, 1.00000000e+00]]) """ - def __init__(self, input_dim: IntArgType, lengthscale: ScalarArgType = 1.0): + def __init__(self, input_dim: IntLike, lengthscale: ScalarArgType = 1.0): self.lengthscale = _utils.as_numpy_scalar(lengthscale) super().__init__(input_dim=input_dim) diff --git a/src/probnum/randprocs/kernels/_kernel.py b/src/probnum/randprocs/kernels/_kernel.py index 234f0bc9e..6b3031382 100644 --- a/src/probnum/randprocs/kernels/_kernel.py +++ b/src/probnum/randprocs/kernels/_kernel.py @@ -6,7 +6,7 @@ import numpy as np from probnum import utils as _pn_utils -from probnum.typing import ArrayLike, IntArgType, ShapeArgType, ShapeType +from probnum.typing import ArrayLike, IntLike, ShapeArgType, ShapeType class Kernel(abc.ABC): @@ -134,7 +134,7 @@ class Kernel(abc.ABC): def __init__( self, - input_dim: IntArgType, + input_dim: IntLike, shape: ShapeArgType = (), ): self._input_dim = int(input_dim) diff --git a/src/probnum/randprocs/kernels/_linear.py b/src/probnum/randprocs/kernels/_linear.py index f97ad7629..9d99910f0 100644 --- a/src/probnum/randprocs/kernels/_linear.py +++ b/src/probnum/randprocs/kernels/_linear.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntArgType, ScalarArgType +from probnum.typing import IntLike, ScalarArgType from ._kernel import Kernel @@ -40,7 +40,7 @@ class Linear(Kernel): [ 8., 13.]]) """ - def __init__(self, input_dim: IntArgType, constant: ScalarArgType = 0.0): + def __init__(self, input_dim: IntLike, constant: ScalarArgType = 0.0): self.constant = _utils.as_numpy_scalar(constant) super().__init__(input_dim=input_dim) diff --git a/src/probnum/randprocs/kernels/_matern.py b/src/probnum/randprocs/kernels/_matern.py index d8af2a08f..d36259dd5 100644 --- a/src/probnum/randprocs/kernels/_matern.py +++ b/src/probnum/randprocs/kernels/_matern.py @@ -7,7 +7,7 @@ import scipy.special import probnum.utils as _utils -from probnum.typing import IntArgType, ScalarArgType 
+from probnum.typing import IntLike, ScalarArgType from ._kernel import IsotropicMixin, Kernel @@ -63,7 +63,7 @@ class Matern(Kernel, IsotropicMixin): def __init__( self, - input_dim: IntArgType, + input_dim: IntLike, lengthscale: ScalarArgType = 1.0, nu: ScalarArgType = 1.5, ): diff --git a/src/probnum/randprocs/kernels/_polynomial.py b/src/probnum/randprocs/kernels/_polynomial.py index 13895803f..01ab4fb55 100644 --- a/src/probnum/randprocs/kernels/_polynomial.py +++ b/src/probnum/randprocs/kernels/_polynomial.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntArgType, ScalarArgType +from probnum.typing import IntLike, ScalarArgType from ._kernel import Kernel @@ -44,9 +44,9 @@ class Polynomial(Kernel): def __init__( self, - input_dim: IntArgType, + input_dim: IntLike, constant: ScalarArgType = 0.0, - exponent: IntArgType = 1.0, + exponent: IntLike = 1.0, ): self.constant = _utils.as_numpy_scalar(constant) self.exponent = _utils.as_numpy_scalar(exponent) diff --git a/src/probnum/randprocs/kernels/_rational_quadratic.py b/src/probnum/randprocs/kernels/_rational_quadratic.py index acc144303..963d7ec62 100644 --- a/src/probnum/randprocs/kernels/_rational_quadratic.py +++ b/src/probnum/randprocs/kernels/_rational_quadratic.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntArgType, ScalarArgType +from probnum.typing import IntLike, ScalarArgType from ._kernel import IsotropicMixin, Kernel @@ -58,7 +58,7 @@ class RatQuad(Kernel, IsotropicMixin): def __init__( self, - input_dim: IntArgType, + input_dim: IntLike, lengthscale: ScalarArgType = 1.0, alpha: ScalarArgType = 1.0, ): diff --git a/src/probnum/randprocs/kernels/_white_noise.py b/src/probnum/randprocs/kernels/_white_noise.py index 55c9222bf..e5220e428 100644 --- a/src/probnum/randprocs/kernels/_white_noise.py +++ b/src/probnum/randprocs/kernels/_white_noise.py @@ -5,7 +5,7 @@ import numpy as np from probnum import utils as _utils -from probnum.typing import IntArgType, ScalarArgType +from probnum.typing import IntLike, ScalarArgType from ._kernel import Kernel @@ -26,7 +26,7 @@ class WhiteNoise(Kernel): Noise level :math:`\sigma`. """ - def __init__(self, input_dim: IntArgType, sigma: ScalarArgType = 1.0): + def __init__(self, input_dim: IntLike, sigma: ScalarArgType = 1.0): self.sigma = _utils.as_numpy_scalar(sigma) self._sigma_sq = self.sigma ** 2 super().__init__(input_dim=input_dim) diff --git a/src/probnum/randprocs/markov/_transition.py b/src/probnum/randprocs/markov/_transition.py index 0135826a4..0dd0d850e 100644 --- a/src/probnum/randprocs/markov/_transition.py +++ b/src/probnum/randprocs/markov/_transition.py @@ -5,7 +5,7 @@ import numpy as np from probnum import randvars -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike class Transition(abc.ABC): @@ -46,7 +46,7 @@ class Transition(abc.ABC): Markov-chains and general discrete-time transitions (likelihoods). 
""" - def __init__(self, input_dim: IntArgType, output_dim: IntArgType): + def __init__(self, input_dim: IntLike, output_dim: IntLike): self.input_dim = input_dim self.output_dim = output_dim diff --git a/src/probnum/randprocs/markov/continuous/_linear_sde.py b/src/probnum/randprocs/markov/continuous/_linear_sde.py index c0594fbe0..e3129263b 100644 --- a/src/probnum/randprocs/markov/continuous/_linear_sde.py +++ b/src/probnum/randprocs/markov/continuous/_linear_sde.py @@ -8,7 +8,7 @@ from probnum import randvars from probnum.randprocs.markov.continuous import _sde -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike from probnum.utils.linalg import tril_to_positive_tril @@ -46,8 +46,8 @@ class LinearSDE(_sde.SDE): def __init__( self, - state_dimension: IntArgType, - wiener_process_dimension: IntArgType, + state_dimension: IntLike, + wiener_process_dimension: IntLike, drift_matrix_function: Callable[[FloatArgType], np.ndarray], force_vector_function: Callable[[FloatArgType], np.ndarray], dispersion_matrix_function: Callable[[FloatArgType], np.ndarray], diff --git a/src/probnum/randprocs/markov/continuous/_sde.py b/src/probnum/randprocs/markov/continuous/_sde.py index d0e71bb09..91b86d938 100644 --- a/src/probnum/randprocs/markov/continuous/_sde.py +++ b/src/probnum/randprocs/markov/continuous/_sde.py @@ -5,7 +5,7 @@ import numpy as np from probnum.randprocs.markov import _transition -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike class SDE(_transition.Transition): @@ -18,8 +18,8 @@ class SDE(_transition.Transition): def __init__( self, - state_dimension: IntArgType, - wiener_process_dimension: IntArgType, + state_dimension: IntLike, + wiener_process_dimension: IntLike, drift_function: Callable[[FloatArgType, np.ndarray], np.ndarray], dispersion_function: Callable[[FloatArgType, np.ndarray], np.ndarray], drift_jacobian: Optional[Callable[[FloatArgType, np.ndarray], np.ndarray]], diff --git a/src/probnum/randprocs/markov/discrete/_linear_gaussian.py b/src/probnum/randprocs/markov/discrete/_linear_gaussian.py index d6c3a3830..5bdcdb362 100644 --- a/src/probnum/randprocs/markov/discrete/_linear_gaussian.py +++ b/src/probnum/randprocs/markov/discrete/_linear_gaussian.py @@ -8,7 +8,7 @@ from probnum import config, linops, randvars from probnum.randprocs.markov.discrete import _nonlinear_gaussian -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike from probnum.utils.linalg import cholesky_update, tril_to_positive_tril @@ -38,8 +38,8 @@ class LinearGaussian(_nonlinear_gaussian.NonlinearGaussian): def __init__( self, - input_dim: IntArgType, - output_dim: IntArgType, + input_dim: IntLike, + output_dim: IntLike, state_trans_mat_fun: Callable[[FloatArgType], np.ndarray], shift_vec_fun: Callable[[FloatArgType], np.ndarray], proc_noise_cov_mat_fun: Callable[[FloatArgType], np.ndarray], diff --git a/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py b/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py index d118861d5..60c49c07f 100644 --- a/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py +++ b/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py @@ -8,7 +8,7 @@ from probnum import randvars from probnum.randprocs.markov import _transition from probnum.randprocs.markov.discrete import _condition_state -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike class 
NonlinearGaussian(_transition.Transition): @@ -43,8 +43,8 @@ class NonlinearGaussian(_transition.Transition): def __init__( self, - input_dim: IntArgType, - output_dim: IntArgType, + input_dim: IntLike, + output_dim: IntLike, state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], proc_noise_cov_mat_fun: Callable[[FloatArgType], np.ndarray], jacob_state_trans_fun: Optional[ @@ -152,8 +152,8 @@ def proc_noise_cov_cholesky_fun(self, t): @classmethod def from_callable( cls, - input_dim: IntArgType, - output_dim: IntArgType, + input_dim: IntLike, + output_dim: IntLike, state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], jacob_state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], ): diff --git a/src/probnum/randprocs/markov/integrator/convert/_convert.py b/src/probnum/randprocs/markov/integrator/convert/_convert.py index d80b0da6d..97fa440df 100644 --- a/src/probnum/randprocs/markov/integrator/convert/_convert.py +++ b/src/probnum/randprocs/markov/integrator/convert/_convert.py @@ -3,11 +3,11 @@ import numpy as np from probnum.randprocs.markov.integrator import _integrator -from probnum.typing import IntArgType +from probnum.typing import IntLike def convert_derivwise_to_coordwise( - state: np.ndarray, num_derivatives: IntArgType, wiener_process_dimension: IntArgType + state: np.ndarray, num_derivatives: IntLike, wiener_process_dimension: IntLike ) -> np.ndarray: """Convert coordinate-wise representation to derivative-wise representation. @@ -29,7 +29,7 @@ def convert_derivwise_to_coordwise( def convert_coordwise_to_derivwise( - state: np.ndarray, num_derivatives: IntArgType, wiener_process_dimension: IntArgType + state: np.ndarray, num_derivatives: IntLike, wiener_process_dimension: IntLike ) -> np.ndarray: """Convert coordinate-wise representation to derivative-wise representation. diff --git a/src/probnum/typing.py b/src/probnum/typing.py index c7592f58a..379f31683 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -31,10 +31,16 @@ # Argument Types ######################################################################################## -IntArgType = Union[int, numbers.Integral, np.integer] +# Python Numbers +IntLike = Union[int, numbers.Integral, np.integer] +"""Type of a public API argument for supplying an integer. + +Values of this type should always be converted into :class:`int`\\ s before further +internal processing.""" + FloatArgType = Union[float, numbers.Real, np.floating] -ShapeArgType = Union[IntArgType, Iterable[IntArgType]] +ShapeArgType = Union[IntLike, Iterable[IntLike]] """Type of a public API argument for supplying a shape. 
Values of this type should always be converted into :class:`ShapeType` using the function :func:`probnum.utils.as_shape` before further internal processing.""" diff --git a/tests/test_quad/util.py b/tests/test_quad/util.py index 3b9a7af6c..5e02e85a8 100644 --- a/tests/test_quad/util.py +++ b/tests/test_quad/util.py @@ -5,13 +5,13 @@ from scipy.linalg import sqrtm from scipy.special import roots_legendre -from probnum.typing import FloatArgType, IntArgType +from probnum.typing import FloatArgType, IntLike # Auxiliary functions for quadrature tests def gauss_hermite_tensor( - n_points: IntArgType, - input_dim: IntArgType, + n_points: IntLike, + input_dim: IntLike, mean: Union[np.ndarray, FloatArgType], cov: Union[np.ndarray, FloatArgType], ): @@ -31,8 +31,8 @@ def gauss_hermite_tensor( def gauss_legendre_tensor( - n_points: IntArgType, - input_dim: IntArgType, + n_points: IntLike, + input_dim: IntLike, domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]], normalized: Optional[bool] = False, ): From 6c9690a9cefbb4e2be3aa65461b7939c0f2b205c Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:32:55 +0100 Subject: [PATCH 02/15] Rename `FloatArgType` -> `FloatLike` --- .../implementing_a_probnum_method.ipynb | 56 +++++++++---------- .../quadopt_example/_probsolve_qp.py | 8 +-- .../quadopt_example/belief_updates.py | 6 +- .../quadopt_example/observation_operators.py | 4 +- .../development/quadopt_example/policies.py | 6 +- .../probabilistic_quadratic_optimizer.py | 20 +++---- .../quadopt_example/stopping_criteria.py | 16 +++--- docs/source/development/styleguide.md | 2 +- src/probnum/diffeq/_odesolution.py | 4 +- src/probnum/diffeq/_odesolver.py | 6 +- .../diffeq/odefilter/_odefilter_solution.py | 4 +- .../_information_operator.py | 14 ++--- .../information_operators/_ode_residual.py | 10 ++-- .../initialization_routines/_runge_kutta.py | 4 +- .../diffeq/odefilter/utils/_problem_utils.py | 4 +- .../scipy_wrapper/_wrapped_scipy_solver.py | 4 +- .../perturbed/step/_perturbation_functions.py | 10 ++-- .../perturbed/step/_perturbedstepsolution.py | 8 +-- .../perturbed/step/_perturbedstepsolver.py | 10 ++-- src/probnum/diffeq/stepsize/_steprule.py | 34 +++++------ .../filtsmooth/_timeseriesposterior.py | 10 ++-- .../filtsmooth/gaussian/_kalmanposterior.py | 12 ++-- .../gaussian/approx/_unscentedkalman.py | 24 ++++---- .../filtsmooth/particle/_particle_filter.py | 4 +- .../particle/_particle_filter_posterior.py | 4 +- .../_solution_based_proj_rhs_belief_update.py | 4 +- src/probnum/problems/_problems.py | 8 +-- .../zoo/filtsmooth/_filtsmooth_problems.py | 36 ++++++------ src/probnum/quad/_bayesquad.py | 10 ++-- src/probnum/quad/_integration_measures.py | 10 ++-- .../quad/solvers/bayesian_quadrature.py | 8 +-- .../_integral_variance_tol.py | 4 +- .../stopping_criteria/_rel_mean_change.py | 4 +- src/probnum/randprocs/markov/_transition.py | 6 +- .../markov/continuous/_diffusions.py | 8 +-- .../markov/continuous/_linear_sde.py | 12 ++-- .../randprocs/markov/continuous/_sde.py | 8 +-- .../markov/discrete/_linear_gaussian.py | 12 ++-- .../markov/discrete/_nonlinear_gaussian.py | 16 +++--- src/probnum/randvars/_normal.py | 16 +++--- src/probnum/randvars/_random_variable.py | 10 ++-- src/probnum/typing.py | 10 +++- tests/test_quad/util.py | 8 +-- 43 files changed, 237 insertions(+), 237 deletions(-) diff --git a/docs/source/development/implementing_a_probnum_method.ipynb b/docs/source/development/implementing_a_probnum_method.ipynb index 0ec7d5118..3a6b50f9b 
100644 --- a/docs/source/development/implementing_a_probnum_method.ipynb +++ b/docs/source/development/implementing_a_probnum_method.ipynb @@ -51,7 +51,7 @@ "source": [ "### Method `probsolve_qp`\n", "\n", - "We will now take a closer look at the interface of our 1D noisy quadratic optimization method. At a basic level `probsolve_qp` takes a function of the type `Callable[[FloatArgType], FloatArgType]`. This hints that the optimization objective is a 1D function. Our prior knowledge about the parameters $(a,b,c)$ is encoded in the random variable `fun_params0`. However, we want to also give a user the option to not specify any prior knowledge or just a guess about the parameter values, hence this argument is optional or can be an `np.ndarray`. \n", + "We will now take a closer look at the interface of our 1D noisy quadratic optimization method. At a basic level `probsolve_qp` takes a function of the type `Callable[[FloatLike], FloatLike]`. This hints that the optimization objective is a 1D function. Our prior knowledge about the parameters $(a,b,c)$ is encoded in the random variable `fun_params0`. However, we want to also give a user the option to not specify any prior knowledge or just a guess about the parameter values, hence this argument is optional or can be an `np.ndarray`. \n", "\n", "The interface also has an `assume_fun` argument, which allows specification of the variant of the probabilistic numerical method to use based on the assumptions about the problem. For convenience, this can be inferred from the problem itself. The actual implementation of the PN method variant which is initialized in a modular fashion is separate from the interface and will be explained later. Finally, the actual optimization routine is called and the result is returned." 
] @@ -67,7 +67,7 @@ "\n", "import probnum as pn\n", "from probnum import randvars, linops\n", - "from probnum.typing import FloatArgType, IntLike\n", + "from probnum.typing import FloatLike, IntLike\n", "\n", "rng = np.random.default_rng(seed=123)" ] @@ -83,14 +83,14 @@ "# %load -s probsolve_qp quadopt_example/_probsolve_qp\n", "def probsolve_qp(\n", " rng: np.random.Generator,\n", - " fun: Callable[[FloatArgType], FloatArgType],\n", + " fun: Callable[[FloatLike], FloatLike],\n", " fun_params0: Optional[Union[np.ndarray, randvars.RandomVariable]] = None,\n", " assume_fun: Optional[str] = None,\n", - " tol: FloatArgType = 10 ** -5,\n", + " tol: FloatLike = 10 ** -5,\n", " maxiter: IntLike = 10 ** 4,\n", " noise_cov: Optional[Union[np.ndarray, linops.LinearOperator]] = None,\n", " callback: Optional[\n", - " Callable[[FloatArgType, FloatArgType, randvars.RandomVariable], None]\n", + " Callable[[FloatLike, FloatLike, randvars.RandomVariable], None]\n", " ] = None,\n", ") -> Tuple[float, randvars.RandomVariable, randvars.RandomVariable, Dict]:\n", " \"\"\"Probabilistic 1D Quadratic Optimization.\n", @@ -316,24 +316,24 @@ "# Type aliases for quadratic optimization\n", "QuadOptPolicyType = Callable[\n", " [\n", - " Callable[[FloatArgType], FloatArgType],\n", + " Callable[[FloatLike], FloatLike],\n", " randvars.RandomVariable,\n", " ],\n", - " FloatArgType,\n", + " FloatLike,\n", "]\n", "QuadOptObservationOperatorType = Callable[\n", - " [Callable[[FloatArgType], FloatArgType], FloatArgType], FloatArgType\n", + " [Callable[[FloatLike], FloatLike], FloatLike], FloatLike\n", "]\n", "QuadOptBeliefUpdateType = Callable[\n", " [\n", " randvars.RandomVariable,\n", - " FloatArgType,\n", - " FloatArgType,\n", + " FloatLike,\n", + " FloatLike,\n", " ],\n", " randvars.RandomVariable,\n", "]\n", "QuadOptStoppingCriterionType = Callable[\n", - " [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntLike],\n", + " [Callable[[FloatLike], FloatLike], randvars.RandomVariable, IntLike],\n", " Tuple[bool, Union[str, None]],\n", "]\n", "\n", @@ -430,7 +430,7 @@ " self.stopping_criteria = stopping_criteria\n", "\n", " def has_converged(\n", - " self, fun: Callable[[FloatArgType], FloatArgType], iteration: IntLike\n", + " self, fun: Callable[[FloatLike], FloatLike], iteration: IntLike\n", " ) -> Tuple[bool, Union[str, None]]:\n", " \"\"\"Check whether the optimizer has converged.\n", "\n", @@ -451,7 +451,7 @@ "\n", " def optim_iterator(\n", " self,\n", - " fun: Callable[[FloatArgType], FloatArgType],\n", + " fun: Callable[[FloatLike], FloatLike],\n", " ) -> Tuple[float, float, randvars.RandomVariable]:\n", " \"\"\"Generator implementing the optimization iteration.\n", "\n", @@ -486,7 +486,7 @@ "\n", " def optimize(\n", " self,\n", - " fun: Callable[[FloatArgType], FloatArgType],\n", + " fun: Callable[[FloatLike], FloatLike],\n", " callback: Optional[\n", " Callable[[float, float, randvars.RandomVariable], None]\n", " ] = None,\n", @@ -585,7 +585,7 @@ "\n", "```python\n", "IntLike = Union[int, numbers.Integral, np.integer]\n", - "FloatArgType = Union[float, numbers.Real, np.floating]\n", + "FloatLike = Union[float, numbers.Real, np.floating]\n", "\n", "ShapeArgType = Union[IntLike, Iterable[IntLike]]\n", "\"\"\"Type of a public API argument for supplying a shape. 
Values of this type should\n", @@ -674,7 +674,7 @@ "source": [ "# %load -s explore_exploit_policy quadopt_example/policies\n", "def explore_exploit_policy(\n", - " fun: Callable[[FloatArgType], FloatArgType],\n", + " fun: Callable[[FloatLike], FloatLike],\n", " fun_params0: randvars.RandomVariable,\n", " rng: np.random.Generator,\n", ") -> float:\n", @@ -704,16 +704,16 @@ "```python\n", "QuadOptPolicyType = Callable[\n", " [\n", - " Callable[[FloatArgType], FloatArgType],\n", + " Callable[[FloatLike], FloatLike],\n", " randvars.RandomVariable\n", " ],\n", - " FloatArgType,\n", + " FloatLike,\n", "]\n", "```\n", "The observation process for this problem is very simple. It just evaluates the objective function. \n", "```python\n", "QuadOptObservationOperatorType = Callable[\n", - " [Callable[[FloatArgType], FloatArgType], FloatArgType], FloatArgType\n", + " [Callable[[FloatLike], FloatLike], FloatLike], FloatLike\n", "]\n", "```\n", "One can imagine a different probabilistic optimization method which evaluates the gradient as well. In this case the different observation processes would all get the function, its gradient and an evaluation point / action as arguments." @@ -727,7 +727,7 @@ "source": [ "# %load -s function_evaluation quadopt_example/observation_operators\n", "def function_evaluation(\n", - " fun: Callable[[FloatArgType], FloatArgType], action: FloatArgType\n", + " fun: Callable[[FloatLike], FloatLike], action: FloatLike\n", ") -> np.float_:\n", " \"\"\"Observe a (noisy) function evaluation of the quadratic objective.\n", "\n", @@ -758,8 +758,8 @@ "QuadOptBeliefUpdateType = Callable[\n", " [\n", " randvars.RandomVariable,\n", - " FloatArgType,\n", - " FloatArgType,\n", + " FloatLike,\n", + " FloatLike,\n", " ],\n", " randvars.RandomVariable,\n", "]\n", @@ -776,8 +776,8 @@ "# %load -s gaussian_belief_update quadopt_example/belief_updates\n", "def gaussian_belief_update(\n", " fun_params0: randvars.RandomVariable,\n", - " action: FloatArgType,\n", - " observation: FloatArgType,\n", + " action: FloatLike,\n", + " observation: FloatLike,\n", " noise_cov: Union[np.ndarray, linops.LinearOperator],\n", ") -> randvars.RandomVariable:\n", " \"\"\"Update the belief over the parameters with an observation.\n", @@ -823,7 +823,7 @@ "The stopping criteria are also implemented as simple methods, which return a `bool` determining convergence and a string giving the name of the criterion.\n", "```python\n", "QuadOptStoppingCriterionType = Callable[\n", - " [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntLike],\n", + " [Callable[[FloatLike], FloatLike], randvars.RandomVariable, IntLike],\n", " Tuple[bool, Union[str, None]],\n", "]\n", "```\n", @@ -838,11 +838,11 @@ "source": [ "# %load -s parameter_uncertainty quadopt_example/stopping_criteria\n", "def parameter_uncertainty(\n", - " fun: Callable[[FloatArgType], FloatArgType],\n", + " fun: Callable[[FloatLike], FloatLike],\n", " fun_params0: randvars.RandomVariable,\n", " current_iter: IntLike,\n", - " abstol: FloatArgType,\n", - " reltol: FloatArgType,\n", + " abstol: FloatLike,\n", + " reltol: FloatLike,\n", ") -> Tuple[bool, Union[str, None]]:\n", " \"\"\"Termination based on numerical uncertainty about the parameters.\n", "\n", diff --git a/docs/source/development/quadopt_example/_probsolve_qp.py b/docs/source/development/quadopt_example/_probsolve_qp.py index fbd64a76f..0ea7e6938 100644 --- a/docs/source/development/quadopt_example/_probsolve_qp.py +++ b/docs/source/development/quadopt_example/_probsolve_qp.py @@ -6,7 
+6,7 @@ import probnum as pn import probnum.utils as _utils from probnum import linops, randvars -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike from .belief_updates import gaussian_belief_update from .observation_operators import function_evaluation @@ -17,14 +17,14 @@ def probsolve_qp( rng: np.random.Generator, - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], fun_params0: Optional[Union[np.ndarray, randvars.RandomVariable]] = None, assume_fun: Optional[str] = None, - tol: FloatArgType = 10 ** -5, + tol: FloatLike = 10 ** -5, maxiter: IntLike = 10 ** 4, noise_cov: Optional[Union[np.ndarray, linops.LinearOperator]] = None, callback: Optional[ - Callable[[FloatArgType, FloatArgType, randvars.RandomVariable], None] + Callable[[FloatLike, FloatLike, randvars.RandomVariable], None] ] = None, ) -> Tuple[float, randvars.RandomVariable, randvars.RandomVariable, Dict]: """Probabilistic 1D Quadratic Optimization. diff --git a/docs/source/development/quadopt_example/belief_updates.py b/docs/source/development/quadopt_example/belief_updates.py index 1071f0392..d2572ea26 100644 --- a/docs/source/development/quadopt_example/belief_updates.py +++ b/docs/source/development/quadopt_example/belief_updates.py @@ -7,13 +7,13 @@ import probnum as pn from probnum import linops, randvars -from probnum.typing import FloatArgType +from probnum.typing import FloatLike def gaussian_belief_update( fun_params0: randvars.RandomVariable, - action: FloatArgType, - observation: FloatArgType, + action: FloatLike, + observation: FloatLike, noise_cov: Union[np.ndarray, linops.LinearOperator], ) -> randvars.RandomVariable: """Update the belief over the parameters with an observation. diff --git a/docs/source/development/quadopt_example/observation_operators.py b/docs/source/development/quadopt_example/observation_operators.py index 74b684cee..a08e25cf4 100644 --- a/docs/source/development/quadopt_example/observation_operators.py +++ b/docs/source/development/quadopt_example/observation_operators.py @@ -5,11 +5,11 @@ import numpy as np from probnum import utils -from probnum.typing import FloatArgType +from probnum.typing import FloatLike def function_evaluation( - fun: Callable[[FloatArgType], FloatArgType], action: FloatArgType + fun: Callable[[FloatLike], FloatLike], action: FloatLike ) -> np.float_: """Observe a (noisy) function evaluation of the quadratic objective. 
diff --git a/docs/source/development/quadopt_example/policies.py b/docs/source/development/quadopt_example/policies.py index 2b1b2621e..45e95adbe 100644 --- a/docs/source/development/quadopt_example/policies.py +++ b/docs/source/development/quadopt_example/policies.py @@ -5,11 +5,11 @@ import numpy as np from probnum import randvars -from probnum.typing import FloatArgType +from probnum.typing import FloatLike def explore_exploit_policy( - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], fun_params0: randvars.RandomVariable, rng: np.random.Generator, ) -> float: @@ -31,7 +31,7 @@ def explore_exploit_policy( def stochastic_policy( - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], fun_params0: randvars.RandomVariable, rng: np.random.Generator, ) -> float: diff --git a/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py b/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py index 412750940..20d76fad4 100644 --- a/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py +++ b/docs/source/development/quadopt_example/probabilistic_quadratic_optimizer.py @@ -7,7 +7,7 @@ import probnum as pn import probnum.utils as _utils from probnum import linops, randvars -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike from .belief_updates import gaussian_belief_update from .observation_operators import function_evaluation @@ -17,24 +17,24 @@ # Type aliases for quadratic optimization QuadOptPolicyType = Callable[ [ - Callable[[FloatArgType], FloatArgType], + Callable[[FloatLike], FloatLike], randvars.RandomVariable, ], - FloatArgType, + FloatLike, ] QuadOptObservationOperatorType = Callable[ - [Callable[[FloatArgType], FloatArgType], FloatArgType], FloatArgType + [Callable[[FloatLike], FloatLike], FloatLike], FloatLike ] QuadOptBeliefUpdateType = Callable[ [ randvars.RandomVariable, - FloatArgType, - FloatArgType, + FloatLike, + FloatLike, ], randvars.RandomVariable, ] QuadOptStoppingCriterionType = Callable[ - [Callable[[FloatArgType], FloatArgType], randvars.RandomVariable, IntLike], + [Callable[[FloatLike], FloatLike], randvars.RandomVariable, IntLike], Tuple[bool, Union[str, None]], ] @@ -131,7 +131,7 @@ def __init__( self.stopping_criteria = stopping_criteria def has_converged( - self, fun: Callable[[FloatArgType], FloatArgType], iteration: IntLike + self, fun: Callable[[FloatLike], FloatLike], iteration: IntLike ) -> Tuple[bool, Union[str, None]]: """Check whether the optimizer has converged. @@ -152,7 +152,7 @@ def has_converged( def optim_iterator( self, - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], ) -> Tuple[float, float, randvars.RandomVariable]: """Generator implementing the optimization iteration. 
@@ -187,7 +187,7 @@ def optim_iterator( def optimize( self, - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], callback: Optional[ Callable[[float, float, randvars.RandomVariable], None] ] = None, diff --git a/docs/source/development/quadopt_example/stopping_criteria.py b/docs/source/development/quadopt_example/stopping_criteria.py index c92176024..dad3bfc04 100644 --- a/docs/source/development/quadopt_example/stopping_criteria.py +++ b/docs/source/development/quadopt_example/stopping_criteria.py @@ -5,15 +5,15 @@ import numpy as np from probnum import randvars -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike def parameter_uncertainty( - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], fun_params0: randvars.RandomVariable, current_iter: IntLike, - abstol: FloatArgType, - reltol: FloatArgType, + abstol: FloatLike, + reltol: FloatLike, ) -> Tuple[bool, Union[str, None]]: """Termination based on numerical uncertainty about the parameters. @@ -41,7 +41,7 @@ def parameter_uncertainty( def maximum_iterations( - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], fun_params0: randvars.RandomVariable, current_iter: IntLike, maxiter: IntLike, @@ -66,11 +66,11 @@ def maximum_iterations( def residual( - fun: Callable[[FloatArgType], FloatArgType], + fun: Callable[[FloatLike], FloatLike], fun_params0: randvars.RandomVariable, current_iter: IntLike, - abstol: FloatArgType, - reltol: FloatArgType, + abstol: FloatLike, + reltol: FloatLike, ) -> Tuple[bool, Union[str, None]]: """Termination based on the residual. diff --git a/docs/source/development/styleguide.md b/docs/source/development/styleguide.md index c5eb6e09c..9ec31cecd 100644 --- a/docs/source/development/styleguide.md +++ b/docs/source/development/styleguide.md @@ -41,7 +41,7 @@ An exception from these rules are type-related modules, which include `typing` a Types are always imported directly. - `from typing import Optional, Callable` -- `from probnum.typing import FloatArgType` +- `from probnum.typing import FloatLike` Please do not abbreviate import paths unnecessarily. 
We do **not** use the following imports: - `import probnum.random_variables as pnrv` or `import probnum.filtsmooth as pnfs` (correct would be `from probnum import randvars, filtsmooth`) diff --git a/src/probnum/diffeq/_odesolution.py b/src/probnum/diffeq/_odesolution.py index 4200c3327..cc1992a38 100644 --- a/src/probnum/diffeq/_odesolution.py +++ b/src/probnum/diffeq/_odesolution.py @@ -11,7 +11,7 @@ from probnum import filtsmooth, randvars from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatArgType, IntLike, ShapeArgType +from probnum.typing import FloatLike, IntLike, ShapeArgType class ODESolution(filtsmooth.TimeSeriesPosterior): @@ -43,7 +43,7 @@ def __init__( def interpolate( self, - t: FloatArgType, + t: FloatLike, previous_index: Optional[IntLike] = None, next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: diff --git a/src/probnum/diffeq/_odesolver.py b/src/probnum/diffeq/_odesolver.py index d32fe1837..777430dbd 100644 --- a/src/probnum/diffeq/_odesolver.py +++ b/src/probnum/diffeq/_odesolver.py @@ -8,7 +8,7 @@ from probnum import problems from probnum.diffeq import callbacks -from probnum.typing import FloatArgType +from probnum.typing import FloatLike CallbackType = Union[callbacks.ODESolverCallback, Iterable[callbacks.ODESolverCallback]] """Callback interface type.""" @@ -29,7 +29,7 @@ def __init__( def solve( self, ivp: problems.InitialValueProblem, - stop_at: Iterable[FloatArgType] = None, + stop_at: Iterable[FloatLike] = None, callbacks: Optional[CallbackType] = None, ): """Solve an IVP. @@ -54,7 +54,7 @@ def solve( def solution_generator( self, ivp: problems.InitialValueProblem, - stop_at: Iterable[FloatArgType] = None, + stop_at: Iterable[FloatLike] = None, callbacks: Optional[CallbackType] = None, ): """Generate ODE solver steps.""" diff --git a/src/probnum/diffeq/odefilter/_odefilter_solution.py b/src/probnum/diffeq/odefilter/_odefilter_solution.py index bd186fd24..447ff4fe9 100644 --- a/src/probnum/diffeq/odefilter/_odefilter_solution.py +++ b/src/probnum/diffeq/odefilter/_odefilter_solution.py @@ -7,7 +7,7 @@ from probnum import filtsmooth, randvars, utils from probnum.diffeq import _odesolution from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatArgType, IntLike, ShapeArgType +from probnum.typing import FloatLike, IntLike, ShapeArgType class ODEFilterSolution(_odesolution.ODESolution): @@ -90,7 +90,7 @@ def __init__(self, kalman_posterior: filtsmooth.gaussian.KalmanPosterior): def interpolate( self, - t: FloatArgType, + t: FloatLike, previous_index: Optional[IntLike] = None, next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: diff --git a/src/probnum/diffeq/odefilter/information_operators/_information_operator.py b/src/probnum/diffeq/odefilter/information_operators/_information_operator.py index d5da0ee19..33e11d893 100644 --- a/src/probnum/diffeq/odefilter/information_operators/_information_operator.py +++ b/src/probnum/diffeq/odefilter/information_operators/_information_operator.py @@ -6,7 +6,7 @@ import numpy as np from probnum import problems, randprocs -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike __all__ = ["InformationOperator", "ODEInformationOperator"] @@ -41,17 +41,17 @@ def __init__(self, input_dim: IntLike, output_dim: IntLike): self.output_dim = output_dim @abc.abstractmethod - def __call__(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: + def 
__call__(self, t: FloatLike, x: np.ndarray) -> np.ndarray: raise NotImplementedError - def jacobian(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: + def jacobian(self, t: FloatLike, x: np.ndarray) -> np.ndarray: raise NotImplementedError def as_transition( self, - measurement_cov_fun: Optional[Callable[[FloatArgType], np.ndarray]] = None, + measurement_cov_fun: Optional[Callable[[FloatLike], np.ndarray]] = None, measurement_cov_cholesky_fun: Optional[ - Callable[[FloatArgType], np.ndarray] + Callable[[FloatLike], np.ndarray] ] = None, ): @@ -103,9 +103,9 @@ def ode_has_been_incorporated(self) -> bool: def as_transition( self, - measurement_cov_fun: Optional[Callable[[FloatArgType], np.ndarray]] = None, + measurement_cov_fun: Optional[Callable[[FloatLike], np.ndarray]] = None, measurement_cov_cholesky_fun: Optional[ - Callable[[FloatArgType], np.ndarray] + Callable[[FloatLike], np.ndarray] ] = None, ): if not self.ode_has_been_incorporated: diff --git a/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py b/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py index 5875cac1f..c77b65b9f 100644 --- a/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py +++ b/src/probnum/diffeq/odefilter/information_operators/_ode_residual.py @@ -6,7 +6,7 @@ from probnum import problems, randprocs from probnum.diffeq.odefilter.information_operators import _information_operator -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike __all__ = ["ODEResidual"] @@ -56,20 +56,20 @@ def _match_residual_and_jacobian_to_ode_order( } return choose_implementation[ode_order] - def __call__(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: + def __call__(self, t: FloatLike, x: np.ndarray) -> np.ndarray: return self._residual(t, x) - def jacobian(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: + def jacobian(self, t: FloatLike, x: np.ndarray) -> np.ndarray: return self._residual_jacobian(t, x) # Implementation of different residuals - def _residual_first_order_ode(self, t: FloatArgType, x: np.ndarray) -> np.ndarray: + def _residual_first_order_ode(self, t: FloatLike, x: np.ndarray) -> np.ndarray: h0, h1 = self.projection_matrices return h1 @ x - self.ode.f(t, h0 @ x) def _residual_first_order_ode_jacobian( - self, t: FloatArgType, x: np.ndarray + self, t: FloatLike, x: np.ndarray ) -> np.ndarray: h0, h1 = self.projection_matrices return h1 - self.ode.df(t, h0 @ x) @ h0 diff --git a/src/probnum/diffeq/odefilter/initialization_routines/_runge_kutta.py b/src/probnum/diffeq/odefilter/initialization_routines/_runge_kutta.py index f3d92a5a5..cd30fa4eb 100644 --- a/src/probnum/diffeq/odefilter/initialization_routines/_runge_kutta.py +++ b/src/probnum/diffeq/odefilter/initialization_routines/_runge_kutta.py @@ -8,7 +8,7 @@ from probnum import filtsmooth, problems, randprocs, randvars from probnum.diffeq.odefilter.initialization_routines import _initialization_routine -from probnum.typing import FloatArgType +from probnum.typing import FloatLike class RungeKuttaInitialization(_initialization_routine.InitializationRoutine): @@ -52,7 +52,7 @@ class RungeKuttaInitialization(_initialization_routine.InitializationRoutine): """ def __init__( - self, dt: Optional[FloatArgType] = 1e-2, method: Optional[str] = "DOP853" + self, dt: Optional[FloatLike] = 1e-2, method: Optional[str] = "DOP853" ): self.dt = dt self.method = method diff --git a/src/probnum/diffeq/odefilter/utils/_problem_utils.py 
b/src/probnum/diffeq/odefilter/utils/_problem_utils.py index d81f54aa0..00fde3c54 100644 --- a/src/probnum/diffeq/odefilter/utils/_problem_utils.py +++ b/src/probnum/diffeq/odefilter/utils/_problem_utils.py @@ -6,7 +6,7 @@ from probnum import problems, randprocs from probnum.diffeq.odefilter import approx_strategies, information_operators -from probnum.typing import FloatArgType +from probnum.typing import FloatLike __all__ = ["ivp_to_regression_problem"] @@ -19,7 +19,7 @@ def ivp_to_regression_problem( locations: Union[Sequence, np.ndarray], ode_information_operator: information_operators.InformationOperator, approx_strategy: Optional[approx_strategies.ApproximationStrategy] = None, - ode_measurement_variance: Optional[FloatArgType] = 0.0, + ode_measurement_variance: Optional[FloatLike] = 0.0, exclude_initial_condition=False, ): """Transform an initial value problem into a regression problem. diff --git a/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_solver.py b/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_solver.py index d92c2347a..32a447212 100644 --- a/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_solver.py +++ b/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_solver.py @@ -10,7 +10,7 @@ from probnum import randvars from probnum.diffeq import _odesolver, _odesolver_state from probnum.diffeq.perturbed.scipy_wrapper import _wrapped_scipy_odesolution -from probnum.typing import FloatArgType +from probnum.typing import FloatLike class WrappedScipyRungeKutta(_odesolver.ODESolver): @@ -62,7 +62,7 @@ def initialize(self, ivp): ) return state - def attempt_step(self, state: _odesolver_state.ODESolverState, dt: FloatArgType): + def attempt_step(self, state: _odesolver_state.ODESolverState, dt: FloatLike): """Perform one ODE-step from start to stop and set variables to the corresponding values. diff --git a/src/probnum/diffeq/perturbed/step/_perturbation_functions.py b/src/probnum/diffeq/perturbed/step/_perturbation_functions.py index ce31d3ca2..d28aa5d22 100644 --- a/src/probnum/diffeq/perturbed/step/_perturbation_functions.py +++ b/src/probnum/diffeq/perturbed/step/_perturbation_functions.py @@ -4,14 +4,14 @@ import numpy as np import scipy -from probnum.typing import FloatArgType, IntLike, ShapeArgType +from probnum.typing import FloatLike, IntLike, ShapeArgType def perturb_uniform( rng: np.random.Generator, - step: FloatArgType, + step: FloatLike, solver_order: IntLike, - noise_scale: FloatArgType, + noise_scale: FloatLike, size: Optional[ShapeArgType] = (), ) -> Union[float, np.ndarray]: """Perturb the step with uniformly distributed noise. @@ -50,9 +50,9 @@ def perturb_uniform( def perturb_lognormal( rng: np.random.Generator, - step: FloatArgType, + step: FloatLike, solver_order: IntLike, - noise_scale: FloatArgType, + noise_scale: FloatLike, size: Optional[ShapeArgType] = (), ) -> Union[float, np.ndarray]: """Perturb the step with log-normally distributed noise. 
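A quick illustrative sketch of what the renamed argument types accept once these patches are applied (the `halve` helper below is hypothetical and not part of ProbNum): built-in scalars, `numbers` ABC instances, and NumPy scalar types are all valid `FloatLike`/`IntLike` values, and are meant to be converted to built-ins before further internal processing.

```python
import numpy as np

from probnum.typing import FloatLike, IntLike


def halve(x: FloatLike, times: IntLike) -> float:
    # `FloatLike`/`IntLike` cover built-in scalars, `numbers.Real`/`Integral`
    # instances and NumPy scalar types; convert to built-ins before use.
    return float(x) / (2 ** int(times))


halve(np.float32(1.0), np.int64(2))  # -> 0.25
```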
diff --git a/src/probnum/diffeq/perturbed/step/_perturbedstepsolution.py b/src/probnum/diffeq/perturbed/step/_perturbedstepsolution.py index 3febdbed2..bfe0fd22f 100644 --- a/src/probnum/diffeq/perturbed/step/_perturbedstepsolution.py +++ b/src/probnum/diffeq/perturbed/step/_perturbedstepsolution.py @@ -7,7 +7,7 @@ from probnum import randvars from probnum.diffeq import _odesolution -from probnum.typing import FloatArgType +from probnum.typing import FloatLike class PerturbedStepSolution(_odesolution.ODESolution): @@ -26,9 +26,9 @@ def __init__( def interpolate( self, - t: FloatArgType, - previous_index: Optional[FloatArgType] = None, - next_index: Optional[FloatArgType] = None, + t: FloatLike, + previous_index: Optional[FloatLike] = None, + next_index: Optional[FloatLike] = None, ): # For the first state, no interpolation has to be performed. if t == self.locations[0]: diff --git a/src/probnum/diffeq/perturbed/step/_perturbedstepsolver.py b/src/probnum/diffeq/perturbed/step/_perturbedstepsolver.py index feb5634bc..4ff9447da 100644 --- a/src/probnum/diffeq/perturbed/step/_perturbedstepsolver.py +++ b/src/probnum/diffeq/perturbed/step/_perturbedstepsolver.py @@ -11,7 +11,7 @@ _perturbation_functions, _perturbedstepsolution, ) -from probnum.typing import FloatArgType +from probnum.typing import FloatLike class PerturbedStepSolver(_odesolver.ODESolver): @@ -44,7 +44,7 @@ def __init__( self, rng: np.random.Generator, solver: scipy_wrapper.WrappedScipyRungeKutta, - noise_scale: FloatArgType, + noise_scale: FloatLike, perturb_function: Callable, ): def perturb_step(rng, step): @@ -67,7 +67,7 @@ def construct_with_lognormal_perturbation( cls, rng: np.random.Generator, solver: scipy_wrapper.WrappedScipyRungeKutta, - noise_scale: FloatArgType, + noise_scale: FloatLike, ): pertfun = _perturbation_functions.perturb_lognormal return cls( @@ -82,7 +82,7 @@ def construct_with_uniform_perturbation( cls, rng: np.random.Generator, solver: scipy_wrapper.WrappedScipyRungeKutta, - noise_scale: FloatArgType, + noise_scale: FloatLike, ): pertfun = _perturbation_functions.perturb_uniform return cls( @@ -97,7 +97,7 @@ def initialize(self, ivp): self.scales = [] return self.solver.initialize(ivp) - def attempt_step(self, state: _odesolver_state.ODESolverState, dt: FloatArgType): + def attempt_step(self, state: _odesolver_state.ODESolverState, dt: FloatLike): """Perturb the original stopping point. Perform one perturbed step and project the solution back to the original diff --git a/src/probnum/diffeq/stepsize/_steprule.py b/src/probnum/diffeq/stepsize/_steprule.py index 6f770eaca..605a5bca7 100644 --- a/src/probnum/diffeq/stepsize/_steprule.py +++ b/src/probnum/diffeq/stepsize/_steprule.py @@ -5,27 +5,27 @@ import numpy as np -from probnum.typing import FloatArgType, IntLike, ToleranceDiffusionType +from probnum.typing import FloatLike, IntLike, ToleranceDiffusionType class StepRule(ABC): """Step-size selection rules for ODE solvers.""" - def __init__(self, firststep: FloatArgType): + def __init__(self, firststep: FloatLike): self.firststep = firststep @abstractmethod def suggest( self, - laststep: FloatArgType, - scaled_error: FloatArgType, + laststep: FloatLike, + scaled_error: FloatLike, localconvrate: Optional[IntLike] = None, ): """Suggest a new step h_{n+1} given error estimate e_n at step h_n.""" raise NotImplementedError @abstractmethod - def is_accepted(self, scaled_error: FloatArgType): + def is_accepted(self, scaled_error: FloatLike): """Check if the proposed step should be accepted or not. 
Variable "proposedstep" not used yet, but may be important in @@ -50,19 +50,19 @@ def errorest_to_norm( class ConstantSteps(StepRule): """Constant step-sizes.""" - def __init__(self, stepsize: FloatArgType): + def __init__(self, stepsize: FloatLike): self.step = stepsize super().__init__(firststep=stepsize) def suggest( self, - laststep: FloatArgType, - scaled_error: FloatArgType, + laststep: FloatLike, + scaled_error: FloatLike, localconvrate: Optional[IntLike] = None, ): return self.step - def is_accepted(self, scaled_error: FloatArgType): + def is_accepted(self, scaled_error: FloatLike): """Always True.""" return True @@ -92,13 +92,13 @@ class AdaptiveSteps(StepRule): def __init__( self, - firststep: FloatArgType, + firststep: FloatLike, atol: ToleranceDiffusionType, rtol: ToleranceDiffusionType, - limitchange: Optional[Tuple[FloatArgType]] = (0.2, 10.0), - safetyscale: Optional[FloatArgType] = 0.95, - minstep: Optional[FloatArgType] = 1e-15, - maxstep: Optional[FloatArgType] = 1e15, + limitchange: Optional[Tuple[FloatLike]] = (0.2, 10.0), + safetyscale: Optional[FloatLike] = 0.95, + minstep: Optional[FloatLike] = 1e-15, + maxstep: Optional[FloatLike] = 1e15, ): self.safetyscale = safetyscale self.limitchange = limitchange @@ -110,8 +110,8 @@ def __init__( def suggest( self, - laststep: FloatArgType, - scaled_error: FloatArgType, + laststep: FloatLike, + scaled_error: FloatLike, localconvrate: Optional[IntLike] = None, ): small, large = self.limitchange @@ -133,7 +133,7 @@ def suggest( raise RuntimeError("Step-size larger than maximum step-size") return step - def is_accepted(self, scaled_error: FloatArgType): + def is_accepted(self, scaled_error: FloatLike): return scaled_error < 1 def errorest_to_norm( diff --git a/src/probnum/filtsmooth/_timeseriesposterior.py b/src/probnum/filtsmooth/_timeseriesposterior.py index 261f07694..fe943b38e 100644 --- a/src/probnum/filtsmooth/_timeseriesposterior.py +++ b/src/probnum/filtsmooth/_timeseriesposterior.py @@ -9,7 +9,7 @@ from probnum.typing import ( ArrayLikeGetitemArgType, DenseOutputLocationArgType, - FloatArgType, + FloatLike, IntLike, ShapeArgType, ) @@ -33,14 +33,14 @@ class TimeSeriesPosterior(abc.ABC): def __init__( self, - locations: Optional[Iterable[FloatArgType]] = None, + locations: Optional[Iterable[FloatLike]] = None, states: Optional[Iterable[randvars.RandomVariable]] = None, ) -> None: self._locations = list(locations) if locations is not None else [] self._states = list(states) if states is not None else [] self._frozen = False - def _check_location(self, location: FloatArgType) -> FloatArgType: + def _check_location(self, location: FloatLike) -> FloatLike: if len(self._locations) > 0 and location <= self._locations[-1]: _err_msg = "Locations have to be strictly ascending. " _err_msg += f"Received {location} <= {self._locations[-1]}." 
@@ -49,7 +49,7 @@ def _check_location(self, location: FloatArgType) -> FloatArgType: def append( self, - location: FloatArgType, + location: FloatLike, state: randvars.RandomVariable, ) -> None: @@ -159,7 +159,7 @@ def __call__(self, t: DenseOutputLocationArgType) -> DenseOutputValueType: @abc.abstractmethod def interpolate( self, - t: FloatArgType, + t: FloatLike, previous_index: Optional[IntLike] = None, next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: diff --git a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py index 580019842..e784073c1 100644 --- a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py +++ b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py @@ -14,7 +14,7 @@ from probnum.filtsmooth.gaussian import approx from probnum.typing import ( DenseOutputLocationArgType, - FloatArgType, + FloatLike, IntLike, ShapeArgType, ) @@ -46,7 +46,7 @@ class KalmanPosterior(_timeseriesposterior.TimeSeriesPosterior, abc.ABC): def __init__( self, transition: GaussMarkovPriorTransitionArgType, - locations: Optional[Iterable[FloatArgType]] = None, + locations: Optional[Iterable[FloatLike]] = None, states: Optional[Iterable[randvars.RandomVariable]] = None, diffusion_model=None, ) -> None: @@ -60,7 +60,7 @@ def __init__( @abc.abstractmethod def interpolate( self, - t: FloatArgType, + t: FloatLike, previous_index: Optional[IntLike] = None, next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: @@ -157,7 +157,7 @@ def __init__( self, filtering_posterior: _timeseriesposterior.TimeSeriesPosterior, transition: GaussMarkovPriorTransitionArgType, - locations: Iterable[FloatArgType], + locations: Iterable[FloatLike], states: Iterable[randvars.RandomVariable], diffusion_model=None, ): @@ -171,7 +171,7 @@ def __init__( def interpolate( self, - t: FloatArgType, + t: FloatLike, previous_index: Optional[IntLike] = None, next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: @@ -364,7 +364,7 @@ class FilteringPosterior(KalmanPosterior): def interpolate( self, - t: FloatArgType, + t: FloatLike, previous_index: Optional[IntLike] = None, next_index: Optional[IntLike] = None, ) -> randvars.RandomVariable: diff --git a/src/probnum/filtsmooth/gaussian/approx/_unscentedkalman.py b/src/probnum/filtsmooth/gaussian/approx/_unscentedkalman.py index acc083f35..6e1a373e1 100644 --- a/src/probnum/filtsmooth/gaussian/approx/_unscentedkalman.py +++ b/src/probnum/filtsmooth/gaussian/approx/_unscentedkalman.py @@ -11,7 +11,7 @@ from probnum import randprocs, randvars from probnum.filtsmooth.gaussian.approx import _unscentedtransform -from probnum.typing import FloatArgType +from probnum.typing import FloatLike class UKFComponent: @@ -20,9 +20,9 @@ class UKFComponent: def __init__( self, non_linear_model, - spread: Optional[FloatArgType] = 1e-4, - priorpar: Optional[FloatArgType] = 2.0, - special_scale: Optional[FloatArgType] = 0.0, + spread: Optional[FloatLike] = 1e-4, + priorpar: Optional[FloatLike] = 2.0, + special_scale: Optional[FloatLike] = 0.0, ) -> None: self.non_linear_model = non_linear_model self.ut = _unscentedtransform.UnscentedTransform( @@ -57,11 +57,11 @@ class ContinuousUKFComponent(UKFComponent, randprocs.markov.continuous.SDE): def __init__( self, non_linear_model, - spread: Optional[FloatArgType] = 1e-4, - priorpar: Optional[FloatArgType] = 2.0, - special_scale: Optional[FloatArgType] = 0.0, - mde_atol: Optional[FloatArgType] = 1e-6, - mde_rtol: Optional[FloatArgType] = 1e-6, + spread: Optional[FloatLike] = 
1e-4, + priorpar: Optional[FloatLike] = 2.0, + special_scale: Optional[FloatLike] = 0.0, + mde_atol: Optional[FloatLike] = 1e-6, + mde_rtol: Optional[FloatLike] = 1e-6, mde_solver: Optional[str] = "LSODA", ) -> None: @@ -153,9 +153,9 @@ class DiscreteUKFComponent(UKFComponent, randprocs.markov.discrete.NonlinearGaus def __init__( self, non_linear_model, - spread: Optional[FloatArgType] = 1e-4, - priorpar: Optional[FloatArgType] = 2.0, - special_scale: Optional[FloatArgType] = 0.0, + spread: Optional[FloatLike] = 1e-4, + priorpar: Optional[FloatLike] = 2.0, + special_scale: Optional[FloatLike] = 0.0, ) -> None: UKFComponent.__init__( self, diff --git a/src/probnum/filtsmooth/particle/_particle_filter.py b/src/probnum/filtsmooth/particle/_particle_filter.py index 6d6b3aabe..1ce628d13 100644 --- a/src/probnum/filtsmooth/particle/_particle_filter.py +++ b/src/probnum/filtsmooth/particle/_particle_filter.py @@ -10,7 +10,7 @@ _importance_distributions, _particle_filter_posterior, ) -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike # Terribly long variable names, but internal only, so no worries. ParticleFilterMeasurementModelArgType = Union[ @@ -68,7 +68,7 @@ def __init__( num_particles: IntLike, rng: np.random.Generator, with_resampling: bool = True, - resampling_percentage_threshold: FloatArgType = 0.1, + resampling_percentage_threshold: FloatLike = 0.1, ) -> None: super().__init__( prior_process=prior_process, diff --git a/src/probnum/filtsmooth/particle/_particle_filter_posterior.py b/src/probnum/filtsmooth/particle/_particle_filter_posterior.py index 2cc54125f..d48329103 100644 --- a/src/probnum/filtsmooth/particle/_particle_filter_posterior.py +++ b/src/probnum/filtsmooth/particle/_particle_filter_posterior.py @@ -6,7 +6,7 @@ from probnum import randvars from probnum.filtsmooth import _timeseriesposterior -from probnum.typing import DenseOutputLocationArgType, FloatArgType, ShapeArgType +from probnum.typing import DenseOutputLocationArgType, FloatLike, ShapeArgType class ParticleFilterPosterior(_timeseriesposterior.TimeSeriesPosterior): @@ -17,7 +17,7 @@ def __call__(self, t): # The methods below are not implemented (yet?). 
- def interpolate(self, t: FloatArgType) -> randvars.RandomVariable: + def interpolate(self, t: FloatLike) -> randvars.RandomVariable: raise NotImplementedError def sample( diff --git a/src/probnum/linalg/solvers/belief_updates/solution_based/_solution_based_proj_rhs_belief_update.py b/src/probnum/linalg/solvers/belief_updates/solution_based/_solution_based_proj_rhs_belief_update.py index 6327c3892..e926a88a2 100644 --- a/src/probnum/linalg/solvers/belief_updates/solution_based/_solution_based_proj_rhs_belief_update.py +++ b/src/probnum/linalg/solvers/belief_updates/solution_based/_solution_based_proj_rhs_belief_update.py @@ -5,7 +5,7 @@ import probnum # pylint: disable="unused-import" from probnum import randvars from probnum.linalg.solvers.beliefs import LinearSystemBelief -from probnum.typing import FloatArgType +from probnum.typing import FloatLike from .._linear_solver_belief_update import LinearSolverBeliefUpdate @@ -35,7 +35,7 @@ class SolutionBasedProjectedRHSBeliefUpdate(LinearSolverBeliefUpdate): Analysis*, 2019, 14, 937-1012 """ - def __init__(self, noise_var: FloatArgType = 0.0) -> None: + def __init__(self, noise_var: FloatLike = 0.0) -> None: if noise_var < 0.0: raise ValueError(f"Noise variance {noise_var} must be non-negative.") self._noise_var = noise_var diff --git a/src/probnum/problems/_problems.py b/src/probnum/problems/_problems.py index 4bf260e3a..69f1727a1 100644 --- a/src/probnum/problems/_problems.py +++ b/src/probnum/problems/_problems.py @@ -8,7 +8,7 @@ import scipy.sparse from probnum import linops, randvars -from probnum.typing import FloatArgType +from probnum.typing import FloatLike @dataclasses.dataclass @@ -147,7 +147,7 @@ class InitialValueProblem: f: Callable[[float, np.ndarray], np.ndarray] t0: float tmax: float - y0: Union[FloatArgType, np.ndarray] + y0: Union[FloatLike, np.ndarray] df: Optional[Callable[[float, np.ndarray], np.ndarray]] = None ddf: Optional[Callable[[float, np.ndarray], np.ndarray]] = None @@ -248,8 +248,8 @@ class QuadratureProblem: """ integrand: Callable[[np.ndarray], Union[float, np.ndarray]] - lower_bd: Union[FloatArgType, np.ndarray] - upper_bd: Union[FloatArgType, np.ndarray] + lower_bd: Union[FloatLike, np.ndarray] + upper_bd: Union[FloatLike, np.ndarray] output_dim: Optional[int] = 1 # For testing and benchmarking diff --git a/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py b/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py index 6328f7332..725622c68 100644 --- a/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py +++ b/src/probnum/problems/zoo/filtsmooth/_filtsmooth_problems.py @@ -4,7 +4,7 @@ from probnum import diffeq, filtsmooth, problems, randprocs, randvars from probnum.problems.zoo import diffeq as diffeq_zoo -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike __all__ = [ "benes_daum", @@ -17,11 +17,11 @@ def car_tracking( rng: np.random.Generator, - measurement_variance: FloatArgType = 0.5, - process_diffusion: FloatArgType = 1.0, + measurement_variance: FloatLike = 0.5, + process_diffusion: FloatLike = 1.0, num_prior_derivatives: IntLike = 1, - timespan: Tuple[FloatArgType, FloatArgType] = (0.0, 20.0), - step: FloatArgType = 0.2, + timespan: Tuple[FloatLike, FloatLike] = (0.0, 20.0), + step: FloatLike = 0.2, initrv: Optional[randvars.RandomVariable] = None, forward_implementation: str = "classic", backward_implementation: str = "classic", @@ -149,9 +149,9 @@ def car_tracking( def ornstein_uhlenbeck( rng: np.random.Generator, - 
measurement_variance: FloatArgType = 0.1, - driftspeed: FloatArgType = 0.21, - process_diffusion: FloatArgType = 0.5, + measurement_variance: FloatLike = 0.1, + driftspeed: FloatLike = 0.21, + process_diffusion: FloatLike = 0.5, time_grid: Optional[np.ndarray] = None, initrv: Optional[randvars.RandomVariable] = None, forward_implementation: str = "classic", @@ -252,9 +252,9 @@ def ornstein_uhlenbeck( def pendulum( rng: np.random.Generator, - measurement_variance: FloatArgType = 0.1024, - timespan: Tuple[FloatArgType, FloatArgType] = (0.0, 4.0), - step: FloatArgType = 0.0075, + measurement_variance: FloatLike = 0.1024, + timespan: Tuple[FloatLike, FloatLike] = (0.0, 4.0), + step: FloatLike = 0.0075, initrv: Optional[randvars.RandomVariable] = None, initarg: Optional[float] = None, ): @@ -400,8 +400,8 @@ def dh(t, x): def benes_daum( rng: np.random.Generator, - measurement_variance: FloatArgType = 0.1, - process_diffusion: FloatArgType = 1.0, + measurement_variance: FloatLike = 0.1, + process_diffusion: FloatLike = 1.0, time_grid: Optional[np.ndarray] = None, initrv: Optional[randvars.RandomVariable] = None, ): @@ -506,12 +506,12 @@ def l(t, x): def logistic_ode( - y0: Optional[Union[np.ndarray, FloatArgType]] = None, - timespan: Tuple[FloatArgType, FloatArgType] = (0.0, 2.0), - step: FloatArgType = 0.1, - params: Tuple[FloatArgType, FloatArgType] = (6.0, 1.0), + y0: Optional[Union[np.ndarray, FloatLike]] = None, + timespan: Tuple[FloatLike, FloatLike] = (0.0, 2.0), + step: FloatLike = 0.1, + params: Tuple[FloatLike, FloatLike] = (6.0, 1.0), initrv: Optional[randvars.RandomVariable] = None, - evlvar: Optional[Union[np.ndarray, FloatArgType]] = None, + evlvar: Optional[Union[np.ndarray, FloatLike]] = None, ek0_or_ek1: IntLike = 1, exclude_initial_condition: bool = True, order: IntLike = 3, diff --git a/src/probnum/quad/_bayesquad.py b/src/probnum/quad/_bayesquad.py index 5516c6c25..27b227ced 100644 --- a/src/probnum/quad/_bayesquad.py +++ b/src/probnum/quad/_bayesquad.py @@ -14,7 +14,7 @@ from probnum.randprocs.kernels import Kernel from probnum.randvars import Normal -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike from ._integration_measures import GaussianMeasure, IntegrationMeasure, LebesgueMeasure from .solvers import BayesianQuadrature @@ -26,13 +26,13 @@ def bayesquad( input_dim: int, kernel: Optional[Kernel] = None, domain: Optional[ - Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]] + Union[Tuple[FloatLike, FloatLike], Tuple[np.ndarray, np.ndarray]] ] = None, measure: Optional[IntegrationMeasure] = None, policy: Optional[str] = "bmc", max_evals: Optional[IntLike] = None, - var_tol: Optional[FloatArgType] = None, - rel_tol: Optional[FloatArgType] = None, + var_tol: Optional[FloatLike] = None, + rel_tol: Optional[FloatLike] = None, batch_size: Optional[IntLike] = 1, rng: Optional[np.random.Generator] = np.random.default_rng(), ) -> Tuple[Normal, Dict]: @@ -162,7 +162,7 @@ def bayesquad_from_data( fun_evals: np.ndarray, kernel: Optional[Kernel] = None, domain: Optional[ - Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]] + Tuple[Union[np.ndarray, FloatLike], Union[np.ndarray, FloatLike]] ] = None, measure: Optional[IntegrationMeasure] = None, ) -> Tuple[Normal, Dict]: diff --git a/src/probnum/quad/_integration_measures.py b/src/probnum/quad/_integration_measures.py index bfdc6eec4..b6693edfc 100644 --- a/src/probnum/quad/_integration_measures.py +++ b/src/probnum/quad/_integration_measures.py @@ 
-7,7 +7,7 @@ import scipy.stats from probnum.randvars import Normal -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike class IntegrationMeasure(abc.ABC): @@ -27,13 +27,13 @@ class IntegrationMeasure(abc.ABC): def __init__( self, - domain: Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]], + domain: Union[Tuple[FloatLike, FloatLike], Tuple[np.ndarray, np.ndarray]], input_dim: IntLike, ) -> None: self._set_dimension_domain(input_dim, domain) - def __call__(self, points: Union[FloatArgType, np.ndarray]) -> np.ndarray: + def __call__(self, points: Union[FloatLike, np.ndarray]) -> np.ndarray: """Evaluate the density function of the integration measure. Parameters @@ -77,7 +77,7 @@ def sample( def _set_dimension_domain( self, input_dim: IntLike, - domain: Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]], + domain: Union[Tuple[FloatLike, FloatLike], Tuple[np.ndarray, np.ndarray]], ) -> None: """Sets the integration domain and input_dimension. @@ -150,7 +150,7 @@ class LebesgueMeasure(IntegrationMeasure): def __init__( self, - domain: Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]], + domain: Union[Tuple[FloatLike, FloatLike], Tuple[np.ndarray, np.ndarray]], input_dim: Optional[IntLike] = None, normalized: Optional[bool] = False, ) -> None: diff --git a/src/probnum/quad/solvers/bayesian_quadrature.py b/src/probnum/quad/solvers/bayesian_quadrature.py index ea918e0a8..9729ccc6c 100644 --- a/src/probnum/quad/solvers/bayesian_quadrature.py +++ b/src/probnum/quad/solvers/bayesian_quadrature.py @@ -13,7 +13,7 @@ ) from probnum.randprocs.kernels import ExpQuad, Kernel from probnum.randvars import Normal -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike from .._integration_measures import IntegrationMeasure, LebesgueMeasure from ..kernel_embeddings import KernelEmbedding @@ -64,12 +64,12 @@ def from_problem( kernel: Optional[Kernel] = None, measure: Optional[IntegrationMeasure] = None, domain: Optional[ - Union[Tuple[FloatArgType, FloatArgType], Tuple[np.ndarray, np.ndarray]] + Union[Tuple[FloatLike, FloatLike], Tuple[np.ndarray, np.ndarray]] ] = None, policy: str = "bmc", max_evals: Optional[IntLike] = None, - var_tol: Optional[FloatArgType] = None, - rel_tol: Optional[FloatArgType] = None, + var_tol: Optional[FloatLike] = None, + rel_tol: Optional[FloatLike] = None, batch_size: IntLike = 1, rng: np.random.Generator = None, ) -> "BayesianQuadrature": diff --git a/src/probnum/quad/solvers/stopping_criteria/_integral_variance_tol.py b/src/probnum/quad/solvers/stopping_criteria/_integral_variance_tol.py index bd863ac11..5276892cd 100644 --- a/src/probnum/quad/solvers/stopping_criteria/_integral_variance_tol.py +++ b/src/probnum/quad/solvers/stopping_criteria/_integral_variance_tol.py @@ -2,7 +2,7 @@ from probnum.quad.solvers.bq_state import BQState from probnum.quad.solvers.stopping_criteria import BQStoppingCriterion -from probnum.typing import FloatArgType +from probnum.typing import FloatLike # pylint: disable=too-few-public-methods, fixme @@ -16,7 +16,7 @@ class IntegralVarianceTolerance(BQStoppingCriterion): Tolerance value of the variance. 
""" - def __init__(self, var_tol: FloatArgType): + def __init__(self, var_tol: FloatLike): self.var_tol = var_tol def __call__(self, bq_state: BQState) -> bool: diff --git a/src/probnum/quad/solvers/stopping_criteria/_rel_mean_change.py b/src/probnum/quad/solvers/stopping_criteria/_rel_mean_change.py index f74fb6262..cb32b89d3 100644 --- a/src/probnum/quad/solvers/stopping_criteria/_rel_mean_change.py +++ b/src/probnum/quad/solvers/stopping_criteria/_rel_mean_change.py @@ -4,7 +4,7 @@ from probnum.quad.solvers.bq_state import BQState from probnum.quad.solvers.stopping_criteria import BQStoppingCriterion -from probnum.typing import FloatArgType +from probnum.typing import FloatLike # pylint: disable=too-few-public-methods @@ -23,7 +23,7 @@ class RelativeMeanChange(BQStoppingCriterion): Relative error tolerance on consecutive integral mean values. """ - def __init__(self, rel_tol: FloatArgType): + def __init__(self, rel_tol: FloatLike): self.rel_tol = rel_tol def __call__(self, bq_state: BQState) -> bool: diff --git a/src/probnum/randprocs/markov/_transition.py b/src/probnum/randprocs/markov/_transition.py index 0dd0d850e..ed0bbf253 100644 --- a/src/probnum/randprocs/markov/_transition.py +++ b/src/probnum/randprocs/markov/_transition.py @@ -5,7 +5,7 @@ import numpy as np from probnum import randvars -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike class Transition(abc.ABC): @@ -300,7 +300,7 @@ def smooth_list( def jointly_transform_base_measure_realization_list_backward( self, base_measure_realizations: np.ndarray, - t: FloatArgType, + t: FloatLike, rv_list: randvars._RandomVariableList, _diffusion_list: np.ndarray, _previous_posterior=None, @@ -367,7 +367,7 @@ def jointly_transform_base_measure_realization_list_backward( def jointly_transform_base_measure_realization_list_forward( self, base_measure_realizations: np.ndarray, - t: FloatArgType, + t: FloatLike, initrv: randvars.RandomVariable, _diffusion_list: np.ndarray, _previous_posterior=None, diff --git a/src/probnum/randprocs/markov/continuous/_diffusions.py b/src/probnum/randprocs/markov/continuous/_diffusions.py index 45febd251..f659cb5c3 100644 --- a/src/probnum/randprocs/markov/continuous/_diffusions.py +++ b/src/probnum/randprocs/markov/continuous/_diffusions.py @@ -11,7 +11,7 @@ from probnum.typing import ( ArrayLikeGetitemArgType, DenseOutputLocationArgType, - FloatArgType, + FloatLike, ToleranceDiffusionType, ) @@ -40,7 +40,7 @@ def estimate_locally( self, meas_rv: randvars.RandomVariable, meas_rv_assuming_zero_previous_cov: randvars.RandomVariable, - t: FloatArgType, + t: FloatLike, ) -> ToleranceDiffusionType: r"""Estimate the (local) diffusion and update current (global) estimation in- place. 
@@ -87,7 +87,7 @@ def estimate_locally( self, meas_rv: randvars.RandomVariable, meas_rv_assuming_zero_previous_cov: randvars.RandomVariable, - t: FloatArgType, + t: FloatLike, ) -> ToleranceDiffusionType: new_increment = _compute_local_quasi_mle(meas_rv) return new_increment @@ -171,7 +171,7 @@ def estimate_locally( self, meas_rv: randvars.RandomVariable, meas_rv_assuming_zero_previous_cov: randvars.RandomVariable, - t: FloatArgType, + t: FloatLike, ) -> ToleranceDiffusionType: if not t >= self.tmax: raise ValueError( diff --git a/src/probnum/randprocs/markov/continuous/_linear_sde.py b/src/probnum/randprocs/markov/continuous/_linear_sde.py index e3129263b..ed3b82e9f 100644 --- a/src/probnum/randprocs/markov/continuous/_linear_sde.py +++ b/src/probnum/randprocs/markov/continuous/_linear_sde.py @@ -8,7 +8,7 @@ from probnum import randvars from probnum.randprocs.markov.continuous import _sde -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike from probnum.utils.linalg import tril_to_positive_tril @@ -48,11 +48,11 @@ def __init__( self, state_dimension: IntLike, wiener_process_dimension: IntLike, - drift_matrix_function: Callable[[FloatArgType], np.ndarray], - force_vector_function: Callable[[FloatArgType], np.ndarray], - dispersion_matrix_function: Callable[[FloatArgType], np.ndarray], - mde_atol: Optional[FloatArgType] = 1e-6, - mde_rtol: Optional[FloatArgType] = 1e-6, + drift_matrix_function: Callable[[FloatLike], np.ndarray], + force_vector_function: Callable[[FloatLike], np.ndarray], + dispersion_matrix_function: Callable[[FloatLike], np.ndarray], + mde_atol: Optional[FloatLike] = 1e-6, + mde_rtol: Optional[FloatLike] = 1e-6, mde_solver: Optional[str] = "RK45", forward_implementation: Optional[str] = "classic", ): diff --git a/src/probnum/randprocs/markov/continuous/_sde.py b/src/probnum/randprocs/markov/continuous/_sde.py index 91b86d938..06896d10a 100644 --- a/src/probnum/randprocs/markov/continuous/_sde.py +++ b/src/probnum/randprocs/markov/continuous/_sde.py @@ -5,7 +5,7 @@ import numpy as np from probnum.randprocs.markov import _transition -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike class SDE(_transition.Transition): @@ -20,9 +20,9 @@ def __init__( self, state_dimension: IntLike, wiener_process_dimension: IntLike, - drift_function: Callable[[FloatArgType, np.ndarray], np.ndarray], - dispersion_function: Callable[[FloatArgType, np.ndarray], np.ndarray], - drift_jacobian: Optional[Callable[[FloatArgType, np.ndarray], np.ndarray]], + drift_function: Callable[[FloatLike, np.ndarray], np.ndarray], + dispersion_function: Callable[[FloatLike, np.ndarray], np.ndarray], + drift_jacobian: Optional[Callable[[FloatLike, np.ndarray], np.ndarray]], ): super().__init__(input_dim=state_dimension, output_dim=state_dimension) diff --git a/src/probnum/randprocs/markov/discrete/_linear_gaussian.py b/src/probnum/randprocs/markov/discrete/_linear_gaussian.py index 5bdcdb362..0c21fbbad 100644 --- a/src/probnum/randprocs/markov/discrete/_linear_gaussian.py +++ b/src/probnum/randprocs/markov/discrete/_linear_gaussian.py @@ -8,7 +8,7 @@ from probnum import config, linops, randvars from probnum.randprocs.markov.discrete import _nonlinear_gaussian -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike from probnum.utils.linalg import cholesky_update, tril_to_positive_tril @@ -40,12 +40,10 @@ def __init__( self, input_dim: IntLike, output_dim: IntLike, - 
state_trans_mat_fun: Callable[[FloatArgType], np.ndarray], - shift_vec_fun: Callable[[FloatArgType], np.ndarray], - proc_noise_cov_mat_fun: Callable[[FloatArgType], np.ndarray], - proc_noise_cov_cholesky_fun: Optional[ - Callable[[FloatArgType], np.ndarray] - ] = None, + state_trans_mat_fun: Callable[[FloatLike], np.ndarray], + shift_vec_fun: Callable[[FloatLike], np.ndarray], + proc_noise_cov_mat_fun: Callable[[FloatLike], np.ndarray], + proc_noise_cov_cholesky_fun: Optional[Callable[[FloatLike], np.ndarray]] = None, forward_implementation="classic", backward_implementation="classic", ): diff --git a/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py b/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py index 60c49c07f..ce676273c 100644 --- a/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py +++ b/src/probnum/randprocs/markov/discrete/_nonlinear_gaussian.py @@ -8,7 +8,7 @@ from probnum import randvars from probnum.randprocs.markov import _transition from probnum.randprocs.markov.discrete import _condition_state -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike class NonlinearGaussian(_transition.Transition): @@ -45,14 +45,12 @@ def __init__( self, input_dim: IntLike, output_dim: IntLike, - state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], - proc_noise_cov_mat_fun: Callable[[FloatArgType], np.ndarray], + state_trans_fun: Callable[[FloatLike, np.ndarray], np.ndarray], + proc_noise_cov_mat_fun: Callable[[FloatLike], np.ndarray], jacob_state_trans_fun: Optional[ - Callable[[FloatArgType, np.ndarray], np.ndarray] - ] = None, - proc_noise_cov_cholesky_fun: Optional[ - Callable[[FloatArgType], np.ndarray] + Callable[[FloatLike, np.ndarray], np.ndarray] ] = None, + proc_noise_cov_cholesky_fun: Optional[Callable[[FloatLike], np.ndarray]] = None, ): self.state_trans_fun = state_trans_fun self.proc_noise_cov_mat_fun = proc_noise_cov_mat_fun @@ -154,8 +152,8 @@ def from_callable( cls, input_dim: IntLike, output_dim: IntLike, - state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], - jacob_state_trans_fun: Callable[[FloatArgType, np.ndarray], np.ndarray], + state_trans_fun: Callable[[FloatLike, np.ndarray], np.ndarray], + jacob_state_trans_fun: Callable[[FloatLike, np.ndarray], np.ndarray], ): """Turn a callable into a deterministic transition.""" diff --git a/src/probnum/randvars/_normal.py b/src/probnum/randvars/_normal.py index 5daa700a7..aef3f6bef 100644 --- a/src/probnum/randvars/_normal.py +++ b/src/probnum/randvars/_normal.py @@ -10,7 +10,7 @@ from probnum import utils as _utils from probnum.typing import ( ArrayLikeGetitemArgType, - FloatArgType, + FloatLike, ShapeArgType, ShapeType, ) @@ -254,7 +254,7 @@ def cov_cholesky(self) -> _ValueType: def precompute_cov_cholesky( self, - damping_factor: Optional[FloatArgType] = None, + damping_factor: Optional[FloatLike] = None, ): """(P)recompute Cholesky factors (careful: in-place operation!).""" if damping_factor is None: @@ -414,7 +414,7 @@ def _sub_normal(self, other: "Normal") -> "Normal": # Univariate Gaussians def _univariate_cov_cholesky( self, - damping_factor: FloatArgType, + damping_factor: FloatLike, ) -> np.floating: return np.sqrt(self.cov + damping_factor) @@ -452,7 +452,7 @@ def _univariate_cdf(self, x: _ValueType) -> np.float_: def _univariate_logcdf(self, x: _ValueType) -> np.float_: return scipy.stats.norm.logcdf(x, loc=self.mean, scale=self.std) - def _univariate_quantile(self, p: FloatArgType) -> np.floating: + def 
_univariate_quantile(self, p: FloatLike) -> np.floating: return scipy.stats.norm.ppf(p, loc=self.mean, scale=self.std) def _univariate_entropy(self: _ValueType) -> np.float_: @@ -464,7 +464,7 @@ def _univariate_entropy(self: _ValueType) -> np.float_: # Multi- and matrixvariate Gaussians def dense_cov_cholesky( self, - damping_factor: Optional[FloatArgType] = None, + damping_factor: Optional[FloatLike] = None, ) -> np.ndarray: """Compute the Cholesky factorization of the covariance from its dense representation.""" @@ -478,7 +478,7 @@ def dense_cov_cholesky( ) def _dense_cov_cholesky_as_linop( - self, damping_factor: FloatArgType + self, damping_factor: FloatLike ) -> linops.LinearOperator: return linops.aslinop(self.dense_cov_cholesky(damping_factor=damping_factor)) @@ -550,7 +550,7 @@ def _dense_entropy(self) -> np.float_: # Matrixvariate Gaussian with Kronecker covariance def _kronecker_cov_cholesky( self, - damping_factor: FloatArgType, + damping_factor: FloatLike, ) -> linops.Kronecker: assert isinstance(self.cov, linops.Kronecker) @@ -572,7 +572,7 @@ def _kronecker_cov_cholesky( # factors def _symmetric_kronecker_identical_factors_cov_cholesky( self, - damping_factor: FloatArgType, + damping_factor: FloatLike, ) -> linops.SymmetricKronecker: assert ( isinstance(self.cov, linops.SymmetricKronecker) diff --git a/src/probnum/randvars/_random_variable.py b/src/probnum/randvars/_random_variable.py index c7e332c52..cd9e7b418 100644 --- a/src/probnum/randvars/_random_variable.py +++ b/src/probnum/randvars/_random_variable.py @@ -8,7 +8,7 @@ from probnum.typing import ( ArrayLikeGetitemArgType, DTypeArgType, - FloatArgType, + FloatLike, ShapeArgType, ShapeType, ) @@ -123,7 +123,7 @@ def __init__( in_support: Optional[Callable[[_ValueType], bool]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, - quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, + quantile: Optional[Callable[[FloatLike], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] = None, mean: Optional[Callable[[], _ValueType]] = None, @@ -490,7 +490,7 @@ def logcdf(self, x: _ValueType) -> np.float_: f"with type `{type(self).__name__}` is implemented." ) - def quantile(self, p: FloatArgType) -> _ValueType: + def quantile(self, p: FloatLike) -> _ValueType: """Quantile function. 
The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
@@ -982,7 +982,7 @@ def __init__(
     logpmf: Optional[Callable[[_ValueType], np.float_]] = None,
     cdf: Optional[Callable[[_ValueType], np.float_]] = None,
     logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
-    quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
+    quantile: Optional[Callable[[FloatLike], _ValueType]] = None,
     mode: Optional[Callable[[], _ValueType]] = None,
     median: Optional[Callable[[], _ValueType]] = None,
     mean: Optional[Callable[[], _ValueType]] = None,
@@ -1200,7 +1200,7 @@ def __init__(
     logpdf: Optional[Callable[[_ValueType], np.float_]] = None,
     cdf: Optional[Callable[[_ValueType], np.float_]] = None,
     logcdf: Optional[Callable[[_ValueType], np.float_]] = None,
-    quantile: Optional[Callable[[FloatArgType], _ValueType]] = None,
+    quantile: Optional[Callable[[FloatLike], _ValueType]] = None,
     mode: Optional[Callable[[], _ValueType]] = None,
     median: Optional[Callable[[], _ValueType]] = None,
     mean: Optional[Callable[[], _ValueType]] = None,
diff --git a/src/probnum/typing.py b/src/probnum/typing.py
index 379f31683..3f1adb5c2 100644
--- a/src/probnum/typing.py
+++ b/src/probnum/typing.py
@@ -38,7 +38,11 @@
 Values of this type should always be converted into :class:`int`\\ s before further
 internal processing."""

-FloatArgType = Union[float, numbers.Real, np.floating]
+FloatLike = Union[float, numbers.Real, np.floating]
+"""Type of a public API argument for supplying a float.
+
+Values of this type should always be converted into :class:`float`\\ s before further
+internal processing."""

 ShapeArgType = Union[IntLike, Iterable[IntLike]]
 """Type of a public API argument for supplying a shape. Values of this type should
@@ -71,7 +75,7 @@
 # Other Types
 ########################################################################################

-ToleranceDiffusionType = Union[FloatArgType, np.ndarray]
+ToleranceDiffusionType = Union[FloatLike, np.ndarray]
 r"""Type of a quantity that describes tolerances, errors, and diffusions. Used for
 absolute (atol) and relative tolerances (rtol), local error estimates, as well as
@@ -84,7 +88,7 @@
 respectively. Currently, the array-support for diffusions is experimental (at best).
""" -DenseOutputLocationArgType = Union[FloatArgType, np.ndarray] +DenseOutputLocationArgType = Union[FloatLike, np.ndarray] """TimeSeriesPosteriors and derived classes can be evaluated at a single location 't' or an array of locations.""" diff --git a/tests/test_quad/util.py b/tests/test_quad/util.py index 5e02e85a8..51e14b0f6 100644 --- a/tests/test_quad/util.py +++ b/tests/test_quad/util.py @@ -5,15 +5,15 @@ from scipy.linalg import sqrtm from scipy.special import roots_legendre -from probnum.typing import FloatArgType, IntLike +from probnum.typing import FloatLike, IntLike # Auxiliary functions for quadrature tests def gauss_hermite_tensor( n_points: IntLike, input_dim: IntLike, - mean: Union[np.ndarray, FloatArgType], - cov: Union[np.ndarray, FloatArgType], + mean: Union[np.ndarray, FloatLike], + cov: Union[np.ndarray, FloatLike], ): """Returns the points and weights of a tensor-product Gauss-Hermite rule for integration w.r.t a Gaussian measure.""" @@ -33,7 +33,7 @@ def gauss_hermite_tensor( def gauss_legendre_tensor( n_points: IntLike, input_dim: IntLike, - domain: Tuple[Union[np.ndarray, FloatArgType], Union[np.ndarray, FloatArgType]], + domain: Tuple[Union[np.ndarray, FloatLike], Union[np.ndarray, FloatLike]], normalized: Optional[bool] = False, ): """Returns the points and weights of a tensor-product Gauss-Legendre rule for From 751dd0f86b39f27a9790715b15839f3cfa056e97 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:39:39 +0100 Subject: [PATCH 03/15] Rename `ShapeArgType` -> `ShapeLike` --- .../implementing_a_probnum_method.ipynb | 6 +++--- src/probnum/diffeq/_odesolution.py | 4 ++-- .../diffeq/odefilter/_odefilter_solution.py | 4 ++-- .../perturbed/step/_perturbation_functions.py | 6 +++--- .../filtsmooth/_timeseriesposterior.py | 4 ++-- .../filtsmooth/gaussian/_kalmanposterior.py | 6 +++--- .../particle/_particle_filter_posterior.py | 4 ++-- src/probnum/linops/_arithmetic.py | 4 ++-- src/probnum/linops/_linear_operator.py | 6 +++--- src/probnum/linops/_scaling.py | 4 ++-- src/probnum/randprocs/_gaussian_process.py | 4 ++-- src/probnum/randprocs/_random_process.py | 6 +++--- src/probnum/randprocs/kernels/_kernel.py | 4 ++-- .../randprocs/markov/_markov_process.py | 4 ++-- src/probnum/randvars/_constant.py | 4 ++-- src/probnum/randvars/_normal.py | 4 ++-- src/probnum/randvars/_random_variable.py | 20 ++++++++----------- src/probnum/typing.py | 10 ++++++---- src/probnum/utils/argutils.py | 4 ++-- .../test_randvars/test_arithmetic/conftest.py | 10 +++++----- .../test_arithmetic/test_generic.py | 4 ++-- 21 files changed, 60 insertions(+), 62 deletions(-) diff --git a/docs/source/development/implementing_a_probnum_method.ipynb b/docs/source/development/implementing_a_probnum_method.ipynb index 3a6b50f9b..8f27e9549 100644 --- a/docs/source/development/implementing_a_probnum_method.ipynb +++ b/docs/source/development/implementing_a_probnum_method.ipynb @@ -587,7 +587,7 @@ "IntLike = Union[int, numbers.Integral, np.integer]\n", "FloatLike = Union[float, numbers.Real, np.floating]\n", "\n", - "ShapeArgType = Union[IntLike, Iterable[IntLike]]\n", + "ShapeLike = Union[IntLike, Iterable[IntLike]]\n", "\"\"\"Type of a public API argument for supplying a shape. 
Values of this type should\n", "always be converted into :class:`ShapeType` using the function\n", ":func:`probnum.utils.as_shape` before further internal processing.\"\"\"\n", @@ -602,11 +602,11 @@ "metadata": {}, "outputs": [], "source": [ - "from probnum.typing import ShapeType, IntLike, ShapeArgType\n", + "from probnum.typing import ShapeType, IntLike, ShapeLike\n", "from probnum.utils import as_shape\n", "\n", "\n", - "def extend_shape(shape: ShapeArgType, extension: IntLike) -> ShapeType:\n", + "def extend_shape(shape: ShapeLike, extension: IntLike) -> ShapeType:\n", " return as_shape(shape) + as_shape(extension)" ] }, diff --git a/src/probnum/diffeq/_odesolution.py b/src/probnum/diffeq/_odesolution.py index cc1992a38..aa892100f 100644 --- a/src/probnum/diffeq/_odesolution.py +++ b/src/probnum/diffeq/_odesolution.py @@ -11,7 +11,7 @@ from probnum import filtsmooth, randvars from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatLike, IntLike, ShapeArgType +from probnum.typing import FloatLike, IntLike, ShapeLike class ODESolution(filtsmooth.TimeSeriesPosterior): @@ -61,7 +61,7 @@ def sample( self, rng: np.random.Generator, t: Optional[DenseOutputLocationArgType] = None, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> np.ndarray: """Sample from the ODE solution. diff --git a/src/probnum/diffeq/odefilter/_odefilter_solution.py b/src/probnum/diffeq/odefilter/_odefilter_solution.py index 447ff4fe9..5068aa4f8 100644 --- a/src/probnum/diffeq/odefilter/_odefilter_solution.py +++ b/src/probnum/diffeq/odefilter/_odefilter_solution.py @@ -7,7 +7,7 @@ from probnum import filtsmooth, randvars, utils from probnum.diffeq import _odesolution from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatLike, IntLike, ShapeArgType +from probnum.typing import FloatLike, IntLike, ShapeLike class ODEFilterSolution(_odesolution.ODESolution): @@ -103,7 +103,7 @@ def sample( self, rng: np.random.Generator, t: Optional[DenseOutputLocationArgType] = None, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> np.ndarray: samples = self.kalman_posterior.sample(rng=rng, t=t, size=size) diff --git a/src/probnum/diffeq/perturbed/step/_perturbation_functions.py b/src/probnum/diffeq/perturbed/step/_perturbation_functions.py index d28aa5d22..2e56b0fd9 100644 --- a/src/probnum/diffeq/perturbed/step/_perturbation_functions.py +++ b/src/probnum/diffeq/perturbed/step/_perturbation_functions.py @@ -4,7 +4,7 @@ import numpy as np import scipy -from probnum.typing import FloatLike, IntLike, ShapeArgType +from probnum.typing import FloatLike, IntLike, ShapeLike def perturb_uniform( @@ -12,7 +12,7 @@ def perturb_uniform( step: FloatLike, solver_order: IntLike, noise_scale: FloatLike, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> Union[float, np.ndarray]: """Perturb the step with uniformly distributed noise. @@ -53,7 +53,7 @@ def perturb_lognormal( step: FloatLike, solver_order: IntLike, noise_scale: FloatLike, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> Union[float, np.ndarray]: """Perturb the step with log-normally distributed noise. 
diff --git a/src/probnum/filtsmooth/_timeseriesposterior.py b/src/probnum/filtsmooth/_timeseriesposterior.py index fe943b38e..015e1d156 100644 --- a/src/probnum/filtsmooth/_timeseriesposterior.py +++ b/src/probnum/filtsmooth/_timeseriesposterior.py @@ -11,7 +11,7 @@ DenseOutputLocationArgType, FloatLike, IntLike, - ShapeArgType, + ShapeLike, ) DenseOutputValueType = Union[randvars.RandomVariable, randvars._RandomVariableList] @@ -177,7 +177,7 @@ def sample( self, rng: np.random.Generator, t: Optional[DenseOutputLocationArgType] = None, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> np.ndarray: """Draw samples from the filtering/smoothing posterior. diff --git a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py index e784073c1..a9ce4dc80 100644 --- a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py +++ b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py @@ -16,7 +16,7 @@ DenseOutputLocationArgType, FloatLike, IntLike, - ShapeArgType, + ShapeLike, ) GaussMarkovPriorTransitionArgType = Union[ @@ -70,7 +70,7 @@ def sample( self, rng: np.random.Generator, t: Optional[DenseOutputLocationArgType] = None, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> np.ndarray: size = utils.as_shape(size) @@ -428,7 +428,7 @@ def sample( self, rng: np.random.Generator, t: Optional[DenseOutputLocationArgType] = None, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> np.ndarray: # If this error would not be thrown here, trying to sample from a FilteringPosterior # would call FilteringPosterior.transform_base_measure_realizations which is not implemented. diff --git a/src/probnum/filtsmooth/particle/_particle_filter_posterior.py b/src/probnum/filtsmooth/particle/_particle_filter_posterior.py index d48329103..a8ae97bcd 100644 --- a/src/probnum/filtsmooth/particle/_particle_filter_posterior.py +++ b/src/probnum/filtsmooth/particle/_particle_filter_posterior.py @@ -6,7 +6,7 @@ from probnum import randvars from probnum.filtsmooth import _timeseriesposterior -from probnum.typing import DenseOutputLocationArgType, FloatLike, ShapeArgType +from probnum.typing import DenseOutputLocationArgType, FloatLike, ShapeLike class ParticleFilterPosterior(_timeseriesposterior.TimeSeriesPosterior): @@ -24,7 +24,7 @@ def sample( self, rng: np.random.Generator, t: Optional[DenseOutputLocationArgType] = None, - size: Optional[ShapeArgType] = (), + size: Optional[ShapeLike] = (), ) -> np.ndarray: raise NotImplementedError("Sampling is not implemented.") diff --git a/src/probnum/linops/_arithmetic.py b/src/probnum/linops/_arithmetic.py index 713542673..d4afa0df6 100644 --- a/src/probnum/linops/_arithmetic.py +++ b/src/probnum/linops/_arithmetic.py @@ -5,7 +5,7 @@ import scipy.sparse from probnum import config, utils -from probnum.typing import NotImplementedType, ScalarArgType, ShapeArgType +from probnum.typing import NotImplementedType, ScalarArgType, ShapeLike from ._arithmetic_fallbacks import ( NegatedLinearOperator, @@ -425,7 +425,7 @@ def _apply( ######################################################################################## -def _operand_to_linop(operand: Any, shape: ShapeArgType) -> Optional[LinearOperator]: +def _operand_to_linop(operand: Any, shape: ShapeLike) -> Optional[LinearOperator]: if isinstance(operand, LinearOperator): pass elif np.ndim(operand) == 0: diff --git a/src/probnum/linops/_linear_operator.py b/src/probnum/linops/_linear_operator.py index 3c21da6cc..0350d5277 
100644 --- a/src/probnum/linops/_linear_operator.py +++ b/src/probnum/linops/_linear_operator.py @@ -8,7 +8,7 @@ import probnum.utils from probnum import config -from probnum.typing import DTypeArgType, ScalarArgType, ShapeArgType +from probnum.typing import DTypeArgType, ScalarArgType, ShapeLike BinaryOperandType = Union[ "LinearOperator", ScalarArgType, np.ndarray, scipy.sparse.spmatrix @@ -101,7 +101,7 @@ class LinearOperator: def __init__( self, - shape: ShapeArgType, + shape: ShapeLike, dtype: DTypeArgType, *, matmul: Callable[[np.ndarray], np.ndarray], @@ -979,7 +979,7 @@ class Identity(LinearOperator): def __init__( self, - shape: ShapeArgType, + shape: ShapeLike, dtype: DTypeArgType = np.double, ): shape = probnum.utils.as_shape(shape) diff --git a/src/probnum/linops/_scaling.py b/src/probnum/linops/_scaling.py index 562d31e61..eccd075ba 100644 --- a/src/probnum/linops/_scaling.py +++ b/src/probnum/linops/_scaling.py @@ -4,7 +4,7 @@ import numpy as np import probnum.utils -from probnum.typing import DTypeArgType, ScalarArgType, ShapeArgType +from probnum.typing import DTypeArgType, ScalarArgType, ShapeLike from . import _linear_operator @@ -38,7 +38,7 @@ class Scaling(_linear_operator.LinearOperator): def __init__( self, factors: Union[np.ndarray, ScalarArgType], - shape: Optional[ShapeArgType] = None, + shape: Optional[ShapeLike] = None, dtype: Optional[DTypeArgType] = None, ): self._factors = None diff --git a/src/probnum/randprocs/_gaussian_process.py b/src/probnum/randprocs/_gaussian_process.py index 583f6019f..23257fcff 100644 --- a/src/probnum/randprocs/_gaussian_process.py +++ b/src/probnum/randprocs/_gaussian_process.py @@ -5,7 +5,7 @@ import numpy as np from probnum import randvars -from probnum.typing import ShapeArgType +from probnum.typing import ShapeLike from . import _random_process, kernels @@ -107,7 +107,7 @@ def _sample_at_input( self, rng: np.random.Generator, args: _InputType, - size: ShapeArgType = (), + size: ShapeLike = (), ) -> _OutputType: gaussian_rv = self.__call__(args) return gaussian_rv.sample(rng=rng, size=size) diff --git a/src/probnum/randprocs/_random_process.py b/src/probnum/randprocs/_random_process.py index 691a7cf38..9522638e4 100644 --- a/src/probnum/randprocs/_random_process.py +++ b/src/probnum/randprocs/_random_process.py @@ -7,7 +7,7 @@ from probnum import randvars from probnum import utils as _utils -from probnum.typing import DTypeArgType, IntLike, ShapeArgType +from probnum.typing import DTypeArgType, IntLike, ShapeLike _InputType = TypeVar("InputType") _OutputType = TypeVar("OutputType") @@ -296,7 +296,7 @@ def sample( self, rng: np.random.Generator, args: _InputType = None, - size: ShapeArgType = (), + size: ShapeLike = (), ) -> Union[Callable[[_InputType], _OutputType], _OutputType]: """Sample paths from the random process. @@ -325,7 +325,7 @@ def _sample_at_input( self, rng: np.random.Generator, args: _InputType, - size: ShapeArgType = (), + size: ShapeLike = (), ) -> _OutputType: """Evaluate a set of sample paths at the given inputs. 
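The `size` parameters annotated above follow the same pattern: an int or a tuple of ints is accepted and normalized before sampling. A small usage sketch, not part of the patch, based on `randvars.Normal`, whose `sample` method shares this `ShapeLike` convention with the `_sample_at_input` signatures above; the stated output shapes assume a univariate normal:

```python
import numpy as np

from probnum import randvars

rng = np.random.default_rng(seed=123)
rv = randvars.Normal(mean=0.0, cov=1.0)

# `size` is a `ShapeLike`: both an int and a tuple of ints are accepted.
samples_1d = rv.sample(rng=rng, size=3)       # expected shape: (3,)
samples_2d = rv.sample(rng=rng, size=(2, 5))  # expected shape: (2, 5)
```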
diff --git a/src/probnum/randprocs/kernels/_kernel.py b/src/probnum/randprocs/kernels/_kernel.py index 6b3031382..cb16d0f0d 100644 --- a/src/probnum/randprocs/kernels/_kernel.py +++ b/src/probnum/randprocs/kernels/_kernel.py @@ -6,7 +6,7 @@ import numpy as np from probnum import utils as _pn_utils -from probnum.typing import ArrayLike, IntLike, ShapeArgType, ShapeType +from probnum.typing import ArrayLike, IntLike, ShapeLike, ShapeType class Kernel(abc.ABC): @@ -135,7 +135,7 @@ class Kernel(abc.ABC): def __init__( self, input_dim: IntLike, - shape: ShapeArgType = (), + shape: ShapeLike = (), ): self._input_dim = int(input_dim) diff --git a/src/probnum/randprocs/markov/_markov_process.py b/src/probnum/randprocs/markov/_markov_process.py index bf38f3055..9b588de7f 100644 --- a/src/probnum/randprocs/markov/_markov_process.py +++ b/src/probnum/randprocs/markov/_markov_process.py @@ -8,7 +8,7 @@ from probnum import randvars, utils from probnum.randprocs import _random_process from probnum.randprocs.markov import _transition -from probnum.typing import ShapeArgType +from probnum.typing import ShapeLike _InputType = Union[np.floating, np.ndarray] _OutputType = Union[np.floating, np.ndarray] @@ -69,7 +69,7 @@ def _sample_at_input( self, rng: np.random.Generator, args: _InputType, - size: ShapeArgType = (), + size: ShapeLike = (), ) -> _OutputType: size = utils.as_shape(size) diff --git a/src/probnum/randvars/_constant.py b/src/probnum/randvars/_constant.py index 1f24a0008..4dc8e4e79 100644 --- a/src/probnum/randvars/_constant.py +++ b/src/probnum/randvars/_constant.py @@ -6,7 +6,7 @@ from probnum import config, linops from probnum import utils as _utils -from probnum.typing import ArrayLikeGetitemArgType, ShapeArgType, ShapeType +from probnum.typing import ArrayLikeGetitemArgType, ShapeLike, ShapeType from . 
import _random_variable @@ -146,7 +146,7 @@ def transpose(self, *axes: int) -> "Constant": support=self._support.transpose(*axes), ) - def _sample(self, rng: np.random.Generator, size: ShapeArgType = ()) -> _ValueType: + def _sample(self, rng: np.random.Generator, size: ShapeLike = ()) -> _ValueType: size = _utils.as_shape(size) if size == (): diff --git a/src/probnum/randvars/_normal.py b/src/probnum/randvars/_normal.py index aef3f6bef..345da6f5d 100644 --- a/src/probnum/randvars/_normal.py +++ b/src/probnum/randvars/_normal.py @@ -11,7 +11,7 @@ from probnum.typing import ( ArrayLikeGetitemArgType, FloatLike, - ShapeArgType, + ShapeLike, ShapeType, ) @@ -328,7 +328,7 @@ def __getitem__(self, key: ArrayLikeGetitemArgType) -> "Normal": cov=cov, ) - def reshape(self, newshape: ShapeArgType) -> "Normal": + def reshape(self, newshape: ShapeLike) -> "Normal": try: reshaped_mean = self.dense_mean.reshape(newshape) except ValueError as exc: diff --git a/src/probnum/randvars/_random_variable.py b/src/probnum/randvars/_random_variable.py index cd9e7b418..006b41f8a 100644 --- a/src/probnum/randvars/_random_variable.py +++ b/src/probnum/randvars/_random_variable.py @@ -9,7 +9,7 @@ ArrayLikeGetitemArgType, DTypeArgType, FloatLike, - ShapeArgType, + ShapeLike, ShapeType, ) @@ -116,7 +116,7 @@ class RandomVariable(Generic[_ValueType]): def __init__( self, - shape: ShapeArgType, + shape: ShapeLike, dtype: DTypeArgType, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[np.random.Generator, ShapeType], _ValueType]] = None, @@ -417,7 +417,7 @@ def in_support(self, x: _ValueType) -> bool: return in_support - def sample(self, rng: np.random.Generator, size: ShapeArgType = ()) -> _ValueType: + def sample(self, rng: np.random.Generator, size: ShapeLike = ()) -> _ValueType: """Draw realizations from a random variable. Parameters @@ -550,7 +550,7 @@ def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable": as_value_type=self.__as_value_type, ) - def reshape(self, newshape: ShapeArgType) -> "RandomVariable": + def reshape(self, newshape: ShapeLike) -> "RandomVariable": """Give a new shape to a random variable. 
Parameters @@ -971,12 +971,10 @@ class DiscreteRandomVariable(RandomVariable[_ValueType]): def __init__( self, - shape: ShapeArgType, + shape: ShapeLike, dtype: DTypeArgType, parameters: Optional[Dict[str, Any]] = None, - sample: Optional[ - Callable[[np.random.Generator, ShapeArgType], _ValueType] - ] = None, + sample: Optional[Callable[[np.random.Generator, ShapeLike], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, pmf: Optional[Callable[[_ValueType], np.float_]] = None, logpmf: Optional[Callable[[_ValueType], np.float_]] = None, @@ -1189,12 +1187,10 @@ class ContinuousRandomVariable(RandomVariable[_ValueType]): def __init__( self, - shape: ShapeArgType, + shape: ShapeLike, dtype: DTypeArgType, parameters: Optional[Dict[str, Any]] = None, - sample: Optional[ - Callable[[np.random.Generator, ShapeArgType], _ValueType] - ] = None, + sample: Optional[Callable[[np.random.Generator, ShapeLike], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, pdf: Optional[Callable[[_ValueType], np.float_]] = None, logpdf: Optional[Callable[[_ValueType], np.float_]] = None, diff --git a/src/probnum/typing.py b/src/probnum/typing.py index 3f1adb5c2..33052870d 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -44,10 +44,12 @@ Values of this type should always be converteg into :class:`float`\\ s before further internal processing.""" -ShapeArgType = Union[IntLike, Iterable[IntLike]] -"""Type of a public API argument for supplying a shape. Values of this type should -always be converted into :class:`ShapeType` using the function -:func:`probnum.utils.as_shape` before further internal processing.""" +# Array Utilities +ShapeLike = Union[IntLike, Iterable[IntLike]] +"""Type of a public API argument for supplying a shape. + +Values of this type should always be converted into :class:`ShapeType` using the +function :func:`probnum.utils.as_shape` before further internal processing.""" ScalarArgType = Union[int, float, complex, numbers.Number, np.number] """Type of a public API argument for supplying a scalar value. Values of this type diff --git a/src/probnum/utils/argutils.py b/src/probnum/utils/argutils.py index 25668536d..74935e889 100644 --- a/src/probnum/utils/argutils.py +++ b/src/probnum/utils/argutils.py @@ -5,12 +5,12 @@ import numpy as np -from probnum.typing import DTypeArgType, ScalarArgType, ShapeArgType, ShapeType +from probnum.typing import DTypeArgType, ScalarArgType, ShapeLike, ShapeType __all__ = ["as_shape", "as_numpy_scalar"] -def as_shape(x: ShapeArgType, ndim: Optional[numbers.Integral] = None) -> ShapeType: +def as_shape(x: ShapeLike, ndim: Optional[numbers.Integral] = None) -> ShapeType: """Convert a shape representation into a shape defined as a tuple of ints. 
Parameters diff --git a/tests/test_randvars/test_arithmetic/conftest.py b/tests/test_randvars/test_arithmetic/conftest.py index dd3cdfa65..7522c99ae 100644 --- a/tests/test_randvars/test_arithmetic/conftest.py +++ b/tests/test_randvars/test_arithmetic/conftest.py @@ -4,7 +4,7 @@ from probnum import linops, randvars from probnum.problems.zoo.linalg import random_spd_matrix -from probnum.typing import ShapeArgType +from probnum.typing import ShapeLike @pytest.fixture @@ -13,13 +13,13 @@ def rng() -> np.random.Generator: @pytest.fixture -def constant(shape_const: ShapeArgType, rng: np.random.Generator) -> randvars.Constant: +def constant(shape_const: ShapeLike, rng: np.random.Generator) -> randvars.Constant: return randvars.Constant(support=rng.normal(size=shape_const)) @pytest.fixture def multivariate_normal( - shape: ShapeArgType, precompute_cov_cholesky: bool, rng: np.random.Generator + shape: ShapeLike, precompute_cov_cholesky: bool, rng: np.random.Generator ) -> randvars.Normal: rv = randvars.Normal( mean=rng.normal(size=shape), @@ -32,7 +32,7 @@ def multivariate_normal( @pytest.fixture def matrixvariate_normal( - shape: ShapeArgType, precompute_cov_cholesky: bool, rng: np.random.Generator + shape: ShapeLike, precompute_cov_cholesky: bool, rng: np.random.Generator ) -> randvars.Normal: rv = randvars.Normal( mean=rng.normal(size=shape), @@ -48,7 +48,7 @@ def matrixvariate_normal( @pytest.fixture def symmetric_matrixvariate_normal( - shape: ShapeArgType, precompute_cov_cholesky: bool, rng: np.random.Generator + shape: ShapeLike, precompute_cov_cholesky: bool, rng: np.random.Generator ) -> randvars.Normal: rv = randvars.Normal( mean=random_spd_matrix(dim=shape[0], rng=rng), diff --git a/tests/test_randvars/test_arithmetic/test_generic.py b/tests/test_randvars/test_arithmetic/test_generic.py index 90bb03553..70109e448 100644 --- a/tests/test_randvars/test_arithmetic/test_generic.py +++ b/tests/test_randvars/test_arithmetic/test_generic.py @@ -5,11 +5,11 @@ from numpy.typing import DTypeLike from probnum import randvars -from probnum.typing import ShapeArgType +from probnum.typing import ShapeLike @pytest.mark.parametrize("shape,dtype", [((5,), np.single), ((2, 3), np.double)]) -def test_generic_randvar_dtype_shape_inference(shape: ShapeArgType, dtype: DTypeLike): +def test_generic_randvar_dtype_shape_inference(shape: ShapeLike, dtype: DTypeLike): x = randvars.RandomVariable( shape=shape, dtype=dtype, sample=lambda size, rng: np.zeros(size + shape) ) From 5e579189c0c3108d41c1f5fbc66af833d30ab01e Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:43:39 +0100 Subject: [PATCH 04/15] Documentation for `DTypeLike` --- src/probnum/typing.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/probnum/typing.py b/src/probnum/typing.py index 33052870d..889e3b381 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -19,7 +19,7 @@ import numpy as np import scipy.sparse from numpy.typing import ArrayLike # pylint: disable=unused-import -from numpy.typing import DTypeLike as DTypeArgType # pylint: disable=unused-import +from numpy.typing import DTypeLike as _NumPyDTypeLike ######################################################################################## # API Types @@ -51,6 +51,12 @@ Values of this type should always be converted into :class:`ShapeType` using the function :func:`probnum.utils.as_shape` before further internal processing.""" +DTypeLike = _NumPyDTypeLike +"""Type of a public API argument for supplying an array's dtype. 
+ +Values of this type should always be converted into :class:`np.dtype`\\ s before further +internal processing.""" + ScalarArgType = Union[int, float, complex, numbers.Number, np.number] """Type of a public API argument for supplying a scalar value. Values of this type should always be converted into :class:`np.generic` using the function From cbaecd3be07420a98411c0462ae513634bd287ee Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:51:03 +0100 Subject: [PATCH 05/15] Rename `ArrayLikeGetitemArgType` -> `ArrayIndicesLike` --- .../filtsmooth/_timeseriesposterior.py | 4 ++-- .../markov/continuous/_diffusions.py | 8 +++---- src/probnum/randvars/_constant.py | 4 ++-- src/probnum/randvars/_normal.py | 4 ++-- src/probnum/randvars/_random_variable.py | 4 ++-- src/probnum/typing.py | 23 +++++++++++-------- 6 files changed, 25 insertions(+), 22 deletions(-) diff --git a/src/probnum/filtsmooth/_timeseriesposterior.py b/src/probnum/filtsmooth/_timeseriesposterior.py index 015e1d156..0f19a2480 100644 --- a/src/probnum/filtsmooth/_timeseriesposterior.py +++ b/src/probnum/filtsmooth/_timeseriesposterior.py @@ -7,7 +7,7 @@ from probnum import randvars from probnum.typing import ( - ArrayLikeGetitemArgType, + ArrayIndicesLike, DenseOutputLocationArgType, FloatLike, IntLike, @@ -81,7 +81,7 @@ def __len__(self) -> int: """ return len(self.locations) - def __getitem__(self, idx: ArrayLikeGetitemArgType) -> randvars.RandomVariable: + def __getitem__(self, idx: ArrayIndicesLike) -> randvars.RandomVariable: return self.states[idx] def __call__(self, t: DenseOutputLocationArgType) -> DenseOutputValueType: diff --git a/src/probnum/randprocs/markov/continuous/_diffusions.py b/src/probnum/randprocs/markov/continuous/_diffusions.py index f659cb5c3..a84d3e722 100644 --- a/src/probnum/randprocs/markov/continuous/_diffusions.py +++ b/src/probnum/randprocs/markov/continuous/_diffusions.py @@ -9,7 +9,7 @@ from probnum import randvars from probnum.typing import ( - ArrayLikeGetitemArgType, + ArrayIndicesLike, DenseOutputLocationArgType, FloatLike, ToleranceDiffusionType, @@ -31,7 +31,7 @@ def __call__( @abc.abstractmethod def __getitem__( - self, idx: ArrayLikeGetitemArgType + self, idx: ArrayIndicesLike ) -> Union[ToleranceDiffusionType, np.ndarray]: raise NotImplementedError @@ -74,7 +74,7 @@ def __call__( return self.diffusion * np.ones_like(t) def __getitem__( - self, idx: ArrayLikeGetitemArgType + self, idx: ArrayIndicesLike ) -> Union[ToleranceDiffusionType, np.ndarray]: if self.diffusion is None: raise NotImplementedError( @@ -159,7 +159,7 @@ def __call__( return self[indices] def __getitem__( - self, idx: ArrayLikeGetitemArgType + self, idx: ArrayIndicesLike ) -> Union[ToleranceDiffusionType, np.ndarray]: if len(self._locations) <= 1: raise NotImplementedError( diff --git a/src/probnum/randvars/_constant.py b/src/probnum/randvars/_constant.py index 4dc8e4e79..e90ac0c9f 100644 --- a/src/probnum/randvars/_constant.py +++ b/src/probnum/randvars/_constant.py @@ -6,7 +6,7 @@ from probnum import config, linops from probnum import utils as _utils -from probnum.typing import ArrayLikeGetitemArgType, ShapeLike, ShapeType +from probnum.typing import ArrayIndicesLike, ShapeLike, ShapeType from . 
import _random_variable @@ -121,7 +121,7 @@ def support(self) -> _ValueType: """Constant value taken by the random variable.""" return self._support - def __getitem__(self, key: ArrayLikeGetitemArgType) -> "Constant": + def __getitem__(self, key: ArrayIndicesLike) -> "Constant": """(Advanced) indexing, masking and slicing. This method supports all modes of array indexing presented in diff --git a/src/probnum/randvars/_normal.py b/src/probnum/randvars/_normal.py index 345da6f5d..b5fa3655e 100644 --- a/src/probnum/randvars/_normal.py +++ b/src/probnum/randvars/_normal.py @@ -9,7 +9,7 @@ from probnum import config, linops from probnum import utils as _utils from probnum.typing import ( - ArrayLikeGetitemArgType, + ArrayIndicesLike, FloatLike, ShapeLike, ShapeType, @@ -292,7 +292,7 @@ def dense_cov(self) -> Union[np.floating, np.ndarray]: else: return self.cov - def __getitem__(self, key: ArrayLikeGetitemArgType) -> "Normal": + def __getitem__(self, key: ArrayIndicesLike) -> "Normal": """Marginalization in multi- and matrixvariate normal random variables, expressed as (advanced) indexing, masking and slicing. diff --git a/src/probnum/randvars/_random_variable.py b/src/probnum/randvars/_random_variable.py index 006b41f8a..c4bb89c0e 100644 --- a/src/probnum/randvars/_random_variable.py +++ b/src/probnum/randvars/_random_variable.py @@ -6,7 +6,7 @@ from probnum import utils as _utils from probnum.typing import ( - ArrayLikeGetitemArgType, + ArrayIndicesLike, DTypeArgType, FloatLike, ShapeLike, @@ -537,7 +537,7 @@ def quantile(self, p: FloatLike) -> _ValueType: return quantile - def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable": + def __getitem__(self, key: ArrayIndicesLike) -> "RandomVariable": return RandomVariable( shape=np.empty(shape=self.shape)[key].shape, dtype=self.dtype, diff --git a/src/probnum/typing.py b/src/probnum/typing.py index 889e3b381..821918b84 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -57,6 +57,19 @@ Values of this type should always be converted into :class:`np.dtype`\\ s before further internal processing.""" +_ArrayIndexLike = Union[ + int, + slice, + type(Ellipsis), + None, + np.newaxis, + np.ndarray, +] +ArrayIndicesLike = Union[_ArrayIndexLike, Tuple[_ArrayIndexLike, ...]] +"""Type of the argument to the :meth:`__getitem__` method of a NumPy-like array type +such as :class:`np.ndarray`, :class:`probnum.linops.LinearOperator` or +:class:`probnum.randvars.RandomVariable`.""" + ScalarArgType = Union[int, float, complex, numbers.Number, np.number] """Type of a public API argument for supplying a scalar value. 
Values of this type should always be converted into :class:`np.generic` using the function @@ -69,16 +82,6 @@ ] """Type of a public API argument for supplying a matrix or finite-dimensional linear operator.""" -ArrayLikeGetitemArgType = Union[ - int, - slice, - np.ndarray, - np.newaxis, - None, - type(Ellipsis), - Tuple[Union[int, slice, np.ndarray, np.newaxis, None, type(Ellipsis)], ...], -] - ######################################################################################## # Other Types ######################################################################################## From 5db958d38a16882ccd54f40c5979a6a60d2c5086 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:54:28 +0100 Subject: [PATCH 06/15] Rename `ScalarArgType` -> `ScalarLike` --- .../stopping_criteria/_posterior_contraction.py | 6 +++--- .../solvers/stopping_criteria/_residual_norm.py | 6 +++--- src/probnum/linops/_arithmetic.py | 14 +++++++------- src/probnum/linops/_arithmetic_fallbacks.py | 4 ++-- src/probnum/linops/_linear_operator.py | 4 ++-- src/probnum/linops/_scaling.py | 4 ++-- .../randprocs/kernels/_exponentiated_quadratic.py | 4 ++-- src/probnum/randprocs/kernels/_linear.py | 4 ++-- src/probnum/randprocs/kernels/_matern.py | 6 +++--- src/probnum/randprocs/kernels/_polynomial.py | 4 ++-- .../randprocs/kernels/_rational_quadratic.py | 6 +++--- src/probnum/randprocs/kernels/_white_noise.py | 4 ++-- src/probnum/typing.py | 10 ++++++---- src/probnum/utils/argutils.py | 4 ++-- 14 files changed, 41 insertions(+), 39 deletions(-) diff --git a/src/probnum/linalg/solvers/stopping_criteria/_posterior_contraction.py b/src/probnum/linalg/solvers/stopping_criteria/_posterior_contraction.py index cc5172224..401f4f115 100644 --- a/src/probnum/linalg/solvers/stopping_criteria/_posterior_contraction.py +++ b/src/probnum/linalg/solvers/stopping_criteria/_posterior_contraction.py @@ -3,7 +3,7 @@ import numpy as np import probnum # pylint: disable="unused-import" -from probnum.typing import ScalarArgType +from probnum.typing import ScalarLike from ._linear_solver_stopping_criterion import LinearSolverStoppingCriterion @@ -29,8 +29,8 @@ class PosteriorContractionStoppingCriterion(LinearSolverStoppingCriterion): def __init__( self, qoi: str = "x", - atol: ScalarArgType = 10 ** -5, - rtol: ScalarArgType = 10 ** -5, + atol: ScalarLike = 10 ** -5, + rtol: ScalarLike = 10 ** -5, ): self.qoi = qoi self.atol = probnum.utils.as_numpy_scalar(atol) diff --git a/src/probnum/linalg/solvers/stopping_criteria/_residual_norm.py b/src/probnum/linalg/solvers/stopping_criteria/_residual_norm.py index 57026417d..484db7a18 100644 --- a/src/probnum/linalg/solvers/stopping_criteria/_residual_norm.py +++ b/src/probnum/linalg/solvers/stopping_criteria/_residual_norm.py @@ -3,7 +3,7 @@ import numpy as np import probnum -from probnum.typing import ScalarArgType +from probnum.typing import ScalarLike from ._linear_solver_stopping_criterion import LinearSolverStoppingCriterion @@ -25,8 +25,8 @@ class ResidualNormStoppingCriterion(LinearSolverStoppingCriterion): def __init__( self, - atol: ScalarArgType = 10 ** -5, - rtol: ScalarArgType = 10 ** -5, + atol: ScalarLike = 10 ** -5, + rtol: ScalarLike = 10 ** -5, ): self.atol = probnum.utils.as_numpy_scalar(atol) self.rtol = probnum.utils.as_numpy_scalar(rtol) diff --git a/src/probnum/linops/_arithmetic.py b/src/probnum/linops/_arithmetic.py index d4afa0df6..7a88eb1a6 100644 --- a/src/probnum/linops/_arithmetic.py +++ b/src/probnum/linops/_arithmetic.py @@ -5,7 +5,7 @@ import 
scipy.sparse from probnum import config, utils -from probnum.typing import NotImplementedType, ScalarArgType, ShapeLike +from probnum.typing import NotImplementedType, ScalarLike, ShapeLike from ._arithmetic_fallbacks import ( NegatedLinearOperator, @@ -95,14 +95,14 @@ def matmul(op1: LinearOperator, op2: LinearOperator) -> LinearOperator: ######################################################################################## # Scaling -def _mul_scalar_scaling(scalar: ScalarArgType, scaling: Scaling) -> Scaling: +def _mul_scalar_scaling(scalar: ScalarLike, scaling: Scaling) -> Scaling: if scaling.is_isotropic: return Scaling(scalar * scaling.scalar, shape=scaling.shape) return Scaling(scalar * scaling.factors, shape=scaling.shape) -def _mul_scaling_scalar(scaling: Scaling, scalar: ScalarArgType) -> Scaling: +def _mul_scaling_scalar(scaling: Scaling, scalar: ScalarLike) -> Scaling: if scaling.is_isotropic: return Scaling(scalar * scaling.scalar, shape=scaling.shape) @@ -157,14 +157,14 @@ def _matmul_kronecker_scaling(kronecker: Kronecker, scaling: Scaling) -> Kroneck return NotImplemented -def _mul_scalar_kronecker(scalar: ScalarArgType, kronecker: Kronecker) -> Kronecker: +def _mul_scalar_kronecker(scalar: ScalarLike, kronecker: Kronecker) -> Kronecker: if scalar < 0.0: return NotImplemented sqrt_scalar = np.sqrt(scalar) return Kronecker(A=sqrt_scalar * kronecker.A, B=sqrt_scalar * kronecker.B) -def _mul_kronecker_scalar(kronecker: Kronecker, scalar: ScalarArgType) -> Kronecker: +def _mul_kronecker_scalar(kronecker: Kronecker, scalar: ScalarLike) -> Kronecker: if scalar < 0.0: return NotImplemented sqrt_scalar = np.sqrt(scalar) @@ -213,7 +213,7 @@ def _matmul_idkronecker_scaling( def _mul_scalar_idkronecker( - scalar: ScalarArgType, idkronecker: IdentityKronecker + scalar: ScalarLike, idkronecker: IdentityKronecker ) -> IdentityKronecker: return IdentityKronecker( @@ -222,7 +222,7 @@ def _mul_scalar_idkronecker( def _mul_idkronecker_scalar( - idkronecker: IdentityKronecker, scalar: ScalarArgType + idkronecker: IdentityKronecker, scalar: ScalarLike ) -> IdentityKronecker: return IdentityKronecker( diff --git a/src/probnum/linops/_arithmetic_fallbacks.py b/src/probnum/linops/_arithmetic_fallbacks.py index 0b1a3c49e..ea521c862 100644 --- a/src/probnum/linops/_arithmetic_fallbacks.py +++ b/src/probnum/linops/_arithmetic_fallbacks.py @@ -6,7 +6,7 @@ import numpy as np import probnum.utils -from probnum.typing import NotImplementedType, ScalarArgType +from probnum.typing import NotImplementedType, ScalarLike from ._linear_operator import BinaryOperandType, LinearOperator @@ -18,7 +18,7 @@ class ScaledLinearOperator(LinearOperator): """Linear operator scaled with a scalar.""" - def __init__(self, linop: LinearOperator, scalar: ScalarArgType): + def __init__(self, linop: LinearOperator, scalar: ScalarLike): if not isinstance(linop, LinearOperator): raise TypeError("`linop` must be a `LinearOperator`") diff --git a/src/probnum/linops/_linear_operator.py b/src/probnum/linops/_linear_operator.py index 0350d5277..6570593be 100644 --- a/src/probnum/linops/_linear_operator.py +++ b/src/probnum/linops/_linear_operator.py @@ -8,10 +8,10 @@ import probnum.utils from probnum import config -from probnum.typing import DTypeArgType, ScalarArgType, ShapeLike +from probnum.typing import DTypeArgType, ScalarLike, ShapeLike BinaryOperandType = Union[ - "LinearOperator", ScalarArgType, np.ndarray, scipy.sparse.spmatrix + "LinearOperator", ScalarLike, np.ndarray, scipy.sparse.spmatrix ] # pylint: 
disable="too-many-lines" diff --git a/src/probnum/linops/_scaling.py b/src/probnum/linops/_scaling.py index eccd075ba..0079f2c66 100644 --- a/src/probnum/linops/_scaling.py +++ b/src/probnum/linops/_scaling.py @@ -4,7 +4,7 @@ import numpy as np import probnum.utils -from probnum.typing import DTypeArgType, ScalarArgType, ShapeLike +from probnum.typing import DTypeArgType, ScalarLike, ShapeLike from . import _linear_operator @@ -37,7 +37,7 @@ class Scaling(_linear_operator.LinearOperator): def __init__( self, - factors: Union[np.ndarray, ScalarArgType], + factors: Union[np.ndarray, ScalarLike], shape: Optional[ShapeLike] = None, dtype: Optional[DTypeArgType] = None, ): diff --git a/src/probnum/randprocs/kernels/_exponentiated_quadratic.py b/src/probnum/randprocs/kernels/_exponentiated_quadratic.py index 0d823b9fc..3bdb442c1 100644 --- a/src/probnum/randprocs/kernels/_exponentiated_quadratic.py +++ b/src/probnum/randprocs/kernels/_exponentiated_quadratic.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntLike, ScalarArgType +from probnum.typing import IntLike, ScalarLike from ._kernel import IsotropicMixin, Kernel @@ -46,7 +46,7 @@ class ExpQuad(Kernel, IsotropicMixin): [1.92874985e-22, 3.72665317e-06, 1.00000000e+00]]) """ - def __init__(self, input_dim: IntLike, lengthscale: ScalarArgType = 1.0): + def __init__(self, input_dim: IntLike, lengthscale: ScalarLike = 1.0): self.lengthscale = _utils.as_numpy_scalar(lengthscale) super().__init__(input_dim=input_dim) diff --git a/src/probnum/randprocs/kernels/_linear.py b/src/probnum/randprocs/kernels/_linear.py index 9d99910f0..1eb6111b9 100644 --- a/src/probnum/randprocs/kernels/_linear.py +++ b/src/probnum/randprocs/kernels/_linear.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntLike, ScalarArgType +from probnum.typing import IntLike, ScalarLike from ._kernel import Kernel @@ -40,7 +40,7 @@ class Linear(Kernel): [ 8., 13.]]) """ - def __init__(self, input_dim: IntLike, constant: ScalarArgType = 0.0): + def __init__(self, input_dim: IntLike, constant: ScalarLike = 0.0): self.constant = _utils.as_numpy_scalar(constant) super().__init__(input_dim=input_dim) diff --git a/src/probnum/randprocs/kernels/_matern.py b/src/probnum/randprocs/kernels/_matern.py index d36259dd5..229101502 100644 --- a/src/probnum/randprocs/kernels/_matern.py +++ b/src/probnum/randprocs/kernels/_matern.py @@ -7,7 +7,7 @@ import scipy.special import probnum.utils as _utils -from probnum.typing import IntLike, ScalarArgType +from probnum.typing import IntLike, ScalarLike from ._kernel import IsotropicMixin, Kernel @@ -64,8 +64,8 @@ class Matern(Kernel, IsotropicMixin): def __init__( self, input_dim: IntLike, - lengthscale: ScalarArgType = 1.0, - nu: ScalarArgType = 1.5, + lengthscale: ScalarLike = 1.0, + nu: ScalarLike = 1.5, ): self.lengthscale = _utils.as_numpy_scalar(lengthscale) if not self.lengthscale > 0: diff --git a/src/probnum/randprocs/kernels/_polynomial.py b/src/probnum/randprocs/kernels/_polynomial.py index 01ab4fb55..70996b96c 100644 --- a/src/probnum/randprocs/kernels/_polynomial.py +++ b/src/probnum/randprocs/kernels/_polynomial.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntLike, ScalarArgType +from probnum.typing import IntLike, ScalarLike from ._kernel import Kernel @@ -45,7 +45,7 @@ class Polynomial(Kernel): def __init__( self, input_dim: IntLike, - constant: ScalarArgType = 0.0, + constant: ScalarLike = 
0.0, exponent: IntLike = 1.0, ): self.constant = _utils.as_numpy_scalar(constant) diff --git a/src/probnum/randprocs/kernels/_rational_quadratic.py b/src/probnum/randprocs/kernels/_rational_quadratic.py index 963d7ec62..e3d16769d 100644 --- a/src/probnum/randprocs/kernels/_rational_quadratic.py +++ b/src/probnum/randprocs/kernels/_rational_quadratic.py @@ -5,7 +5,7 @@ import numpy as np import probnum.utils as _utils -from probnum.typing import IntLike, ScalarArgType +from probnum.typing import IntLike, ScalarLike from ._kernel import IsotropicMixin, Kernel @@ -59,8 +59,8 @@ class RatQuad(Kernel, IsotropicMixin): def __init__( self, input_dim: IntLike, - lengthscale: ScalarArgType = 1.0, - alpha: ScalarArgType = 1.0, + lengthscale: ScalarLike = 1.0, + alpha: ScalarLike = 1.0, ): self.lengthscale = _utils.as_numpy_scalar(lengthscale) self.alpha = _utils.as_numpy_scalar(alpha) diff --git a/src/probnum/randprocs/kernels/_white_noise.py b/src/probnum/randprocs/kernels/_white_noise.py index e5220e428..b19aca39a 100644 --- a/src/probnum/randprocs/kernels/_white_noise.py +++ b/src/probnum/randprocs/kernels/_white_noise.py @@ -5,7 +5,7 @@ import numpy as np from probnum import utils as _utils -from probnum.typing import IntLike, ScalarArgType +from probnum.typing import IntLike, ScalarLike from ._kernel import Kernel @@ -26,7 +26,7 @@ class WhiteNoise(Kernel): Noise level :math:`\sigma`. """ - def __init__(self, input_dim: IntLike, sigma: ScalarArgType = 1.0): + def __init__(self, input_dim: IntLike, sigma: ScalarLike = 1.0): self.sigma = _utils.as_numpy_scalar(sigma) self._sigma_sq = self.sigma ** 2 super().__init__(input_dim=input_dim) diff --git a/src/probnum/typing.py b/src/probnum/typing.py index 821918b84..d7ef29b7e 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -70,10 +70,12 @@ such as :class:`np.ndarray`, :class:`probnum.linops.LinearOperator` or :class:`probnum.randvars.RandomVariable`.""" -ScalarArgType = Union[int, float, complex, numbers.Number, np.number] -"""Type of a public API argument for supplying a scalar value. Values of this type -should always be converted into :class:`np.generic` using the function -:func:`probnum.utils.as_scalar` before further internal processing.""" +# Scalars, Arrays and Matrices +ScalarLike = Union[int, float, complex, numbers.Number, np.number] +"""Type of a public API argument for supplying a scalar value. + +Values of this type should always be converted into :class:`np.number`\\ s using the +function :func:`probnum.utils.as_scalar` before further internal processing.""" LinearOperatorArgType = Union[ np.ndarray, diff --git a/src/probnum/utils/argutils.py b/src/probnum/utils/argutils.py index 74935e889..24deaf9f8 100644 --- a/src/probnum/utils/argutils.py +++ b/src/probnum/utils/argutils.py @@ -5,7 +5,7 @@ import numpy as np -from probnum.typing import DTypeArgType, ScalarArgType, ShapeLike, ShapeType +from probnum.typing import DTypeArgType, ScalarLike, ShapeLike, ShapeType __all__ = ["as_shape", "as_numpy_scalar"] @@ -42,7 +42,7 @@ def as_shape(x: ShapeLike, ndim: Optional[numbers.Integral] = None) -> ShapeType return shape -def as_numpy_scalar(x: ScalarArgType, dtype: DTypeArgType = None) -> np.generic: +def as_numpy_scalar(x: ScalarLike, dtype: DTypeArgType = None) -> np.generic: """Convert a scalar into a NumPy scalar. 
Parameters From 69223b8ebd75add800185b807bd90c5f90370e6a Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:55:59 +0100 Subject: [PATCH 07/15] Documentation for `ArrayLike` --- src/probnum/typing.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/probnum/typing.py b/src/probnum/typing.py index d7ef29b7e..36d2311b9 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -18,7 +18,7 @@ import numpy as np import scipy.sparse -from numpy.typing import ArrayLike # pylint: disable=unused-import +from numpy.typing import ArrayLike as _NumPyArrayLike from numpy.typing import DTypeLike as _NumPyDTypeLike ######################################################################################## @@ -77,6 +77,12 @@ Values of this type should always be converted into :class:`np.number`\\ s using the function :func:`probnum.utils.as_scalar` before further internal processing.""" +ArrayLike = _NumPyArrayLike +"""Type of a public API argument for supplying an array. + +Values of this type should always be converted into :class:`np.ndarray`\\ s using +the function :func:`np.asarray` before further internal processing.""" + LinearOperatorArgType = Union[ np.ndarray, scipy.sparse.spmatrix, From 985c272f70d6dacc6d79795d7fccf126ee4c2fe4 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 11:59:06 +0100 Subject: [PATCH 08/15] Rename `LinearOperatorArgType` -> `LinearOperatorLike` --- .../adding_to_the_api_documentation.ipynb | 14 +++++++------- src/probnum/linalg/_problinsolve.py | 14 +++++++------- .../problems/zoo/linalg/_random_linear_system.py | 4 ++-- src/probnum/typing.py | 10 +++++++--- 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/docs/source/development/adding_to_the_api_documentation.ipynb b/docs/source/development/adding_to_the_api_documentation.ipynb index ccfa012ea..4dc1a8b14 100644 --- a/docs/source/development/adding_to_the_api_documentation.ipynb +++ b/docs/source/development/adding_to_the_api_documentation.ipynb @@ -45,27 +45,27 @@ "import probnum # pylint: disable=unused-import\n", "from probnum import linops, randvars, utils\n", "from probnum.linalg.solvers.matrixbased import SymmetricMatrixBasedSolver\n", - "from probnum.typing import LinearOperatorArgType\n", + "from probnum.typing import LinearOperatorLike\n", "\n", "# pylint: disable=too-many-branches\n", "\n", "\n", "def problinsolve(\n", " A: Union[\n", - " LinearOperatorArgType,\n", - " \"randvars.RandomVariable[LinearOperatorArgType]\",\n", + " LinearOperatorLike,\n", + " \"randvars.RandomVariable[LinearOperatorLike]\",\n", " ],\n", " b: Union[np.ndarray, \"randvars.RandomVariable[np.ndarray]\"],\n", " A0: Optional[\n", " Union[\n", - " LinearOperatorArgType,\n", - " \"randvars.RandomVariable[LinearOperatorArgType]\",\n", + " LinearOperatorLike,\n", + " \"randvars.RandomVariable[LinearOperatorLike]\",\n", " ]\n", " ] = None,\n", " Ainv0: Optional[\n", " Union[\n", - " LinearOperatorArgType,\n", - " \"randvars.RandomVariable[LinearOperatorArgType]\",\n", + " LinearOperatorLike,\n", + " \"randvars.RandomVariable[LinearOperatorLike]\",\n", " ]\n", " ] = None,\n", " x0: Optional[Union[np.ndarray, \"randvars.RandomVariable[np.ndarray]\"]] = None,\n", diff --git a/src/probnum/linalg/_problinsolve.py b/src/probnum/linalg/_problinsolve.py index b251b9705..b7c0f9c93 100644 --- a/src/probnum/linalg/_problinsolve.py +++ b/src/probnum/linalg/_problinsolve.py @@ -15,27 +15,27 @@ import probnum # pylint: disable=unused-import from probnum import linops, 
randvars, utils from probnum.linalg.solvers.matrixbased import SymmetricMatrixBasedSolver -from probnum.typing import LinearOperatorArgType +from probnum.typing import LinearOperatorLike # pylint: disable=too-many-branches def problinsolve( A: Union[ - LinearOperatorArgType, - "randvars.RandomVariable[LinearOperatorArgType]", + LinearOperatorLike, + "randvars.RandomVariable[LinearOperatorLike]", ], b: Union[np.ndarray, "randvars.RandomVariable[np.ndarray]"], A0: Optional[ Union[ - LinearOperatorArgType, - "randvars.RandomVariable[LinearOperatorArgType]", + LinearOperatorLike, + "randvars.RandomVariable[LinearOperatorLike]", ] ] = None, Ainv0: Optional[ Union[ - LinearOperatorArgType, - "randvars.RandomVariable[LinearOperatorArgType]", + LinearOperatorLike, + "randvars.RandomVariable[LinearOperatorLike]", ] ] = None, x0: Optional[Union[np.ndarray, "randvars.RandomVariable[np.ndarray]"]] = None, diff --git a/src/probnum/problems/zoo/linalg/_random_linear_system.py b/src/probnum/problems/zoo/linalg/_random_linear_system.py index 29086d825..6d888d997 100644 --- a/src/probnum/problems/zoo/linalg/_random_linear_system.py +++ b/src/probnum/problems/zoo/linalg/_random_linear_system.py @@ -6,13 +6,13 @@ import scipy.sparse from probnum import linops, problems, randvars -from probnum.typing import LinearOperatorArgType +from probnum.typing import LinearOperatorLike def random_linear_system( rng: np.random.Generator, matrix: Union[ - LinearOperatorArgType, + LinearOperatorLike, Callable[ [np.random.Generator, Optional[Any]], Union[np.ndarray, scipy.sparse.spmatrix, linops.LinearOperator], diff --git a/src/probnum/typing.py b/src/probnum/typing.py index 36d2311b9..c7d1e8235 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -83,12 +83,16 @@ Values of this type should always be converted into :class:`np.ndarray`\\ s using the function :func:`np.asarray` before further internal processing.""" -LinearOperatorArgType = Union[ - np.ndarray, +LinearOperatorLike = Union[ + ArrayLike, scipy.sparse.spmatrix, "probnum.linops.LinearOperator", ] -"""Type of a public API argument for supplying a matrix or finite-dimensional linear operator.""" +"""Type of a public API argument for supplying a finite-dimensional linear operator. 
+ +Values of this type should always be converted into :class:`probnum.linops.\\ +LinearOperator`\\ s using the function :func:`probnum.linops.aslinop` before further +internal processing.""" ######################################################################################## # Other Types From ce23e58e8258e3487b57fcb1d6f35dc1e56b98f7 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 12:07:08 +0100 Subject: [PATCH 09/15] Remove `ToleranceDiffusionType` and `DenseOutputLocationArgType` --- src/probnum/diffeq/_odesolution.py | 7 ++-- .../diffeq/odefilter/_odefilter_solution.py | 7 ++-- .../_wrapped_scipy_odesolution.py | 4 +- src/probnum/diffeq/stepsize/_steprule.py | 20 ++++------ .../filtsmooth/_timeseriesposterior.py | 14 ++----- .../filtsmooth/gaussian/_kalmanposterior.py | 15 +++----- .../particle/_particle_filter_posterior.py | 6 +-- .../markov/continuous/_diffusions.py | 37 +++++-------------- src/probnum/typing.py | 17 --------- 9 files changed, 37 insertions(+), 90 deletions(-) diff --git a/src/probnum/diffeq/_odesolution.py b/src/probnum/diffeq/_odesolution.py index aa892100f..8e4db2e0c 100644 --- a/src/probnum/diffeq/_odesolution.py +++ b/src/probnum/diffeq/_odesolution.py @@ -10,8 +10,7 @@ import numpy as np from probnum import filtsmooth, randvars -from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatLike, IntLike, ShapeLike +from probnum.typing import ArrayLike, FloatLike, IntLike, ShapeLike class ODESolution(filtsmooth.TimeSeriesPosterior): @@ -60,7 +59,7 @@ def __getitem__(self, idx: int) -> randvars.RandomVariable: def sample( self, rng: np.random.Generator, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, size: Optional[ShapeLike] = (), ) -> np.ndarray: """Sample from the ODE solution. @@ -83,7 +82,7 @@ def sample( def transform_base_measure_realizations( self, base_measure_realizations: np.ndarray, - t: DenseOutputLocationArgType, + t: ArrayLike, ) -> np.ndarray: raise NotImplementedError( "Transforming base measure realizations is not implemented." diff --git a/src/probnum/diffeq/odefilter/_odefilter_solution.py b/src/probnum/diffeq/odefilter/_odefilter_solution.py index 5068aa4f8..d611eafd0 100644 --- a/src/probnum/diffeq/odefilter/_odefilter_solution.py +++ b/src/probnum/diffeq/odefilter/_odefilter_solution.py @@ -6,8 +6,7 @@ from probnum import filtsmooth, randvars, utils from probnum.diffeq import _odesolution -from probnum.filtsmooth._timeseriesposterior import DenseOutputLocationArgType -from probnum.typing import FloatLike, IntLike, ShapeLike +from probnum.typing import ArrayLike, FloatLike, IntLike, ShapeLike class ODEFilterSolution(_odesolution.ODESolution): @@ -102,7 +101,7 @@ def interpolate( def sample( self, rng: np.random.Generator, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, size: Optional[ShapeLike] = (), ) -> np.ndarray: @@ -116,7 +115,7 @@ def sample( def transform_base_measure_realizations( self, base_measure_realizations: np.ndarray, - t: DenseOutputLocationArgType = None, + t: ArrayLike = None, ) -> np.ndarray: errormsg = ( "The ODEFilterSolution does not implement transformation of realizations of a base measure." 
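For the `LinearOperatorLike` alias documented a few hunks above, the stated convention is normalization through `probnum.linops.aslinop`. A brief sketch, not part of the patch, of what the alias admits; it assumes `linops.Identity` accepts an integer shape, as its `ShapeLike` annotation above suggests:

```python
import numpy as np
import scipy.sparse

from probnum import linops

operands = [
    np.eye(3),                               # ArrayLike
    scipy.sparse.identity(3, format="csr"),  # SciPy sparse matrix
    linops.Identity(3),                      # already a LinearOperator
]

# Every `LinearOperatorLike` value is converted to a `LinearOperator`.
ops = [linops.aslinop(A) for A in operands]
assert all(isinstance(op, linops.LinearOperator) for op in ops)
```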
diff --git a/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_odesolution.py b/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_odesolution.py index 147dcc565..768bf77f7 100644 --- a/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_odesolution.py +++ b/src/probnum/diffeq/perturbed/scipy_wrapper/_wrapped_scipy_odesolution.py @@ -5,7 +5,7 @@ from probnum import randvars from probnum.diffeq import _odesolution from probnum.filtsmooth._timeseriesposterior import DenseOutputValueType -from probnum.typing import DenseOutputLocationArgType +from probnum.typing import ArrayLike class WrappedScipyODESolution(_odesolution.ODESolution): @@ -19,7 +19,7 @@ def __init__(self, scipy_solution: OdeSolution, rvs: list): rv_states = randvars._RandomVariableList(rvs) super().__init__(locations=scipy_solution.ts, states=rv_states) - def __call__(self, t: DenseOutputLocationArgType) -> DenseOutputValueType: + def __call__(self, t: ArrayLike) -> DenseOutputValueType: """Evaluate the time-continuous solution at time t. Parameters diff --git a/src/probnum/diffeq/stepsize/_steprule.py b/src/probnum/diffeq/stepsize/_steprule.py index 605a5bca7..0cfee8407 100644 --- a/src/probnum/diffeq/stepsize/_steprule.py +++ b/src/probnum/diffeq/stepsize/_steprule.py @@ -5,7 +5,7 @@ import numpy as np -from probnum.typing import FloatLike, IntLike, ToleranceDiffusionType +from probnum.typing import ArrayLike, FloatLike, IntLike class StepRule(ABC): @@ -35,9 +35,7 @@ def is_accepted(self, scaled_error: FloatLike): raise NotImplementedError @abstractmethod - def errorest_to_norm( - self, errorest: ToleranceDiffusionType, reference_state: np.ndarray - ): + def errorest_to_norm(self, errorest: ArrayLike, reference_state: np.ndarray): """Computes the norm of error per tolerance (usually referred to as 'E'). The norm is usually the current error estimate normalised with @@ -66,10 +64,8 @@ def is_accepted(self, scaled_error: FloatLike): """Always True.""" return True - def errorest_to_norm( - self, errorest: ToleranceDiffusionType, reference_state: np.ndarray - ): - pass + def errorest_to_norm(self, errorest: ArrayLike, reference_state: np.ndarray): + pass # Once we have other controls, e.g. PI control, we can rename this into ProportionalControl. 
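With `atol` and `rtol` now typed as `ArrayLike`, scalar and per-dimension tolerances are both expressible; they enter the scaled error elementwise in `AdaptiveSteps.errorest_to_norm` (see the following hunk). A rough numerical sketch, not part of the patch; the final reduction to a scalar is an assumption (dimension-normalized Euclidean norm) rather than a quote of the implementation:

```python
import numpy as np

atol = 1e-6                              # scalar absolute tolerance
rtol = np.array([1e-5, 1e-3])            # per-dimension relative tolerance
reference_state = np.array([2.0, 0.5])
errorest = np.array([1e-7, 2e-4])

tolerance = atol + rtol * reference_state  # elementwise, as in AdaptiveSteps
ratio = errorest / tolerance
# Assumed reduction to a single scaled error 'E':
E = np.linalg.norm(ratio) / np.sqrt(ratio.size)
```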
@@ -93,8 +89,8 @@ class AdaptiveSteps(StepRule): def __init__( self, firststep: FloatLike, - atol: ToleranceDiffusionType, - rtol: ToleranceDiffusionType, + atol: ArrayLike, + rtol: ArrayLike, limitchange: Optional[Tuple[FloatLike]] = (0.2, 10.0), safetyscale: Optional[FloatLike] = 0.95, minstep: Optional[FloatLike] = 1e-15, @@ -136,9 +132,7 @@ def suggest( def is_accepted(self, scaled_error: FloatLike): return scaled_error < 1 - def errorest_to_norm( - self, errorest: ToleranceDiffusionType, reference_state: np.ndarray - ): + def errorest_to_norm(self, errorest: ArrayLike, reference_state: np.ndarray): tolerance = self.atol + self.rtol * reference_state ratio = errorest / tolerance dim = len(ratio) if ratio.ndim > 0 else 1 diff --git a/src/probnum/filtsmooth/_timeseriesposterior.py b/src/probnum/filtsmooth/_timeseriesposterior.py index 0f19a2480..2a4f0a89a 100644 --- a/src/probnum/filtsmooth/_timeseriesposterior.py +++ b/src/probnum/filtsmooth/_timeseriesposterior.py @@ -6,13 +6,7 @@ import numpy as np from probnum import randvars -from probnum.typing import ( - ArrayIndicesLike, - DenseOutputLocationArgType, - FloatLike, - IntLike, - ShapeLike, -) +from probnum.typing import ArrayIndicesLike, ArrayLike, FloatLike, IntLike, ShapeLike DenseOutputValueType = Union[randvars.RandomVariable, randvars._RandomVariableList] """Dense evaluation of a TimeSeriesPosterior returns a RandomVariable if evaluated at a single location, @@ -84,7 +78,7 @@ def __len__(self) -> int: def __getitem__(self, idx: ArrayIndicesLike) -> randvars.RandomVariable: return self.states[idx] - def __call__(self, t: DenseOutputLocationArgType) -> DenseOutputValueType: + def __call__(self, t: ArrayLike) -> DenseOutputValueType: """Evaluate the time-continuous posterior at location `t` Algorithm: @@ -176,7 +170,7 @@ def interpolate( def sample( self, rng: np.random.Generator, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, size: Optional[ShapeLike] = (), ) -> np.ndarray: """Draw samples from the filtering/smoothing posterior. @@ -213,7 +207,7 @@ def sample( def transform_base_measure_realizations( self, base_measure_realizations: np.ndarray, - t: Optional[DenseOutputLocationArgType], + t: Optional[ArrayLike], ) -> np.ndarray: """Transform a set of realizations from a base measure into realizations from the posterior. diff --git a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py index a9ce4dc80..2f1359bdb 100644 --- a/src/probnum/filtsmooth/gaussian/_kalmanposterior.py +++ b/src/probnum/filtsmooth/gaussian/_kalmanposterior.py @@ -12,12 +12,7 @@ from probnum import randprocs, randvars, utils from probnum.filtsmooth import _timeseriesposterior from probnum.filtsmooth.gaussian import approx -from probnum.typing import ( - DenseOutputLocationArgType, - FloatLike, - IntLike, - ShapeLike, -) +from probnum.typing import ArrayLike, FloatLike, IntLike, ShapeLike GaussMarkovPriorTransitionArgType = Union[ randprocs.markov.discrete.LinearGaussian, @@ -69,7 +64,7 @@ def interpolate( def sample( self, rng: np.random.Generator, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, size: Optional[ShapeLike] = (), ) -> np.ndarray: @@ -108,7 +103,7 @@ def sample( def transform_base_measure_realizations( self, base_measure_realizations: np.ndarray, - t: DenseOutputLocationArgType, + t: ArrayLike, ) -> np.ndarray: """Transform samples from a base measure to samples from the KalmanPosterior. 
@@ -427,7 +422,7 @@ def interpolate( def sample( self, rng: np.random.Generator, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, size: Optional[ShapeLike] = (), ) -> np.ndarray: # If this error would not be thrown here, trying to sample from a FilteringPosterior @@ -441,7 +436,7 @@ def sample( def transform_base_measure_realizations( self, base_measure_realizations: np.ndarray, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, ) -> np.ndarray: raise NotImplementedError( "Transforming base measure realizations is not implemented." diff --git a/src/probnum/filtsmooth/particle/_particle_filter_posterior.py b/src/probnum/filtsmooth/particle/_particle_filter_posterior.py index a8ae97bcd..f19c6fa52 100644 --- a/src/probnum/filtsmooth/particle/_particle_filter_posterior.py +++ b/src/probnum/filtsmooth/particle/_particle_filter_posterior.py @@ -6,7 +6,7 @@ from probnum import randvars from probnum.filtsmooth import _timeseriesposterior -from probnum.typing import DenseOutputLocationArgType, FloatLike, ShapeLike +from probnum.typing import ArrayLike, FloatLike, ShapeLike class ParticleFilterPosterior(_timeseriesposterior.TimeSeriesPosterior): @@ -23,7 +23,7 @@ def interpolate(self, t: FloatLike) -> randvars.RandomVariable: def sample( self, rng: np.random.Generator, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, size: Optional[ShapeLike] = (), ) -> np.ndarray: raise NotImplementedError("Sampling is not implemented.") @@ -31,7 +31,7 @@ def sample( def transform_base_measure_realizations( self, base_measure_realizations: np.ndarray, - t: Optional[DenseOutputLocationArgType] = None, + t: Optional[ArrayLike] = None, ) -> np.ndarray: raise NotImplementedError( "Transforming base measure realizations is not implemented." diff --git a/src/probnum/randprocs/markov/continuous/_diffusions.py b/src/probnum/randprocs/markov/continuous/_diffusions.py index a84d3e722..99cc3bd33 100644 --- a/src/probnum/randprocs/markov/continuous/_diffusions.py +++ b/src/probnum/randprocs/markov/continuous/_diffusions.py @@ -8,12 +8,7 @@ import scipy.linalg from probnum import randvars -from probnum.typing import ( - ArrayIndicesLike, - DenseOutputLocationArgType, - FloatLike, - ToleranceDiffusionType, -) +from probnum.typing import ArrayIndicesLike, ArrayLike, FloatLike class Diffusion(abc.ABC): @@ -23,16 +18,12 @@ def __repr__(self): raise NotImplementedError @abc.abstractmethod - def __call__( - self, t: DenseOutputLocationArgType - ) -> Union[ToleranceDiffusionType, np.ndarray]: + def __call__(self, t: ArrayLike) -> Union[ArrayLike, np.ndarray]: r"""Evaluate the diffusion :math:`\sigma(t)` at :math:`t`.""" raise NotImplementedError @abc.abstractmethod - def __getitem__( - self, idx: ArrayIndicesLike - ) -> Union[ToleranceDiffusionType, np.ndarray]: + def __getitem__(self, idx: ArrayIndicesLike) -> Union[ArrayLike, np.ndarray]: raise NotImplementedError @abc.abstractmethod @@ -41,7 +32,7 @@ def estimate_locally( meas_rv: randvars.RandomVariable, meas_rv_assuming_zero_previous_cov: randvars.RandomVariable, t: FloatLike, - ) -> ToleranceDiffusionType: + ) -> ArrayLike: r"""Estimate the (local) diffusion and update current (global) estimation in- place. 
@@ -64,18 +55,14 @@ def __init__(self): def __repr__(self): return f"ConstantDiffusion({self.diffusion})" - def __call__( - self, t: DenseOutputLocationArgType - ) -> Union[ToleranceDiffusionType, np.ndarray]: + def __call__(self, t: ArrayLike) -> Union[ArrayLike, np.ndarray]: if self.diffusion is None: raise NotImplementedError( "No diffusions seen yet. Call estimate_locally_and_update_in_place first." ) return self.diffusion * np.ones_like(t) - def __getitem__( - self, idx: ArrayIndicesLike - ) -> Union[ToleranceDiffusionType, np.ndarray]: + def __getitem__(self, idx: ArrayIndicesLike) -> Union[ArrayLike, np.ndarray]: if self.diffusion is None: raise NotImplementedError( "No diffusions seen yet. Call estimate_locally_and_update_in_place first." @@ -88,7 +75,7 @@ def estimate_locally( meas_rv: randvars.RandomVariable, meas_rv_assuming_zero_previous_cov: randvars.RandomVariable, t: FloatLike, - ) -> ToleranceDiffusionType: + ) -> ArrayLike: new_increment = _compute_local_quasi_mle(meas_rv) return new_increment @@ -135,9 +122,7 @@ def __init__(self, t0): def __repr__(self): return f"PiecewiseConstantDiffusion({self.diffusions})" - def __call__( - self, t: DenseOutputLocationArgType - ) -> Union[ToleranceDiffusionType, np.ndarray]: + def __call__(self, t: ArrayLike) -> Union[ArrayLike, np.ndarray]: if len(self._locations) <= 1: raise NotImplementedError( "No diffusions seen yet. Call estimate_locally_and_update_in_place first." @@ -158,9 +143,7 @@ def __call__( return self[indices] - def __getitem__( - self, idx: ArrayIndicesLike - ) -> Union[ToleranceDiffusionType, np.ndarray]: + def __getitem__(self, idx: ArrayIndicesLike) -> Union[ArrayLike, np.ndarray]: if len(self._locations) <= 1: raise NotImplementedError( "No diffusions seen yet. Call estimate_locally_and_update_in_place first." @@ -172,7 +155,7 @@ def estimate_locally( meas_rv: randvars.RandomVariable, meas_rv_assuming_zero_previous_cov: randvars.RandomVariable, t: FloatLike, - ) -> ToleranceDiffusionType: + ) -> ArrayLike: if not t >= self.tmax: raise ValueError( "This time-point is not right of the current rightmost time-point." diff --git a/src/probnum/typing.py b/src/probnum/typing.py index c7d1e8235..cdb5217d0 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -98,21 +98,4 @@ # Other Types ######################################################################################## -ToleranceDiffusionType = Union[FloatLike, np.ndarray] -r"""Type of a quantity that describes tolerances, errors, and diffusions. - -Used for absolute (atol) and relative tolerances (rtol), local error estimates, as well as -(the diagonal entries of diagonal matrices representing) diffusion models. -atol, rtol, and diffusion are usually floats, but can be generalized to arrays -- essentially, -to every :math:`\tau` that allows arithmetic operations such as - -.. math:: \tau + tau * \text{vec}, \text{ or } L \otimes \text{diag}(\tau) - -respectively. Currently, the array-support for diffusions is experimental (at best). 
-""" - -DenseOutputLocationArgType = Union[FloatLike, np.ndarray] -"""TimeSeriesPosteriors and derived classes can be evaluated at a single location 't' -or an array of locations.""" - NotImplementedType = type(NotImplemented) From 29b807385083744a3ee68f7680c542c99463d228 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 12:07:41 +0100 Subject: [PATCH 10/15] Restructure API types --- src/probnum/typing.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/probnum/typing.py b/src/probnum/typing.py index cdb5217d0..2c0667b4e 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -25,8 +25,13 @@ # API Types ######################################################################################## +# Array Utilities ShapeType = Tuple[int, ...] +# Scalars, Arrays and Matrices +ScalarType = np.number +MatrixType = Union[np.ndarray, "probnum.linops.LinearOperator"] + ######################################################################################## # Argument Types ######################################################################################## From a766aa351b9f2eedc06d7362e9a4768673a911af Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 12:07:58 +0100 Subject: [PATCH 11/15] Add annotations future import --- src/probnum/typing.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/probnum/typing.py b/src/probnum/typing.py index 2c0667b4e..7ad2accfc 100644 --- a/src/probnum/typing.py +++ b/src/probnum/typing.py @@ -13,6 +13,8 @@ internal representation of those same objects. """ +from __future__ import annotations + import numbers from typing import Iterable, Tuple, Union From de1c86cd7c0518d9f617b8d88d9d07daeab12ab8 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 12:18:43 +0100 Subject: [PATCH 12/15] `DTypeArgType` -> `DTypeLike` --- src/probnum/linops/_kronecker.py | 8 ++++---- src/probnum/linops/_linear_operator.py | 10 +++++----- src/probnum/linops/_scaling.py | 4 ++-- src/probnum/randprocs/_random_process.py | 4 ++-- src/probnum/randvars/_random_variable.py | 12 ++++++------ src/probnum/utils/argutils.py | 4 ++-- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/probnum/linops/_kronecker.py b/src/probnum/linops/_kronecker.py index 43410bf4d..646ee54ad 100644 --- a/src/probnum/linops/_kronecker.py +++ b/src/probnum/linops/_kronecker.py @@ -3,7 +3,7 @@ import numpy as np -from probnum.typing import DTypeArgType, NotImplementedType +from probnum.typing import DTypeLike, NotImplementedType from . import _linear_operator, _utils @@ -21,7 +21,7 @@ class Symmetrize(_linear_operator.LinearOperator): Data type. 
""" - def __init__(self, n: int, dtype: DTypeArgType = np.double): + def __init__(self, n: int, dtype: DTypeLike = np.double): self._n = n super().__init__( @@ -142,7 +142,7 @@ def __init__(self, A: _utils.LinearOperatorLike, B: _utils.LinearOperatorLike): ) def _astype( - self, dtype: DTypeArgType, order: str, casting: str, copy: bool + self, dtype: DTypeLike, order: str, casting: str, copy: bool ) -> "Kronecker": A_astype = self.A.astype(dtype, order=order, casting=casting, copy=copy) B_astype = self.B.astype(dtype, order=order, casting=casting, copy=copy) @@ -355,7 +355,7 @@ def identical_factors(self) -> bool: return self._identical_factors def _astype( - self, dtype: DTypeArgType, order: str, casting: str, copy: bool + self, dtype: DTypeLike, order: str, casting: str, copy: bool ) -> Union["SymmetricKronecker", _linear_operator.LinearOperator]: if self._identical_factors: A_astype = self.A.astype(dtype, order=order, casting=casting, copy=copy) diff --git a/src/probnum/linops/_linear_operator.py b/src/probnum/linops/_linear_operator.py index 6570593be..f75d8d9cd 100644 --- a/src/probnum/linops/_linear_operator.py +++ b/src/probnum/linops/_linear_operator.py @@ -8,7 +8,7 @@ import probnum.utils from probnum import config -from probnum.typing import DTypeArgType, ScalarLike, ShapeLike +from probnum.typing import DTypeLike, ScalarLike, ShapeLike BinaryOperandType = Union[ "LinearOperator", ScalarLike, np.ndarray, scipy.sparse.spmatrix @@ -102,7 +102,7 @@ class LinearOperator: def __init__( self, shape: ShapeLike, - dtype: DTypeArgType, + dtype: DTypeLike, *, matmul: Callable[[np.ndarray], np.ndarray], rmatmul: Optional[Callable[[np.ndarray], np.ndarray]] = None, @@ -228,7 +228,7 @@ def __call__(self, x: np.ndarray, axis: Optional[int] = None) -> np.ndarray: def astype( self, - dtype: DTypeArgType, + dtype: DTypeLike, order: str = "K", casting: str = "unsafe", subok: bool = True, @@ -828,7 +828,7 @@ class _TypeCastLinearOperator(LinearOperator): def __init__( self, linop: LinearOperator, - dtype: DTypeArgType, + dtype: DTypeLike, order: str = "K", casting: str = "unsafe", copy: bool = True, @@ -980,7 +980,7 @@ class Identity(LinearOperator): def __init__( self, shape: ShapeLike, - dtype: DTypeArgType = np.double, + dtype: DTypeLike = np.double, ): shape = probnum.utils.as_shape(shape) diff --git a/src/probnum/linops/_scaling.py b/src/probnum/linops/_scaling.py index 0079f2c66..6879c60af 100644 --- a/src/probnum/linops/_scaling.py +++ b/src/probnum/linops/_scaling.py @@ -4,7 +4,7 @@ import numpy as np import probnum.utils -from probnum.typing import DTypeArgType, ScalarLike, ShapeLike +from probnum.typing import DTypeLike, ScalarLike, ShapeLike from . 
import _linear_operator @@ -39,7 +39,7 @@ def __init__( self, factors: Union[np.ndarray, ScalarLike], shape: Optional[ShapeLike] = None, - dtype: Optional[DTypeArgType] = None, + dtype: Optional[DTypeLike] = None, ): self._factors = None self._scalar = None diff --git a/src/probnum/randprocs/_random_process.py b/src/probnum/randprocs/_random_process.py index 9522638e4..8b3329b68 100644 --- a/src/probnum/randprocs/_random_process.py +++ b/src/probnum/randprocs/_random_process.py @@ -7,7 +7,7 @@ from probnum import randvars from probnum import utils as _utils -from probnum.typing import DTypeArgType, IntLike, ShapeLike +from probnum.typing import DTypeLike, IntLike, ShapeLike _InputType = TypeVar("InputType") _OutputType = TypeVar("OutputType") @@ -49,7 +49,7 @@ def __init__( self, input_dim: IntLike, output_dim: Optional[IntLike], - dtype: DTypeArgType, + dtype: DTypeLike, ): self._input_dim = np.int_(_utils.as_numpy_scalar(input_dim)) diff --git a/src/probnum/randvars/_random_variable.py b/src/probnum/randvars/_random_variable.py index c4bb89c0e..c40d927ca 100644 --- a/src/probnum/randvars/_random_variable.py +++ b/src/probnum/randvars/_random_variable.py @@ -7,7 +7,7 @@ from probnum import utils as _utils from probnum.typing import ( ArrayIndicesLike, - DTypeArgType, + DTypeLike, FloatLike, ShapeLike, ShapeType, @@ -117,7 +117,7 @@ class RandomVariable(Generic[_ValueType]): def __init__( self, shape: ShapeLike, - dtype: DTypeArgType, + dtype: DTypeLike, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[np.random.Generator, ShapeType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, @@ -758,7 +758,7 @@ def __rpow__(self, other: Any) -> "RandomVariable": return pow_(other, self) @staticmethod - def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype: + def infer_median_dtype(value_dtype: DTypeLike) -> np.dtype: """Infer the dtype of the median. Set the dtype to the dtype arising from @@ -777,7 +777,7 @@ def infer_median_dtype(value_dtype: DTypeArgType) -> np.dtype: return RandomVariable.infer_moment_dtype(value_dtype) @staticmethod - def infer_moment_dtype(value_dtype: DTypeArgType) -> np.dtype: + def infer_moment_dtype(value_dtype: DTypeLike) -> np.dtype: """Infer the dtype of any moment. Infers the dtype of any (function of a) moment of the random variable, e.g. 
its @@ -972,7 +972,7 @@ class DiscreteRandomVariable(RandomVariable[_ValueType]): def __init__( self, shape: ShapeLike, - dtype: DTypeArgType, + dtype: DTypeLike, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[np.random.Generator, ShapeLike], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, @@ -1188,7 +1188,7 @@ class ContinuousRandomVariable(RandomVariable[_ValueType]): def __init__( self, shape: ShapeLike, - dtype: DTypeArgType, + dtype: DTypeLike, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[np.random.Generator, ShapeLike], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, diff --git a/src/probnum/utils/argutils.py b/src/probnum/utils/argutils.py index 24deaf9f8..1bf69de6d 100644 --- a/src/probnum/utils/argutils.py +++ b/src/probnum/utils/argutils.py @@ -5,7 +5,7 @@ import numpy as np -from probnum.typing import DTypeArgType, ScalarLike, ShapeLike, ShapeType +from probnum.typing import DTypeLike, ScalarLike, ShapeLike, ShapeType __all__ = ["as_shape", "as_numpy_scalar"] @@ -42,7 +42,7 @@ def as_shape(x: ShapeLike, ndim: Optional[numbers.Integral] = None) -> ShapeType return shape -def as_numpy_scalar(x: ScalarLike, dtype: DTypeArgType = None) -> np.generic: +def as_numpy_scalar(x: ScalarLike, dtype: DTypeLike = None) -> np.generic: """Convert a scalar into a NumPy scalar. Parameters From f197536798314570b140718b16a51b981762e3df Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 12:21:36 +0100 Subject: [PATCH 13/15] Fix refactoring bug --- src/probnum/diffeq/stepsize/_steprule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/probnum/diffeq/stepsize/_steprule.py b/src/probnum/diffeq/stepsize/_steprule.py index 0cfee8407..987e52b6a 100644 --- a/src/probnum/diffeq/stepsize/_steprule.py +++ b/src/probnum/diffeq/stepsize/_steprule.py @@ -65,7 +65,7 @@ def is_accepted(self, scaled_error: FloatLike): return True def errorest_to_norm(self, errorest: ArrayLike, reference_state: np.ndarray): - passArrayLike + pass # Once we have other controls, e.g. PI control, we can rename this into ProportionalControl. From 8c6b0b4ca2b81800752c8b29797d2da608360930 Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 12:22:17 +0100 Subject: [PATCH 14/15] isort fix --- src/probnum/randvars/_normal.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/probnum/randvars/_normal.py b/src/probnum/randvars/_normal.py index b5fa3655e..42d87864b 100644 --- a/src/probnum/randvars/_normal.py +++ b/src/probnum/randvars/_normal.py @@ -8,12 +8,7 @@ from probnum import config, linops from probnum import utils as _utils -from probnum.typing import ( - ArrayIndicesLike, - FloatLike, - ShapeLike, - ShapeType, -) +from probnum.typing import ArrayIndicesLike, FloatLike, ShapeLike, ShapeType from . 
import _random_variable From 6afc3e5159507ff775d5887b66fc5faead4373ca Mon Sep 17 00:00:00 2001 From: Marvin Pfoertner Date: Thu, 30 Dec 2021 12:25:17 +0100 Subject: [PATCH 15/15] isort fix --- src/probnum/randvars/_random_variable.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/probnum/randvars/_random_variable.py b/src/probnum/randvars/_random_variable.py index c40d927ca..783c9e2ff 100644 --- a/src/probnum/randvars/_random_variable.py +++ b/src/probnum/randvars/_random_variable.py @@ -5,13 +5,7 @@ import numpy as np from probnum import utils as _utils -from probnum.typing import ( - ArrayIndicesLike, - DTypeLike, - FloatLike, - ShapeLike, - ShapeType, -) +from probnum.typing import ArrayIndicesLike, DTypeLike, FloatLike, ShapeLike, ShapeType try: # functools.cached_property is only available in Python >=3.8