diff --git a/navipy/errorprop/__init__.py b/navipy/errorprop/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..06af5bb7ca94fb1674e186c106820fcc5f20f5a0
--- /dev/null
+++ b/navipy/errorprop/__init__.py
@@ -0,0 +1,218 @@
+"""
+Numerical error propagation
+===========================
+Sources of observational errors
+-------------------------------
+
+Observational error (or measurement error) is the difference between \
+a measured value of a quantity and its true value. An error is not \
+necessarily a "mistake" (e.g. in statistics): variability is an \
+inherent part of the things being measured and of the measurement process.
+
+Measurement errors can be divided into two components:
+
+* random error
+* systematic error (bias).
+
+Types of bias (non-exhaustive list):
+
+* *Selection bias* (Berksonian bias) involves individuals being more \
+likely to be selected for study than others.
+* *The bias of an estimator* is the difference between an estimator's \
+expected value and the true value of the parameter being estimated.
+* *Omitted-variable bias* is the bias that appears in estimates of \
+parameters in a regression analysis when the assumed specification \
+omits an independent variable that should be in the model.
+* *Detection bias* occurs when a phenomenon is more likely to be \
+observed for a particular set of study subjects.
+* *Funding bias* may lead to selection of outcomes, test samples, \
+or test procedures that favor a study's financial sponsor.
+* *Reporting bias* involves a skew in the availability of data, such \
+that observations of a certain kind are more likely to be reported.
+* *Analytical bias* arises due to the way that the results are evaluated.
+* *Exclusion bias* arises due to the systematic exclusion of certain \
+individuals from the study.
+* *Observer bias* arises when the researcher unconsciously influences \
+the experiment due to cognitive bias, so that judgement may alter how an \
+experiment is carried out or how results are recorded.
+
+Propagation of uncertainty
+--------------------------
+
+In statistics, propagation of uncertainty (or propagation of error) \
+is the effect of variables' uncertainties (or errors) on the \
+uncertainty of a function based on them. When the variables are \
+the values of experimental measurements they have uncertainties \
+due to measurement limitations (e.g., instrument precision) which \
+propagate to the combination of variables in the function.
+
+Non-linear function of one variable
+-----------------------------------
+
+Let :math:`f(x)` be a non-linear function of the variable :math:`x`, and \
+:math:`\Delta x` the measurement error of :math:`x`.
+
+As illustrated in the figure below, the measurement error of :math:`x` \
+(gray) propagates to :math:`y` (red) via :math:`f(x)` (black line). The \
+propagation depends on the measured value :math:`x_{o}`, the slope of \
+:math:`f(x)`, the curvature of :math:`f(x)`, ..., i.e. the derivatives of \
+:math:`f(x)` with respect to :math:`x`.
+
+The propagated error :math:`\Delta y` can therefore be expressed as a \
+Taylor expansion of :math:`f(x)` around the measured value :math:`x_{o}`, \
+i.e. by calculating the value :math:`f(x_{o}+\Delta x)` for \
+:math:`\Delta x \approx 0`
+
+.. math::
+   f(x_{o}+\Delta x) = \sum_{n=0}^{\infty} \frac{f^{(n)}(x_{o})}{n!}(\Delta x)^{n}.
+
+Keeping only the first-order term of the expansion:
+
+.. math::
+   f(x_{o}+\Delta x) \approx f(x_{o})+\frac{df}{dx}(x_{o})\Delta x
+
+The propagated error is defined as \
+:math:`\Delta y=\left|f(x_{o}+\Delta x)-f(x_{o})\right|`. From the \
+equation above we find that \
+:math:`\Delta y=\left|\frac{df}{dx}(x_{o})\right|\Delta x`.
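+
+As a quick illustration of this first-order rule (the function, measured \
+value and error below are assumptions made only for the example):
+
+.. code-block:: python
+
+   # illustrative sketch: propagate an error through f(x) = x**2
+   def f(x):
+       return x ** 2
+
+   def dfdx(x):
+       return 2 * x                     # analytical derivative of f
+
+   x_o = 1.5                            # assumed measured value
+   delta_x = 0.1                        # assumed measurement error on x
+   delta_y = abs(dfdx(x_o)) * delta_x   # propagated error, here 0.3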
+
+Instead of deriving the function analytically, one can approximate its \
+derivative numerically. For example, to propagate the error on \
+the function :math:`f(x)=(x+1)(x-2)(x+3)`, one needs to \
+define the function, the error :math:`\delta_x`, and the point of \
+interest :math:`x_0`.
+
+.. literalinclude:: examples/error_propagation_univariate.py
+   :lines: 23-27, 31
+
+The error can then be calculated as follows:
+
+.. literalinclude:: examples/error_propagation_univariate.py
+   :lines: 32
+
+In the following figure, the error on :math:`x` has been propagated at two \
+different locations:
+
+.. plot:: error_propagation/examples/error_propagation_univariate.py
+
+
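+Equivalently, the :func:`propagate_error` function of this module can be \
+used directly for the univariate case (the point of interest and the error \
+below are illustrative assumptions):
+
+.. code-block:: python
+
+   from navipy.errorprop import propagate_error
+
+   def f(x):
+       return (x + 1) * (x - 2) * (x + 3)
+
+   x_o = 1.0      # assumed point of interest
+   delta_x = 0.1  # assumed measurement error on x
+   delta_y = propagate_error(f, x_o, delta_x)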
+
+Non-linear function of multiple variables
+-----------------------------------------
+
+We can extend the framework developed above to a function :math:`f` of \
+multiple variables :math:`x_0, x_1, \ldots, x_i, \ldots, x_n`. When :math:`f` \
+is a set of non-linear combinations of the variables :math:`x`, an interval \
+propagation could be performed in order to compute intervals which contain \
+all consistent values for the variables. In a probabilistic approach, \
+the function :math:`f` must usually be linearized by approximation to a \
+first-order Taylor series expansion, though in some cases exact \
+formulas can be derived that do not depend on the expansion, as is \
+the case for the exact variance of products. The Taylor expansion \
+would be:
+
+.. math::
+   f_k \approx f^0_k + \sum_{i=1}^{n} \frac{\partial f_k}{\partial x_i} x_i
+
+
+where :math:`\partial f_k/\partial x_i` denotes the partial derivative of \
+:math:`f_k` with respect to the i-th variable, evaluated at the mean value \
+of all components of the vector :math:`x`. Or, in matrix notation,
+
+.. math::
+   \mathrm{f} \approx \mathrm{f}^0 + \mathrm{J} \mathrm{x}
+
+where :math:`\mathrm{J}` is the Jacobian matrix. Since :math:`\mathrm{f}^0` \
+is a constant, it does not contribute to the error on :math:`\mathrm{f}`. \
+Therefore, the propagation of error follows the linear case, but with the \
+linear coefficients :math:`A_{ik}` and :math:`A_{jk}` replaced by the \
+partial derivatives \
+:math:`\frac{\partial f_k}{\partial x_i}` and \
+:math:`\frac{\partial f_k}{\partial x_j}`. In matrix notation,
+
+.. math::
+   \mathrm{\Sigma}^\mathrm{f} = \mathrm{J} \mathrm{\Sigma}^\mathrm{x} \mathrm{J}^\top
+
+That is, the Jacobian of the function is used to transform the rows \
+and columns of the variance-covariance matrix of the argument.
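+
+A minimal sketch of the multivariate case using :func:`propagate_error` \
+(the two-dimensional example function is the one used in the test suite; \
+the measured values and the variance-covariance matrix are illustrative \
+assumptions):
+
+.. code-block:: python
+
+   import numpy as np
+   from navipy.errorprop import propagate_error
+
+   def sincosine(x):
+       # two outputs depending non-linearly on two variables
+       return np.cos(x[0] + x[1]), np.sin(x[0] - x[1])
+
+   x = np.array([0.1, 0.2])          # assumed measured values
+   covar = np.array([[0.01, 0.0],    # assumed variance-covariance
+                     [0.0, 0.02]])   # matrix of the measurements
+   covar_f = propagate_error(sincosine, x, covar)  # J Sigma J^T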
+
+"""
+import numpy as np
+
+
+def estimate_error(markers, rigid_marker_id=[0, 1]):
+    """
+       Estimate the error by using the deviation from the median
+distance between two markers
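+
+    A minimal sketch of the expected input, assuming that ``markers`` is a \
+pandas DataFrame with a two-level column index (marker id, coordinate); \
+the values are illustrative:
+
+    >>> import numpy as np
+    >>> import pandas as pd
+    >>> cols = pd.MultiIndex.from_product([[0, 1], ['x', 'y', 'z']])
+    >>> markers = pd.DataFrame(np.random.rand(10, 6), columns=cols)
+    >>> errors = estimate_error(markers, rigid_marker_id=[0, 1])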
+    """
+    if not isinstance(rigid_marker_id, list):
+        raise TypeError('rigid_marker_id should be a list')
+    if len(rigid_marker_id) != 2:
+        raise ValueError('rigid_marker_id should be a list with two elements')
+    dist = np.sqrt(((markers.loc[:, rigid_marker_id[0]] -
+                     markers.loc[:, rigid_marker_id[1]])**2).sum(
+                         axis=1, skipna=False))
+    median_dist = np.nanmedian(dist)
+    return np.abs(dist-median_dist)
+
+
+def estimate_jacobian(fun, x, args=None, epsilon=1e-6):
+    """Estimate the jacobian matrix
+
+    :param fun: The objective function to be differentiated. Must be in \
+the form f(x, *args). \
+The argument, x, is a 1-D array of points, and args is a tuple of \
+any additional \
+fixed parameters needed to completely specify the function.
+    :param x: values at which the jacobian should be calculated
+    :param args: Extra arguments passed to the objective function
+    :param epsilon: step used to estimate the jacobian
+    :returns: An estimated jacobian matrix
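+
+    A minimal doctest-style sketch (the point of evaluation is illustrative):
+
+    >>> import numpy as np
+    >>> round(float(estimate_jacobian(np.cos, np.pi / 2)), 6)
+    -1.0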
+    """
+    if isinstance(x, (list, tuple, np.ndarray)):
+        # work on a float array copy so that lists and tuples are supported
+        x = np.asarray(x, dtype=float)
+        jacobian_matrix = list()
+        for vari, _ in enumerate(x):
+            x_minus = x.copy()
+            x_plus = x.copy()
+            x_minus[vari] -= epsilon
+            x_plus[vari] += epsilon
+
+            if args is None:
+                dfs1 = fun(x_minus)
+                dfs2 = fun(x_plus)
+            else:
+                dfs1 = fun(x_minus, *args)
+                dfs2 = fun(x_plus, *args)
+            dfs1 = np.array(dfs1)
+            dfs2 = np.array(dfs2)
+            deriv = (dfs2-dfs1)/(2*epsilon)
+            jacobian_matrix.append(deriv)
+        return np.array(jacobian_matrix).transpose()
+    else:
+        x_minus = x-epsilon
+        x_plus = x+epsilon
+        if args is None:
+            deriv = (fun(x_plus) - fun(x_minus))/(2*epsilon)
+        else:
+            deriv = (fun(x_plus, *args) - fun(x_minus, *args))/(2*epsilon)
+        return deriv
+
+
+def propagate_error(fun, x, covar, args=None, epsilon=1e-6):
+    """Estimate the jacobian matrix
+
+    :param fun: The objective function through which the error is propagated. \
+Must be in the form f(x, *args). \
+The argument, x, is a 1-D array of points, and args is a tuple of \
+any additional \
+fixed parameters needed to completely specify the function.
+    :param x: values at which the error should be calculated
+    :param covar: variance-covariance matrix
+    :param args: Extra arguments passed to the objective function
+    :param epsilon: step used to estimate the jacobian
+    :returns: The propagated error (a variance-covariance matrix, or a \
+scalar error in the univariate case)
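+
+    A minimal doctest-style sketch (the point of evaluation and the error \
+are illustrative):
+
+    >>> import numpy as np
+    >>> round(float(propagate_error(np.cos, np.pi / 2, 0.1)), 6)
+    0.1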
+    """
+    jacobian_matrix = estimate_jacobian(fun, x, args, epsilon=epsilon)
+    if isinstance(x, (list, tuple, np.ndarray)):
+        return jacobian_matrix.dot(covar.dot(jacobian_matrix.transpose()))
+    else:
+        return np.abs(jacobian_matrix)*covar
diff --git a/navipy/errorprop/test.py b/navipy/errorprop/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d65beb053cb4341585801fe3b7b5b14d64589e2a
--- /dev/null
+++ b/navipy/errorprop/test.py
@@ -0,0 +1,49 @@
+import unittest
+import numpy as np
+from navipy.errorprop import propagate_error, estimate_jacobian
+
+
+def sincosine(x):
+    return np.cos(x[0]+x[1]), np.sin(x[0]-x[1])
+
+
+class TestErrorProp(unittest.TestCase):
+
+    def test_cosine(self):
+        fun = np.cos
+        x = 2*(np.random.rand()-0.5)*np.pi
+        covar = 2*(np.random.rand()-0.5)*np.pi / 10
+        err = propagate_error(fun, x, covar)
+        err_theo = np.abs(-np.sin(x))*covar
+        self.assertAlmostEqual(err, err_theo)
+
+    def test_sincosine(self):
+        fun = sincosine
+        x = 2*(np.random.rand(2)-0.5)*np.pi
+        covar = 2*(np.random.rand(2, 2)-0.5)*np.pi / 10
+        # Test jacobian
+        jacobian_matrix = estimate_jacobian(fun, x)
+        jacobian_matrix_theo = [[-np.sin(x[0]+x[1]), -np.sin(x[0]+x[1])],
+                                [np.cos(x[0]-x[1]), -np.cos(x[0]-x[1])]]
+        jacobian_matrix_theo = np.array(jacobian_matrix_theo)
+        np.testing.assert_array_almost_equal(
+            jacobian_matrix, jacobian_matrix_theo)
+
+        # Test prop error
+        err = propagate_error(fun, x, covar)
+        s = np.sin(x[0]+x[1])
+        c = np.cos(x[0]-x[1])
+        d = covar[0, 0]
+        e = covar[0, 1]
+        f = covar[1, 0]
+        g = covar[1, 1]
+        err_theo = np.zeros((2, 2))
+        err_theo[0, 0] = -s*(-d*s - f*s) - s*(-g*s - e*s)
+        err_theo[0, 1] = c*(-d*s - f*s) - c*(-g*s - e*s)
+        err_theo[1, 0] = -s*(c*d - c*f) - s*(c*e - c*g)
+        err_theo[1, 1] = c*(c*d - c*f) - c*(c*e - c*g)
+        np.testing.assert_array_almost_equal(err, err_theo)
+
+
+if __name__ == '__main__':
+    unittest.main()