In [1]:
import numpy as np

import matplotlib.pyplot as plt

import scipy
from scipy import stats

Exo 6¶

  • the first (forward) recursion fails
  • the second (backward) recursion works (see the derivation just below)
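With $I_k = \int_0^1 \frac{x^k}{10+x}\,dx$, the recursion follows from

$$I_k + 10\, I_{k-1} = \int_0^1 \frac{x^k + 10 x^{k-1}}{10+x}\,dx = \int_0^1 x^{k-1}\,dx = \frac{1}{k}$$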
In [3]:
from scipy import integrate
In [4]:
integrate.quad(lambda x: x, 0,1)
Out[4]:
(0.5, 5.551115123125783e-15)
In [5]:
integrate.quad(lambda x: 1/(10+x), 0,1)
Out[5]:
(0.09531017980432485, 1.0581555609992505e-15)
In [6]:
np.log(11) - np.log(10)
Out[6]:
0.09531017980432477
In [8]:
# forward recursion: I_k = 1/k - 10*I_{k-1}, seeded with the exact I_0 = log(11) - log(10)
I_n = [np.log(11) - np.log(10)]
for k in range(1,31):
    I_n.append( 1/k - 10*I_n[-1])

I_n
Out[8]:
[0.09531017980432477,
 0.04689820195675232,
 0.031017980432476833,
 0.023153529008564988,
 0.01846470991435012,
 0.015352900856498819,
 0.013137658101678468,
 0.011480561840358172,
 0.010194381596418278,
 0.009167295146928323,
 0.00832704853071678,
 0.007638605601923115,
 0.0069472773141021765,
 0.007450303782055162,
 -0.0030744663919801962,
 0.09741133058646863,
 -0.9116133058646863,
 9.174956588058627,
 -91.69401032503072,
 916.9927348292546,
 -9169.877348292546,
 91698.82110197308,
 -916988.1655651854,
 9169881.699130116,
 -91698816.94963449,
 916988169.5363449,
 -9169881695.324987,
 91698816953.28691,
 -916988169532.8334,
 9169881695328.37,
 -91698816953283.66]

Check: the forward recursion has indeed failed. The explanation is a one-line error analysis:
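If the starting value carries a rounding error $\varepsilon$, the recursion multiplies it by $-10$ at every step:

$$\tilde I_0 = I_0 + \varepsilon \quad\Longrightarrow\quad \tilde I_n = I_n + (-10)^n \varepsilon$$

With $\varepsilon \approx 10^{-17}$, the error reaches the size of $I_n$ itself after about 14 steps, which is where the values above change sign.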

In [17]:
# reference values computed by quadrature
I_n_quad = [ integrate.quad(lambda x: x**k/(10+x), 0,1)[0] for k in range(20)]
I_n_quad
Out[17]:
[0.09531017980432485,
 0.0468982019567514,
 0.031017980432486006,
 0.02315352900847329,
 0.018464709915267108,
 0.015352900847328937,
 0.013137658193377286,
 0.011480560923370004,
 0.010194390766299978,
 0.00916720344811137,
 0.008327965518886312,
 0.007629435720227788,
 0.00703897613105546,
 0.0065333156125223285,
 0.0060954153033481685,
 0.005712513633184992,
 0.005374863668150088,
 0.005074892730263844,
 0.004806628252917125,
 0.004565296418197191]
In [13]:
I_n[20], I_n_quad[20] - 1/20/11
Out[13]:
(-9169.877348292546, -0.00019841872742643648)
In [14]:
# I_n and I_n_quad have different lengths, so truncate to the common one
n = min(len(I_n), len(I_n_quad))
diff = np.array(I_n[:n]) - np.array(I_n_quad[:n])
diff[abs(diff) > 1]
In [19]:
integrate.quad(lambda x: x**9/(10 + x), 0,1),
Out[19]:
((0.00916720344811137, 1.0177640339516369e-16),)
In [17]:
integrate.quad(lambda x: x**30/(10 + x), 0,1)[0]
Out[17]:
0.0029409287048613284
$$\int_0^1 \frac{x^{30}}{10 + x}\,dx \approx \int_0^1 \frac{x^{30}}{11}\,dx = \frac{1}{11} \times \frac{1}{31}$$
In [24]:
1/11/31
Out[24]:
0.002932551319648094
In [15]:
# backward recursion: I_{k-1} = (1/k - I_k)/10, seeded with the estimate I_30 ≈ 1/11/31
# (the list is built in reverse: I_n[-1] is I_0)
I_n = [1/11/31]
for k in range(30,0,-1):
    I_n.append( (1/k - I_n[-1])/10 )

I_n[-5:], I_n[-1] - (np.log(11) - np.log(10))
Out[15]:
([0.018464709915267108,
  0.02315352900847329,
  0.031017980432486002,
  0.0468982019567514,
  0.09531017980432485],
 8.326672684688674e-17)
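The backward recursion is stable for the symmetric reason: each step divides the seed error by 10, so even a crude seed works. A minimal check (my own sketch), seeding with a deliberately bad value:

In [ ]:
J = [1.0]   # seed far from the true I_30 ≈ 0.0029
for k in range(30,0,-1):
    J.append( (1/k - J[-1])/10 )

# the seed error ~1 has been divided by 10**30, far below machine precision
J[-1] - (np.log(11) - np.log(10))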

Exo 7¶

relative error¶

https://matthew-brett.github.io/teaching/floating_error.html

In [ ]:
from math import log10, floor
x = -0.1234 # a number with greater precision than format allows
p = 3 # number of digits in mantissa
x1 = abs(x) # 0.1234
e1 = log10(x1) # -0.9086848403027772
exponent = floor(e1) # -1
m1 = x / 10 ** exponent # -1.234
mantissa = round(m1, p-1) # -1.23
exponent, mantissa
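A possible continuation of the snippet above (my own sketch; `x_hat` is a hypothetical name): reconstruct the rounded value and compare the relative error against the standard bound $\frac{1}{2} 10^{1-p}$.

In [ ]:
x_hat = mantissa * 10 ** exponent   # -0.123, the value actually stored
rel_err = abs(x - x_hat) / abs(x)   # ≈ 0.0032
rel_err, 0.5 * 10 ** (1 - p)        # bound: 0.005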

Taylor¶

  • https://en.wikipedia.org/wiki/Symmetric_derivative

  • https://www.ljll.math.upmc.fr/frey/cours/UdC/ma691/ma691_ch6.pdf

Errors are as follows:

  • error of the usual difference quotient $\leq \frac{1}{2} \| f''\| h$
  • error of the symmetric difference quotient $\leq \frac{1}{6} \| f'''\| h^2$

For the usual derivative

$$f(x+h) - f(x) = h f'(x) + \frac{1}{2} h^2 f''(x) + O(h^3)$$

so

$$\left|\frac{f(x+h) - f(x)}{h} - f'(x) \right| = \frac{1}{2} h |f''(x)| + O(h^2)$$

For the symmetric derivative use:

$$f(x+h) - f(x-h) = 2h f'(x) + \frac{1}{3} h^3 f'''(x) + O(h^5)$$

so the symmetric quotient $\frac{f(x+h)-f(x-h)}{2h}$ differs from $f'(x)$ by $\frac{1}{6} h^2 |f'''(x)| + O(h^4)$.
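A quick numerical illustration (my own sketch, with $f = \exp$ at $x = 1$, where $f' = f$): the usual quotient's error shrinks like $h$, the symmetric one like $h^2$.

In [ ]:
f, x0 = np.exp, 1.0
for h in [1e-2, 1e-3, 1e-4]:
    fwd = (f(x0 + h) - f(x0)) / h            # usual difference quotient
    sym = (f(x0 + h) - f(x0 - h)) / (2*h)    # symmetric difference quotient
    print(h, abs(fwd - f(x0)), abs(sym - f(x0)))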

Reference for fun

https://www.ams.org/journals/tran/1983-277-02/S0002-9947-1983-0694378-6/S0002-9947-1983-0694378-6.pdf


Exo 8¶

Taylor¶

$$f(x+h) + f(x-h) - 2f(x) = h^2 f''(x) + \frac{1}{12} h^4 f^{(4)}(x) + O(h^6)$$
  • error of the second-difference approximation of $f''$: $\leq \frac{1}{12}\| f^{(4)}\| h^2$ (numerical check below)
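The same kind of numerical check for the second-difference quotient (my own sketch, again with $f = \exp$, so $f'' = f$):

In [ ]:
f, x0 = np.exp, 1.0
for h in [1e-1, 1e-2, 1e-3]:
    d2 = (f(x0 + h) + f(x0 - h) - 2*f(x0)) / h**2
    print(h, abs(d2 - f(x0)))   # error ≈ h**2 * e / 12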
In [183]:
# A: the 8x8 tridiagonal matrix with 2 on the diagonal and -1 next to it
# (see M(n) below); its trace 16 equals the sum of the eigenvalues
sum(np.linalg.eigvals(A)), np.linalg.eigvals(A)
Out[183]:
(15.999999999999988,
 array([3.87938524, 3.53208889, 3.        , 2.34729636, 1.65270364,
        0.12061476, 0.46791111, 1.        ]))
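These agree with the classical closed form for the eigenvalues of this tridiagonal matrix, $\lambda_k = 2 - 2\cos\frac{k\pi}{n+1}$, $k = 1, \dots, n$ (a standard fact, quick check below):

In [ ]:
n = 8
2 - 2*np.cos(np.arange(1, n+1)*np.pi/(n+1))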

Better to¶

write a function to generate the matrix $A$

In [185]:
def M(n):
    A = 2*np.identity(n)
    for i in range(n-1):
        A[i,i+1] = A[i+1,i] = -1
    return A


X = np.linspace(0,1,100)

# the second derivative of x^4 + 2x^2 is 12x^2 + 4
plt.plot(X, 12*X**2+4);
# -M(n)/h^2 approximates d^2/dx^2; note that with @ the argument needs
# parentheses: M(100) @ (X**4 + 2*X**2)
plt.plot(X[2:-2], -100**2*M(100).dot(X**4+ 2*X**2)[2:-2]);
In [186]:
N = 500
# subdivision of the interval
X = np.linspace(0,1,N)
h = (X[1] - X[0])

# discretization of -u'' + x*u = f with u(0) = u(1) = 0:  B = M(N)/h^2 + diag(X)
B = M(N)/(h**2) + np.diag(X)
# this is probably more accurate as N**2 has no rounding error
# (strictly, h = 1/(N-1), so 1/h**2 = (N-1)**2 rather than N**2)
B = M(N)*(N**2) + np.diag(X)

# right-hand side f and exact solution g = (1-x)(e^x - 1)
f = (1 + 2*X - X**2)*np.exp(X) + X**2 - X
g = (1-X)*(np.exp(X)-1)

Y = np.linalg.solve(B, f)
plt.plot(X, Y)
plt.plot(X, g);

np.max(np.abs(Y - g))
Out[186]:
0.0034362595476501016

Jacobi method¶

In numerical linear algebra, the Jacobi method is an iterative algorithm for determining the solutions of a strictly diagonally dominant system of linear equations. Each diagonal element is solved for, and an approximate value is plugged in. The process is then iterated until it converges. This algorithm is a stripped-down version of the Jacobi transformation method of matrix diagonalization. The method is named after Carl Gustav Jacob Jacobi.

https://en.wikipedia.org/wiki/Jacobi_method
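One sweep of the method in code (a minimal sketch, not the notebook's implementation below; `jacobi_step` is a hypothetical name):

In [ ]:
def jacobi_step(C, b, x):
    # split C = D + R: D the diagonal, R = L + U the rest
    D = np.diag(C)             # diagonal entries, as a vector
    R = C - np.diag(D)         # off-diagonal part
    return (b - R.dot(x)) / D  # solve equation i for x_i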

In [232]:
C = np.array([2,1,5,7]).reshape(2,2)
In [32]:
def decompose(C):
    # D: diagonal entries of C as a vector; LU: the off-diagonal part L + U
    D = np.array([C[i,i] for i in range(C.shape[0])])
    LU = np.copy(C) - np.diag(D)
    return D, LU

V = np.ones(2)
b = np.array([11,13])
D, LU = decompose(C)
In [732]:
for k in range(20):
    #V = (-LU.dot(V) + b)/D       # standard form: V <- D^{-1}(b - (L+U)V)
    V = V + (-C.dot(V) + b)/D     # equivalent residual form: V <- V + D^{-1}(b - CV)
C.dot(V)
Out[732]:
array([10.99999999, 13.        ])
In [42]:
# initialize the matrix
C = np.array([[10., -1., 2., 0.],
              [-1., 11., -1., 3.],
              [2., -1., 10., -1.],
              [0.0, 3., -1., 8.]])
# initialize the RHS vector
b = np.array([6., 25., -11., 15.])
V = np.zeros_like(C[0])

D, LU = decompose(C)

VS = []
for k in range(10):
    V = (-LU.dot(V) + b)/D
    VS.append(V)

E = [np.linalg.norm( b - C.dot(v)) for v in VS]
plt.plot(-np.log(E));
In [38]:
from scipy import stats

stats.linregress(np.arange(len(E)), -np.log(E))
Out[38]:
LinregressResult(slope=0.854590388387257, intercept=-2.4365624942108233, rvalue=0.9999854699430567, pvalue=1.950023073529421e-19, stderr=0.001628794894121396)
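The fitted slope should match $-\log \rho$, where $\rho$ is the spectral radius of the Jacobi iteration matrix $-D^{-1}(L+U)$ (a quick check of my own, reusing `D` and `LU` from above):

In [ ]:
rho = np.max(np.abs(np.linalg.eigvals(-LU / D[:, None])))
rho, -np.log(rho)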

Eigenvalues¶

In [50]:
# eigenvalues of D^{-1}C; the Jacobi iteration matrix is I - D^{-1}C
ev, evv = np.linalg.eig(np.linalg.inv(np.diag(D)).dot(C))
In [51]:
D
Out[51]:
array([10., 11., 10.,  8.])

Jacobi applied to the exo 8¶

In [28]:
N = 200
X = np.linspace(0,1,N)

B = M(N)*(N**2) + np.diag(X)
# target function
b = (1 + 2*X - X**2)*np.exp(X) + X**2 - X 
# exact solution
g = (1-X)*(np.exp(X)-1)
# initial guess
V = np.ones(N)

# the diagonal of B; could equivalently use
# D = 2*N**2 + X

D = np.array([B[i,i] for i in range(N)])

Iteration loop¶

In [29]:
#%%timeit
E = []
for k in range(4000):
    #V = (-LU.dot(V) + b)/D
    V = V + (-B.dot(V) + b)/D
    E.append(np.linalg.norm(V - g,np.inf))   # sup-norm error against the exact solution
    
In [30]:
plt.plot(-np.log(np.array(E[100:])));
In [367]:
plt.plot(V);
plt.plot(g );

np.max(np.abs(V - g)), np.linalg.norm(V-g, np.inf)
Out[367]:
(0.008589502825312118, 0.008589502825312118)
In [363]:
#%%timeit
# stencil of the tridiagonal part of B: one row of N**2*M(N) applied to V
K = N**2*np.array([1,-2,1])

for k in range(500000):
    # trick to avoid the matrix multiplication:
    # np.convolve(V, K, mode='same') equals -(N**2*M(N)).dot(V)
    V = V + ( np.convolve(V, K, mode='same') - X*V + b)/D

Convergence¶

  • easy case: $C$ symmetric positive definite
  • in general Jacobi converges iff the spectral radius of $D^{-1}(L + U)$ is smaller than 1 (see the check after this list)
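For the $2 \times 2$ example below, the check looks like this (my own sketch; $-\log\rho$ is the expected slope of the log-error plot):

In [ ]:
C2 = np.array([[2., 1.], [1., 1.]])
D2 = np.diag(C2)
R2 = C2 - np.diag(D2)                     # L + U
rho = np.max(np.abs(np.linalg.eigvals(-R2 / D2[:, None])))
rho, -np.log(rho)                         # rho = 1/sqrt(2) ≈ 0.707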
In [52]:
C = np.array([2,1,1,1]).reshape(2,2)
In [56]:
D = np.array([C[i,i] for i in range(C.shape[0])])
In [66]:
C.dot([0,1])   # [0, 1] is the exact solution of C x = b for b = [1, 1]
Out[66]:
array([1, 1])
In [91]:
C = np.array([2,1,1,1]).reshape(2,2)
D = np.array([C[i,i] for i in range(C.shape[0])])
V = np.ones(2)
b = np.ones(2)

E = []
for k in range(40):
    #V = (-LU.dot(V) + b)/D
    V = V + (-C.dot(V) + b)/D
    E.append(np.linalg.norm(V - [0,1]))
plt.plot(np.log(E));
In [93]:
DC = np.diag(D)
# eigenvalues of (L+U)D^{-1}, which has the same spectrum as D^{-1}(L+U)
ee, vv = np.linalg.eig((C-DC)/D)
In [96]:
DC
Out[96]:
array([[2, 0],
       [0, 1]])