import numpy as np
import matplotlib.pyplot as plt
from scipy.special import jn
n = 0
x = 0.0
"J_{0:d}({1:f}) = {2:f}".format(n, x, jn(n, x))
'J_0(0.000000) = 1.000000'
x = np.linspace(0, 10, 100)
fig, ax = plt.subplots()
for n in range(4):
    ax.plot(x, jn(n, x), label=r"$J_%d(x)$" % n)
ax.legend();
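scipy.special also provides related helpers, for example jn_zeros for computing the zeros of $J_n$; a minimal sketch (the choice of order and number of zeros here is arbitrary):
from scipy.special import jn_zeros
n = 0  # order of the Bessel function
m = 4  # how many zeros to compute
jn_zeros(n, m)  # the first zero of J_0 is approximately 2.4048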
Numerical integration: evaluating a definite integral $\displaystyle \int_a^b f(x)\, dx$ numerically is known as quadrature; scipy.integrate provides the quad function for the one-dimensional case.
from scipy.integrate import quad
# define a simple function for the integrand
def f(x):
    return x*x
x_lower = 0 # the lower limit of x
x_upper = 1 # the upper limit of x
val, abserr = quad(f, x_lower, x_upper)
"integral value ={}, absolute error ={}".format(val, abserr)
'integral value =0.33333333333333337, absolute error =3.700743415417189e-15'
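quad can also pass extra parameters to the integrand through the args keyword; a minimal sketch (the integrand g and its parameters are made up for illustration):
def g(x, a, b):
    return a*x + b
val, abserr = quad(g, x_lower, x_upper, args=(2, 1))  # integral of 2x+1 over [0,1] is exactly 2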
Linear systems of equations
$A x = b$
from scipy.linalg import solve
A = np.array([[1,2,3], [4,5,6], [7,8,9]])
b = np.array([1,2,3])
x = solve(A, b)
x
array([-0.33333333,  0.66666667,  0.        ])
# check
np.dot(A, x) - b
array([ -1.11022302e-16,   0.00000000e+00,   0.00000000e+00])
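The residual is zero to machine precision, but note that this particular A is in fact singular (its rows are linearly dependent), so the solution is not unique. For such rank-deficient or non-square systems, scipy.linalg.lstsq returns a least-squares solution instead; a minimal sketch using the same A and b:
from scipy.linalg import lstsq
x_ls, residues, rank, sv = lstsq(A, b)
rank  # 2 rather than 3, confirming that A is singular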
from scipy import optimize
def f(x):
    return 4*x**3 + (x-2)**2 + x**4
fig, ax = plt.subplots()
x = np.linspace(-5, 3, 100)
ax.plot(x, f(x));
x_min = optimize.fmin_bfgs(f, -2)
x_min
Optimization terminated successfully.
         Current function value: -3.506641
         Iterations: 6
         Function evaluations: 30
         Gradient evaluations: 10
array([-2.67298164])
x_min = optimize.fmin_bfgs(f, 0.5)
x_min
Optimization terminated successfully.
         Current function value: 2.804988
         Iterations: 3
         Function evaluations: 15
         Gradient evaluations: 5
array([ 0.46961745])
optimize.brent(f)
0.46961743402759754
optimize.fminbound(f, -4, 2)
-2.6729822917513886
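scipy.optimize also handles root finding; brentq, for example, locates a root of a scalar function inside an interval where it changes sign. A minimal sketch (the level 10 and the bracket [0, 2] are arbitrary choices for this f):
optimize.brentq(lambda x: f(x) - 10, 0, 2)  # x where f(x) = 10; f(0) - 10 < 0 and f(2) - 10 > 0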
from scipy.interpolate import interp1d
def f(x):
    return np.sin(x)
n = np.arange(0, 10)
x = np.linspace(0, 9, 100)
y_meas = f(n) + 0.1 * np.random.randn(len(n)) # simulate measurement with noise
y_real = f(x)
linear_interpolation = interp1d(n, y_meas)
y_interp1 = linear_interpolation(x)
cubic_interpolation = interp1d(n, y_meas, kind='cubic')
y_interp2 = cubic_interpolation(x)
fig, ax = plt.subplots(figsize=(10,4))
ax.plot(n, y_meas, 'bs', label='noisy data')
ax.plot(x, y_real, 'k', lw=2, label='true function')
ax.plot(x, y_interp1, 'r', label='linear interp')
ax.plot(x, y_interp2, 'g', label='cubic interp')
ax.legend(loc=3);
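When the measurements are noisy, exact interpolation chases the noise; scipy.interpolate.UnivariateSpline instead fits a smoothing spline. A minimal sketch (the smoothing factor s=0.5 is an arbitrary choice):
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(n, y_meas, s=0.5)  # larger s gives a smoother, less faithful fit
y_smooth = spline(x)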
from scipy import stats
# create a (discrete) random variable with a Poisson distribution
X = stats.poisson(3.5) # photon distribution for a coherent state with n=3.5 photons
n = np.arange(0,15)
fig, axes = plt.subplots(3,1, sharex=True)
# plot the probability mass function (PMF)
axes[0].step(n, X.pmf(n))
# plot the cumulative distribution function (CDF)
axes[1].step(n, X.cdf(n))
# plot histogram of 1000 random realizations of the stochastic variable X
axes[2].hist(X.rvs(size=1000));
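Frozen distributions also expose tail probabilities directly through the survival function sf; a one-line sketch:
X.sf(5)  # P(X > 5) = 1 - X.cdf(5), the probability of counting more than 5 photons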
# create a (continuous) random variable with a normal distribution
Y = stats.norm()
x = np.linspace(-5,5,100)
fig, axes = plt.subplots(3,1, sharex=True)
# plot the probability density function (PDF)
axes[0].plot(x, Y.pdf(x))
# plot the cumulative distribution function (CDF)
axes[1].plot(x, Y.cdf(x));
# plot histogram of 1000 random realizations of the stochastic variable Y
axes[2].hist(Y.rvs(size=1000), bins=50);
X.mean(), X.std(), X.var() # Poisson distribution
(3.5, 1.8708286933869707, 3.5)
Y.mean(), Y.median(), Y.std(), Y.var() # normal distribution
(0.0, 0.0, 1.0, 1.0)
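Quantiles come from the percent point function ppf, the inverse of the CDF; a one-line sketch:
Y.ppf(0.975)  # 97.5% quantile of the standard normal, approximately 1.96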
# Statistical tests
t_statistic, p_value = stats.ttest_ind(X.rvs(size=1000), X.rvs(size=1000))
print("t-statistic = {}".format(t_statistic))
print("p-value ={}".format(p_value))
t-statistic = 1.1286809118317047 p-value =0.25916796614330784
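Since both samples are drawn from the same Poisson distribution, the large p-value is expected: we cannot reject the null hypothesis that the two means are equal. Other tests follow the same pattern; for instance, a Kolmogorov-Smirnov test of whether a sample is consistent with a standard normal (a minimal sketch):
# KS test: compare 1000 draws from Y against the standard normal CDF
stats.kstest(Y.rvs(size=1000), 'norm')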