Skip to main content
โšก Calmops

SciPy: Scientific Computing and Advanced Numerical Methods

SciPy builds on NumPy to provide advanced scientific computing capabilities including optimization, integration, interpolation, and linear algebra.

Installation and Setup

pip install scipy numpy matplotlib

Optimization

Minimization

import numpy as np
from scipy.optimize import minimize, minimize_scalar
import matplotlib.pyplot as plt

# Objective: the classic Rosenbrock "banana" function
def rosenbrock(x):
    """Rosenbrock function (1-x)^2 + 100(y-x^2)^2; global minimum 0 at (1, 1)."""
    a, b = x[0], x[1]
    return (1 - a) ** 2 + 100 * (b - a ** 2) ** 2

# Shared starting point for all three solvers
x0 = np.array([0, 0])

# Quasi-Newton method (gradient estimated numerically here)
result_bfgs = minimize(rosenbrock, x0, method='BFGS')
print(f"BFGS: {result_bfgs.x}, Success: {result_bfgs.success}")

# Derivative-free simplex search
result_nm = minimize(rosenbrock, x0, method='Nelder-Mead')
print(f"Nelder-Mead: {result_nm.x}, Success: {result_nm.success}")

# Derivative-free conjugate-direction search
result_powell = minimize(rosenbrock, x0, method='Powell')
print(f"Powell: {result_powell.x}, Success: {result_powell.success}")
# Minimize scalar function
def f(x):
    """Parabola shifted to have minimum value 2 at x = 3."""
    return (x - 3) ** 2 + 2

# Bounded one-dimensional search over [0, 5]
result_scalar = minimize_scalar(f, bounds=(0, 5), method='bounded')
print(f"Scalar minimization: x={result_scalar.x}, f(x)={result_scalar.fun}")

# Visualize
# Contour plot of the Rosenbrock surface; log-spaced levels are used because
# the function spans several orders of magnitude near the curved valley.
x = np.linspace(-2, 2, 100)
y = np.linspace(-1, 3, 100)
X, Y = np.meshgrid(x, y)
Z = (1 - X)**2 + 100 * (Y - X**2)**2

plt.figure(figsize=(10, 8))
plt.contour(X, Y, Z, levels=np.logspace(-1, 3.5, 20))
# The BFGS solution (red star) should coincide with the true optimum (1, 1).
plt.plot(result_bfgs.x[0], result_bfgs.x[1], 'r*', markersize=15, label='BFGS')
plt.plot(1, 1, 'g*', markersize=15, label='Optimum')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Rosenbrock Function Optimization')
plt.show()

Constrained Optimization

from scipy.optimize import minimize

# Objective function: squared distance from the origin
def objective(x):
    """Return x^2 + y^2 for the point x = (x, y)."""
    return x[0] * x[0] + x[1] * x[1]

# Inequality constraint in SciPy's 'ineq' form: g(x) >= 0  <=>  x + y >= 1
def constraint1(x):
    """Non-negative exactly when x + y >= 1."""
    return x[0] + x[1] - 1

# Box constraints keep both coordinates non-negative
bounds = [(0, None), (0, None)]

constraints = {'type': 'ineq', 'fun': constraint1}

# Start from a feasible point on the constraint boundary
x0 = np.array([0.5, 0.5])
result = minimize(objective, x0, method='SLSQP', bounds=bounds, constraints=constraints)

print(f"Optimal solution: {result.x}")
print(f"Minimum value: {result.fun}")

Root Finding

from scipy.optimize import fsolve, brentq

# Find roots of f(x) = x^3 - 2x - 5 (single real root near x = 2.0946)
def f(x):
    """Cubic whose real root we search for."""
    return x ** 3 - 2 * x - 5

# fsolve: general-purpose, works on systems, needs only a starting guess
root_fsolve = fsolve(f, x0=2)
print(f"Root (fsolve): {root_fsolve[0]}")

# brentq: scalar-only, but guaranteed to converge once the root is bracketed
root_brentq = brentq(f, 2, 3)
print(f"Root (brentq): {root_brentq}")

# System of equations: intersection of the unit circle with the line y = x.
# Note: the parameter was renamed from `vars` to `point` because `vars`
# shadows the Python builtin of the same name.
def equations(point):
    """Return residuals [circle, line] for fsolve; zero at an intersection."""
    x, y = point
    eq1 = x**2 + y**2 - 1  # Circle: x^2 + y^2 = 1
    eq2 = x - y  # Line: y = x
    return [eq1, eq2]

# Starting guess in the first quadrant converges to (1/sqrt(2), 1/sqrt(2)).
solution = fsolve(equations, [0.5, 0.5])
print(f"System solution: {solution}")

Integration

Numerical Integration

from scipy.integrate import quad, dblquad, tplquad
import numpy as np

# Single integral: ∫ sin(x) dx from 0 to π  (exact value: 2)
def integrand(x):
    """Integrand for the one-dimensional quadrature."""
    return np.sin(x)

result, error = quad(integrand, 0, np.pi)
print(f"∫sin(x)dx from 0 to π = {result:.6f} ± {error:.2e}")

# Double integral: ∫∫ x*y dxdy over the unit square  (exact value: 1/4)
def integrand_2d(y, x):
    """dblquad integrand; SciPy passes the inner variable (y) first."""
    return x * y

result_2d, error_2d = dblquad(integrand_2d, 0, 1, 0, 1)
print(f"∫∫ x*y dxdy = {result_2d:.6f} ± {error_2d:.2e}")

# Triple integral: ∫∫∫ x*y*z over the unit cube  (exact value: 1/8)
def integrand_3d(z, y, x):
    """tplquad integrand; the innermost variable (z) comes first."""
    return x * y * z

result_3d, error_3d = tplquad(integrand_3d, 0, 1, 0, 1, 0, 1)
print(f"∫∫∫ x*y*z dxdydz = {result_3d:.6f} ± {error_3d:.2e}")

# Gaussian-damped cosine over the whole real line
# (exact value: sqrt(pi) * exp(-1/4))
def complex_integrand(x):
    """e^(-x^2) * cos(x); decays fast enough for infinite-range quad."""
    return np.exp(-x**2) * np.cos(x)

result_complex, _ = quad(complex_integrand, -np.inf, np.inf)
print(f"∫ e^(-x²)cos(x)dx = {result_complex:.6f}")

Ordinary Differential Equations (ODE)

from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt

# Define ODE: dy/dt = -2y  (exact solution: y(t) = y0 * e^(-2t))
def dydt(y, t):
    """Right-hand side; odeint passes the state first, then time."""
    return -2 * y

# Initial condition
y0 = 1

# 100 evenly spaced sample times on [0, 5]
t = np.linspace(0, 5, 100)

# Integrate the ODE forward in time
solution = odeint(dydt, y0, t)

# Plot
# Overlay the numerical odeint result on the exact solution y = e^(-2t);
# the two curves should be visually indistinguishable.
plt.figure(figsize=(10, 6))
plt.plot(t, solution, 'b-', label='Numerical solution')
plt.plot(t, np.exp(-2*t), 'r--', label='Analytical solution')
plt.xlabel('Time')
plt.ylabel('y')
plt.legend()
plt.title('ODE Solution: dy/dt = -2y')
plt.grid(True)
plt.show()

# System of ODEs: Lotka-Volterra (predator-prey)
def lotka_volterra(y, t, a, b, c, d):
    """Predator-prey right-hand side.

    a: prey growth rate, b: predation rate,
    c: predator reproduction rate, d: predator death rate.
    """
    prey, predator = y
    return [a * prey - b * prey * predator,
            c * prey * predator - d * predator]

# Model parameters
a, b, c, d = 1.0, 0.1, 0.1, 0.4

# Initial populations: 10 prey, 5 predators
y0 = [10, 5]

# Long horizon to capture several oscillation cycles
t = np.linspace(0, 100, 1000)

# Integrate the coupled system
solution = odeint(lotka_volterra, y0, t, args=(a, b, c, d))

# Plot
# Left: both populations against time; right: the trajectory in
# prey-predator phase space (closed orbits are characteristic of the
# Lotka-Volterra model).
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(t, solution[:, 0], label='Prey')
plt.plot(t, solution[:, 1], label='Predator')
plt.xlabel('Time')
plt.ylabel('Population')
plt.legend()
plt.title('Lotka-Volterra Model')

plt.subplot(1, 2, 2)
plt.plot(solution[:, 0], solution[:, 1])
plt.xlabel('Prey')
plt.ylabel('Predator')
plt.title('Phase Space')
plt.show()

Interpolation

from scipy.interpolate import interp1d, UnivariateSpline
import numpy as np
import matplotlib.pyplot as plt

# Sample points taken from the parabola y = x^2
x = np.array([0, 1, 2, 3, 4, 5])
y = x ** 2

# Piecewise-linear interpolant
f_linear = interp1d(x, y, kind='linear')

# Piecewise-cubic (spline) interpolant
f_cubic = interp1d(x, y, kind='cubic')

# Smoothing spline; s=0 forces exact interpolation of the data
f_spline = UnivariateSpline(x, y, s=0)

# Dense grid for evaluating and comparing the interpolants
x_new = np.linspace(0, 5, 50)
y_linear = f_linear(x_new)
y_cubic = f_cubic(x_new)
y_spline = f_spline(x_new)

# Plot
# Compare all three interpolants against the raw sample points; the linear
# interpolant is piecewise straight while cubic/spline track the curvature.
plt.figure(figsize=(10, 6))
plt.plot(x, y, 'o', label='Data points')
plt.plot(x_new, y_linear, '-', label='Linear')
plt.plot(x_new, y_cubic, '--', label='Cubic')
plt.plot(x_new, y_spline, '-.', label='Spline')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.title('Interpolation Methods')
plt.grid(True)
plt.show()

Linear Algebra

from scipy import linalg
import numpy as np

# Small 2x2 example system
A = np.array([[1, 2], [3, 4]])
b = np.array([5, 6])

# Solve the linear system Ax = b (preferred over computing inv(A) @ b)
x = linalg.solve(A, b)
print(f"Solution: {x}")

# Eigendecomposition: A v = lambda v
eigenvalues, eigenvectors = linalg.eig(A)
print(f"Eigenvalues: {eigenvalues}")
print(f"Eigenvectors:\n{eigenvectors}")

# Singular value decomposition: A = U @ diag(s) @ Vt
U, s, Vt = linalg.svd(A)
print(f"Singular values: {s}")

# QR decomposition: A = Q @ R, Q orthogonal, R upper-triangular
Q, R = linalg.qr(A)
print(f"Q:\n{Q}")
print(f"R:\n{R}")

# Determinant (here -2, so A is invertible)
det = linalg.det(A)
print(f"Determinant: {det}")

# Explicit inverse (prefer solve() when only a system solution is needed)
A_inv = linalg.inv(A)
print(f"Inverse:\n{A_inv}")

Signal Processing

from scipy import signal
import numpy as np
import matplotlib.pyplot as plt

# Create signal
# 1 s of data sampled at ~1 kHz: 10 Hz and 20 Hz sine components plus
# Gaussian noise. Using a seeded Generator (instead of the previously
# unseeded np.random.normal) makes the demo reproducible run-to-run.
rng = np.random.default_rng(0)
t = np.linspace(0, 1, 1000)
signal_data = (np.sin(2*np.pi*10*t)
               + 0.5*np.sin(2*np.pi*20*t)
               + rng.normal(0, 0.1, len(t)))

# 4th-order Butterworth low-pass filter; cutoff 0.1 x Nyquist (~50 Hz)
b, a = signal.butter(4, 0.1)

# Zero-phase filtering (forward + backward pass, no phase distortion)
filtered = signal.filtfilt(b, a, signal_data)

# Discrete Fourier transform of the raw signal
frequencies = np.fft.fftfreq(len(signal_data), t[1]-t[0])
fft_values = np.fft.fft(signal_data)

# Plot
plt.figure(figsize=(12, 8))

# Time domain: raw vs. zero-phase filtered signal
plt.subplot(2, 2, 1)
plt.plot(t, signal_data, label='Original')
plt.plot(t, filtered, label='Filtered')
plt.xlabel('Time')
plt.ylabel('Amplitude')
plt.legend()
plt.title('Signal Filtering')

# Frequency domain: only the first half of the spectrum is shown because
# the FFT of a real-valued signal is symmetric about the Nyquist frequency
plt.subplot(2, 2, 2)
plt.plot(frequencies[:len(frequencies)//2], np.abs(fft_values[:len(fft_values)//2]))
plt.xlabel('Frequency')
plt.ylabel('Magnitude')
plt.title('FFT')

plt.tight_layout()
plt.show()

Best Practices

  1. Choose appropriate method: Different optimization/integration methods suit different problems
  2. Provide good initial guesses: Improves convergence for optimization
  3. Check convergence: Verify results with different methods
  4. Handle edge cases: Check for singularities and discontinuities
  5. Use appropriate tolerances: Balance accuracy and computation time

Common Pitfalls

Bad Practice:

# Don't: Use default tolerances without checking
# (the defaults may be looser or tighter than your problem requires)
result = minimize(f, x0)

# Don't: Ignore convergence warnings
# (improper integrals can fail quietly; always inspect the error estimate)
result = quad(f, 0, np.inf)

# Don't: Use inappropriate method
result = minimize(f, x0, method='BFGS')  # For constrained problem

Good Practice:

# Do: Specify tolerances with a method that accepts them
# (plain BFGS, the unconstrained default, has no 'ftol' option and would
# emit "Unknown solver options: ftol"; L-BFGS-B supports it)
result = minimize(f, x0, method='L-BFGS-B', options={'ftol': 1e-8})

# Do: Check for convergence
if not result.success:
    print(f"Warning: {result.message}")

# Do: Use appropriate method
result = minimize(f, x0, method='SLSQP', constraints=constraints)

Conclusion

SciPy provides powerful tools for scientific computing. Master optimization, integration, and ODE solving to tackle complex numerical problems. Combine with NumPy and Matplotlib for comprehensive scientific computing workflows.

Comments