diff --git a/final/README.MD b/final/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..2d2ba5b6b7cf11c8ec7f33c85246e7dcf54326e1
--- /dev/null
+++ b/final/README.MD
@@ -0,0 +1,14 @@
+# Final Project - Heatmapping CT Data
+
+Given a CT scan of a part with constant material density, fit a 3D model (.step file) to the data and characterize the error in the scanned part.
+
+Is the part larger or smaller than the intended CAD, and is that difference isotropic? For example, is the error coming from over-extrusion in a 3D-printed part (XY error) or from poor layer-height calibration (Z error)?
+
+For diamond rotors, is the ID concentric with the OD? Is there taper in either? Are the ID or OD lobed, and if so, at what angular frequency?
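+
+One way the lobing check could work, as a sketch: fit a mean circle to the measured boundary in each slice, then take an FFT of the radial deviation versus angle; the dominant bin gives the lobe count. Synthetic data stands in for measured radii here.
+
+```python
+import numpy as np
+
+theta = np.linspace(0, 2 * np.pi, 360, endpoint=False)
+r = 1.0 + 0.01 * np.cos(3 * theta)    # synthetic 3-lobed radius profile
+deviation = r - r.mean()              # radial runout about the mean circle
+spectrum = np.abs(np.fft.rfft(deviation))
+lobes = np.argmax(spectrum[1:]) + 1   # dominant angular frequency (cycles/rev)
+print(lobes)                          # -> 3
+```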
+
+## Planning 4/13/23
+
+For visualizing a .tiff stack (a loading sketch follows this list):
+
+- pygfx
+- PyTorch3D has lots of utilities for visualizing 3D data from MRI machines, etc.
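+
+A minimal sketch of loading the stack into a NumPy volume before handing it to any of the above (assuming the scan is a directory of single-slice .tiff files and that the tifffile package is available; the path is a placeholder):
+
+```python
+import glob
+import numpy as np
+import tifffile  # assumed dependency
+
+slices = sorted(glob.glob('scan/*.tiff'))                 # placeholder path
+volume = np.stack([tifffile.imread(p) for p in slices])   # shape (z, y, x)
+print(volume.shape, volume.dtype)
+```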
diff --git a/week11/13_1.py b/week11/13_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..83eb9d9e41870205d8dfe774ca70df3b1192a223
--- /dev/null
+++ b/week11/13_1.py
@@ -0,0 +1,17 @@
+import math
+
+import numpy as np
+
+'''
+Find the first five diagonal Pade approximants [1/1],..., [5/5] to e^x around the origin. Evaluate the approximations at x=1 and compare with the correct value of e = 2.718281828459045.
+How is the error improving with the order? How does that compare to the polynomial error?
+https://www.youtube.com/watch?v=3TK8Fi_I0h0
+Pade approximants are ratios of polynomials, order is written [N/M] where N is the order of the numerator and M is the order of the denominator
+'''
+e_truth = 2.718281828459045
+
+def taylor_expansion(x, order):
+    # Truncated Taylor series of e^x about 0, for comparison with the Pade error
+    return sum(x ** k / math.factorial(k) for k in range(order + 1))
+
+def pade_approx(N, M):
+    # Taylor coefficients of e^x: c_i = 1/i!
+    c = np.array([1.0 / math.factorial(i) for i in range(N + M + 1)])
+    # Denominator coefficients b_1..b_M (with b_0 = 1) solve
+    # sum_{k=0}^{M} b_k c_{N+j-k} = 0 for j = 1..M
+    A = np.array([[c[N + j - k] for k in range(1, M + 1)] for j in range(1, M + 1)])
+    b = np.concatenate(([1.0], np.linalg.solve(A, -c[N + 1:N + M + 1])))
+    # Numerator coefficients from the Cauchy product of c and b
+    a = np.array([sum(b[k] * c[i - k] for k in range(min(i, M) + 1)) for i in range(N + 1)])
+    return a, b
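+
+# A sketch of the comparison (my assumption of how to run it): evaluate each
+# diagonal [n/n] approximant at x = 1 against the Taylor polynomial of the
+# same total order 2n, and print both errors.
+for n in range(1, 6):
+    a, b = pade_approx(n, n)
+    pade_val = np.polyval(a[::-1], 1.0) / np.polyval(b[::-1], 1.0)
+    taylor_val = taylor_expansion(1.0, 2 * n)
+    print(f"[{n}/{n}] Pade error: {abs(pade_val - e_truth):.2e}, "
+          f"Taylor order-{2 * n} error: {abs(taylor_val - e_truth):.2e}")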
diff --git a/week11/13_2.py b/week11/13_2.py
new file mode 100644
index 0000000000000000000000000000000000000000..eae9e5418edfb098c8234ac464c1b6529cdb7da5
--- /dev/null
+++ b/week11/13_2.py
@@ -0,0 +1,107 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+'''
+Take as a data set x = {−10,−9,...,9,10}, and y(x) = 0
+if x ≤ 0 and y(x) = 1 if x > 0.
+'''
+def generate_data(x):
+    y = np.zeros(len(x))
+    y[x > 0] = 1
+    return x, y
+x, y = generate_data(np.arange(-10, 11, 1))  # x = {-10, ..., 10}
+
+# A) Fit the data with a polynomial with 5, 10, and 15 terms using a pseudo-inverse of the vandermonde matrix.
+def fit_polynomial(x, y, num_coeff):
+    # construct the Vandermonde matrix, which is the height of the data and width of the number of coefficients
+    Vand = np.vander(x, num_coeff, increasing=True)
+    # take the pseudo-inverse of the Vandermonde matrix
+    Vand_inv = np.linalg.pinv(Vand)
+    # multiply the pseudo-inverse by the y values to get the coefficients
+    coefs = Vand_inv.dot(y)
+    return coefs
+
+def polynomial_solve(x, coefs):
+    result = 0
+    for i in range(len(coefs)):
+        result += coefs[i] * (x ** i) # sum up the terms in the polynomial
+    return result
+
+plt.plot(x, y, 'o')
+for i in range(5, 16, 5):
+    coefs = fit_polynomial(x, y, i)
+    y_fit = polynomial_solve(x, coefs)
+    plt.plot(x, y_fit)
+plt.legend(['data', '5', '10', '15'])
+plt.title('Vandermonde fits with 5, 10, and 15 terms')
+plt.show()
+
+# B) Fit the data with 5, 10, and 15 r^3 RBFs uniformly distributed between x = −10 and x = 10.
+def RBF_fit(x, y, num_coeff):
+    '''
+    For reference: http://www.jessebett.com/Radial-Basis-Function-USRA/
+    https://www.youtube.com/watch?v=Kl9xk9BukNE
+    '''
+    # initialize the centers of the RBFs
+    centers = np.linspace(-10, 10, num_coeff)
+    # following 14.26
+    matrix = np.zeros((len(x),num_coeff))
+    for i in range(len(x)):
+        for j in range(num_coeff):
+            matrix[i, j] = np.abs(x[i] - centers[j]) ** 3
+    # take the pseudo-inverse of the matrix
+    matrix_inv = np.linalg.pinv(matrix)
+    # multiply the pseudo-inverse by the y values to get the coefficients
+    coefs = matrix_inv.dot(y)
+    return centers, coefs 
+    
+def RBF_solve(x, centers, coefs):
+    result = 0
+    for i in range(len(centers)):
+        result += coefs[i] * np.abs(x - centers[i]) ** 3
+    return result
+
+plt.plot(x, y, 'o')
+for i in range(5, 16, 5):
+    centers, coefs = RBF_fit(x, y, i)
+    y_fit = RBF_solve(x, centers, coefs)
+    plt.plot(x, y_fit)
+plt.legend(['data', '5', '10', '15'])
+plt.title('RBF fits with 5, 10, and 15 terms')
+plt.show()
+
+# C) Using the coefficients found for these six fits, evaluate the total out-of-sample error at x={−10.5,−9.5,...,9.5,10.5}.
+x_half_steps = np.arange(-10.5, 11, 1)  # {-10.5, -9.5, ..., 9.5, 10.5}
+truth = np.zeros(len(x_half_steps))
+truth[x_half_steps > 0] = 1
+# plot the half steps
+# plt.plot(x_half_steps, truth, 'o')
+# plt.title('Half steps')
+# plt.show()
+errors_vander = []
+errors_RBF = []
+
+for num_terms in range(5, 16, 5):
+    coefs = fit_polynomial(x, y, num_terms)
+    y_fit = polynomial_solve(x_half_steps, coefs)
+    errors_vander.append(np.sum(np.abs(y_fit - truth)))
+
+    centers, coefs = RBF_fit(x, y, num_terms)
+    y_fit = RBF_solve(x_half_steps, centers, coefs)
+    errors_RBF.append(np.sum(np.abs(y_fit - truth)))
+
+print('Vandermonde errors')
+for i, error in enumerate(errors_vander):
+    print(f"Terms: {(i + 1) * 5} Error: {error}")
+print('RBF errors')
+for i, error in enumerate(errors_RBF):
+    print(f"Terms: {(i + 1) * 5} Error: {error}")
diff --git a/week11/15_1.py b/week11/15_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff0db9ddbc0816b0a448ab5e2cbaaf04fc1a3f40
--- /dev/null
+++ b/week11/15_1.py
@@ -0,0 +1,57 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.animation as animation
+
+'''
+Generate a data set by sampling 100 points from y = tanh(x) between x = -5 and 5,
+adding random noise with a magnitude of 0.25 to y.
+Fit it with a cluster-weighted model using a local linear function in the clusters,
+y = a + b*x. Plot the data and the resulting forecast <y|x>, the uncertainty <sigma_y^2|x>,
+and the cluster probabilities p(x|c_m) p(c_m) as the number of clusters
+is increased (starting with one), and animate their evolution during the EM iterations.
+'''
+
+def generate_data(x, sigma):
+    noise = np.random.normal(0, sigma, len(x))
+    y = np.tanh(x) + noise
+    return x, y
+
+x,y = generate_data(np.arange(-5, 5, .1), 0.25)
+plt.plot(x, y, 'o')
+plt.show()
+
+def cluster_weighted(x, y, num_clusters, num_coeff, iterations):
+    N = len(x)
+    # For plotting we want to save everything
+    forecast = np.zeros((iterations, N))
+    forecast_error = np.zeros((iterations, N))
+    cluster_prob = np.zeros((iterations, num_clusters, N))
+    mu = np.random.uniform(min(x), max(x), num_clusters)  # initialize the cluster means
+    p_c = np.ones(num_clusters) / num_clusters  # initialize the cluster weights
+    variances = np.ones(num_clusters)  # initialize the input variances
+    beta = np.ones((num_clusters, num_coeff))  # local linear model coefficients
+    var_y = np.ones(num_clusters)  # output variances
+    
+    for iteration in range(iterations):
+        # P(x | c_m), equation (16.24)
+        p_x_c = np.zeros((N, num_clusters))
+        for i in range(N):
+            for m in range(num_clusters):
+                p_x_c[i, m] = np.exp(-(x[i] - mu[m]) ** 2 / (2 * variances[m])) / np.sqrt(2 * np.pi * variances[m])
+
+        # Save the probabilities per cluster
+        for m in range(num_clusters):
+            for i in range(N):
+                cluster_prob[iteration, m, i] = p_x_c[i, m] * p_c[m]
+
+        # The mean of our Gaussian as a function we can evaluate at x (16.26)
+        def f(x, coeff):
+            s = 0
+            for i in range(len(coeff)):
+                s += coeff[i] * (x ** i)
+            return s
+        # P(y | x, c_m) - equation (16.26)
+        p_y_x_c = np.zeros((N, num_clusters))
+        for i in range(N):
+            for m in range(num_clusters):
+                p_y_x_c[i, m] = np.exp(-(y[i] - f(x[i], beta[m])) ** 2 / (2 * var_y[m])) / np.sqrt(2 * np.pi * var_y[m])
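+
+        # --- A sketch of how the EM iteration could continue (my assumption,
+        # following standard cluster-weighted-modeling updates; none of this
+        # is from the original file) ---
+        # E-step: posterior responsibilities p(c_m | x_i, y_i)
+        joint = p_y_x_c * p_x_c * p_c[np.newaxis, :]
+        posterior = joint / np.sum(joint, axis=1, keepdims=True)
+        # M-step: update cluster weights, means, variances, and local fits
+        p_c = np.mean(posterior, axis=0)
+        V = np.vander(x, num_coeff, increasing=True)
+        for m in range(num_clusters):
+            w = posterior[:, m] / np.sum(posterior[:, m])
+            mu[m] = np.sum(w * x)
+            variances[m] = np.sum(w * (x - mu[m]) ** 2)
+            sw = np.sqrt(w)  # weighted least squares for y = a + b*x
+            beta[m] = np.linalg.lstsq(sw[:, None] * V, sw * y, rcond=None)[0]
+            var_y[m] = np.sum(w * (y - V @ beta[m]) ** 2)
+        # Forecast <y|x> and its variance at the data points
+        weights = p_x_c * p_c[np.newaxis, :]
+        weights = weights / np.sum(weights, axis=1, keepdims=True)
+        forecast[iteration] = np.sum(weights * (V @ beta.T), axis=1)
+        forecast_error[iteration] = np.sum(weights * var_y[np.newaxis, :], axis=1)
+
+    return forecast, forecast_error, cluster_prob
+
+# Hypothetical usage of the completed loop:
+# forecast, forecast_err, cluster_prob = cluster_weighted(x, y, 3, 2, 20)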
diff --git a/week11/README.MD b/week11/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..f747619c1f63b3760ee59e87be828bb9b31ff09b
--- /dev/null
+++ b/week11/README.MD
@@ -0,0 +1,5 @@
+# Week 11 - Functions + Density Estimation
+
+<img src="img/13_2_a.png" height="600">
+
+<img src="img/13_b_a.png" height="600">
diff --git a/week11/img/13_2_a.png b/week11/img/13_2_a.png
new file mode 100644
index 0000000000000000000000000000000000000000..9b796a2a3e3a59c9b2968403b4b56059d4a39781
Binary files /dev/null and b/week11/img/13_2_a.png differ
diff --git a/week11/img/13_2_b.png b/week11/img/13_2_b.png
new file mode 100644
index 0000000000000000000000000000000000000000..86f72173564a3ef272cb65bb15d71ed5f901189c
Binary files /dev/null and b/week11/img/13_2_b.png differ
diff --git a/week12/14_1_CMAES.py b/week12/14_1_CMAES.py
new file mode 100644
index 0000000000000000000000000000000000000000..61f20b8eb06cd5249699060e1489e0b6f15e0360
--- /dev/null
+++ b/week12/14_1_CMAES.py
@@ -0,0 +1,69 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.animation as animation
+from cmaes import CMA
+
+'''
+Search week
+'''
+
+# A) Plot the rosenbrock function in 3D
+def rosenbrock_2d(coords):
+    x = coords[0]
+    y = coords[1]
+    return  (1 - x) ** 2 + 100 * (y - x ** 2) ** 2 # + np.sin(x)/x
+
+def parabola(coords):
+    x = coords[0]
+    y = coords[1]
+    return x**2 + y**2
+
+bound = 2
+grid_points = 100
+x = np.linspace(-bound, bound, grid_points)
+y = np.linspace(-bound, bound, grid_points)
+
+X, Y = np.meshgrid(x, y)
+Z = rosenbrock_2d([X, Y])
+
+fig = plt.figure()
+ax = plt.axes(projection='3d')
+# ax.plot_surface(X, Y, Z, cmap='viridis', alpha=.5)
+
+# F) Rosenbrock search with CMAES
+'''
+For reference: https://www.youtube.com/watch?v=lj_qdRxzjMo
+'''
+
+optimizer = CMA(mean=np.array([-.5, -.5]), sigma=.5, population_size=20)
+
+generations = 50
+points = np.zeros((generations, optimizer.population_size, 3))
+
+for generation in range(generations):
+    solutions = []
+    for i in range(optimizer.population_size):
+        point = optimizer.ask()
+        value = rosenbrock_2d([point[0], point[1]])
+        solutions.append((point, value))
+        points[generation, i] = point[0], point[1], value
+    optimizer.tell(solutions)
+
+# Animate each generation
+def animate(i):
+    ax.clear()
+    ax.view_init(30, 45)
+    ax.text2D(0.45, 0.95, "Generation: " + str(i), transform=ax.transAxes)
+    plt.title("Parabolic function with CMA-ES")
+    ax.plot_surface(X, Y, Z, cmap='viridis', alpha=.5)
+    ax.scatter(*zip(*points[i]), c = plt.cm.jet(i/generations))
+
+anim = animation.FuncAnimation(fig, animate, frames=generations, interval=100, repeat=False)
+plt.show()
+# save animation
+# anim.save('cmaes_rosenbrock_2d.gif', writer='imagemagick', fps=4)
\ No newline at end of file
diff --git a/week12/14_1_gradient_descent.py b/week12/14_1_gradient_descent.py
new file mode 100644
index 0000000000000000000000000000000000000000..118010efce7240ee53fbd5e66b08fa8212a76b83
--- /dev/null
+++ b/week12/14_1_gradient_descent.py
@@ -0,0 +1,87 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import sympy
+
+'''
+Search week
+'''
+
+# A) Plot the rosenbrock function in 3D
+def rosenbrock_2d(coords):
+    x = coords[0]
+    y = coords[1]
+    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
+
+bound = 1
+grid_points = 100
+x = np.linspace(-bound, bound, grid_points)
+y = np.linspace(-bound, bound, grid_points)
+
+X, Y = np.meshgrid(x, y)
+Z = rosenbrock_2d([X, Y])
+
+fig = plt.figure()
+ax = plt.axes(projection='3d')
+ax.plot_surface(X, Y, Z, cmap='viridis', alpha=0.9)
+
+# D) Rosenbrock search with gradient descent
+
+x, y = sympy.symbols('x y')
+rosenbrock = (1 - x)**2 + 100*(y - x**2)**2
+
+# Find the gradient of the rosenbrock function with sympy
+def find_diff():
+    rosenbrock_diff = sympy.derive_by_array(rosenbrock, (x, y))
+    return rosenbrock_diff
+
+def evaluate_gradient(derivative, variables, substitute):
+    # Substitute numeric values into the symbolic gradient and return floats
+    subs_dict = dict(zip(variables, substitute))
+    return np.array(derivative.subs(subs_dict), dtype=float)
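+
+# Sanity check (my own addition, not part of the assignment): the gradient of
+# the Rosenbrock function in closed form is
+#   df/dx = -2(1 - x) - 400 x (y - x^2),   df/dy = 200 (y - x^2)
+def rosenbrock_grad(coords):
+    x0, y0 = coords
+    return np.array([-2 * (1 - x0) - 400 * x0 * (y0 - x0 ** 2),
+                     200 * (y0 - x0 ** 2)])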
+
+def gradient_descent(starting_point, learning_rate, iterations, tol=1e-2):
+    x_num = starting_point[0]
+    y_num = starting_point[1]
+    xs = [x_num]
+    ys = [y_num]
+    inertia = np.array([0.0, 0.0])  # momentum term carried between steps
+    zs = [rosenbrock_2d([x_num, y_num])]
+    loop_count = 0
+    for i in range(iterations):
+        gradient = evaluate_gradient(rosenbrock_diff, [x, y], [x_num, y_num]) + inertia
+        x_num = x_num - learning_rate * gradient[0]
+        y_num = y_num - learning_rate * gradient[1]
+        z_num = rosenbrock_2d([x_num, y_num])
+        xs.append(x_num)
+        ys.append(y_num)
+        zs.append(z_num)
+        inertia = gradient * 0.65  # substantial momentum, needed to cross the flat valley floor
+        loop_count += 1
+        # if the difference between the last two z values is smaller than the tolerance, stop
+        if abs(zs[-1] - zs[-2]) < tol:
+            break
+    return [xs, ys, zs], loop_count
+
+import time
+start_time = time.time()
+rosenbrock_diff = find_diff()
+starting_point = [-1, -1]
+learning_rate = 0.001
+iterations = 100
+
+# Scatter plot of the gradient descent
+descent_points, passes = gradient_descent(starting_point, learning_rate, iterations)
+end_time = time.time()
+print("Time taken: " + str(end_time - start_time))
+
+ax.plot(descent_points[0], descent_points[1], descent_points[2], c='r', marker='o', linewidth = 1, markersize=3)
+# title with the final point coords (with 3 decimal places)
+final_point = str([round(descent_points[0][-1], 3), round(descent_points[1][-1], 3), round(descent_points[2][-1], 3)])
+ax.text2D(0.05, 1, "Final Point: " + final_point, transform=ax.transAxes)
+# show the number of passes as a text block on the plot
+ax.text2D(0.05, 0.95, "Passes: " + str(passes), transform=ax.transAxes)
+ax.view_init(30, 45)
+plt.show()
+
diff --git a/week12/14_1_nelder_mead.py b/week12/14_1_nelder_mead.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9bcf591ca30d8902a5e072a1cb0822210260b39
--- /dev/null
+++ b/week12/14_1_nelder_mead.py
@@ -0,0 +1,132 @@
+import numpy as np
+import matplotlib.pyplot as plt
+
+'''
+Search week
+'''
+
+# A) Plot the rosenbrock function in 3D
+def rosenbrock_2d(coords):
+    x = coords[0]
+    y = coords[1]
+    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
+
+bound = 1
+grid_points = 100
+x = np.linspace(-bound, bound, grid_points)
+y = np.linspace(-bound, bound, grid_points)
+
+X, Y = np.meshgrid(x, y)
+Z = rosenbrock_2d([X, Y])
+
+fig = plt.figure()
+ax = plt.axes(projection='3d')
+ax.plot_surface(X, Y, Z, cmap='viridis', alpha=0.9)
+
+# B) Rosenbrock search with the downhill simplex method
+def nelder_mead(obj_func, x_start, step=1, max_iter=1000, tol=0.01, alpha=2.1, gamma=3.9, rho=1.1, sigma=.9):  # tuned coefficients; the textbook values are alpha=1, gamma=2, rho=0.5, sigma=0.5
+    step_counter = 0
+    eval_counter = 0
+    
+    # Define the simplex
+    n = len(x_start) # Number of dimensions
+    simplex = np.zeros((n+1, n)) # Initialize the simplex, which has n+1 points (always one more point than the number of dimensions)
+    simplex[0] = x_start # Set the starting point as the first point in the simplex
+    
+    # For every dimension, create a point that is step away from the starting point
+    for i in range(n):
+        point = x_start.copy()
+        point[i] += step
+        simplex[i+1] = point
+
+    # Evaluate the objective function for each point in the simplex
+    f = np.zeros(n+1)
+    for i in range(n+1):
+        f[i] = obj_func(simplex[i])
+        eval_counter += 1
+
+    simplices = [simplex.copy()]
+    simplex_evals = [f.copy()]
+
+    for iteration in range(max_iter):
+        step_counter += 1
+        # Sort the simplex by function value
+        order = np.argsort(f)
+        simplex = simplex[order]
+        f = f[order]
+        
+        # Calculate the centroid
+        centroid_coords = np.mean(simplex[:-1], axis=0)
+        
+        # Reflection
+        x_r = centroid_coords + alpha*(centroid_coords - simplex[-1])
+        f_r = obj_func(x_r)
+        eval_counter += 1
+        if f[0] <= f_r < f[-2]:
+            simplex[-1] = x_r
+            f[-1] = f_r
+        # Expansion
+        elif f_r < f[0]:
+            x_e = centroid_coords + gamma*(x_r - centroid_coords)
+            f_e = obj_func(x_e)
+            eval_counter += 1
+            if f_e < f_r:
+                simplex[-1] = x_e
+                f[-1] = f_e
+            else:
+                simplex[-1] = x_r
+                f[-1] = f_r
+        # Contraction
+        else:
+            x_c = centroid_coords + rho*(simplex[-1] - centroid_coords)
+            f_c = obj_func(x_c)
+            eval_counter += 1
+            if f_c < f[-1]:
+                simplex[-1] = x_c
+                f[-1] = f_c
+            # Shrink
+            else:
+                for i in range(1, n+1):
+                    simplex[i] = simplex[0] + sigma*(simplex[i] - simplex[0])
+                    f[i] = obj_func(simplex[i])
+                    eval_counter += 1
+        # After each iteration, add the simplex to the list of simplices
+        simplices.append(simplex.copy())
+
+        # Check the stopping criterion: if the simplex is small enough (in both position and function value), return
+        if np.max(np.abs(simplex[1:] - simplex[0])) < tol and np.max(np.abs(f[1:] - f[0])) < tol:
+            return simplices, step_counter, eval_counter
+    
+    # If we reach the maximum number of iterations (the for loop expires), return the list of simplices
+    return simplices, step_counter, eval_counter
+
+# Downhill simplex method
+x_start = np.array([-1, -1])
+
+simplex_points, steps, evals = nelder_mead(rosenbrock_2d, x_start)
+final_point = simplex_points[-1][0]  # index 0 holds the best vertex from the in-loop sort
+print('Final Point: ', final_point)
+print('Iterations: ', steps)
+print('Evaluations: ', evals)
+
+# Each simplex consists of 3 points, so we need to reshape the array
+simplex_points = np.array(simplex_points).reshape(-1, 2)
+
+all_evals = np.zeros(len(simplex_points))
+for i in range(len(simplex_points)):
+    all_evals[i] = rosenbrock_2d(simplex_points[i])
+
+# Plot all of the simplex evaluations
+# Add iterations and evaluations to the plot
+ax.text2D(0.05, 0.95, "Iterations: %d" % steps, transform=ax.transAxes)
+ax.text2D(0.05, 0.90, "Evaluations: %d" % evals, transform=ax.transAxes)
+ax.view_init(30, 45)
+# Plot lines connecting the simplex points
+for i in range(len(simplex_points)-1):
+    ax.plot(simplex_points[i:i+2,0], simplex_points[i:i+2,1], all_evals[i:i+2]+.01, color='red', linewidth=1, marker = 'o', markersize=3)
+plt.show()
\ No newline at end of file
diff --git a/week12/README.MD b/week12/README.MD
new file mode 100644
index 0000000000000000000000000000000000000000..f5d17181e1570cbe36079606458961b63d7b0460
--- /dev/null
+++ b/week12/README.MD
@@ -0,0 +1,21 @@
+# Week 12 - Search
+
+## Rosenbrock search with Nelder-Mead
+
+<img src="img/nelder_mead.jpg" height="600">
+
+## Rosenbrock search with Gradient Descent
+
+I had to add pretty substantial inertia (momentum) to get all the way to the minimum at the edge of the banana-shaped valley.
+
+<img src="img/grad_descent.jpg" height="600">
+
+## Rosenbrock search with CMA-ES
+
+Using the nicely packaged cmaes library here; the core ask/tell loop is sketched below.
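+
+A minimal sketch of that loop, pulled from `14_1_CMAES.py` (it assumes `rosenbrock_2d` is defined as in that script):
+
+```python
+import numpy as np
+from cmaes import CMA
+
+optimizer = CMA(mean=np.array([-0.5, -0.5]), sigma=0.5, population_size=20)
+for generation in range(50):
+    solutions = []
+    for _ in range(optimizer.population_size):
+        point = optimizer.ask()                         # sample a candidate
+        solutions.append((point, rosenbrock_2d(point)))
+    optimizer.tell(solutions)                           # update mean/covariance
+```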
+
+<img src="img/cmaes_rosenbrock.gif" height="400">
+
+As a sanity check, I searched a parabola as well:
+
+<img src="img/parabolic_rosenbrock.gif" height="400">
\ No newline at end of file
diff --git a/week12/img/cmaes_parabola.gif b/week12/img/cmaes_parabola.gif
new file mode 100644
index 0000000000000000000000000000000000000000..4d830871a31faad153e5aa70e50584a3ccc6128b
Binary files /dev/null and b/week12/img/cmaes_parabola.gif differ
diff --git a/week12/img/cmaes_rosenbrock.gif b/week12/img/cmaes_rosenbrock.gif
new file mode 100644
index 0000000000000000000000000000000000000000..8a66fb5449b566749fdd9fc9306bbe3daa45364f
Binary files /dev/null and b/week12/img/cmaes_rosenbrock.gif differ
diff --git a/week12/img/grad_descent.jpg b/week12/img/grad_descent.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..ffe19a50614792074531996792a08bc57dbd3805
Binary files /dev/null and b/week12/img/grad_descent.jpg differ
diff --git a/week12/img/nelder_mead.jpg b/week12/img/nelder_mead.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b2bd816f79ab55f155f212df1be319679eb9a7e5
Binary files /dev/null and b/week12/img/nelder_mead.jpg differ
diff --git a/week9/week9_11_2.py b/week9/week9_11_2.py
index 0535f118f39f250fda095f068bea248350a604a4..b34e6aa30c7da02b48d4147575a91be933e1d146 100644
--- a/week9/week9_11_2.py
+++ b/week9/week9_11_2.py
@@ -26,8 +26,9 @@ def levenberg_step(x, y, a, b, l):
     J[0] = -2 * np.sum((y - np.sin(a + b*x)) * np.cos(a + b * x))    # from wolfram
     J[1] = -2 * np.sum((y - np.sin(a + b*x)) * np.cos(a + b * x) * x)
 
+    # Hessian from wolfram and then using 12.28 to form M...
     M = np.zeros((2, 2))
-    M[0, 0] = 0.5 * (1 + l) * 2 * np.sum(np.cos(a + b * x) ** 2)    # from wolfram
+    M[0, 0] = 0.5 * (1 + l) * 2 * np.sum(np.cos(a + b * x) ** 2)
     M[1, 1] = 0.5 * (1 + l) * 2 * np.sum((x * np.cos(a + b * x)) ** 2)
 
     M[0, 1] = 0.5 * 2 * np.sum(x * (np.cos(a + b * x) ** 2))