\zeta(x) = log(1 + e^x) in LaTeX
\zeta(x) = \log(1 + e^x)
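A minimal numpy sketch of this softplus function (my addition, not part of the original card); np.logaddexp(0, x) evaluates log(1 + e^x) in a numerically stable way:
import numpy as np
def softplus(x):
    # log(1 + e^x), computed as log(e^0 + e^x) for stability
    return np.logaddexp(0, x)
print(softplus(np.array([-5.0, 0.0, 5.0])))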
y_i = e^(y_i) / Σ_{j=1}^{N} e^(y_j) in LaTeX
y_i = \frac{e^{y_i}}{\sum_{j=1}^{N} e^{y_j}}
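A hedged numpy sketch of this softmax formula (my addition, not from the original card); subtracting the maximum before exponentiating is a standard stability trick:
import numpy as np
def softmax(y):
    # exp(y_i) / sum_j exp(y_j), shifted by max(y) to avoid overflow
    e = np.exp(y - np.max(y))
    return e / e.sum()
print(softmax(np.array([1.0, 2.0, 3.0])))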
Displays plot under cell
%matplotlib inline
Imports for plotting a function
import numpy as np
import matplotlib.pyplot as plt
Generate x values using numerical evaluation (100 points between -5 and 5) and calculate corresponding y values
x = np.linspace(-5, 5, 100)
y = function(x)
Create a function that returns y = 1/(1 + e^(-x))
def function(x):
    return 1 / (1 + np.exp(-x))
How would I create a plot?
plt.plot(x, y, label=r'$y = 1/(1 + e^{-x})$', color='red')
plt.xlim(-5, 5)
plt.ylim(0, 1)
plt.xlabel('X axis')
plt.ylabel('Y axis')
plt.title('Plot of Function')
plt.legend()
plt.grid(True)
plt.show()
Print the derivative of y = 1/(1 + e^(-x))
from sympy import Derivative, exp, symbols
x = symbols('x')
print(Derivative(1 / (1 + exp(-x)), x).doit())
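As a follow-up (my addition, continuing from the card above), a sketch verifying that this derivative equals the well-known sigmoid identity σ(x)(1 − σ(x)):
from sympy import simplify
sigma = 1 / (1 + exp(-x))
d = Derivative(sigma, x).doit()
print(simplify(d - sigma * (1 - sigma)))  # prints 0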
Create a vector and print the L2 and L1 norms
import numpy as np
from numpy.linalg import norm
vector_a = np.array([5, 2, 8])
print(f"Vector A: {vector_a}")
l2_norm = norm(vector_a, 2)
print(f"L2 Norm of Vector A: {l2_norm:.4f}")
l1_norm = norm(vector_a, 1)
print(f"L1 Norm of Vector A: {l1_norm:.4f}")
Declare 2 vectors and find the Euclidean and Cosine Distances
import numpy as np
from scipy.spatial.distance import euclidean, cosine
vx = np.array([5, 2, 8])
vy = np.array([1, 3, 2])
print(f"Eudclidean Distance v2: {euclidean(vy, vx)}")
print(f"Cosine Distance v2: {cosine(vy, vx)}")
pretty print a 5 x 3 matrix that counts up to 15
import numpy as np
matrix = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
[13, 14, 15]])
print(matrix)
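Equivalently (my suggestion, not the card's answer), the same matrix can be built with np.arange:
matrix = np.arange(1, 16).reshape(5, 3)  # 1..15 laid out as 5 rows of 3
print(matrix)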
Create a matrix and a vector and multiply them
import numpy as np
vy = np.array([1, 3, 2])
A = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
[13, 14, 15]])
print(f"Ay = {A @ vy}")
Create a 5x3 matrix and calculate the pairwise Euclidean distance matrix for the row vectors
import numpy as np
from scipy.spatial.distance import pdist, squareform
A = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
[13, 14, 15]])
condensed_distances = pdist(A, 'euclidean')
print(squareform(condensed_distances))
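As a cross-check (my addition), the same pairwise distances via numpy broadcasting:
D = np.linalg.norm(A[:, None, :] - A[None, :, :], axis=-1)
print(D)  # 5 x 5 symmetric matrix with zeros on the diagonal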
Create a matrix and its transpose
import numpy as np
B = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]])
C = B.T
print(C)
Create a matrix and evaluate its eigendecomposition
import numpy as np
B = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8],
              [9, 10, 11, 12]])
# np.linalg.eig requires a square matrix, so decompose B @ B.T (3 x 3)
eigenvalues, eigenvectors = np.linalg.eig(B @ B.T)
Reconstruct a matrix using eigendecomposition
import numpy as np
B = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8],
              [9, 10, 11, 12]])
evals, evecs = np.linalg.eig(B @ B.T)
Lambda = np.diag(evals)
v_inv = np.linalg.inv(evecs)
C_reconstructed = evecs @ Lambda @ v_inv
print(f"Reconstructed Matrix:\n{C_reconstructed}\n")
is_close = np.allclose(B @ B.T, C_reconstructed)
print(f"Reconstruction successful? : {is_close}")
Get Singular Value Decomp for a matrix
import numpy as np
B = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]])
ls, s, rs = np.linalg.svd(B)
print("\nLeft singular vectors:\n", ls)
print("\nSingular values:\n", s)
print("\nRight singular vectors (transposed):\n", rs)
use Singular Value Decomp to reconstruct matrix
import numpy as np
B = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]])
u, s, vh = np.linalg.svd(B, full_matrices=False)
C_recon = u @ np.diag(s) @ vh
print("\nReconstructed Matrix C:\n", C_recon)
# Check if reconstruction is close to the original
print("\nReconstruction successful? :", np.allclose(B @ B.T, C_recon))
Generate a plot of percent variance accounted for by each principal component
import numpy as np
import matplotlib.pyplot as plt
wdbc = np.loadtxt("https://phillips-lab.org/public/WDBC.txt")
X = wdbc[:, :-1] # features
Y = wdbc[:, -1] # labels
# Mean-center the data
X_centered = X - np.mean(X, axis=0)
# Singular Value Decomposition
U, S, Vt = np.linalg.svd(X_centered, full_matrices=False)
# Percent variance explained by each principal component
# (variance is proportional to the squared singular values)
percent_variance = 100 * S**2 / np.sum(S**2)
# Plot
plt.figure()
plt.plot(percent_variance, marker='o')
plt.xlabel("Principal Component")
plt.ylabel("Percent Variance")
plt.title("Percent Variance Explained by Each Principal Component")
plt.grid(True)
plt.show()
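A common follow-up (my addition): the cumulative percent variance, useful for deciding how many components to keep:
cumulative = np.cumsum(percent_variance)
print(cumulative[:5])  # running total over the first five components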
determine the percent variance accounted for by the first two principal components
import numpy as np
# Singular Value Decomposition
U, S, Vt = np.linalg.svd(X_centered, full_matrices=False)
# Percent variance explained by the first two principal components
percent_variance = 100 * (S[0]**2 + S[1]**2) / np.sum(S**2)
percent_variance
plot the two-dimensional projection with properly colored category labels (2 classes)
import numpy as np
import matplotlib.pyplot as plt
# Project onto first two principal components
X_rotated = U @ np.diag(S)
PCs = X_rotated[:, :2]
# Plot with category labels
plt.figure()
plt.scatter(PCs[Y == 0, 0], PCs[Y == 0, 1],
color='blue', label='Benign', alpha=0.7)
plt.scatter(PCs[Y == 1, 0], PCs[Y == 1, 1],
color='red', label='Malignant', alpha=0.7)
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.title("2D PCA Projection")
plt.legend()
plt.show()
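For reference (my addition), the same projection via scikit-learn; component signs may differ between implementations:
from sklearn.decomposition import PCA
PCs_sk = PCA(n_components=2).fit_transform(X)
print(PCs_sk[:3])  # compare with PCs[:3, :2]; columns may be sign-flipped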
generate a tSNE plot of the data (2 classes)
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
# Scale features
X_scaled = StandardScaler().fit_transform(X)
# Run t-SNE
tsne = TSNE(n_components=2, random_state=42, perplexity=30)
X_tsne = tsne.fit_transform(X_scaled)
# Plot t-SNE results
plt.figure()
plt.scatter(X_tsne[Y == 0, 0], X_tsne[Y == 0, 1],
color='blue', label='Benign', alpha=0.7)
plt.scatter(X_tsne[Y == 1, 0], X_tsne[Y == 1, 1],
color='red', label='Malignant', alpha=0.7)
plt.xlabel("t-SNE Component 1")
plt.ylabel("t-SNE Component 2")
plt.title("t-SNE Projection")
plt.legend()
plt.show()
plot the two-dimensional projection with properly colored category labels (mult classes)
import numpy as np
import matplotlib.pyplot as plt
# Get unique classes
classes = np.unique(Y)
colors = ['red', 'green', 'blue', 'cyan', 'magenta', 'yellow']
# Plot results
plt.figure()
for i, c in enumerate(classes):
plt.scatter(
PCs[Y == c, 0],
PCs[Y == c, 1],
color=colors[i % len(colors)],
label=f"Class {c}",
alpha=0.7
)
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.title("2D PCA Projection")
plt.legend()
plt.show()
generate a tSNE plot of the data (mult classes)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
# Scale features
X_scaled = StandardScaler().fit_transform(X)
# Run t-SNE
tsne = TSNE(n_components=2, random_state=42, perplexity=30)
X_tsne = tsne.fit_transform(X_scaled)
# Get unique classes
classes = np.unique(Y)
colors = ['red', 'green', 'blue', 'cyan', 'magenta', 'yellow']
# Plot t-SNE results
plt.figure()
for i, c in enumerate(classes):
plt.scatter(
X_tsne[Y == c, 0],
X_tsne[Y == c, 1],
color=colors[i % len(colors)],
label=f"Class {int(c)}",
alpha=0.7
)
plt.xlabel("t-SNE Component 1")
plt.ylabel("t-SNE Component 2")
plt.title("t-SNE Projection")
plt.legend()
plt.show()
Reconstruct from the eigendecomposition formula
reconstructed = evecs @ np.diag(evals) @ np.linalg.inv(evecs)