neural_net.py
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension of
D, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
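# Example (hypothetical sizes): TwoLayerNet(4, 10, 3) builds a net for
# 4-dimensional inputs, 10 hidden units, and 3 classes, so W1 has shape (4, 10)
# and W2 has shape (10, 3).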
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
f = lambda x: np.maximum(0,x)
h1 = f(np.dot(X, W1)+b1)
h2 = np.dot(h1,W2)+b2
scores = h2
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier's loss. So that your results match ours, multiply the #
# regularization loss by 0.5 #
#############################################################################
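# Softmax with the usual max-subtraction trick for numerical stability:
#   p[i, c] = exp(s[i, c] - max_j s[i, j]) / sum_j exp(s[i, j] - max_j s[i, j])
# The data loss is the mean of -log p[i, y[i]] over the batch, and the
# regularization term is 0.5 * reg * (||W1||^2 + ||W2||^2).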
shift_scores = scores - np.max(scores, axis = 1).reshape(-1,1)
softmax_output = np.exp(shift_scores)/np.sum(np.exp(shift_scores), axis = 1).reshape(-1,1)
loss = -np.sum(np.log(softmax_output[np.arange(N), y]))
loss /= N
loss += reg * 0.5* (np.sum(W1 * W1)+np.sum(W2 * W2))
#############################################################################
# END OF YOUR CODE #
#############################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of the same size #
#############################################################################
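# For the softmax loss, d(loss)/d(scores) = (softmax_output - one_hot(y)) / N.
# The subtraction of 1 at the correct class is done in place below; the 1/N
# factor and the reg * W term from the L2 penalty are applied when each
# parameter gradient is formed. The error is then backpropagated through the
# second FC layer, the ReLU (which zeroes gradients where the pre-activation
# was non-positive), and the first FC layer.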
softmax_output[np.arange(N), y] += -1
grads['W2'] = 1.0/N * h1.T.dot(softmax_output) + reg* W2
grads['b2'] = 1.0/N * np.sum(softmax_output, axis = 0)
dh1 = softmax_output.dot(W2.T)
dh1_ReLU = (h1 > 0) * dh1
grads['W1'] = 1.0/N * X.T.dot(dh1_ReLU) + reg* W1
grads['b1'] = 1.0/N * np.sum(dh1_ReLU, axis = 0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train // batch_size, 1)
# Use SGD to optimize the parameters in self.params
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
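# Sample batch_size indices (with replacement) and slice out the corresponding
# rows of X and y.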
sample_index = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[sample_index,:]
y_batch = y[sample_index]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
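# Vanilla SGD: step each parameter in the direction opposite its gradient.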
self.params['W1'] -= learning_rate * grads['W1']
self.params['W2'] -= learning_rate * grads['W2']
self.params['b1'] -= learning_rate * grads['b1']
self.params['b2'] -= learning_rate * grads['b2']
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
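# Same forward pass as in loss(); the prediction is the argmax over classes.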
f = lambda x: np.maximum(0,x)
h1 = f(np.dot(X, self.params['W1'])+self.params['b1'])
h2 = np.dot(h1,self.params['W2'])+self.params['b2']
scores = h2
y_pred = np.argmax(scores, axis = 1)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
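As a quick sanity check of loss(), the analytic gradients can be compared against
centered-difference numeric gradients on a tiny random model built from the
TwoLayerNet class above. This is only a sketch using made-up toy sizes and an
arbitrary regularization strength; it is not part of the assignment files:
import numpy as np  # already imported above; repeated so the snippet stands alone

np.random.seed(0)
net = TwoLayerNet(input_size=4, hidden_size=10, output_size=3, std=1e-1)
X = np.random.randn(5, 4)
y = np.array([0, 1, 2, 2, 1])
loss, grads = net.loss(X, y, reg=0.05)

h = 1e-5
for name, W in net.params.items():
    # Numerically estimate d(loss)/dW one entry at a time.
    num_grad = np.zeros_like(W)
    it = np.nditer(W, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = W[ix]
        W[ix] = old + h
        loss_plus, _ = net.loss(X, y, reg=0.05)
        W[ix] = old - h
        loss_minus, _ = net.loss(X, y, reg=0.05)
        W[ix] = old
        num_grad[ix] = (loss_plus - loss_minus) / (2 * h)
        it.iternext()
    rel_err = np.max(np.abs(num_grad - grads[name]) /
                     np.maximum(1e-8, np.abs(num_grad) + np.abs(grads[name])))
    print('%s max relative error: %e' % (name, rel_err))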
Part of the implementation from two_layer_net.ipynb
from vis_utils import visualize_grid
# Visualize the weights of the network
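# (each column of W1 is reshaped back into a 32x32x3 image, which assumes the
#  CIFAR-10 input size 3072 = 32*32*3, so the first-layer weights can be viewed
#  as image templates)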
def show_net_weights(net):
W1 = net.params['W1']
W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
plt.gca().axis('off')
plt.show()
best_net = None # store the best model into this
results = {}
best_val = -1
learning_rate = [8e-4, 9e-4]
regularization_strengths = [0.01, 0.005]
learning_rate_decay= [0.97, 0.95]
#################################################################################
# TODO: Tune hyperparameters using the validation set. Store your best trained #
# model in best_net. #
# #
# To help debug your network, it may help to use visualizations similar to the #
# ones we used above; these visualizations will have significant qualitative #
# differences from the ones we saw above for the poorly tuned network. #
# #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to #
# write code to sweep through possible combinations of hyperparameters #
# automatically like we did on the previous exercises. #
#################################################################################
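# Small grid search over (learning rate, L2 regularization, learning-rate decay);
# the model with the best validation accuracy is kept in best_net.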
iters = 2000
for lr in learning_rate:
for reg in regularization_strengths:
for lrd in learning_rate_decay:
print(lr, reg, lrd)
net = TwoLayerNet(input_size, hidden_size, num_classes)
net.train(X_train, y_train, X_val, y_val, learning_rate=lr, reg=reg, learning_rate_decay=lrd, num_iters=iters, verbose=False)
y_train_pred = net.predict(X_train)
acc_train = np.mean(y_train == y_train_pred)
y_val_pred = net.predict(X_val)
acc_val = np.mean(y_val == y_val_pred)
results[(lr, reg, lrd)] = (acc_train, acc_val)
if best_val < acc_val:
best_val = acc_val
best_net = net
#################################################################################
# END OF YOUR CODE #
#################################################################################
# Print out results.
for lr, reg, lrd in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg, lrd)]
print('lr %e reg %e lrd %e train accuracy: %f val accuracy: %f' % (
lr, reg, lrd, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# visualize the weights of the best network
show_net_weights(best_net)
test_acc = (best_net.predict(X_test) == y_test).mean()
print('Test accuracy: ', test_acc)
lr 8.000000e-04 reg 5.000000e-03 lrd 9.500000e-01 train accuracy: 0.524082 val accuracy: 0.480000
lr 8.000000e-04 reg 5.000000e-03 lrd 9.700000e-01 train accuracy: 0.533122 val accuracy: 0.491000
lr 8.000000e-04 reg 1.000000e-02 lrd 9.500000e-01 train accuracy: 0.529204 val accuracy: 0.501000
lr 8.000000e-04 reg 1.000000e-02 lrd 9.700000e-01 train accuracy: 0.532184 val accuracy: 0.499000
lr 9.000000e-04 reg 5.000000e-03 lrd 9.500000e-01 train accuracy: 0.533469 val accuracy: 0.486000
lr 9.000000e-04 reg 5.000000e-03 lrd 9.700000e-01 train accuracy: 0.533041 val accuracy: 0.499000
lr 9.000000e-04 reg 1.000000e-02 lrd 9.500000e-01 train accuracy: 0.532510 val accuracy: 0.493000
lr 9.000000e-04 reg 1.000000e-02 lrd 9.700000e-01 train accuracy: 0.531918 val accuracy: 0.477000
best validation accuracy achieved during cross-validation: 0.501000
Test accuracy: 0.501