Cost function optimization
The most basic (unregularized) update rule comes from gradient descent over a mini-batch of size m: w -> w - (eta/m) * sum_x dC_x/dw and b -> b - (eta/m) * sum_x dC_x/db.
The corresponding key code:
self.weights = [w-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]
# Output-layer error and gradients:
delta = (self.cost).delta(zs[-1], activations[-1], y)
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# For each earlier layer, inside the backward loop (l = 2 is the second-last layer, and so on):
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
1. Cross-entropy cost
For a single training example, C = -sum_j [ y_j*ln(a_j) + (1-y_j)*ln(1-a_j) ], summed over the output neurons.
The corresponding code:
return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))
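The advantage shows up in the output-layer error delta: with the quadratic cost, delta carries a sigmoid_prime(z) factor that can be tiny when the output neuron saturates, whereas with the cross-entropy that factor cancels. The two delta methods in the full listing below make the contrast explicit:
# QuadraticCost: learning slows down when the output neuron saturates
delta = (a - y) * sigmoid_prime(z)
# CrossEntropyCost: the sigmoid_prime factor cancels out
delta = (a - y)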
2. Softmax
A softmax output layer replaces the sigmoid activation with a_j = exp(z_j) / sum_k exp(z_k), so the outputs form a probability distribution. Key code:
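The original snippet did not survive in these notes. As a placeholder, a minimal NumPy sketch (the max-subtraction for numerical stability is my own addition; note that network2.py below sticks with sigmoid outputs):
def softmax(z):
    """Softmax activation: exponentiate and normalize so the outputs sum to 1."""
    e = np.exp(z - np.max(z))  # subtract the max to avoid overflow
    return e / np.sum(e)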
3. Overfitting: the network performs well on the training set but poorly on the test set.
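One way to see it with the code below is to train while monitoring accuracy on both the training data and held-out data (here net, training_data and validation_data are assumed to already exist):
_, evaluation_accuracy, _, training_accuracy = net.SGD(
    training_data, 30, 10, 0.5,
    evaluation_data=validation_data,
    monitor_evaluation_accuracy=True,
    monitor_training_accuracy=True)
# Overfitting shows up as training accuracy that keeps climbing while
# evaluation accuracy stalls or starts to drop.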
4. L2 regularization
Adding the penalty (lmbda/2n) * sum w^2 to the cost turns the weight update into w -> (1 - eta*lmbda/n)*w - (eta/m) * sum_x dC_x/dw, i.e. the weights are shrunk ("weight decay") before the usual gradient step. The corresponding code:
self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]
5. L1 regularization
Adding (lmbda/n) * sum |w| instead gives the update w -> w - (eta*lmbda/n)*sgn(w) - (eta/m) * sum_x dC_x/dw, so every weight is pulled toward zero by a constant amount rather than in proportion to its size; a sketch of the modified update follows.
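network2.py below only implements L2 regularization; a minimal sketch of how the weight update in update_mini_batch could be changed for L1 (an assumption for illustration, not part of the original code):
self.weights = [w - eta*(lmbda/n)*np.sign(w) - (eta/len(mini_batch))*nw
                for w, nw in zip(self.weights, nabla_w)]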
Appendix:
The complete code, network2.py:
"""network2.py
~~~~~~~~~~~~~~An improved version of network.py, implementing the stochastic
gradient descent learning algorithm for a feedforward neural network.
Improvements include the addition of the cross-entropy cost function,
regularization, and better initialization of network weights. Note
that I have focused on making the code simple, easily readable, and
easily modifiable. It is not optimized, and omits many desirable
features."""#### Libraries
# Standard library
import json
import random
import sys

# Third-party libraries
import numpy as np


#### Define the quadratic and cross-entropy cost functions

class QuadraticCost(object):

    @staticmethod
    def fn(a, y):
        """Return the cost associated with an output ``a`` and desired output
        ``y``.

        """
        return 0.5*np.linalg.norm(a-y)**2

    @staticmethod
    def delta(z, a, y):
        """Return the error delta from the output layer."""
        return (a-y) * sigmoid_prime(z)


class CrossEntropyCost(object):

    @staticmethod
    def fn(a, y):
        """Return the cost associated with an output ``a`` and desired output
        ``y``.  Note that np.nan_to_num is used to ensure numerical
        stability.  In particular, if both ``a`` and ``y`` have a 1.0
        in the same slot, then the expression (1-y)*np.log(1-a)
        returns nan.  The np.nan_to_num ensures that that is converted
        to the correct value (0.0).

        """
        return np.sum(np.nan_to_num(-y*np.log(a)-(1-y)*np.log(1-a)))

    @staticmethod
    def delta(z, a, y):
        """Return the error delta from the output layer.  Note that the
        parameter ``z`` is not used by the method.  It is included in
        the method's parameters in order to make the interface
        consistent with the delta method for other cost classes.

        """
        return (a-y)


#### Main Network class
class Network(object):

    def __init__(self, sizes, cost=CrossEntropyCost):
        """The list ``sizes`` contains the number of neurons in the respective
        layers of the network.  For example, if the list was [2, 3, 1]
        then it would be a three-layer network, with the first layer
        containing 2 neurons, the second layer 3 neurons, and the
        third layer 1 neuron.  The biases and weights for the network
        are initialized randomly, using
        ``self.default_weight_initializer`` (see docstring for that
        method).

        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.default_weight_initializer()
        self.cost=cost

    def default_weight_initializer(self):
        """Initialize each weight using a Gaussian distribution with mean 0
        and standard deviation 1 over the square root of the number of
        weights connecting to the same neuron.  Initialize the biases
        using a Gaussian distribution with mean 0 and standard
        deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)/np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def large_weight_initializer(self):
        """Initialize the weights using a Gaussian distribution with mean 0
        and standard deviation 1.  Initialize the biases using a
        Gaussian distribution with mean 0 and standard deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        This weight and bias initializer uses the same approach as in
        Chapter 1, and is included for purposes of comparison.  It
        will usually be better to use the default weight initializer
        instead.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta,
            lmbda = 0.0,
            evaluation_data=None,
            monitor_evaluation_cost=False,
            monitor_evaluation_accuracy=False,
            monitor_training_cost=False,
            monitor_training_accuracy=False):
        """Train the neural network using mini-batch stochastic gradient
        descent.  The ``training_data`` is a list of tuples ``(x, y)``
        representing the training inputs and the desired outputs.  The
        other non-optional parameters are self-explanatory, as is the
        regularization parameter ``lmbda``.  The method also accepts
        ``evaluation_data``, usually either the validation or test
        data.  We can monitor the cost and accuracy on either the
        evaluation data or the training data, by setting the
        appropriate flags.  The method returns a tuple containing four
        lists: the (per-epoch) costs on the evaluation data, the
        accuracies on the evaluation data, the costs on the training
        data, and the accuracies on the training data.  All values are
        evaluated at the end of each training epoch.  So, for example,
        if we train for 30 epochs, then the first element of the tuple
        will be a 30-element list containing the cost on the
        evaluation data at the end of each epoch.  Note that the lists
        are empty if the corresponding flag is not set.

        """
        if evaluation_data: n_data = len(evaluation_data)
        n = len(training_data)
        evaluation_cost, evaluation_accuracy = [], []
        training_cost, training_accuracy = [], []
        for j in xrange(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in xrange(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(
                    mini_batch, eta, lmbda, len(training_data))
            print "Epoch %s training complete" % j
            if monitor_training_cost:
                cost = self.total_cost(training_data, lmbda)
                training_cost.append(cost)
                print "Cost on training data: {}".format(cost)
            if monitor_training_accuracy:
                accuracy = self.accuracy(training_data, convert=True)
                training_accuracy.append(accuracy)
                print "Accuracy on training data: {} / {}".format(
                    accuracy, n)
            if monitor_evaluation_cost:
                cost = self.total_cost(evaluation_data, lmbda, convert=True)
                evaluation_cost.append(cost)
                print "Cost on evaluation data: {}".format(cost)
            if monitor_evaluation_accuracy:
                accuracy = self.accuracy(evaluation_data)
                evaluation_accuracy.append(accuracy)
                print "Accuracy on evaluation data: {} / {}".format(
                    self.accuracy(evaluation_data), n_data)
            print
        return evaluation_cost, evaluation_accuracy, \
            training_cost, training_accuracy

    def update_mini_batch(self, mini_batch, eta, lmbda, n):
        """Update the network's weights and biases by applying gradient
        descent using backpropagation to a single mini batch.  The
        ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
        learning rate, ``lmbda`` is the regularization parameter, and
        ``n`` is the total size of the training data set.

        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x] # list to store all the activations, layer by layer
        zs = [] # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = (self.cost).delta(zs[-1], activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in xrange(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)

    def accuracy(self, data, convert=False):
        """Return the number of inputs in ``data`` for which the neural
        network outputs the correct result.  The neural network's
        output is assumed to be the index of whichever neuron in the
        final layer has the highest activation.

        The flag ``convert`` should be set to False if the data set is
        validation or test data (the usual case), and to True if the
        data set is the training data.  The need for this flag arises
        due to differences in the way the results ``y`` are
        represented in the different data sets.  In particular, it
        flags whether we need to convert between the different
        representations.  It may seem strange to use different
        representations for the different data sets.  Why not use the
        same representation for all three data sets?  It's done for
        efficiency reasons -- the program usually evaluates the cost
        on the training data and the accuracy on other data sets.
        These are different types of computations, and using different
        representations speeds things up.  More details on the
        representations can be found in
        mnist_loader.load_data_wrapper.

        """
        if convert:
            results = [(np.argmax(self.feedforward(x)), np.argmax(y))
                       for (x, y) in data]
        else:
            results = [(np.argmax(self.feedforward(x)), y)
                       for (x, y) in data]
        return sum(int(x == y) for (x, y) in results)

    def total_cost(self, data, lmbda, convert=False):
        """Return the total cost for the data set ``data``.  The flag
        ``convert`` should be set to False if the data set is the
        training data (the usual case), and to True if the data set is
        the validation or test data.  See comments on the similar (but
        reversed) convention for the ``accuracy`` method, above.

        """
        cost = 0.0
        for x, y in data:
            a = self.feedforward(x)
            if convert: y = vectorized_result(y)
            cost += self.cost.fn(a, y)/len(data)
        cost += 0.5*(lmbda/len(data))*sum(
            np.linalg.norm(w)**2 for w in self.weights)
        return cost

    def save(self, filename):
        """Save the neural network to the file ``filename``."""
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "cost": str(self.cost.__name__)}
        f = open(filename, "w")
        json.dump(data, f)
        f.close()

#### Loading a Network
def load(filename):
    """Load a neural network from the file ``filename``.  Returns an
    instance of Network.

    """
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    cost = getattr(sys.modules[__name__], data["cost"])
    net = Network(data["sizes"], cost=cost)
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net

#### Miscellaneous functions
def vectorized_result(j):
    """Return a 10-dimensional unit vector with a 1.0 in the j'th position
    and zeroes elsewhere.  This is used to convert a digit (0...9)
    into a corresponding desired output from the neural network.

    """
    e = np.zeros((10, 1))
    e[j] = 1.0
    return e

def sigmoid(z):
    """The sigmoid function."""
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the sigmoid function."""
    return sigmoid(z)*(1-sigmoid(z))
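For completeness, a typical training run with this file, assuming the book's mnist_loader module is available on the path (the hyperparameters and filename are just illustrative):
import mnist_loader
import network2

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
net.SGD(training_data, 30, 10, 0.5,
        lmbda=5.0,
        evaluation_data=validation_data,
        monitor_evaluation_accuracy=True)
net.save("trained_net.json")  # weights, biases and cost class go into a JSON file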