Commit 8d5f17d

committed: a little bit of cleanup

1 parent b48de74 commit 8d5f17d

File tree

  • Classic Computer Science Problems in Swift.playground/Pages/Chapter 7.xcplaygroundpage

1 file changed: +35 −52 lines changed
Classic Computer Science Problems in Swift.playground/Pages/Chapter 7.xcplaygroundpage/Contents.swift

Lines changed: 35 additions & 52 deletions
@@ -57,21 +57,9 @@ func randomWeights(number: Int) -> [Double] {
     return (0..<number).map{ _ in Random.double(from: 0.0, to: 1.0) }
 }
 
-// MARK: Activation Functions and Their Derivatives
-
-/// the classic sigmoid activation function
-func sigmoid(_ x: Double) -> Double {
-    return 1.0 / (1.0 + exp(-x))
-}
-
-// as derived at http://www.ai.mit.edu/courses/6.892/lecture8-html/sld015.htm
-func derivativeSigmoid(_ x: Double) -> Double {
-    return sigmoid(x) * (1 - sigmoid(x))
-}
-
 // MARK: SIMD Accelerated Math
 
-// Based on example from Surge project
+// The next four functions are based on example from Surge project
 // https://github.com/mattt/Surge/blob/master/Source/Arithmetic.swift
 /// Find the dot product of two vectors
 /// assuming that they are of the same length
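For orientation, dotProduct already takes two unlabeled vectors; an illustrative call (the numbers are made up, and the function is the one whose doc comment appears above):

let a: [Double] = [1.0, 2.0, 3.0]
let b: [Double] = [4.0, 5.0, 6.0]
let d = dotProduct(a, b)   // 1*4 + 2*5 + 3*6 = 32.0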
@@ -82,44 +70,49 @@ func dotProduct(_ xs: [Double], _ ys: [Double]) -> Double {
     return answer
 }
 
-// Based on example from Surge project
-// https://github.com/mattt/Surge/blob/master/Source/Arithmetic.swift
 /// Subtract one vector from another
-/// assuming that they are of the same length
-/// using SIMD instructions to speed computation
-public func sub(x: [Double], y: [Double]) -> [Double] {
+public func sub(_ x: [Double], _ y: [Double]) -> [Double] {
     var results = [Double](y)
     catlas_daxpby(Int32(x.count), 1.0, x, 1, -1, &results, 1)
-
     return results
 }
 
-// Another Surge example, see above citation
-public func mul(x: [Double], y: [Double]) -> [Double] {
+/// Multiply two vectors together
+public func mul(_ x: [Double], _ y: [Double]) -> [Double] {
     var results = [Double](repeating: 0.0, count: x.count)
     vDSP_vmulD(x, 1, y, 1, &results, 1, vDSP_Length(x.count))
-
     return results
 }
 
-// Another Surge example, see above citation
-public func sum(x: [Double]) -> Double {
+/// Sum a vector
+public func sum(_ x: [Double]) -> Double {
     var result: Double = 0.0
     vDSP_sveD(x, 1, &result, vDSP_Length(x.count))
-
     return result
 }
 
+// MARK: Activation Functions and Their Derivatives
+
+/// the classic sigmoid activation function
+func sigmoid(_ x: Double) -> Double {
+    return 1.0 / (1.0 + exp(-x))
+}
+
+// as derived at http://www.ai.mit.edu/courses/6.892/lecture8-html/sld015.htm
+func derivativeSigmoid(_ x: Double) -> Double {
+    return sigmoid(x) * (1 - sigmoid(x))
+}
+
 /// An individual node in a layer
 class Neuron {
     var weights: [Double]
     var activationFunction: (Double) -> Double
     var derivativeActivationFunction: (Double) -> Double
-    var inputCache: Double = 0.0
+    var outputCache: Double = 0.0
     var delta: Double = 0.0
     var learningRate: Double
 
-    init(weights: [Double], activationFunction: @escaping (Double) -> Double, derivativeActivationFunction: @escaping (Double) -> Double, learningRate: Double=0.25) {
+    init(weights: [Double], activationFunction: @escaping (Double) -> Double, derivativeActivationFunction: @escaping (Double) -> Double, learningRate: Double) {
         self.weights = weights
         self.activationFunction = activationFunction
         self.derivativeActivationFunction = derivativeActivationFunction
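The activation helpers re-added above compute sigmoid(x) = 1 / (1 + exp(-x)) and its derivative sigmoid(x) * (1 - sigmoid(x)). A hedged spot check that the closed form matches a numerical derivative (the test point 0.5 and the step size are illustrative, not part of the commit):

let x = 0.5
let analytic = derivativeSigmoid(x)                            // sigmoid(0.5) * (1 - sigmoid(0.5)) ≈ 0.2350
let numeric = (sigmoid(x + 1e-6) - sigmoid(x - 1e-6)) / 2e-6   // central difference, also ≈ 0.2350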
@@ -129,8 +122,8 @@ class Neuron {
     /// The output that will be going to the next layer
     /// or the final output if this is an output layer
     func output(inputs: [Double]) -> Double {
-        inputCache = dotProduct(inputs, weights)
-        return activationFunction(inputCache)
+        outputCache = dotProduct(inputs, weights)
+        return activationFunction(outputCache)
     }
 
 }
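After the rename from inputCache to outputCache, the cached value is still the pre-activation dot product rather than the activated output. A minimal sketch of that behavior (the weights, inputs, and learning rate are made-up values; the initializer is the one shown in the previous hunk):

let neuron = Neuron(weights: [0.5, -0.25], activationFunction: sigmoid, derivativeActivationFunction: derivativeSigmoid, learningRate: 0.3)
let out = neuron.output(inputs: [1.0, 2.0])   // dot product = 0.5*1.0 + (-0.25)*2.0 = 0.0, so out = sigmoid(0.0) = 0.5
// neuron.outputCache is 0.0 here: the pre-activation sum, not the activated output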
@@ -140,14 +133,6 @@ class Layer {
     var neurons: [Neuron]
     var outputCache: [Double]
 
-    // for future use in deserializing networks
-    init(previousLayer: Layer? = nil, neurons: [Neuron] = [Neuron]()) {
-        self.previousLayer = previousLayer
-        self.neurons = neurons
-        self.outputCache = Array<Double>(repeating: 0.0, count: neurons.count)
-    }
-
-    // main init
     init(previousLayer: Layer? = nil, numNeurons: Int, activationFunction: @escaping (Double) -> Double, derivativeActivationFunction: @escaping (Double)-> Double, learningRate: Double) {
         self.previousLayer = previousLayer
         self.neurons = Array<Neuron>()
@@ -169,7 +154,7 @@ class Layer {
     // should only be called on an output layer
     func calculateDeltasForOutputLayer(expected: [Double]) {
         for n in 0..<neurons.count {
-            neurons[n].delta = neurons[n].derivativeActivationFunction(neurons[n].inputCache) * (expected[n] - outputCache[n])
+            neurons[n].delta = neurons[n].derivativeActivationFunction(neurons[n].outputCache) * (expected[n] - outputCache[n])
         }
     }
 
@@ -179,19 +164,17 @@ class Layer {
             let nextWeights = nextLayer.neurons.map { 0ドル.weights[index] }
             let nextDeltas = nextLayer.neurons.map { 0ドル.delta }
             let sumOfWeightsXDeltas = dotProduct(nextWeights, nextDeltas)
-            neuron.delta = neuron.derivativeActivationFunction(neuron.inputCache) * sumOfWeightsXDeltas
+            neuron.delta = neuron.derivativeActivationFunction(neuron.outputCache) * sumOfWeightsXDeltas
         }
     }
-
-
 }
 
 /// Represents an entire neural network. From largest to smallest we go
 /// Network -> Layers -> Neurons
 class Network {
     var layers: [Layer]
 
-    init(layerStructure:[Int], activationFunction: @escaping (Double) -> Double = sigmoid, derivativeActivationFunction: @escaping (Double) -> Double = derivativeSigmoid, learningRate: Double=0.25) {
+    init(layerStructure:[Int], activationFunction: @escaping (Double) -> Double = sigmoid, derivativeActivationFunction: @escaping (Double) -> Double = derivativeSigmoid, learningRate: Double) {
         if (layerStructure.count < 3) {
             print("Error: Should be at least 3 layers (1 input, 1 hidden, 1 output)")
         }
@@ -214,7 +197,7 @@ class Network {
 
     /// Figure out each neuron's changes based on the errors
     /// of the output versus the expected outcome
-    func backPropagate(expected: [Double]) {
+    func backpropagate(expected: [Double]) {
         //calculate delta for output layer neurons
         layers.last?.calculateDeltasForOutputLayer(expected: expected)
         //calculate delta for prior layers
@@ -223,32 +206,32 @@ class Network {
         }
     }
 
-    /// backPropagate() doesn't actually change any weights
-    /// this function uses the deltas calculated in backPropagate()
+    /// backpropagate() doesn't actually change any weights
+    /// this function uses the deltas calculated in backpropagate()
     /// to actually make changes to the weights
     func updateWeights() {
-        for layer in layers{
+        for layer in layers.dropFirst(){ // skip input layer
             for neuron in layer.neurons {
                 for w in 0..<neuron.weights.count {
-                    neuron.weights[w] = neuron.weights[w] + (neuron.learningRate * (layer.previousLayer?.outputCache[w])! * neuron.delta)
+                    neuron.weights[w] = neuron.weights[w] + (neuron.learningRate * (layer.previousLayer?.outputCache[w])! * neuron.delta)
                 }
             }
         }
     }
 
     /// train() uses the results of outputs() run over
     /// many *inputs* and compared against *expecteds* to feed
-    /// backPropagate() and updateWeights()
+    /// backpropagate() and updateWeights()
     func train(inputs:[[Double]], expecteds:[[Double]], printError:Bool = false, threshold:Double? = nil) {
         for (location, xs) in inputs.enumerated() {
             let ys = expecteds[location]
             let outs = outputs(input: xs)
             if (printError) {
-                let diff = sub(x:outs, y: ys)
-                let error = sqrt(sum(x:mul(x:diff, y: diff)))
+                let diff = sub(outs, ys)
+                let error = sqrt(sum(mul(diff, diff)))
                 print("\(error) error in run \(location)")
             }
-            backPropagate(expected: ys)
+            backpropagate(expected: ys)
             updateWeights()
         }
     }
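With the label-free helpers, the per-run error in train() is the square root of the summed squared differences between outputs and targets. A small hedged walk-through with made-up vectors (sub computes x - y, per its catlas_daxpby call above):

let outs: [Double] = [0.9, 0.1, 0.2]
let ys: [Double] = [1.0, 0.0, 0.0]
let diff = sub(outs, ys)                  // [-0.1, 0.1, 0.2]
let error = sqrt(sum(mul(diff, diff)))    // sqrt(0.01 + 0.01 + 0.04) ≈ 0.245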
@@ -286,7 +269,7 @@ func normalizeByColumnMax( dataset:inout [[Double]]) {
 
 // MARK: Iris Test
 
-var network: Network = Network(layerStructure: [4,5,3], learningRate: 0.3)
+var network: Network = Network(layerStructure: [4,6,3], learningRate: 0.3)
 var irisParameters: [[Double]] = [[Double]]()
 var irisClassifications: [[Double]] = [[Double]]()
 var irisSpecies: [String] = [String]()
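Because learningRate no longer has a default, the Iris network above supplies it explicitly. A hedged usage sketch of the next step (the Iris arrays are the empty ones declared above and are assumed to be populated later in the playground):

// hypothetical training call once irisParameters/irisClassifications are filled:
network.train(inputs: irisParameters, expecteds: irisClassifications, printError: true)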
