Updated technical documents to use the new Layer protocol. (tensorflo…
neild0 authored and rxwei committed May 6, 2019
1 parent 3f4f2de commit 19c2fca
Showing 3 changed files with 15 additions and 15 deletions.
8 changes: 4 additions & 4 deletions docs/DifferentiableFunctions.md
@@ -231,7 +231,7 @@ specify the same attribute. This enables generic code using differentiation
defined in terms of protocol requirements.

Here is an example of a neural network `Layer` protocol that defines a
- `@differentiable` method called `applied(to:)`. As shown, the `applied(to:)`
+ `@differentiable` method called `call(_:)`. As shown, the `call(_:)`
method can be differentiated in a `Layer` protocol extension, even though it is
not a concrete method.

@@ -246,7 +246,7 @@ protocol Layer: Differentiable {
associatedtype Output: Differentiable
/// Returns the output obtained from applying the layer to the given input.
@differentiable
- func applied(to input: Input) -> Output
+ func call(_ input: Input) -> Output
}

extension Layer {
@@ -262,7 +262,7 @@ extension Layer {
backpropagator: (_ direction: Output.CotangentVector)
-> (layerGradient: CotangentVector, inputGradient: Input.CotangentVector)) {
let (out, pullback) = valueWithPullback(at: input) { layer, input in
- return layer.applied(to: input)
+ return layer(input)
}
return (out, pullback)
}
@@ -274,7 +274,7 @@ struct DenseLayer: Layer {
var bias: Tensor<Float>

@differentiable
- func applied(to input: Tensor<Float>) -> Tensor<Float> {
+ func call(_ input: Tensor<Float>) -> Tensor<Float> {
return matmul(input, weight) + bias
}
}
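With `DenseLayer` conforming to `Layer`, the standard differential operators work through the `call(_:)` requirement. A minimal usage sketch (the weight, bias, and input values here are illustrative, not taken from the document):

```swift
// Differentiate a scalar loss with respect to the layer itself,
// pulling back through the `call(_:)` protocol requirement.
let dense = DenseLayer(weight: [[1, 1], [1, 1]], bias: [0, 0])
let input: Tensor<Float> = [[3, 3]]
let 𝛁dense = gradient(at: dense) { dense in dense(input).sum() }
// `𝛁dense` holds one cotangent entry per differentiable stored property.
```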
4 changes: 2 additions & 2 deletions docs/DifferentiableTypes.md
@@ -77,14 +77,14 @@ struct DenseLayer: Differentiable {
// The compiler synthesizes all `Differentiable` protocol requirements, adding only properties
// not marked with `@noDerivative` to associated tangent space types.

- func applied(to input: Tensor<Float>) -> Tensor<Float> {
+ func call(_ input: Tensor<Float>) -> Tensor<Float> {
return matmul(input, weight) + bias
}
}

// Differential operators like `gradient(at:in:)` just work!
let dense = DenseLayer(weight: [[1, 1], [1, 1]], bias: [0, 0])
- let 𝛁dense = gradient(at: dense) { dense in dense.applied(to: [[3, 3]]).sum() }
+ let 𝛁dense = gradient(at: dense) { dense in dense([[3, 3]]).sum() }

dump(𝛁dense)
// ▿ DenseLayer.AllDifferentiableVariables
18 changes: 9 additions & 9 deletions docs/ParameterOptimization.md
@@ -20,15 +20,15 @@ struct MyMLModel {
var weight1, weight2: Tensor<Float>
var bias1, bias2: Tensor<Float>

- func applied(to input: Tensor<Float>) -> Tensor<Float> {
+ func call(_ input: Tensor<Float>) -> Tensor<Float> {
let h = relu(matmul(input, weight1) + bias1)
return sigmoid(matmul(h, weight2) + bias2)
}
}

let model = MyMLModel(...)
let input = Tensor<Float>([0.2, 0.4])
- print(model.applied(to: input))
+ print(model(input))
```
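Because the forward pass is now named `call(_:)`, applying the model reads like an ordinary function call. A small sketch of the equivalence, reusing the `model` and `input` values above (illustrative only):

```swift
// `model(input)` is call syntax sugar for invoking the `call(_:)` method.
let output = model(input)
let sameOutput = model.call(input) // explicit method call; same result
```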

Here are some additional rules about models and parameters:
@@ -187,7 +187,7 @@ struct DenseLayer: KeyPathIterable {
var bias: Tensor<Float>
var activation: (Tensor<Float>) -> (Tensor<Float>) = relu

- func applied(to input: Tensor<Float>) -> Tensor<Float> {
+ func call(_ input: Tensor<Float>) -> Tensor<Float> {
return activation(matmul(input, weight) + bias)
}

@@ -325,7 +325,7 @@ struct DenseLayer: KeyPathIterable, Differentiable {
@noDerivative var activation: @differentiable (Tensor<Float>) -> Tensor<Float> = relu

@differentiable
- func applied(to input: Tensor<Float>) -> Tensor<Float> {
+ func call(_ input: Tensor<Float>) -> Tensor<Float> {
return activation(matmul(input, weight) + bias)
}
}
@@ -368,7 +368,7 @@ class SGD<Model, Scalar: TensorFlowFloatingPoint>
// Example optimizer usage.
var dense = DenseLayer(weight: [[1, 1], [1, 1]], bias: [1, 1])
let input = Tensor<Float>(ones: [2, 2])
- let 𝛁dense = dense.gradient { dense in dense.applied(to: input) }
+ let 𝛁dense = dense.gradient { dense in dense(input) }

let optimizer = SGD<DenseLayer, Float>()
optimizer.update(&dense.allDifferentiableVariables, with: 𝛁dense)
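For orientation, here is a minimal sketch of the kind of key-path-based update step such an optimizer performs, assuming a `recursivelyAllWritableKeyPaths(to:)` utility on `KeyPathIterable` and gradients that share the variables' type; it is a sketch, not the document's exact `SGD` implementation:

```swift
// Walk every writable `Tensor<Float>` property (recursively) and apply a plain
// gradient-descent step to it. Function and parameter names are illustrative.
func sgdStep<Variables: KeyPathIterable>(
    _ variables: inout Variables,
    along gradient: Variables,
    learningRate: Float = 0.01
) {
    for kp in variables.recursivelyAllWritableKeyPaths(to: Tensor<Float>.self) {
        variables[keyPath: kp] -= learningRate * gradient[keyPath: kp]
    }
}

// For example: sgdStep(&dense.allDifferentiableVariables, along: 𝛁dense)
```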
@@ -472,9 +472,9 @@ struct Classifier: Layer {
l2 = Dense<Float>(inputSize: hiddenSize, outputSize: 1, activation: relu)
}
@differentiable
- func applied(to input: Tensor<Float>) -> Tensor<Float> {
- let h1 = l1.applied(to: input)
- return l2.applied(to: h1)
+ func call(_ input: Tensor<Float>) -> Tensor<Float> {
+ let h1 = l1(input)
+ return l2(h1)
}
}
var classifier = Classifier(hiddenSize: 4)
@@ -484,7 +484,7 @@ let y: Tensor<Float> = [[0], [1], [1], [0]]

for _ in 0..<3000 {
let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
- let ŷ = classifier.applied(to: x)
+ let ŷ = classifier(x)
return meanSquaredError(predicted: ŷ, expected: y)
}
// Parameter optimization here!
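// A plausible concrete step (a sketch; the document's exact code is elided at this point):
//     optimizer.update(&classifier.allDifferentiableVariables, with: 𝛁model)
// where `optimizer` would be an SGD optimizer for `Classifier`, like the one shown above.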