lazy init
szagoruyko committed Feb 9, 2016
1 parent 948ac6a commit 4696a46
Showing 17 changed files with 35 additions and 46 deletions.
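Every file in this commit applies the same change: scratch buffers that used to be allocated eagerly in `__init` as `torch.Tensor()` are now created lazily, on first use, with `input.new()`, which returns an empty tensor of the same class as `input`. A module therefore never carries a default DoubleTensor buffer that has to be converted (or forgotten) when the module is moved to another tensor type. A minimal before/after sketch of the pattern — `nn.MyModule` and `self.buffer` are illustrative names, not code from this diff:

local MyModule, parent = torch.class('nn.MyModule', 'nn.Module')

-- Before: eager allocation. The buffer is a DoubleTensor from birth
-- and exists even if the module is never run.
-- function MyModule:__init()
--    parent.__init(self)
--    self.buffer = torch.Tensor()
-- end

-- After: lazy allocation. input.new() creates an empty tensor of the
-- same class as input (e.g. torch.CudaTensor for CUDA inputs), so the
-- buffer is born with the right type on the first forward pass.
function MyModule:updateOutput(input)
   self.buffer = self.buffer or input.new()
   self.buffer:resizeAs(input)
   self.output:resizeAs(input):copy(input)
   return self.output
end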
4 changes: 2 additions & 2 deletions BCECriterion.lua
@@ -30,7 +30,7 @@ function BCECriterion:updateOutput(input, target)
    assert( input:nElement() == target:nElement(),
       "input and target size mismatch")
 
-   self.buffer = self.buffer or torch.Tensor():typeAs(input)
+   self.buffer = self.buffer or input.new()
 
    local buffer = self.buffer
    local weights = self.weights
@@ -74,7 +74,7 @@ function BCECriterion:updateGradInput(input, target)
    assert( input:nElement() == target:nElement(),
       "input and target size mismatch")
 
-   self.buffer = self.buffer or torch.Tensor():typeAs(input)
+   self.buffer = self.buffer or input.new()
 
    local buffer = self.buffer
    local weights = self.weights
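BCECriterion already allocated its buffer lazily; the change only swaps how the empty tensor is made. `torch.Tensor():typeAs(input)` first builds a default-typed tensor and then converts it, while `input.new()` constructs a tensor of the input's class directly. A quick equivalence check, assuming an interactive torch session:

local input = torch.FloatTensor(3)
print(torch.type(torch.Tensor():typeAs(input))) -- torch.FloatTensor
print(torch.type(input.new()))                  -- torch.FloatTensor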
6 changes: 1 addition & 5 deletions LogSigmoid.lua
@@ -1,11 +1,7 @@
 local LogSigmoid, parent = torch.class('nn.LogSigmoid', 'nn.Module')
 
-function LogSigmoid:__init()
-   parent.__init(self)
-   self.buffer = torch.Tensor()
-end
-
 function LogSigmoid:updateOutput(input)
+   self.buffer = self.buffer or input.new()
    input.THNN.LogSigmoid_updateOutput(
       input:cdata(),
       self.output:cdata(),
4 changes: 2 additions & 2 deletions PReLU.lua
@@ -6,8 +6,6 @@ function PReLU:__init(nOutputPlane)
    self.nOutputPlane = nOutputPlane or 0
    self.weight = torch.Tensor(nOutputPlane or 1):fill(0.25)
    self.gradWeight = torch.Tensor(nOutputPlane or 1)
-   self.gradWeightBuf = torch.Tensor()
-   self.gradWeightBuf2 = torch.Tensor()
 end
 
@@ -32,6 +30,8 @@ function PReLU:updateGradInput(input, gradOutput)
 end
 
 function PReLU:accGradParameters(input, gradOutput, scale)
+   self.gradWeightBuf = self.gradWeightBuf or input.new()
+   self.gradWeightBuf2 = self.gradWeightBuf2 or input.new()
    input.THNN.PReLU_accGradParameters(
       input:cdata(),
       gradOutput:cdata(),
6 changes: 2 additions & 4 deletions Reshape.lua
@@ -26,14 +26,12 @@ function Reshape:__init(...)
       self.nelement = self.nelement * self.size[i]
       self.batchsize[i+1] = self.size[i]
    end
-
-   -- only used for non-contiguous input or gradOutput
-   self._input = torch.Tensor()
-   self._gradOutput = torch.Tensor()
 end
 
 function Reshape:updateOutput(input)
    if not input:isContiguous() then
+      self._input = self._input or input.new()
+      self._gradOutput = self._gradOutput or input.new()
       self._input:resizeAs(input)
       self._input:copy(input)
      input = self._input
3 changes: 1 addition & 2 deletions SpatialAdaptiveMaxPooling.lua
@@ -5,11 +5,10 @@ function SpatialAdaptiveMaxPooling:__init(W, H)
 
    self.W = W
    self.H = H
-
-   self.indices = torch.Tensor()
 end
 
 function SpatialAdaptiveMaxPooling:updateOutput(input)
+   self.indices = self.indices or input.new()
    input.THNN.SpatialAdaptiveMaxPooling_updateOutput(
       input:cdata(),
       self.output:cdata(),
4 changes: 2 additions & 2 deletions SpatialConvolution.lua
@@ -155,8 +155,8 @@ function SpatialConvolution:accGradParameters(input, gradOutput, scale)
 end
 
 function SpatialConvolution:type(type,tensorCache)
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
+   self.finput = self.finput and torch.Tensor()
+   self.fgradInput = self.fgradInput and torch.Tensor()
    return parent.type(self,type,tensorCache)
 end
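The `:type` override in the convolution modules switches from unconditionally resetting the buffers to `self.finput = self.finput and torch.Tensor()`. The `and` keeps a never-allocated buffer nil, so it stays lazy after the type change, and otherwise replaces it with a fresh empty tensor for `parent.type` to convert. A small sketch of the idiom's two cases, outside any module:

-- Case 1: buffer was never allocated; 'nil and torch.Tensor()' is nil,
-- so lazy init still runs on the next forward pass.
local finput = nil
finput = finput and torch.Tensor()
print(finput) -- nil

-- Case 2: buffer exists; it is replaced by a fresh empty tensor that
-- parent.type() will then convert to the requested type.
finput = torch.FloatTensor(10)
finput = finput and torch.Tensor()
print(finput:nElement()) -- 0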
9 changes: 4 additions & 5 deletions SpatialConvolutionLocal.lua
@@ -26,9 +26,6 @@ function SpatialConvolutionLocal:__init(nInputPlane, nOutputPlane, iW, iH ,kW, kH, dW, dH, padW, padH)
    self.gradWeight = torch.Tensor():resizeAs(self.weight)
    self.gradBias = torch.Tensor():resizeAs(self.bias)
 
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
-
    self:reset()
 end
 
@@ -117,6 +114,8 @@ local function checkOutputSize(self, input, output)
 end
 
 function SpatialConvolutionLocal:updateOutput(input)
+   self.finput = self.finput or input.new()
+   self.fgradInput = self.fgradInput or input.new()
    checkInputSize(self, input)
    viewWeight(self)
    input = makeContiguous(self, input)
 
@@ -148,8 +147,8 @@ function SpatialConvolutionLocal:accGradParameters(input, gradOutput, scale)
 end
 
 function SpatialConvolutionLocal:type(type,tensorCache)
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
+   self.finput = self.finput and torch.Tensor()
+   self.fgradInput = self.fgradInput and torch.Tensor()
    return parent.type(self,type,tensorCache)
 end
9 changes: 4 additions & 5 deletions SpatialConvolutionMM.lua
@@ -21,9 +21,6 @@ function SpatialConvolutionMM:__init(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH)
    self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane*kH*kW)
    self.gradBias = torch.Tensor(nOutputPlane)
 
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
-
    self:reset()
 end
 
@@ -63,6 +60,8 @@ local function makeContiguous(self, input, gradOutput)
 end
 
 function SpatialConvolutionMM:updateOutput(input)
+   self.finput = self.finput or input.new()
+   self.fgradInput = self.fgradInput or input.new()
    -- backward compatibility
    if self.padding then
       self.padW = self.padding
 
@@ -121,8 +120,8 @@ function SpatialConvolutionMM:accGradParameters(input, gradOutput, scale)
 end
 
 function SpatialConvolutionMM:type(type,tensorCache)
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
+   self.finput = self.finput and torch.Tensor()
+   self.fgradInput = self.fgradInput and torch.Tensor()
    return parent.type(self,type,tensorCache)
 end
4 changes: 2 additions & 2 deletions SpatialCrossMapLRN.lua
@@ -7,14 +7,14 @@ function SpatialCrossMapLRN:__init(size, alpha, beta, k)
    self.alpha = alpha or 0.0001
    self.beta = beta or 0.75
    self.k = k or 1
-
-   self.scale = torch.Tensor()
 end
 
 function SpatialCrossMapLRN:updateOutput(input)
    assert(input:dim() == 3 or input:dim() == 4,
          'Input must be 3D or 4D')
 
+   self.scale = self.scale or input.new()
+
    if torch.type(input) == 'torch.CudaTensor' then
       input.nn.SpatialCrossMapLRN_updateOutput(self, input)
    else
2 changes: 1 addition & 1 deletion SpatialFractionalMaxPooling.lua
@@ -15,7 +15,6 @@ function SpatialFractionalMaxPooling:__init(poolSizeW, poolSizeH, arg1, arg2)
    -- Pool size (how wide the pooling for each output unit is)
    self.poolSizeW = poolSizeW
    self.poolSizeH = poolSizeH
-   self.indices = torch.Tensor()
 
    -- Random samples are drawn for all
    -- batch * plane * (height, width; i.e., 2) points. This determines
 
@@ -115,6 +114,7 @@ function SpatialFractionalMaxPooling:fixPoolingRegions(val)
 end
 
 function SpatialFractionalMaxPooling:updateOutput(input)
+   self.indices = self.indices or input.new()
    self:initSampleBuffer_(input)
    local outW, outH = self:getOutputSizes_(input)
9 changes: 4 additions & 5 deletions SpatialFullConvolution.lua
@@ -28,9 +28,6 @@ function SpatialFullConvolution:__init(nInputPlane, nOutputPlane,
    self.bias = torch.Tensor(self.nOutputPlane)
    self.gradBias = torch.Tensor(self.nOutputPlane)
 
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
-
    self:reset()
 end
 
@@ -69,6 +66,8 @@ function SpatialFullConvolution:backCompatibility()
 end
 
 function SpatialFullConvolution:updateOutput(input)
+   self.finput = self.finput or input.new()
+   self.fgradInput = self.fgradInput or input.new()
    self:backCompatibility()
 
    input = makeContiguous(self, input)
 
@@ -92,8 +91,8 @@ function SpatialFullConvolution:accGradParameters(input, gradOutput, scale)
 end
 
 function SpatialFullConvolution:type(type, tensorCache)
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
+   self.finput = self.finput and torch.Tensor()
+   self.fgradInput = self.fgradInput and torch.Tensor()
    return parent.type(self, type, tensorCache)
 end
1 change: 1 addition & 0 deletions SpatialMaxPooling.lua
@@ -29,6 +29,7 @@ function SpatialMaxPooling:floor()
 end
 
 function SpatialMaxPooling:updateOutput(input)
+   self.indices = self.indices or input.new()
    -- backward compatibility
    self.ceil_mode = self.ceil_mode or false
    self.padW = self.padW or 0
3 changes: 1 addition & 2 deletions TemporalMaxPooling.lua
@@ -7,11 +7,10 @@ function TemporalMaxPooling:__init(kW, dW)
 
    self.kW = kW
    self.dW = dW
-
-   self.indices = torch.Tensor()
 end
 
 function TemporalMaxPooling:updateOutput(input)
+   self.indices = self.indices or input.new()
    input.nn.TemporalMaxPooling_updateOutput(self, input)
    return self.output
 end
9 changes: 4 additions & 5 deletions VolumetricConvolution.lua
@@ -23,9 +23,6 @@ function VolumetricConvolution:__init(nInputPlane, nOutputPlane, kT, kW, kH, dT, dW, dH, padT, padW, padH)
    self.bias = torch.Tensor(nOutputPlane)
    self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
    self.gradBias = torch.Tensor(nOutputPlane)
-   -- temporary buffers for unfolding (CUDA)
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
    self:reset()
 end
 
@@ -80,6 +77,8 @@ local function unviewWeight(self)
 end
 
 function VolumetricConvolution:updateOutput(input)
+   self.finput = self.finput or input.new()
+   self.fgradInput = self.fgradInput or input.new()
    if input:type() == 'torch.CudaTensor' then
       input.THNN.VolumetricConvolution_updateOutput(
          input:cdata(),
 
@@ -171,8 +170,8 @@ function VolumetricConvolution:accGradParameters(input, gradOutput, scale)
 end
 
 function VolumetricConvolution:type(type, tensorCache)
-   self.finput:set()
-   self.fgradInput:set()
+   if self.finput then self.finput:set() end
+   if self.fgradInput then self.fgradInput:set() end
    return parent.type(self, type, tensorCache)
 end
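VolumetricConvolution's `:type` takes a slightly different route: it keeps its buffers but empties them in place with `:set()`, which detaches a tensor from its storage, and the new guard simply skips the call when lazy init has not run yet. A tiny illustration of both halves:

local finput = torch.FloatTensor(4, 4):fill(1)
finput:set()             -- detach from storage; the tensor is now empty
print(finput:nElement()) -- 0

finput = nil
if finput then finput:set() end -- guard: a no-op if never allocated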
5 changes: 2 additions & 3 deletions VolumetricFullConvolution.lua
@@ -27,9 +27,6 @@ function VolumetricFullConvolution:__init(nInputPlane, nOutputPlane, kT, kH, kW,
    self.bias = torch.Tensor(nOutputPlane)
    self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW)
    self.gradBias = torch.Tensor(nOutputPlane)
-   -- temporary buffers for unfolding (CUDA)
-   self.finput = torch.Tensor()
-   self.fgradInput = torch.Tensor()
    self:reset()
 end
 
@@ -54,6 +51,8 @@ function VolumetricFullConvolution:reset(stdv)
 end
 
 function VolumetricFullConvolution:updateOutput(input)
+   self.finput = self.finput or input.new()
+   self.fgradInput = self.fgradInput or input.new()
    input.THNN.VolumetricFullConvolution_updateOutput(
       input:cdata(),
       self.output:cdata(),
1 change: 1 addition & 0 deletions VolumetricMaxPooling.lua
@@ -36,6 +36,7 @@ function VolumetricMaxPooling:floor()
 end
 
 function VolumetricMaxPooling:updateOutput(input)
+   self.indices = self.indices or input.new()
    input.THNN.VolumetricMaxPooling_updateOutput(
       input:cdata(),
       self.output:cdata(),
2 changes: 1 addition & 1 deletion WeightedMSECriterion.lua
@@ -3,10 +3,10 @@ local WeightedMSECriterion, parent = torch.class('nn.WeightedMSECriterion','nn.MSECriterion')
 function WeightedMSECriterion:__init(w)
    parent.__init(self)
    self.weight = w:clone()
-   self.buffer = torch.Tensor()
 end
 
 function WeightedMSECriterion:updateOutput(input,target)
+   self.buffer = self.buffer or input.new()
    self.buffer:resizeAs(input):copy(target)
    if input:dim() - 1 == self.weight:dim() then
       for i=1,input:size(1) do
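The net effect across all 17 files: a module can be type-converted before it has ever seen data, and its scratch buffers are simply created with the matching type on the first forward pass. A hedged usage sketch with one of the modules touched above (TemporalMaxPooling over a 6-frame, 3-feature input; the sizes are chosen only for illustration):

require 'nn'

-- After this commit, __init no longer allocates self.indices, so
-- :float() has no stale DoubleTensor buffer to convert.
local pool = nn.TemporalMaxPooling(2, 2):float()

-- The first forward creates self.indices via input.new(), so it is
-- born as a FloatTensor matching the input.
local out = pool:forward(torch.FloatTensor(6, 3))
print(torch.type(pool.indices)) -- torch.FloatTensor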
