
Creating Extensions Using NumPy and SciPy

Created On: Mar 24, 2017 | Last Updated: Apr 25, 2023 | Last Verified: Not Verified

Author: Adam Paszke

Updated by: Adam Dziedzic

In this tutorial, we shall go through two tasks:

  1. Create a neural network layer with no parameters.

    • This calls into NumPy as part of its implementation

  2. Create a neural network layer that has learnable weights

    • This calls into SciPy as part of its implementation

import torch
from torch.autograd import Function
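
Both layers below rely on the same bridge between PyTorch and NumPy: detach the tensor, operate on it as a NumPy array, and wrap the result back into a tensor. As a minimal sketch of that round trip (standard PyTorch and NumPy calls, independent of the layers defined later):

import numpy as np

t = torch.arange(4.0)              # tensor([0., 1., 2., 3.])
a = t.detach().numpy()             # zero-copy view of the tensor as an ndarray
t2 = torch.from_numpy(np.sqrt(a))  # wrap the NumPy result back into a tensor
print(t2)                          # tensor([0.0000, 1.0000, 1.4142, 1.7321])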

Parameter-less example

This layer doesn’t particularly do anything useful or mathematically correct.

It is aptly named BadFFTFunction.

Layer Implementation

from numpy.fft import rfft2, irfft2


class BadFFTFunction(Function):
    @staticmethod
    def forward(ctx, input):
        numpy_input = input.detach().numpy()
        result = abs(rfft2(numpy_input))
        return input.new(result)

    @staticmethod
    def backward(ctx, grad_output):
        numpy_go = grad_output.numpy()
        result = irfft2(numpy_go)
        return grad_output.new(result)


# since this layer does not have any parameters, we can
# simply declare this as a function, rather than as an ``nn.Module`` class


def incorrect_fft(input):
    return BadFFTFunction.apply(input)
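
For comparison, the same Function could just as well be wrapped in an nn.Module; a minimal sketch (the BadFFT class name here is chosen purely for illustration):

import torch.nn as nn

class BadFFT(nn.Module):
    def forward(self, input):
        return BadFFTFunction.apply(input)

Since the layer has no parameters, the plain function above is the simpler choice; the parametrized example later in this tutorial uses an nn.Module because it needs to register learnable weights.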

Example usage of the created layer:

input = torch.randn(8, 8, requires_grad=True)
result = incorrect_fft(input)
print(result)
result.backward(torch.randn(result.size()))
print(input)
tensor([[ 3.3419, 0.1656, 7.6066, 4.7383, 3.8915],
 [ 7.5762, 4.4006, 6.4325, 11.1999, 12.2115],
 [ 4.6097, 5.2863, 3.4551, 3.6515, 4.6108],
 [ 7.8133, 16.1469, 8.8314, 12.0719, 9.0824],
 [11.5534, 7.4282, 6.8911, 7.3443, 1.4537],
 [ 7.8133, 7.7879, 6.9815, 5.8072, 9.0824],
 [ 4.6097, 6.5907, 5.3708, 7.3843, 4.6108],
 [ 7.5762, 6.4118, 6.9828, 12.2926, 12.2115]],
 grad_fn=<BadFFTFunctionBackward>)
tensor([[-0.6619, 0.7142, 2.0830, 1.2622, -0.2735, -0.3263, -0.2668, -0.2433],
 [-1.5010, -0.0945, -1.1853, -0.8700, 0.2981, -0.4020, -1.1243, 0.9004],
 [-1.0366, -0.3953, 0.5724, 0.3222, 0.5853, 0.8533, 0.4251, 1.4347],
 [-0.2783, -0.3853, -0.4909, 0.4404, 0.0499, 0.8857, -2.1674, -1.2256],
 [ 1.9085, 1.2474, -1.6831, -0.4804, 0.3802, -1.8774, 1.2096, -0.4731],
 [ 1.0659, -0.9195, -0.0661, 0.5568, -0.0785, 0.6158, 1.0304, -2.2441],
 [ 1.1398, 1.0379, -0.6474, -0.1136, 0.6278, -0.3721, -0.0289, 0.5239],
 [ 2.2013, -0.0689, -0.6829, -0.6604, 1.1073, 0.1134, 1.1053, -0.0314]],
 requires_grad=True)
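
Note that although the input is 8x8, the printed result has shape 8x5: rfft2 keeps only the non-redundant half of the spectrum along the last axis (8 // 2 + 1 = 5 columns), and the irfft2 call in backward maps the 8x5 gradient back to the 8x8 input shape. A quick way to confirm:

print(result.shape)  # torch.Size([8, 5])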

Parametrized example

In the deep learning literature, this layer is confusingly referred to as convolution, while the actual operation is cross-correlation (the only difference is that the filter is flipped for convolution, which is not the case for cross-correlation).
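
A quick numerical check of this statement, sketched with an arbitrary 5x5 input and 3x3 kernel: flipping the kernel along both axes turns cross-correlation into convolution, so the two calls below agree.

import numpy as np
from scipy.signal import convolve2d, correlate2d

x = np.random.randn(5, 5)
k = np.random.randn(3, 3)
corr = correlate2d(x, k, mode='valid')
conv = convolve2d(x, np.flip(k), mode='valid')  # np.flip with no axis flips both axes
print(np.allclose(corr, conv))                  # True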

Below is an implementation of a layer with learnable weights, where the cross-correlation filter (kernel) represents the weights.

The backward pass computes the gradient with respect to the input and the gradient with respect to the filter.

from numpy import flip
import numpy as np
from scipy.signal import convolve2d, correlate2d
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter


class ScipyConv2dFunction(Function):
    @staticmethod
    def forward(ctx, input, filter, bias):
        # detach so we can cast to NumPy
        input, filter, bias = input.detach(), filter.detach(), bias.detach()
        result = correlate2d(input.numpy(), filter.numpy(), mode='valid')
        result += bias.numpy()
        ctx.save_for_backward(input, filter, bias)
        return torch.as_tensor(result, dtype=input.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        grad_output = grad_output.detach()
        input, filter, bias = ctx.saved_tensors
        grad_output = grad_output.numpy()
        grad_bias = np.sum(grad_output, keepdims=True)
        grad_input = convolve2d(grad_output, filter.numpy(), mode='full')
        # the previous line can be expressed equivalently as:
        # grad_input = correlate2d(grad_output, flip(flip(filter.numpy(), axis=0), axis=1), mode='full')
        grad_filter = correlate2d(input.numpy(), grad_output, mode='valid')
        return torch.from_numpy(grad_input), torch.from_numpy(grad_filter).to(torch.float), torch.from_numpy(grad_bias).to(torch.float)


class ScipyConv2d(Module):
    def __init__(self, filter_width, filter_height):
        super(ScipyConv2d, self).__init__()
        self.filter = Parameter(torch.randn(filter_width, filter_height))
        self.bias = Parameter(torch.randn(1, 1))

    def forward(self, input):
        return ScipyConv2dFunction.apply(input, self.filter, self.bias)

Example usage (a 10x10 input cross-correlated with a 3x3 filter in 'valid' mode produces an 8x8 output, which is why an 8x8 gradient is passed to backward below):

module = ScipyConv2d(3, 3)
print("Filter and bias: ", list(module.parameters()))
input = torch.randn(10, 10, requires_grad=True)
output = module(input)
print("Output from the convolution: ", output)
output.backward(torch.randn(8, 8))
print("Gradient for the input map: ", input.grad)
Filter and bias: [Parameter containing:
tensor([[ 1.1407, 0.6346, -0.0944],
 [-0.5939, -0.1144, 0.0627],
 [ 0.4012, -0.4877, -0.6280]], requires_grad=True), Parameter containing:
tensor([[0.3372]], requires_grad=True)]
Output from the convolution: tensor([[-0.5889, 1.9098, -1.1029, -3.3765, -1.6983, 2.5669, 1.5563, 0.3241],
 [ 1.8811, -3.1677, -0.4530, 4.2938, -0.3859, -2.6784, -0.4717, -1.3796],
 [ 1.3129, 2.5753, 0.6625, 1.2519, 3.7540, 1.5647, 1.7548, 3.8592],
 [ 0.8001, -0.6487, 2.5804, 3.7698, 0.3425, 0.5344, 1.1604, 1.4503],
 [ 0.5724, 0.7822, -0.0070, -1.2796, -1.6289, -0.4995, 1.1365, 0.8296],
 [-0.0944, -1.9118, -0.2770, 1.6874, 0.2638, -0.9998, 0.1122, 1.6844],
 [ 0.9248, 1.5961, -0.3228, -1.0089, 0.3024, 3.3935, 0.5927, -2.8174],
 [ 0.1716, -2.3171, 1.0417, 3.4000, 0.2192, -1.5859, 1.2105, 3.7964]],
 grad_fn=<ScipyConv2dFunctionBackward>)
Gradient for the input map: tensor([[ 1.2679, -0.3556, -0.3152, 0.0333, -0.4327, -1.3121, -1.5846, -1.9736,
 -0.7834, 0.1282],
 [ 1.6473, 1.3787, 2.7095, 2.4690, -1.2910, 0.1712, 1.3630, 1.9619,
 0.6073, -0.1646],
 [-0.6681, 0.0450, -1.7358, -1.8676, 0.2391, -0.6194, -0.1407, 0.3997,
 1.2439, 0.8826],
 [-0.7398, -2.8350, -0.4051, -2.2335, -3.5536, 3.0243, 1.5868, -2.2011,
 -1.4191, -0.3912],
 [ 2.1779, 2.5195, 2.3890, 2.5589, 1.0739, -1.0692, 0.6250, 0.8456,
 -0.4076, -0.1855],
 [-1.9770, -0.2620, -0.4430, -1.0235, 1.5632, 1.9843, -2.0087, -2.3784,
 0.6456, 0.8416],
 [ 1.7909, 0.6683, 0.7601, -0.5546, -0.6727, 1.3565, 1.7288, 0.3273,
 0.0591, 0.2688],
 [-0.2760, 1.0134, 0.4758, 1.0994, 0.2051, -1.5185, -1.8077, -1.4707,
 0.0884, 0.4750],
 [ 0.0834, -0.6190, -0.6571, -1.1965, -0.2158, 0.0589, -0.5232, -0.1469,
 -0.2662, -0.0678],
 [ 0.1640, 0.0274, -0.3691, -0.1492, -0.7384, -0.8612, 0.0098, 0.2065,
 0.9050, 0.6752]])

Check the gradients with torch.autograd.gradcheck, which compares the analytical gradients computed by backward against numerical finite-difference estimates (the double-precision input keeps those estimates accurate):

from torch.autograd.gradcheck import gradcheck

moduleConv = ScipyConv2d(3, 3)

input = [torch.randn(20, 20, dtype=torch.double, requires_grad=True)]
test = gradcheck(moduleConv, input, eps=1e-6, atol=1e-4)
print("Are the gradients correct: ", test)
Are the gradients correct: True

Total running time of the script: (0 minutes 0.610 seconds)