fused_act.py
# modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501
import torch
from torch import nn
from torch.autograd import Function

# Prefer the pre-built extension; if it is missing, JIT-compile the CUDA kernel
# when the environment variable BASICSR_JIT is set to 'True'.
try:
    from . import fused_act_ext
except ImportError:
    import os
    BASICSR_JIT = os.getenv('BASICSR_JIT')
    if BASICSR_JIT == 'True':
        from torch.utils.cpp_extension import load
        module_path = os.path.dirname(__file__)
        fused_act_ext = load(
            'fused',
            sources=[
                os.path.join(module_path, 'src', 'fused_bias_act.cpp'),
                os.path.join(module_path, 'src', 'fused_bias_act_kernel.cu'),
            ],
        )
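

# Illustrative reference sketch, assuming the usual StyleGAN2 semantics of the
# fused op: scale * leaky_relu(input + bias), with the bias broadcast over the
# channel dimension. The helper name is hypothetical and is not used by the
# classes below; it only documents the math of fused_bias_act in plain PyTorch.
def _fused_leaky_relu_reference(input, bias=None, negative_slope=0.2, scale=2**0.5):
    if bias is not None:
        # Broadcast the per-channel bias over all trailing (spatial) dimensions.
        rest_dim = [1] * (input.ndim - bias.ndim - 1)
        input = input + bias.view(1, bias.shape[0], *rest_dim)
    return torch.nn.functional.leaky_relu(input, negative_slope=negative_slope) * scale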

class FusedLeakyReLUFunctionBackward(Function):
    """Backward pass of the fused leaky ReLU.

    Computes the gradients w.r.t. the input and the bias, and supports double
    backward by reusing the same fused kernel.
    """

    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        empty = grad_output.new_empty(0)
        grad_input = fused_act_ext.fused_bias_act(grad_output, empty, out, 3, 1, negative_slope, scale)

        # The bias gradient sums the input gradient over all non-channel dimensions.
        dim = [0]
        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))
        grad_bias = grad_input.sum(dim).detach()

        return grad_input, grad_bias

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors
        gradgrad_out = fused_act_ext.fused_bias_act(gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope,
                                                    ctx.scale)
        return gradgrad_out, None, None, None

class FusedLeakyReLUFunction(Function):
    """Fused bias addition + leaky ReLU + scaling, with a custom backward."""

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        out = fused_act_ext.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(grad_output, out, ctx.negative_slope, ctx.scale)
        return grad_input, grad_bias, None, None

class FusedLeakyReLU(nn.Module):
    """Fused leaky ReLU module with a learnable per-channel bias (scale 2**0.5, as in StyleGAN2)."""

    def __init__(self, channel, negative_slope=0.2, scale=2**0.5):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)

def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2**0.5):
    return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
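

# Usage sketch (illustrative; assumes the CUDA extension compiled successfully
# and a GPU is present). The tensor shapes are example values only.
if __name__ == '__main__':
    if torch.cuda.is_available():
        x = torch.randn(4, 64, 32, 32, device='cuda')
        act = FusedLeakyReLU(channel=64).cuda()
        out_module = act(x)  # module form: learnable per-channel bias
        out_func = fused_leaky_relu(x, torch.zeros(64, device='cuda'))  # functional form: explicit bias
        print(out_module.shape, out_func.shape)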