import torch
import torch.nn as nn
import torch.nn.functional as F


class Inception5h(nn.Module):
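    """GoogLeNet "inception5h", the checkpoint popularized by DeepDream.

    The layer names, LRN settings, asymmetric padding, and 1008-way output
    suggest this module was converted from the TensorFlow inception5h graph.
    """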

    def __init__(self):
        super(Inception5h, self).__init__()
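        # Stem convolutions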
        self.conv2d0_pre_relu_conv = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(7, 7), stride=(2, 2), groups=1, bias=True)
        self.conv2d1_pre_relu_conv = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.conv2d2_pre_relu_conv = nn.Conv2d(in_channels=64, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
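        # mixed3a branch convolutions (input: 192 channels from the stem)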
        self.mixed3a_1x1_pre_relu_conv = nn.Conv2d(in_channels=192, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3a_3x3_bottleneck_pre_relu_conv = nn.Conv2d(in_channels=192, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3a_5x5_bottleneck_pre_relu_conv = nn.Conv2d(in_channels=192, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3a_pool_reduce_pre_relu_conv = nn.Conv2d(in_channels=192, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3a_3x3_pre_relu_conv = nn.Conv2d(in_channels=96, out_channels=128, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.mixed3a_5x5_pre_relu_conv = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
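        # mixed3b branch convolutions (input: 256 channels)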
        self.mixed3b_1x1_pre_relu_conv = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3b_3x3_bottleneck_pre_relu_conv = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3b_5x5_bottleneck_pre_relu_conv = nn.Conv2d(in_channels=256, out_channels=32, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3b_pool_reduce_pre_relu_conv = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed3b_3x3_pre_relu_conv = nn.Conv2d(in_channels=128, out_channels=192, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.mixed3b_5x5_pre_relu_conv = nn.Conv2d(in_channels=32, out_channels=96, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
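        # mixed4a branch convolutions (input: 480 channels)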
        self.mixed4a_1x1_pre_relu_conv = nn.Conv2d(in_channels=480, out_channels=192, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed4a_3x3_bottleneck_pre_relu_conv = nn.Conv2d(in_channels=480, out_channels=96, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed4a_5x5_bottleneck_pre_relu_conv = nn.Conv2d(in_channels=480, out_channels=16, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed4a_pool_reduce_pre_relu_conv = nn.Conv2d(in_channels=480, out_channels=64, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.mixed4a_3x3_pre_relu_conv = nn.Conv2d(in_channels=96, out_channels=204, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=True)
        self.mixed4a_5x5_pre_relu_conv = nn.Conv2d(in_channels=16, out_channels=48, kernel_size=(5, 5), stride=(1, 1), groups=1, bias=True)
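        # Classifier head (head0) and fully connected layers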
        self.head0_bottleneck_pre_relu_conv = nn.Conv2d(in_channels=508, out_channels=128, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=True)
        self.nn0_pre_relu_matmul = nn.Linear(in_features=2048, out_features=1024, bias=True)
        self.softmax0_pre_activation_matmul = nn.Linear(in_features=1024, out_features=1008, bias=True)

    def forward(self, x):
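        # The explicit F.pad calls mirror TensorFlow's 'SAME' padding; for a
        # 224x224 input the stride-2 7x7 conv pads asymmetrically (2 top/left,
        # 3 bottom/right), and max-pools pad with -inf so padding never wins.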
        conv2d0_pre_relu_conv_pad = F.pad(x, (2, 3, 2, 3))
        conv2d0_pre_relu_conv = self.conv2d0_pre_relu_conv(conv2d0_pre_relu_conv_pad)
        conv2d0 = F.relu(conv2d0_pre_relu_conv)
        maxpool0_pad = F.pad(conv2d0, (0, 1, 0, 1), value=float('-inf'))
        maxpool0 = F.max_pool2d(maxpool0_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
        localresponsenorm0 = F.local_response_norm(maxpool0, size=9, alpha=9.999999747378752e-05, beta=0.5, k=1)
        conv2d1_pre_relu_conv = self.conv2d1_pre_relu_conv(localresponsenorm0)
        conv2d1 = F.relu(conv2d1_pre_relu_conv)
        conv2d2_pre_relu_conv_pad = F.pad(conv2d1, (1, 1, 1, 1))
        conv2d2_pre_relu_conv = self.conv2d2_pre_relu_conv(conv2d2_pre_relu_conv_pad)
        conv2d2 = F.relu(conv2d2_pre_relu_conv)
        localresponsenorm1 = F.local_response_norm(conv2d2, size=9, alpha=9.999999747378752e-05, beta=0.5, k=1)
        maxpool1_pad = F.pad(localresponsenorm1, (0, 1, 0, 1), value=float('-inf'))
        maxpool1 = F.max_pool2d(maxpool1_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
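        # mixed3a: parallel 1x1 / 3x3 / 5x5 / pool branches, concatenated below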
        mixed3a_1x1_pre_relu_conv = self.mixed3a_1x1_pre_relu_conv(maxpool1)
        mixed3a_3x3_bottleneck_pre_relu_conv = self.mixed3a_3x3_bottleneck_pre_relu_conv(maxpool1)
        mixed3a_5x5_bottleneck_pre_relu_conv = self.mixed3a_5x5_bottleneck_pre_relu_conv(maxpool1)
        mixed3a_pool_pad = F.pad(maxpool1, (1, 1, 1, 1), value=float('-inf'))
        mixed3a_pool = F.max_pool2d(mixed3a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
        mixed3a_1x1 = F.relu(mixed3a_1x1_pre_relu_conv)
        mixed3a_3x3_bottleneck = F.relu(mixed3a_3x3_bottleneck_pre_relu_conv)
        mixed3a_5x5_bottleneck = F.relu(mixed3a_5x5_bottleneck_pre_relu_conv)
        mixed3a_pool_reduce_pre_relu_conv = self.mixed3a_pool_reduce_pre_relu_conv(mixed3a_pool)
        mixed3a_3x3_pre_relu_conv_pad = F.pad(mixed3a_3x3_bottleneck, (1, 1, 1, 1))
        mixed3a_3x3_pre_relu_conv = self.mixed3a_3x3_pre_relu_conv(mixed3a_3x3_pre_relu_conv_pad)
        mixed3a_5x5_pre_relu_conv_pad = F.pad(mixed3a_5x5_bottleneck, (2, 2, 2, 2))
        mixed3a_5x5_pre_relu_conv = self.mixed3a_5x5_pre_relu_conv(mixed3a_5x5_pre_relu_conv_pad)
        mixed3a_pool_reduce = F.relu(mixed3a_pool_reduce_pre_relu_conv)
        mixed3a_3x3 = F.relu(mixed3a_3x3_pre_relu_conv)
        mixed3a_5x5 = F.relu(mixed3a_5x5_pre_relu_conv)
        mixed3a = torch.cat((mixed3a_1x1, mixed3a_3x3, mixed3a_5x5, mixed3a_pool_reduce), 1)
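        # mixed3a output: 64 + 128 + 32 + 32 = 256 channels (mixed3b's input)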
        mixed3b_1x1_pre_relu_conv = self.mixed3b_1x1_pre_relu_conv(mixed3a)
        mixed3b_3x3_bottleneck_pre_relu_conv = self.mixed3b_3x3_bottleneck_pre_relu_conv(mixed3a)
        mixed3b_5x5_bottleneck_pre_relu_conv = self.mixed3b_5x5_bottleneck_pre_relu_conv(mixed3a)
        mixed3b_pool_pad = F.pad(mixed3a, (1, 1, 1, 1), value=float('-inf'))
        mixed3b_pool = F.max_pool2d(mixed3b_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
        mixed3b_1x1 = F.relu(mixed3b_1x1_pre_relu_conv)
        mixed3b_3x3_bottleneck = F.relu(mixed3b_3x3_bottleneck_pre_relu_conv)
        mixed3b_5x5_bottleneck = F.relu(mixed3b_5x5_bottleneck_pre_relu_conv)
        mixed3b_pool_reduce_pre_relu_conv = self.mixed3b_pool_reduce_pre_relu_conv(mixed3b_pool)
        mixed3b_3x3_pre_relu_conv_pad = F.pad(mixed3b_3x3_bottleneck, (1, 1, 1, 1))
        mixed3b_3x3_pre_relu_conv = self.mixed3b_3x3_pre_relu_conv(mixed3b_3x3_pre_relu_conv_pad)
        mixed3b_5x5_pre_relu_conv_pad = F.pad(mixed3b_5x5_bottleneck, (2, 2, 2, 2))
        mixed3b_5x5_pre_relu_conv = self.mixed3b_5x5_pre_relu_conv(mixed3b_5x5_pre_relu_conv_pad)
        mixed3b_pool_reduce = F.relu(mixed3b_pool_reduce_pre_relu_conv)
        mixed3b_3x3 = F.relu(mixed3b_3x3_pre_relu_conv)
        mixed3b_5x5 = F.relu(mixed3b_5x5_pre_relu_conv)
        mixed3b = torch.cat((mixed3b_1x1, mixed3b_3x3, mixed3b_5x5, mixed3b_pool_reduce), 1)
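        # mixed3b output: 128 + 192 + 96 + 64 = 480 channels (mixed4a's input)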
        maxpool4_pad = F.pad(mixed3b, (0, 1, 0, 1), value=float('-inf'))
        maxpool4 = F.max_pool2d(maxpool4_pad, kernel_size=(3, 3), stride=(2, 2), padding=0, ceil_mode=False)
        mixed4a_1x1_pre_relu_conv = self.mixed4a_1x1_pre_relu_conv(maxpool4)
        mixed4a_3x3_bottleneck_pre_relu_conv = self.mixed4a_3x3_bottleneck_pre_relu_conv(maxpool4)
        mixed4a_5x5_bottleneck_pre_relu_conv = self.mixed4a_5x5_bottleneck_pre_relu_conv(maxpool4)
        mixed4a_pool_pad = F.pad(maxpool4, (1, 1, 1, 1), value=float('-inf'))
        mixed4a_pool = F.max_pool2d(mixed4a_pool_pad, kernel_size=(3, 3), stride=(1, 1), padding=0, ceil_mode=False)
        mixed4a_1x1 = F.relu(mixed4a_1x1_pre_relu_conv)
        mixed4a_3x3_bottleneck = F.relu(mixed4a_3x3_bottleneck_pre_relu_conv)
        mixed4a_5x5_bottleneck = F.relu(mixed4a_5x5_bottleneck_pre_relu_conv)
        mixed4a_pool_reduce_pre_relu_conv = self.mixed4a_pool_reduce_pre_relu_conv(mixed4a_pool)
        mixed4a_3x3_pre_relu_conv_pad = F.pad(mixed4a_3x3_bottleneck, (1, 1, 1, 1))
        mixed4a_3x3_pre_relu_conv = self.mixed4a_3x3_pre_relu_conv(mixed4a_3x3_pre_relu_conv_pad)
        mixed4a_5x5_pre_relu_conv_pad = F.pad(mixed4a_5x5_bottleneck, (2, 2, 2, 2))
        mixed4a_5x5_pre_relu_conv = self.mixed4a_5x5_pre_relu_conv(mixed4a_5x5_pre_relu_conv_pad)
        mixed4a_pool_reduce = F.relu(mixed4a_pool_reduce_pre_relu_conv)
        mixed4a_3x3 = F.relu(mixed4a_3x3_pre_relu_conv)
        mixed4a_5x5 = F.relu(mixed4a_5x5_pre_relu_conv)
        mixed4a = torch.cat((mixed4a_1x1, mixed4a_3x3, mixed4a_5x5, mixed4a_pool_reduce), 1)
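        # mixed4a output: 192 + 204 + 48 + 64 = 508 channels (head0's input)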
        head0_pool = F.avg_pool2d(mixed4a, kernel_size=(5, 5), stride=(3, 3), padding=0, ceil_mode=False, count_include_pad=False)
        head0_bottleneck_pre_relu_conv = self.head0_bottleneck_pre_relu_conv(head0_pool)
        head0_bottleneck = F.relu(head0_bottleneck_pre_relu_conv)
        # Pool to a fixed 4x4 so the flattened size is 128 * 4 * 4 = 2048,
        # matching nn0_pre_relu_matmul regardless of input resolution.
        x = F.adaptive_avg_pool2d(head0_bottleneck, (4, 4))
        x = torch.flatten(x, 1)
        nn0_pre_relu_matmul = self.nn0_pre_relu_matmul(x)
        nn0 = F.relu(nn0_pre_relu_matmul)
        nn0_reshape = torch.reshape(nn0, (-1, 1024))  # no-op: nn0 is already (N, 1024)
        softmax0_pre_activation_matmul = self.softmax0_pre_activation_matmul(nn0_reshape)
        softmax0 = F.softmax(softmax0_pre_activation_matmul, dim=1)
        return softmax0
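

if __name__ == "__main__":
    # Minimal smoke test, a sketch only: run a random batch through the
    # untrained network to check shapes. Real use would first load pretrained
    # inception5h weights, e.g. model.load_state_dict(torch.load("inception5h.pth"))
    # (the file name here is hypothetical).
    model = Inception5h()
    model.eval()
    with torch.no_grad():
        probs = model(torch.randn(1, 3, 224, 224))
    print(probs.shape)         # torch.Size([1, 1008])
    print(float(probs.sum()))  # ~1.0: softmax0 is a probability distribution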