using namespace shogun;
CConvolutionalFeatureMap::CConvolutionalFeatureMap(
    int32_t input_width, int32_t input_height,
    int32_t radius_x, int32_t radius_y,
    int32_t stride_x, int32_t stride_y,
    int32_t index, EConvMapActivationFunction function) :
    m_input_width(input_width), m_input_height(input_height),
    m_radius_x(radius_x), m_radius_y(radius_y),
    m_stride_x(stride_x), m_stride_y(stride_y),
    // ...
    m_activation_function(function)
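The constructor parameters fix the geometry of the map: the input size, the filter radii, and the subsampling strides. As a quick orientation, the following standalone sketch (not part of this file) computes the derived quantities; the input/stride output-size convention and the 2*radius+1 filter size are assumptions inferred from the listing and from members such as m_output_num_neurons.

#include <cstdint>
#include <cstdio>

int main()
{
    int32_t input_width = 16, input_height = 16;    // input map size
    int32_t radius_x = 2, radius_y = 2;             // filter radii
    int32_t stride_x = 2, stride_y = 2;             // subsampling strides

    int32_t filter_width  = 2*radius_x + 1;         // 5
    int32_t filter_height = 2*radius_y + 1;         // 5

    int32_t output_width  = input_width/stride_x;   // 8 (assumed convention)
    int32_t output_height = input_height/stride_y;  // 8 (assumed convention)

    int32_t input_num_neurons  = input_width*input_height;    // 256
    int32_t output_num_neurons = output_width*output_height;  // 64

    std::printf("filter %dx%d, output %dx%d, %d -> %d neurons\n",
        filter_width, filter_height, output_width, output_height,
        input_num_neurons, output_num_neurons);
    return 0;
}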
void CConvolutionalFeatureMap::compute_activations(
    SGVector<float64_t> parameters, CDynamicObjectArray* layers,
    SGVector<int32_t> input_indices, SGMatrix<float64_t> activations,
    SGMatrix<float64_t> buffer)
{
    int32_t batch_size = activations.num_cols;
    // ...
    for (int32_t l=0; l<input_indices.vlen; l++)
    // ...
    for (int32_t m=0; m<num_maps; m++)
    // ...
    for (int32_t j=0; j<batch_size; j++)
    // ...
    input(i+m*m_input_num_neurons,j);
    // ...
    convolve(buffer, weights_matrix, activations,
    // ...
    for (int32_t j=0; j<batch_size; j++)
    // ...
    for (int32_t j=0; j<batch_size; j++)
    // ...
    for (int32_t j=0; j<batch_size; j++)
    // ...
    CMath::max<float64_t>(0, activations(i+m_row_offset,j));
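In compute_activations the map's outputs occupy a block of rows of the layer's activations matrix starting at m_row_offset, with one column per example in the batch; the final loops apply the chosen nonlinearity elementwise, the CMath::max line being the rectified-linear case. A standalone sketch of that access pattern follows; the raw std::vector storage and the sizes are illustrative assumptions, not Shogun code.

#include <algorithm>
#include <vector>
#include <cstdint>

int main()
{
    int32_t num_rows = 12, batch_size = 3;       // whole layer rows x batch
    int32_t row_offset = 4, map_neurons = 4;     // this map's slice of rows
    std::vector<double> activations(num_rows*batch_size, -1.0);

    for (int32_t i = 0; i < map_neurons; i++)
        for (int32_t j = 0; j < batch_size; j++)
        {
            // column-major access, mirroring activations(i + row_offset, j)
            double& a = activations[(i + row_offset) + j*num_rows];
            a = std::max(0.0, a);                // rectified-linear activation
        }
    return 0;
}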
void CConvolutionalFeatureMap::compute_gradients(
    SGVector<float64_t> parameters, SGMatrix<float64_t> activations,
    SGMatrix<float64_t> activation_gradients, CDynamicObjectArray* layers,
    SGVector<int32_t> input_indices, SGVector<float64_t> parameter_gradients)
{
    int32_t batch_size = activation_gradients.num_cols;
    // ...
    for (int32_t j=0; j<batch_size; j++)
    // ...
    for (int32_t j=0; j<batch_size; j++)
    // ...
    bias_gradients[i] = 0;
    for (int32_t j=0; j<batch_size; j++)
        bias_gradients[i] += activation_gradients(i+m_row_offset,j);
    // ...
    for (int32_t l=0; l<input_indices.vlen; l++)
    // ...
    for (int32_t m=0; m<num_maps; m++)
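The bias-gradient lines above show the accumulation pattern used throughout compute_gradients: each parameter gradient is summed over the batch, so bias_gradients[i] ends up as the sum of the activation gradients of output neuron i across all columns. A self-contained sketch of just that step (the sizes and the plain std::vector layout are assumptions standing in for SGMatrix/SGVector):

#include <vector>
#include <cstdint>

int main()
{
    int32_t num_rows = 8, batch_size = 4;        // layer rows x batch
    int32_t row_offset = 2, map_neurons = 3;     // this map's rows
    std::vector<double> activation_gradients(num_rows*batch_size, 0.5);
    std::vector<double> bias_gradients(map_neurons);

    for (int32_t i = 0; i < map_neurons; i++)
    {
        bias_gradients[i] = 0;
        for (int32_t j = 0; j < batch_size; j++)
            // sum the gradient of neuron i over all examples in the batch
            bias_gradients[i] +=
                activation_gradients[(i + row_offset) + j*num_rows];
    }
    return 0;
}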
void CConvolutionalFeatureMap::pool_activations(
    SGMatrix<float64_t> activations,
    int32_t pooling_width, int32_t pooling_height,
    SGMatrix<float64_t> pooled_activations,
    SGMatrix<float64_t> max_indices)
{
    int32_t pooled_row_offset = m_row_offset/(pooling_width*pooling_height);
    // ...
    for (int32_t i=0; i<pooled_activations.num_cols; i++)
    // ...
    pooled_activations.matrix+i*pooled_activations.num_rows + pooled_row_offset,
    // ...
    for (int32_t x1=x; x1<x+pooling_width; x1++)
    // ...
    for (int32_t y1=y; y1<y+pooling_height; y1++)
    // ...
    if (image(y1,x1) > max)
    // ...
    result(y/pooling_height, x/pooling_width) = max;
    indices(y/pooling_height, x/pooling_width) = max_index;
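pool_activations performs max pooling: each non-overlapping pooling_width by pooling_height block of the map is reduced to its maximum, and the flat index of the winning element is stored in max_indices so the gradient can later be routed back to it. The standalone sketch below mirrors the loop structure above; the Mat helper, the sizes, and the test values are illustrative stand-ins for SGMatrix, and the row offsets the real routine applies are omitted.

#include <vector>
#include <cstdint>

// tiny column-major matrix helper, standing in for SGMatrix
struct Mat
{
    int32_t rows, cols;
    std::vector<double> data;
    Mat(int32_t r, int32_t c) : rows(r), cols(c), data(r*c, 0.0) {}
    double& operator()(int32_t i, int32_t j) { return data[i + j*rows]; }
};

int main()
{
    int32_t width = 4, height = 4, pooling_width = 2, pooling_height = 2;
    Mat image(height, width);
    Mat result(height/pooling_height, width/pooling_width);
    Mat indices(height/pooling_height, width/pooling_width);

    for (int32_t j = 0; j < width*height; j++)
        image.data[j] = j%7;                     // arbitrary test values

    for (int32_t x = 0; x < width; x += pooling_width)
        for (int32_t y = 0; y < height; y += pooling_height)
        {
            double max = image(y, x);
            int32_t max_index = x*height + y;    // flat column-major index
            for (int32_t x1 = x; x1 < x+pooling_width; x1++)
                for (int32_t y1 = y; y1 < y+pooling_height; y1++)
                    if (image(y1, x1) > max)
                    {
                        max = image(y1, x1);
                        max_index = x1*height + y1;
                    }
            result(y/pooling_height, x/pooling_width) = max;
            indices(y/pooling_height, x/pooling_width) = max_index;
        }
    return 0;
}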
void CConvolutionalFeatureMap::convolve(
    SGMatrix<float64_t> inputs, SGMatrix<float64_t> weights,
    SGMatrix<float64_t> outputs, bool flip, bool reset_output,
    int32_t inputs_row_offset,
    int32_t outputs_row_offset)
{
    for (int32_t i=0; i<outputs.num_cols; i++)
    // ...
    weights(y1-y+m_radius_y,x1-x+m_radius_x)*image(y1,x1);
    // ...
    weights(m_radius_y-y1+y,m_radius_x-x1+x)*image(y1,x1);
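The two lines above are the heart of convolve: both accumulate weights(...)*image(y1,x1) over the receptive field of an output position (x, y). One reads the kernel with the indices as written, weights(y1-y+m_radius_y, x1-x+m_radius_x), i.e. correlation with the kernel as stored; the other mirrors both indices, weights(m_radius_y-y1+y, m_radius_x-x1+x), i.e. correlation with the flipped kernel (true convolution). The flip argument selects between them, though which branch corresponds to flip==true is not visible in the surviving lines. A small sketch of the two index computations (all values arbitrary):

#include <cstdint>
#include <cstdio>

int main()
{
    int32_t radius_x = 2, radius_y = 2;
    int32_t x = 5, y = 7;            // output position
    int32_t x1 = 6, y1 = 6;          // a neighbour inside its receptive field

    // kernel cell read without mirroring (correlation-style)
    int32_t row_as_written = y1 - y + radius_y, col_as_written = x1 - x + radius_x;
    // kernel cell read with mirrored indices (convolution-style)
    int32_t row_mirrored   = radius_y - y1 + y, col_mirrored   = radius_x - x1 + x;

    std::printf("as written: weights(%d,%d)  mirrored: weights(%d,%d)\n",
        row_as_written, col_as_written, row_mirrored, col_mirrored);
    return 0;
}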
void CConvolutionalFeatureMap::compute_weight_gradients(
    SGMatrix<float64_t> inputs, SGMatrix<float64_t> local_gradients,
    SGMatrix<float64_t> weight_gradients,
    int32_t inputs_row_offset,
    int32_t local_gradients_row_offset)
{
    for (int32_t i=0; i<local_gradients.num_cols; i++)
    // ...
    weight_gradients(m_radius_y-y1+y,m_radius_x-x1+x) +=
    // ...
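compute_weight_gradients accumulates into the filter: for each output position, the local gradient at that position is multiplied by each input pixel in its receptive field and added to the corresponding mirrored-index filter cell, matching the += line above (the right-hand side of that statement did not survive extraction, so the product below is an assumption about its form). A reduced, single-output-position sketch with illustrative storage, bounds, and values:

#include <vector>
#include <cstdint>

int main()
{
    int32_t radius_x = 1, radius_y = 1;
    int32_t height = 5, width = 5;
    std::vector<double> image(height*width, 1.0);               // column-major input
    std::vector<double> weight_gradients((2*radius_y+1)*(2*radius_x+1), 0.0);

    int32_t x = 2, y = 2;            // one output position
    double local_gradient = 0.25;    // gradient of the loss w.r.t. that output

    for (int32_t x1 = x-radius_x; x1 <= x+radius_x; x1++)
        for (int32_t y1 = y-radius_y; y1 <= y+radius_y; y1++)
            // mirrored filter cell, column-major with 2*radius_y+1 rows
            weight_gradients[(radius_y - y1 + y)
                + (radius_x - x1 + x)*(2*radius_y+1)] +=
                local_gradient * image[y1 + x1*height];
    return 0;
}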
Symbols referenced in this listing:

CConvolutionalFeatureMap(int32_t input_width, int32_t input_height, int32_t radius_x, int32_t radius_y, int32_t stride_x=1, int32_t stride_y=1, int32_t index=0, EConvMapActivationFunction function=CMAF_IDENTITY)
void compute_activations(SGVector< float64_t > parameters, CDynamicObjectArray *layers, SGVector< int32_t > input_indices, SGMatrix< float64_t > activations, SGMatrix< float64_t > buffer)
void compute_gradients(SGVector< float64_t > parameters, SGMatrix< float64_t > activations, SGMatrix< float64_t > activation_gradients, CDynamicObjectArray *layers, SGVector< int32_t > input_indices, SGVector< float64_t > parameter_gradients)
void convolve(SGMatrix< float64_t > inputs, SGMatrix< float64_t > weights, SGMatrix< float64_t > outputs, bool flip, bool reset_output, int32_t inputs_row_offset, int32_t outputs_row_offset)
void compute_weight_gradients(SGMatrix< float64_t > inputs, SGMatrix< float64_t > local_gradients, SGMatrix< float64_t > weight_gradients, int32_t inputs_row_offset, int32_t local_gradients_row_offset)
void pool_activations(SGMatrix< float64_t > activations, int32_t pooling_width, int32_t pooling_height, SGMatrix< float64_t > pooled_activations, SGMatrix< float64_t > max_indices)
EConvMapActivationFunction m_activation_function: determines the activation function for neurons in a convolutional feature map.
int32_t m_input_num_neurons, int32_t m_output_num_neurons: number of neurons in the map's input and output.
CNeuralLayer: base class for neural network layers; the listing uses its get_activations(), get_activation_gradients(), and get_num_neurons() accessors.
CDynamicObjectArray: dynamic array class for CSGObject pointers that creates an array that can be used like a list or an array; accessed through CSGObject* element(int32_t idx1, int32_t idx2=0, int32_t idx3=0).
CMath::exp: static float64_t exp(float64_t x).
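For orientation, here is a minimal usage sketch built only from the signatures listed above. It is an assumption-laden example, not code from the Shogun sources: the include paths, the init_shogun_with_defaults()/exit_shogun() setup, the CMAF_RECTIFIED_LINEAR enumerator, and the matrix sizes (a map constructed with index 0 and unit strides is assumed to write its input_width*input_height outputs starting at row 0) may need adjusting against the actual headers.

#include <shogun/base/init.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/neuralnets/ConvolutionalFeatureMap.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // 16x16 input map, 5x5 filter (radius 2), unit strides, map index 0,
    // rectified-linear activation (assumed enumerator name).
    CConvolutionalFeatureMap map(16, 16, 2, 2, 1, 1, 0, CMAF_RECTIFIED_LINEAR);

    // One column per example; with index 0 the map's 16*16 = 256 outputs are
    // assumed to occupy rows 0..255.
    int32_t batch_size = 2;
    SGMatrix<float64_t> activations(256, batch_size);
    activations.zero();

    // 2x2 max pooling: 64 pooled outputs per example, plus the indices of the
    // winning elements for use during back-propagation.
    SGMatrix<float64_t> pooled(64, batch_size);
    SGMatrix<float64_t> max_indices(64, batch_size);
    map.pool_activations(activations, 2, 2, pooled, max_indices);

    exit_shogun();
    return 0;
}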