Something wrong when computing the receptive field using non-zero gradient in Keras
I'm trying to compute the receptive field of specific neurons from their non-zero gradients, but I ran into something strange.
The following is a simple NN model built in Keras. The rest of the code calculates the gradient of one output neuron of conv2d_4 (the neuron at position (0, 2) on the first channel) with respect to that layer's input. By finding the non-zero values of the gradient map, we can easily locate the receptive field of the neuron. The receptive field of one neuron in the output of conv2d_4 with respect to its input should be 3x3, since the kernel size of conv2d_4 is 3x3, but the non-zero gradient map is a 4x5 patch (given by the True values in f_sum != 0).
import numpy as np
import keras.backend as K
import matplotlib.pyplot as plt
from keras.models import load_model, Model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Input, AveragePooling2D, Lambda
def model_build_func(input_shape=(25,25,1)):
    inp = Input(shape=input_shape, name='input')
    x = Conv2D(32, (3,3), activation='linear', name='conv2d_1')(inp)
    x = Conv2D(32, (3,3), activation='linear', name='conv2d_2')(x)
    x = AveragePooling2D(pool_size=(2,2))(x)
    x = Conv2D(64, (3,3), activation='linear', name='conv2d_3')(x)
    x = Conv2D(64, (3,3), activation='linear', name='conv2d_4')(x)
    x = AveragePooling2D(pool_size=(2,2))(x)
    x = Flatten()(x)
    x = Dense(units=64, name='dense_1')(x)
    x = Dense(units=2, name='dense_2')(x)
    model = Model(inputs=inp, outputs=x)
    return model
# used for building the Lambda layer
def get_mask_tensor(input_tensors, x_pos, y_pos, channel_idx):
    mask_tensor = K.tf.gradients(input_tensors[0][:, x_pos, y_pos, channel_idx], input_tensors[1])[0]
    return mask_tensor
#specify the position of the neuron that we want to compute the RF
x_pos = 0
y_pos = 2
channel_idx = 0
layer_idx = 5 # the layer: conv2d_4
model = model_build_func()
current_layer = model.layers[layer_idx]
#get the gradient tensor
mask_tensor = Lambda(get_mask_tensor, output_shape=K.int_shape(model.input),
                     arguments={'x_pos': x_pos, 'y_pos': y_pos, 'channel_idx': channel_idx})([current_layer.output, current_layer.input])
#create a keras model
new_model = Model(inputs=[model.input], outputs=[mask_tensor])
#get the value of the gradient map
gradient_map = new_model.predict(0.1*(np.random.random(size=(32,25,25,1))-0.05))
f_sum = np.sum(np.abs(gradient_map), axis=-1)
f_sum = np.sum(np.abs(f_sum), axis=0)
# f_sum != 0 is a boolean array.
# It should have a 3x3 patch of True values, but here the patch is 4x5.
plt.imshow(f_sum!=0)
plt.grid()
plt.show()
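For reference, the same per-neuron gradient can also be taken directly with K.gradients and K.function, without wrapping it in a Lambda layer. This is only a minimal sketch under the same Keras 2.x / TensorFlow 1.x backend assumption as the code above (grad_fn and gradient_map_direct are names I introduce here for illustration), not the exact code I ran:

# Minimal sketch: gradient of the targeted conv2d_4 neuron w.r.t. that layer's input,
# taken with K.gradients directly instead of through a Lambda layer.
grads = K.gradients(current_layer.output[:, x_pos, y_pos, channel_idx],
                    current_layer.input)[0]
grad_fn = K.function([model.input], [grads])

x_batch = 0.1 * (np.random.random(size=(32, 25, 25, 1)) - 0.05)
# Shape (32, 8, 8, 64), i.e. the shape of conv2d_4's input (conv2d_3's output).
gradient_map_direct = grad_fn([x_batch])[0]

Finding the non-zero entries of gradient_map_direct should locate the same receptive field as the Lambda-layer version above.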
python keras
asked Nov 14 '18 at 9:02 by Yetionyo, edited Nov 14 '18 at 11:58