――I'd like to study this properly someday, but for now I'm just copying it.
```python:main.py
# Build the learning-model part of the AI (a neural network).
# The image placeholder (images_placeholder) and the dropout-rate
# placeholder (keep_prob) are the arguments.
# Outputs and returns the probability of each label for the input image.
def inference(images_placeholder, keep_prob):
    # Initialize the weights from a truncated normal distribution with
    # standard deviation 0.1
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)
    # Initialize the biases to the constant value 0.1
    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
    # Create a convolution layer (stride 1; SAME padding keeps the
    # output the same width and height as the input)
    def conv2d(x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    # Create a 2x2 max-pooling layer (halves the width and height)
    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')
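    # (Shape flow, assuming IMAGE_SIZE = 28: SAME padding with stride 1
    # keeps the width and height through each convolution, while each 2x2
    # pooling halves them, so the data goes 28x28x3 -> 28x28x32 -> 14x14x32
    # -> 14x14x64 -> 7x7x64. That is where the 7*7*64 further down
    # comes from.)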
    # Reshape the image data, which comes in as a flat vector, back into a
    # 28px * 28px image. The last dimension is the number of channels:
    # 3 for a color image (1 for monochrome).
    x_image = tf.reshape(images_placeholder, [-1, IMAGE_SIZE, IMAGE_SIZE, 3])
    # Create the first convolution layer
    with tf.name_scope('conv1') as scope:
        # The filter shape is [width, height, input channels, filters]:
        # a 5px * 5px filter is swept over the image. This time it is a
        # color image, so input channels is 3, and the layer detects
        # 32 features.
        W_conv1 = weight_variable([5, 5, 3, 32])
        # Bias values, one per filter
        b_conv1 = bias_variable([32])
        # The parts that look useful as features are kept, and the parts
        # that do not are zeroed out, i.e. not treated as features
        # (the ReLU function)
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    # Create pooling layer 1.
    # A 2*2 window is taken and the features inside it are compressed
    # nicely down to a single value. My understanding is that the window
    # slides 2 pixels at a time, so the compression is applied across the
    # whole image.
    # Roughly speaking, it summarizes (compresses) the finely subdivided
    # features into a coarser, more manageable form.
    with tf.name_scope('pool1') as scope:
        h_pool1 = max_pool_2x2(h_conv1)
    # Create the second convolution layer
    with tf.name_scope('conv2') as scope:
        # Filter again, with the output of the first layer as the input of
        # the second. input is 32 because conv1 detected 32 features, and
        # each of its 32 feature maps becomes one input channel here.
        # This layer detects 64 features.
        W_conv2 = weight_variable([5, 5, 32, 64])
        # Bias values (same as the first layer)
        b_conv2 = bias_variable([64])
        # Activate the detected features (same as the first layer)
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    # Create pooling layer 2 (same as pooling layer 1)
    with tf.name_scope('pool2') as scope:
        h_pool2 = max_pool_2x2(h_conv2)
    # Create fully connected layer 1
    with tf.name_scope('fc1') as scope:
        # After two rounds of 2x2 pooling, the 28px image is down to
        # 7px * 7px with 64 feature maps, hence 7*7*64 inputs
        W_fc1 = weight_variable([7*7*64, 1024])
        b_fc1 = bias_variable([1024])
        # Flatten the image-analysis result back into a vector
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
        # As in the first and second layers, activate the detected features
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        # Dropout settings.
        # Dropout apparently plays a role in preventing "overfitting",
        # where the AI becomes optimized only for the training data and is
        # not really usable on anything else.
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
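        # (keep_prob is the fraction of units kept: feed something like 0.5
        # while training and 1.0 at evaluation time, which turns dropout
        # off. See the usage sketch after this file.)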
    # Create fully connected layer 2 (the readout layer)
    with tf.name_scope('fc2') as scope:
        W_fc2 = weight_variable([1024, NUM_CLASSES])
        b_fc2 = bias_variable([NUM_CLASSES])
    # Normalize with the softmax function:
    # convert the output of the neural network so far into a probability
    # for each label
    with tf.name_scope('softmax') as scope:
        y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # Return (something like?) the probability of each label
    return y_conv
# Calculate how much "error" there was between the prediction result and
# the correct answer.
# logits is the computed result: float - [batch_size, NUM_CLASSES]
# labels is the correct labels:  int32 - [batch_size, NUM_CLASSES]
def loss(logits, labels):
    # Compute the cross entropy
    cross_entropy = -tf.reduce_sum(labels * tf.log(logits))
    # Register the value so it can be displayed in TensorBoard
    tf.summary.scalar("cross_entropy", cross_entropy)
    # Return the error value (cross_entropy)
    return cross_entropy
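# (A hedged aside: tf.log returns -inf if a predicted probability is
# exactly 0, which turns the loss into NaN. Common guards are clipping,
# e.g. tf.log(tf.clip_by_value(logits, 1e-10, 1.0)), or handing the
# pre-softmax values to tf.nn.softmax_cross_entropy_with_logits instead;
# this article's code does neither.)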
# Train the learning model designed in inference() with backpropagation,
# based on the error (loss).
# I'm not sure what happens behind the scenes, but my understanding is
# that the weights (W) and so on in each layer of the model are adjusted
# and optimized based on the error (?)
# (That is how the book "Is Artificial Intelligence Surpassing Humans?"
# explains it.)
def training(loss, learning_rate):
    # Apparently this one function does all of that
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    return train_step
# Calculate the accuracy of the predictions the learning model gives at
# inference
def accuracy(logits, labels):
    # Compare whether the predicted label and the correct label are equal;
    # returns True where they are the same.
    # argmax returns the index of the largest value in the array
    # (= the label number that looks most likely to be the correct answer)
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
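    # (Worked example: a prediction row [0.1, 0.7, 0.2] has argmax 1, and a
    # one-hot label row [0, 1, 0] also has argmax 1, so that sample counts
    # as correct.)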
    # Compute the accuracy by casting the boolean correct_prediction to
    # float (false becomes 0, true becomes 1) and taking the mean
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    # Register the value so it can be displayed in TensorBoard
    tf.summary.scalar("accuracy", accuracy)
    return accuracy
```
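
To convince myself these four pieces actually connect, here is a minimal usage sketch. It is not the series' real training script (that comes in the learning-execution part); IMAGE_SIZE = 28, NUM_CLASSES = 5, the 1e-4 learning rate, and the random dummy data are all assumptions for illustration, and it assumes the functions above are in scope.

```python
import numpy as np
import tensorflow as tf

IMAGE_SIZE = 28                              # assumed image size
NUM_CLASSES = 5                              # hypothetical class count
IMAGE_PIXELS = IMAGE_SIZE * IMAGE_SIZE * 3   # flattened RGB image

# Dummy stand-ins for the real data from the data-preparation step
train_images = np.random.rand(10, IMAGE_PIXELS).astype(np.float32)
train_labels = np.eye(NUM_CLASSES)[
    np.random.randint(0, NUM_CLASSES, 10)].astype(np.float32)

with tf.Graph().as_default():
    # Placeholders for a batch of flattened images and one-hot labels
    images_placeholder = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
    labels_placeholder = tf.placeholder(tf.float32, shape=(None, NUM_CLASSES))
    keep_prob = tf.placeholder(tf.float32)

    # Wire the four functions together into one graph
    logits = inference(images_placeholder, keep_prob)
    loss_value = loss(logits, labels_placeholder)
    train_op = training(loss_value, 1e-4)
    acc = accuracy(logits, labels_placeholder)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(100):
            # keep_prob 0.5: drop half the units while training
            sess.run(train_op, feed_dict={images_placeholder: train_images,
                                          labels_placeholder: train_labels,
                                          keep_prob: 0.5})
        # keep_prob 1.0 disables dropout when evaluating
        print(sess.run(acc, feed_dict={images_placeholder: train_images,
                                       labels_placeholder: train_labels,
                                       keep_prob: 1.0}))
```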
――You don't really get it, right? I only sort of understand it myself.
- I made a Dir en gray face classifier using TensorFlow - ① Introduction
- I made a Dir en gray face classifier using TensorFlow - ② Environment construction
- I made a Dir en gray face classifier using TensorFlow - ③ Image collection
- I made a Dir en gray face classifier using TensorFlow - ④ Face extraction
- I made a Dir en gray face classifier using TensorFlow - ⑤ Learning data preparation
- I made a Dir en gray face classifier using TensorFlow - ⑥ Learning program
- I made a Dir en gray face classifier using TensorFlow - ⑦ Learning model
- I made a Dir en gray face classifier using TensorFlow - ⑧ Learning execution
- I made a Dir en gray face classifier using TensorFlow - ⑨ Data visualization
- I made a Dir en gray face classifier using TensorFlow - ⑩ Face classification test
- I made a Dir en gray face classifier using TensorFlow - ⑪ Web release preparation
- I made a Dir en gray face classifier using TensorFlow - ⑫ Web release
- I made a Dir en gray face classifier using TensorFlow - ⑬ Playing (final)