TensorFlow Tutorial






>>> import numpy as np
>>> a = np.zeros((2, 2)); b = np.ones((2, 2))
>>> np.sum(b, axis=1)
array([ 2., 2.])
>>> a.shape
(2, 2)
>>> np.reshape(a, (1, 4))
array([[ 0., 0., 0., 0.]])
>>> import tensorflow as tf
>>> sess = tf.Session()
>>> a = tf.zeros((2, 2)); b = tf.ones((2, 2))
>>> sess.run(tf.reduce_sum(b, axis=1))
[ 2., 2.]
>>> a.get_shape()
(2, 2)
>>> sess.run(tf.reshape(a, (1, 4)))
[[ 0., 0., 0., 0.]]
sess.run() is what actually evaluates a tensor; until then, a TensorFlow object only describes the computation:
>>> sess = tf.Session()
>>> a = np.zeros((2, 2)); ta = tf.zeros((2, 2))
>>> print(a)
[[ 0. 0.]
[ 0. 0.]]
>>> print(ta)
Tensor("zeros:0", shape=(2, 2), dtype=float32)
>>> print(sess.run(ta))
[[ 0. 0.]
[ 0. 0.]]






>>> a = tf.constant(5.0)
>>> b = tf.constant(6.0)
>>> c = a * b
>>> sess = tf.Session()
>>> print(sess.run(c))
30.0








All graph computation runs inside a tf.Session, which owns the resources needed to execute ops:


>>> sess = tf.Session()
>>> print(sess.run(c))
>>> with tf.Session() as sess:
...     print(sess.run(c))   # run inside the managed session
...     print(c.eval())      # shorthand for sess.run(c) in this block


c.eval() is equivalent to sess.run(c); it works inside a with tf.Session() block, where that session is installed as the default.


>>> w = tf.Variable(tf.zeros((2, 2)), name="weight")
>>> with tf.Session() as sess:
...     print(sess.run(w))   # FailedPreconditionError: w is not initialized yet


>>> w = tf.Variable(tf.random_normal([5, 2], stddev=0.1), name="weight")
>>> with tf.Session() as sess:
...     sess.run(tf.global_variables_initializer())
...     print(sess.run(w))
[[-0.10020355 -0.01114563]
 [ 0.04050281 -0.15980773]
 [-0.00628474 -0.02608337]
 [ 0.16397022  0.02898547]
 [ 0.04264377  0.04281621]]
A tf.Variable must be initialized (here via tf.global_variables_initializer()) before its value can be read.

A tf.Variable keeps its state across sess.run() calls and can be updated in place, for example with tf.assign():

>>> state = tf.Variable(0, name="counter")
>>> new_value = tf.add(state, tf.constant(1))
>>> update = tf.assign(state, new_value)
>>> with tf.Session() as sess:
...     sess.run(tf.global_variables_initializer())
...     print(sess.run(state))
...     for _ in range(3):
...         sess.run(update)
...         print(sess.run(state))
0
1
2
3
>>> x1 = tf.constant(1)
>>> x2 = tf.constant(2)
>>> x3 = tf.constant(3)
>>> temp = tf.add(x2, x3)
>>> mul = tf.multiply(x1, temp)  # tf.mul was renamed tf.multiply in TF 1.0
>>> with tf.Session() as sess:
...     result1, result2 = sess.run([mul, temp])
...     print(result1, result2)
5 5
sess.run(var) fetches the value of a single node; passing a list, sess.run([var1, .. ,]), fetches several tensors in one execution of the graph, which is cheaper than separate runs.
A tf.placeholder declares an input whose value is supplied at run time through feed_dict:
>>> a = tf.placeholder(tf.int16)
>>> b = tf.placeholder(tf.int16)
>>> add = tf.add(a, b)
>>> mul = tf.multiply(a, b)  # tf.mul was renamed tf.multiply in TF 1.0
>>> with tf.Session() as sess:
...     print(sess.run(add, feed_dict={a: 2, b: 3}))
...     print(sess.run(mul, feed_dict={a: 2, b: 3}))
5
6
Unlike a tf.Variable, a tf.placeholder stores no value of its own; every sess.run() that touches it must receive the value via feed_dict.
# using tf.constant
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    result = sess.run(product)
    print(result)

# using placeholder
import numpy as np
matrix1 = tf.placeholder(tf.float32, [1, 2])
matrix2 = tf.placeholder(tf.float32, [2, 1])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    mv1 = np.array([[3., 3.]])
    mv2 = np.array([[2.], [2.]])
    result = sess.run(product, feed_dict={matrix1: mv1, matrix2: mv2})
    print(result)
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

learning_rate = 0.001
max_steps = 15000
batch_size = 128

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

def MLP(inputs):
    W_1 = tf.Variable(tf.random_normal([784, 256]))
    b_1 = tf.Variable(tf.zeros([256]))
    W_2 = tf.Variable(tf.random_normal([256, 256]))
    b_2 = tf.Variable(tf.zeros([256]))
    W_out = tf.Variable(tf.random_normal([256, 10]))
    b_out = tf.Variable(tf.zeros([10]))
    h_1 = tf.add(tf.matmul(inputs, W_1), b_1)
    h_1 = tf.nn.relu(h_1)
    h_2 = tf.add(tf.matmul(h_1, W_2), b_2)
    h_2 = tf.nn.relu(h_2)
    out = tf.add(tf.matmul(h_2, W_out), b_out)
    return out

net = MLP(x)

# define loss and optimizer (keyword arguments are required in TF 1.x)
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=y))
opt = tf.train.AdamOptimizer(learning_rate).minimize(loss_op)

# initializing the variables
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)

# train model
for step in range(max_steps):
    batch_X, batch_y = mnist.train.next_batch(batch_size)
    _, loss = sess.run([opt, loss_op],
                       feed_dict={x: batch_X, y: batch_y})
    if (step+1) % 1000 == 0:
        print("[{}/{}] loss:{:.3f}".format(step+1, max_steps, loss))
print("Optimization Finished!")

# test model
correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
# calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Train accuracy: {:.3f}".format(sess.run(accuracy,
    feed_dict={x: mnist.train.images, y: mnist.train.labels})))
print("Test accuracy: {:.3f}".format(sess.run(accuracy,
    feed_dict={x: mnist.test.images, y: mnist.test.labels})))
tf.nn.softmax_cross_entropy_with_logits computes the softmax cross-entropy directly from the unnormalized logits, so no explicit softmax layer is needed before the loss. Training is driven by one of the tf.train.####Optimizer classes, and tf.train.####Optimizer.minimize(loss_op) returns an op that both computes the gradients and applies the update.
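As a quick illustration of that pattern (a minimal sketch on a toy loss, not part of the original deck), any optimizer can be swapped in without touching the rest of the graph:

import tensorflow as tf

x = tf.Variable(5.0)
loss_op = tf.square(x)  # toy loss, minimized at x == 0

# The optimizer class is interchangeable; only the constructor changes.
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss_op)
# train_op = tf.train.AdamOptimizer(0.001).minimize(loss_op)  # drop-in swap

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(20):
        sess.run(train_op)
    print(sess.run(loss_op))  # close to 0 after a few steps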
tf.variable_scope() manages namespaces for variables, and tf.get_variable() creates or retrieves a variable inside the current tf.variable_scope():
var1 = tf.Variable([1], name="var")
with tf.variable_scope("foo"):
    with tf.variable_scope("bar"):
        var2 = tf.Variable([1], name="var")
        var3 = tf.Variable([1], name="var")
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))
var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var_1:0
tf.get_variable() cooperates with variable scopes, while tf.Variable does not: even after reuse is enabled, tf.Variable simply creates a fresh variable, as the next example shows.
var1 = tf.Variable([1], name="var")
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var2 = tf.Variable([1], name="var")
        scp.reuse_variables()  # allow variable reuse
        var3 = tf.Variable([1], name="var")
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))
var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var_1:0
var3 is not foo/bar/var:0 but foo/bar/var_1:0: tf.Variable ignores the reuse flag. tf.get_variable() honors it, so the same variable really is shared:


var1 = tf.get_variable("var", [1])
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var2 = tf.get_variable("var", [1])
        scp.reuse_variables()  # allow variable reuse
        var3 = tf.get_variable("var", [1])
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))
var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var:0
tf.get_variable() consults the reuse flag of the enclosing scope. With reuse=False, asking for a name that already exists raises a ValueError; with reuse=True, asking for a name that does not exist raises a ValueError.
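A minimal sketch of both failure modes (assuming a fresh graph; not from the original slides):

import tensorflow as tf

with tf.variable_scope("demo"):          # reuse defaults to False
    v = tf.get_variable("v", [1])
    try:
        tf.get_variable("v", [1])        # name already exists -> ValueError
    except ValueError as e:
        print("duplicate create:", e)

with tf.variable_scope("demo", reuse=True):
    v2 = tf.get_variable("v", [1])       # OK: returns the existing variable
    try:
        tf.get_variable("w", [1])        # name does not exist -> ValueError
    except ValueError as e:
        print("reuse of missing:", e)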
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var1 = tf.get_variable("var", [1])
        scp.reuse_variables()
        var2 = tf.get_variable("var", [1])
    with tf.variable_scope("bar", reuse=True):
        var3 = tf.get_variable("var", [1])
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))
var1: foo/bar/var:0
var2: foo/bar/var:0
var3: foo/bar/var:0
For shared parameters, prefer tf.get_variable over tf.Variable: variables created with tf.Variable are awkward to share, whereas scoped variables can later be looked up by name, e.g. tf.###.get_variables_by_name("my_var", "my_scope").


• tf.nn.conv2d(input, filter, strides, padding, name=None)
  1. input: a 4-D tensor, usually [batch, height, width, channels]
  2. filter: a 4-D tensor [filter_height, filter_width, in_channels, out_channels]
  3. strides: the sliding stride for each dimension of input, e.g. [1, stride, stride, 1]
  4. padding: either "SAME" or "VALID"

• tf.nn.##_pool(value, ksize, strides, padding, name=None)
  1. value: a 4-D input tensor [batch, height, width, channels]
  2. ksize: the window size for each dimension, e.g. [1, k, k, 1]
  3. strides: the sliding stride for each dimension of input
  4. padding: either "SAME" or "VALID"
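To make the shapes concrete, here is a small sketch (not from the slides) that calls both ops directly on a dummy batch:

import numpy as np
import tensorflow as tf

# NHWC input: batch of 1, 28x28 pixels, 1 channel.
images = tf.constant(np.random.rand(1, 28, 28, 1), dtype=tf.float32)
# filter: 3x3 window, 1 input channel -> 16 output channels.
kernel = tf.Variable(tf.truncated_normal([3, 3, 1, 16], stddev=0.1))

conv = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding="SAME")
pool = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1],
                      strides=[1, 2, 2, 1], padding="SAME")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(pool).shape)  # (1, 14, 14, 16)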
from tensorflow.contrib.layers import variance_scaling_initializer
he_init = variance_scaling_initializer()

def conv(bottom,
         num_filter, ksize=3, stride=1, padding="SAME",
         scope=None):
    bottom_shape = bottom.get_shape().as_list()[3]  # input channels
    with tf.variable_scope(scope or "conv"):
        W = tf.get_variable("W",
                            [ksize, ksize, bottom_shape, num_filter],
                            initializer=he_init)
        b = tf.get_variable("b", [num_filter],
                            initializer=tf.constant_initializer(0))
        x = tf.nn.conv2d(bottom, W,
                         strides=[1, stride, stride, 1],
                         padding=padding)
        x = tf.nn.relu(tf.nn.bias_add(x, b))
    return x
def maxpool(bottom,
            ksize=2, stride=2, padding="SAME",
            scope=None):
    with tf.variable_scope(scope or "maxpool"):
        pool = tf.nn.max_pool(bottom, ksize=[1, ksize, ksize, 1],
                              strides=[1, stride, stride, 1],
                              padding=padding)
    return pool
from functools import reduce  # needed in Python 3

def fc(bottom, num_dims, scope=None):
    bottom_shape = bottom.get_shape().as_list()
    if len(bottom_shape) > 2:
        # flatten [N, H, W, C] -> [N, H*W*C]
        bottom = tf.reshape(bottom,
                            [-1, reduce(lambda x, y: x*y, bottom_shape[1:])])
        bottom_shape = bottom.get_shape().as_list()
    with tf.variable_scope(scope or "fc"):
        W = tf.get_variable("W", [bottom_shape[1], num_dims],
                            initializer=he_init)
        b = tf.get_variable("b", [num_dims],
                            initializer=tf.constant_initializer(0))
        out = tf.nn.bias_add(tf.matmul(bottom, W), b)
    return out

def fc_relu(bottom, num_dims, scope=None):
    with tf.variable_scope(scope or "fc"):
        out = fc(bottom, num_dims, scope="fc")
        relu = tf.nn.relu(out)
    return relu
keep_prob = tf.placeholder(tf.float32, None)

def conv_net(x, keep_prob):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    conv1 = conv(x, 32, 5, scope="conv_1")
    conv1 = maxpool(conv1, scope="maxpool_1")
    conv2 = conv(conv1, 64, 5, scope="conv_2")
    conv2 = maxpool(conv2, scope="maxpool_2")
    fc1 = fc_relu(conv2, 1024, scope="fc_1")
    fc1 = tf.nn.dropout(fc1, keep_prob)
    out = fc(fc1, 10, scope="out")
    return out




config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(config=config)
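allow_growth lets the session claim GPU memory on demand. An alternative (also a standard ConfigProto option) is to cap the fraction of GPU memory the process may use:

config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.4))
sess = tf.Session(config=config)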


tf.contrib collects volatile, experimental code that has not yet been promoted into core TensorFlow. The part used here is tf.contrib.slim (usually aliased to slim), a lightweight library that removes much of the boilerplate of defining, training, and evaluating models in plain tf.
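The examples below assume the conventional alias:

import tensorflow as tf

slim = tf.contrib.slim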
input = ...
with tf.name_scope('conv1_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128],
                                             dtype=tf.float32, stddev=1e-1),
                         name='weights')
    conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1],
                        padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128],
                                     dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope)
input = ...
net = slim.conv2d(input, 128, [3, 3],
                  padding='SAME', scope='conv1_1')
# 1. simple network generation with slim
net = ...
net = slim.conv2d(net, 256, [3, 3], scope='conv3_1')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')

# 1. cleaner by repeat operation:
net = ...
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                  scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')

# 2. Verbose way:
x = slim.fully_connected(x, 32, scope='fc/fc_1')
x = slim.fully_connected(x, 64, scope='fc/fc_2')
x = slim.fully_connected(x, 128, scope='fc/fc_3')

# 2. Equivalent, TF-Slim way using slim.stack:
x = slim.stack(x, slim.fully_connected, [32, 64, 128], scope='fc')
• tf.truncated_normal_initializer (from core tf)
• slim.xavier_initializer
• slim.variance_scaling_initializer
• slim.l1_regularizer
• slim.l2_regularizer
• …
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='SAME',
                  weights_initializer=slim.xavier_initializer(),
                  weights_regularizer=slim.l2_regularizer(0.0005),
                  scope='conv1')


he_init = slim.variance_scaling_initializer()
xavier_init = slim.xavier_initializer()
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=he_init,
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
        net = slim.conv2d(inputs, 64, [11, 11], 4, scope='conv1')
        net = slim.conv2d(net, 256, [5, 5],
                          weights_initializer=xavier_init,
                          scope='conv2')
        net = slim.fully_connected(net, 1000,
                                   activation_fn=None, scope='fc')




# Define the loss functions and get the total loss.
loss1 = slim.losses.softmax_cross_entropy(pred1, label1)
loss2 = slim.losses.mean_squared_error(pred2, label2)
# The following two lines have the same effect:
total_loss = loss1 + loss2
total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
# If you want to add regularization loss
reg_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss = loss1 + loss2 + reg_loss
# or
total_loss = slim.losses.get_total_loss()
Unlike plain tf, slim registers every loss it creates, which is what lets get_total_loss() assemble the full objective without manual bookkeeping.


def save(self, ckpt_dir, global_step=None):
    if self.config.get("saver") is None:
        self.config["saver"] = tf.train.Saver(max_to_keep=30)
    saver = self.config["saver"]
    sess = self.config["sess"]
    dirname = os.path.join(ckpt_dir, self.name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    saver.save(sess, dirname, global_step)


saver.save() writes the checkpoint files under dirname, and when global_step is given it is appended to the checkpoint filename.
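Hypothetical usage of the method above (the model object and path are made up for illustration, assuming self.name == "my_model"):

model.save("checkpoints", global_step=1000)
# -> writes checkpoint files such as checkpoints/my_model-1000.*
#    and updates the "checkpoint" bookkeeping file in that directory.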


def load_latest_checkpoint(self, ckpt_dir, exclude=None):
    path = tf.train.latest_checkpoint(ckpt_dir)
    if path is None:
        raise AssertionError("No ckpt exists in {0}.".format(ckpt_dir))
    print("Load {} save file".format(path))
    self._load(path, exclude)

def load_from_path(self, ckpt_path, exclude=None):
    self._load(ckpt_path, exclude)

def _load(self, ckpt_path, exclude):
    init_fn = slim.assign_from_checkpoint_fn(ckpt_path,
        slim.get_variables_to_restore(exclude=exclude),
        ignore_missing_vars=True)
    init_fn(self.config["sess"])




exclude lists variables that should not be restored from the checkpoint. ignore_missing_vars decides what happens when the checkpoint lacks some requested variables: False raises an error, True silently skips them.
X = tf.placeholder(tf.float32, [None, 224, 224, 3], name="X")
y = tf.placeholder(tf.int32, [None, 8], name="y")
is_training = tf.placeholder(tf.bool, name="is_training")

with slim.arg_scope(vgg.vgg_arg_scope()):
    net, end_pts = vgg.vgg_16(X, is_training=is_training,
                              num_classes=1000)
with tf.variable_scope("losses"):
    cls_loss = slim.losses.softmax_cross_entropy(net, y)
    reg_loss = tf.add_n(slim.losses.get_regularization_losses())
    loss_op = cls_loss + reg_loss
with tf.variable_scope("opt"):
    opt = tf.train.AdamOptimizer(0.001).minimize(loss_op)

self.load_from_path(ckpt_path=VGG_PATH, exclude=["vgg_16/fc8"])
...
The pretrained fc8 layer is excluded from loading because it is replaced for the new 8-class task. Layers that should stay frozen during fine-tuning can be created with trainable=False, so the optimizer never updates them.
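A minimal sketch of the freezing mechanism (illustrative names, fresh graph assumed):

# Variables created with trainable=False never enter
# tf.trainable_variables(), so minimize() leaves them untouched.
frozen  = tf.Variable(tf.zeros([2, 2]), trainable=False, name="frozen")
learned = tf.Variable(tf.zeros([2, 2]), name="learned")
print([v.name for v in tf.trainable_variables()])  # ['learned:0']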

Using TensorBoard takes four steps:
1. Create a tf.summary.FileWriter pointed at the log directory.
2. Define summary ops with tf.summary.### (scalar, histogram, ...).
3. Evaluate those summary ops inside sess.run(…).
4. Pass the results to FileWriter's add_summary.

Then launch the dashboard:

tensorboard --logdir=## --host=## --port=##
import tensorflow as tf
slim = tf.contrib.slim

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

max_steps = 10000
batch_size = 128
lr = 0.001
keep_prob = 0.5
weight_decay = 0.0004
logs_path = "/tmp/tensorflow_logs/example"
def my_arg_scope(is_training, weight_decay):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=slim.variance_scaling_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        stride=1, padding="SAME"):
        with slim.arg_scope([slim.dropout],
                            is_training=is_training) as arg_sc:
            return arg_sc
def my_net(x, keep_prob, outputs_collections="my_net"):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=outputs_collections):
        net = slim.conv2d(x, 64, [3, 3], scope="conv1")
        net = slim.max_pool2d(net, [2, 2], scope="pool1")
        net = slim.conv2d(net, 128, [3, 3], scope="conv2")
        net = slim.max_pool2d(net, [2, 2], scope="pool2")
        net = slim.conv2d(net, 256, [3, 3], scope="conv3")
        # global average pooling
        net = tf.reduce_mean(net, [1, 2], name="pool3", keep_dims=True)
        net = slim.dropout(net, keep_prob, scope="dropout3")
        net = slim.conv2d(net, 1024, [1, 1], scope="fc4")
        net = slim.dropout(net, keep_prob, scope="dropout4")
        net = slim.conv2d(net, 10, [1, 1],
                          activation_fn=None, scope="fc5")
    end_points = slim.utils.convert_collection_to_dict(outputs_collections)
    return tf.reshape(net, [-1, 10]), end_points
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)

with slim.arg_scope(my_arg_scope(is_training, weight_decay)):
    net, end_pts = my_net(x, keep_prob)
    pred = slim.softmax(net, scope="prediction")
with tf.variable_scope("losses"):
    cls_loss = slim.losses.softmax_cross_entropy(net, y)
    reg_loss = tf.add_n(slim.losses.get_regularization_losses())
    loss_op = cls_loss + reg_loss
with tf.variable_scope("Adam"):
    opt = tf.train.AdamOptimizer(lr)
    # Op to calculate every variable gradient
    grads = tf.gradients(loss_op, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = opt.apply_gradients(grads_and_vars=grads)
with tf.variable_scope("accuracy"):
    correct_op = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
    acc_op = tf.reduce_mean(tf.cast(correct_op, tf.float32))
# Create a summary to monitor loss and accuracy
summ_loss = tf.summary.scalar("loss", loss_op)
summ_acc = tf.summary.scalar("accuracy_test", acc_op)
# Create summaries to visualize weights and grads
for var in tf.trainable_variables():
    tf.summary.histogram(var.name, var, collections=["my_summ"])
for grad, var in grads:
    tf.summary.histogram(var.name + "/gradient", grad,
                         collections=["my_summ"])
summ_wg = tf.summary.merge_all(key="my_summ")

sess = tf.Session()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(logs_path,
                                       graph=sess.graph)

for step in range(max_steps):
    batch_X, batch_y = mnist.train.next_batch(batch_size)
    _, loss, plot_loss, plot_wg = sess.run([apply_grads, loss_op,
                                            summ_loss, summ_wg],
        feed_dict={x: batch_X, y: batch_y, is_training: True})
    summary_writer.add_summary(plot_loss, step)
    summary_writer.add_summary(plot_wg, step)
    if (step+1) % 100 == 0:
        plot_acc = sess.run(summ_acc, feed_dict={x: mnist.test.images,
                                                 y: mnist.test.labels,
                                                 is_training: False})
        summary_writer.add_summary(plot_acc, step)
print("Optimization Finished!")

test_acc = sess.run(acc_op, feed_dict={x: mnist.test.images,
                                       y: mnist.test.labels,
                                       is_training: False})
print("Test accuracy: {:.3f}".format(test_acc))
Note how is_training is switched through feed_dict: True during training so that dropout is active, False at evaluation time.
TensorFlow Tutorial

More Related Content

What's hot

Computational Linguistics week 10
 Computational Linguistics week 10 Computational Linguistics week 10
Computational Linguistics week 10Mark Chang
 
Introduction to Tensorflow
Introduction to TensorflowIntroduction to Tensorflow
Introduction to TensorflowTzar Umang
 
30 分鐘學會實作 Python Feature Selection
30 分鐘學會實作 Python Feature Selection30 分鐘學會實作 Python Feature Selection
30 分鐘學會實作 Python Feature SelectionJames Huang
 
Tensor board
Tensor boardTensor board
Tensor boardSung Kim
 
TensorFlow 深度學習快速上手班--電腦視覺應用
TensorFlow 深度學習快速上手班--電腦視覺應用TensorFlow 深度學習快速上手班--電腦視覺應用
TensorFlow 深度學習快速上手班--電腦視覺應用Mark Chang
 
Pybelsberg — Constraint-based Programming in Python
Pybelsberg — Constraint-based Programming in PythonPybelsberg — Constraint-based Programming in Python
Pybelsberg — Constraint-based Programming in PythonChristoph Matthies
 
Pythonbrasil - 2018 - Acelerando Soluções com GPU
Pythonbrasil - 2018 - Acelerando Soluções com GPUPythonbrasil - 2018 - Acelerando Soluções com GPU
Pythonbrasil - 2018 - Acelerando Soluções com GPUPaulo Sergio Lemes Queiroz
 
Machine Learning - Introduction to Tensorflow
Machine Learning - Introduction to TensorflowMachine Learning - Introduction to Tensorflow
Machine Learning - Introduction to TensorflowAndrew Ferlitsch
 
Kristhyan kurtlazartezubia evidencia1-metodosnumericos
Kristhyan kurtlazartezubia evidencia1-metodosnumericosKristhyan kurtlazartezubia evidencia1-metodosnumericos
Kristhyan kurtlazartezubia evidencia1-metodosnumericosKristhyanAndreeKurtL
 
Deep learning with C++ - an introduction to tiny-dnn
Deep learning with C++  - an introduction to tiny-dnnDeep learning with C++  - an introduction to tiny-dnn
Deep learning with C++ - an introduction to tiny-dnnTaiga Nomi
 
Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12
Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12
Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12OdessaFrontend
 
Intro to Python (High School) Unit #3
Intro to Python (High School) Unit #3Intro to Python (High School) Unit #3
Intro to Python (High School) Unit #3Jay Coskey
 
[신경망기초] 합성곱신경망
[신경망기초] 합성곱신경망[신경망기초] 합성곱신경망
[신경망기초] 합성곱신경망jaypi Ko
 
Pytorch and Machine Learning for the Math Impaired
Pytorch and Machine Learning for the Math ImpairedPytorch and Machine Learning for the Math Impaired
Pytorch and Machine Learning for the Math ImpairedTyrel Denison
 
GoLightly - a customisable virtual machine written in Go
GoLightly - a customisable virtual machine written in GoGoLightly - a customisable virtual machine written in Go
GoLightly - a customisable virtual machine written in GoEleanor McHugh
 

What's hot (20)

Computational Linguistics week 10
 Computational Linguistics week 10 Computational Linguistics week 10
Computational Linguistics week 10
 
About RNN
About RNNAbout RNN
About RNN
 
About RNN
About RNNAbout RNN
About RNN
 
Introduction to Tensorflow
Introduction to TensorflowIntroduction to Tensorflow
Introduction to Tensorflow
 
30 分鐘學會實作 Python Feature Selection
30 分鐘學會實作 Python Feature Selection30 分鐘學會實作 Python Feature Selection
30 分鐘學會實作 Python Feature Selection
 
Tensor board
Tensor boardTensor board
Tensor board
 
TensorFlow 深度學習快速上手班--電腦視覺應用
TensorFlow 深度學習快速上手班--電腦視覺應用TensorFlow 深度學習快速上手班--電腦視覺應用
TensorFlow 深度學習快速上手班--電腦視覺應用
 
深層学習後半
深層学習後半深層学習後半
深層学習後半
 
Pybelsberg — Constraint-based Programming in Python
Pybelsberg — Constraint-based Programming in PythonPybelsberg — Constraint-based Programming in Python
Pybelsberg — Constraint-based Programming in Python
 
Pythonbrasil - 2018 - Acelerando Soluções com GPU
Pythonbrasil - 2018 - Acelerando Soluções com GPUPythonbrasil - 2018 - Acelerando Soluções com GPU
Pythonbrasil - 2018 - Acelerando Soluções com GPU
 
Machine Learning - Introduction to Tensorflow
Machine Learning - Introduction to TensorflowMachine Learning - Introduction to Tensorflow
Machine Learning - Introduction to Tensorflow
 
Kristhyan kurtlazartezubia evidencia1-metodosnumericos
Kristhyan kurtlazartezubia evidencia1-metodosnumericosKristhyan kurtlazartezubia evidencia1-metodosnumericos
Kristhyan kurtlazartezubia evidencia1-metodosnumericos
 
Python book
Python bookPython book
Python book
 
Deep learning with C++ - an introduction to tiny-dnn
Deep learning with C++  - an introduction to tiny-dnnDeep learning with C++  - an introduction to tiny-dnn
Deep learning with C++ - an introduction to tiny-dnn
 
Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12
Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12
Машинное обучение на JS. С чего начать и куда идти | Odessa Frontend Meetup #12
 
Intro to Python (High School) Unit #3
Intro to Python (High School) Unit #3Intro to Python (High School) Unit #3
Intro to Python (High School) Unit #3
 
[신경망기초] 합성곱신경망
[신경망기초] 합성곱신경망[신경망기초] 합성곱신경망
[신경망기초] 합성곱신경망
 
Pytorch and Machine Learning for the Math Impaired
Pytorch and Machine Learning for the Math ImpairedPytorch and Machine Learning for the Math Impaired
Pytorch and Machine Learning for the Math Impaired
 
Simple Neural Network Python Code
Simple Neural Network Python CodeSimple Neural Network Python Code
Simple Neural Network Python Code
 
GoLightly - a customisable virtual machine written in Go
GoLightly - a customisable virtual machine written in GoGoLightly - a customisable virtual machine written in Go
GoLightly - a customisable virtual machine written in Go
 

Viewers also liked

Introduction to TensorFlow
Introduction to TensorFlowIntroduction to TensorFlow
Introduction to TensorFlowMatthias Feys
 
20150306 파이썬기초 IPython을이용한프로그래밍_이태영
20150306 파이썬기초 IPython을이용한프로그래밍_이태영20150306 파이썬기초 IPython을이용한프로그래밍_이태영
20150306 파이썬기초 IPython을이용한프로그래밍_이태영Tae Young Lee
 
High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017
High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017
High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017Chris Fregly
 
Startup JavaScript 10 - OpenAPI & RSS 활용
Startup JavaScript 10 - OpenAPI & RSS 활용Startup JavaScript 10 - OpenAPI & RSS 활용
Startup JavaScript 10 - OpenAPI & RSS 활용Circulus
 
Personal Interconnect AUdio - piAu manual
Personal Interconnect AUdio - piAu manualPersonal Interconnect AUdio - piAu manual
Personal Interconnect AUdio - piAu manualCirculus
 
숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)
숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)
숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)Kevin Kim
 
라즈베리파이와자바스크립트로만드는 IoT
라즈베리파이와자바스크립트로만드는 IoT라즈베리파이와자바스크립트로만드는 IoT
라즈베리파이와자바스크립트로만드는 IoTCirculus
 
123D Design - 정리함 만들기
123D Design - 정리함 만들기123D Design - 정리함 만들기
123D Design - 정리함 만들기Circulus
 
123D Design - 모델링 기초
123D Design - 모델링 기초123D Design - 모델링 기초
123D Design - 모델링 기초Circulus
 
How to deploy oVirt using Nested KVM environment?
How to deploy oVirt using Nested KVM environment?How to deploy oVirt using Nested KVM environment?
How to deploy oVirt using Nested KVM environment?Rogan Kyuseok Lee
 
Startup 123D Design - 10. 3D프린팅
Startup 123D Design - 10. 3D프린팅Startup 123D Design - 10. 3D프린팅
Startup 123D Design - 10. 3D프린팅Circulus
 
RHCE FINAL Questions and Answers
RHCE FINAL Questions and AnswersRHCE FINAL Questions and Answers
RHCE FINAL Questions and AnswersRadien software
 
파이썬 확률과 통계 기초 이해하기
파이썬 확률과 통계 기초 이해하기파이썬 확률과 통계 기초 이해하기
파이썬 확률과 통계 기초 이해하기Yong Joon Moon
 
Django, 저는 이렇게 씁니다.
Django, 저는 이렇게 씁니다.Django, 저는 이렇게 씁니다.
Django, 저는 이렇게 씁니다.Kyoung Up Jung
 
Introduction to Deep Learning with TensorFlow
Introduction to Deep Learning with TensorFlowIntroduction to Deep Learning with TensorFlow
Introduction to Deep Learning with TensorFlowTerry Taewoong Um
 
간단한 블로그를 만들며 Django 이해하기
간단한 블로그를 만들며 Django 이해하기간단한 블로그를 만들며 Django 이해하기
간단한 블로그를 만들며 Django 이해하기Kyoung Up Jung
 
텐서플로우 기초 이해하기
텐서플로우 기초 이해하기 텐서플로우 기초 이해하기
텐서플로우 기초 이해하기 Yong Joon Moon
 

Viewers also liked (20)

Introduction to TensorFlow
Introduction to TensorFlowIntroduction to TensorFlow
Introduction to TensorFlow
 
Intro to Python
Intro to PythonIntro to Python
Intro to Python
 
20150306 파이썬기초 IPython을이용한프로그래밍_이태영
20150306 파이썬기초 IPython을이용한프로그래밍_이태영20150306 파이썬기초 IPython을이용한프로그래밍_이태영
20150306 파이썬기초 IPython을이용한프로그래밍_이태영
 
High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017
High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017
High Performance Distributed TensorFlow with GPUs - NYC Workshop - July 9 2017
 
Splunk 교육자료 v1.2
Splunk 교육자료 v1.2Splunk 교육자료 v1.2
Splunk 교육자료 v1.2
 
Startup JavaScript 10 - OpenAPI & RSS 활용
Startup JavaScript 10 - OpenAPI & RSS 활용Startup JavaScript 10 - OpenAPI & RSS 활용
Startup JavaScript 10 - OpenAPI & RSS 활용
 
Personal Interconnect AUdio - piAu manual
Personal Interconnect AUdio - piAu manualPersonal Interconnect AUdio - piAu manual
Personal Interconnect AUdio - piAu manual
 
숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)
숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)
숭실대교육교재 - IoT 산업에서 오픈소스의 활용방안(김형채)
 
라즈베리파이와자바스크립트로만드는 IoT
라즈베리파이와자바스크립트로만드는 IoT라즈베리파이와자바스크립트로만드는 IoT
라즈베리파이와자바스크립트로만드는 IoT
 
123D Design - 정리함 만들기
123D Design - 정리함 만들기123D Design - 정리함 만들기
123D Design - 정리함 만들기
 
123D Design - 모델링 기초
123D Design - 모델링 기초123D Design - 모델링 기초
123D Design - 모델링 기초
 
Splunk6.3 소개서 2015_11
Splunk6.3 소개서 2015_11Splunk6.3 소개서 2015_11
Splunk6.3 소개서 2015_11
 
How to deploy oVirt using Nested KVM environment?
How to deploy oVirt using Nested KVM environment?How to deploy oVirt using Nested KVM environment?
How to deploy oVirt using Nested KVM environment?
 
Startup 123D Design - 10. 3D프린팅
Startup 123D Design - 10. 3D프린팅Startup 123D Design - 10. 3D프린팅
Startup 123D Design - 10. 3D프린팅
 
RHCE FINAL Questions and Answers
RHCE FINAL Questions and AnswersRHCE FINAL Questions and Answers
RHCE FINAL Questions and Answers
 
파이썬 확률과 통계 기초 이해하기
파이썬 확률과 통계 기초 이해하기파이썬 확률과 통계 기초 이해하기
파이썬 확률과 통계 기초 이해하기
 
Django, 저는 이렇게 씁니다.
Django, 저는 이렇게 씁니다.Django, 저는 이렇게 씁니다.
Django, 저는 이렇게 씁니다.
 
Introduction to Deep Learning with TensorFlow
Introduction to Deep Learning with TensorFlowIntroduction to Deep Learning with TensorFlow
Introduction to Deep Learning with TensorFlow
 
간단한 블로그를 만들며 Django 이해하기
간단한 블로그를 만들며 Django 이해하기간단한 블로그를 만들며 Django 이해하기
간단한 블로그를 만들며 Django 이해하기
 
텐서플로우 기초 이해하기
텐서플로우 기초 이해하기 텐서플로우 기초 이해하기
텐서플로우 기초 이해하기
 

Similar to TensorFlow Tutorial

Introduction to TensorFlow
Introduction to TensorFlowIntroduction to TensorFlow
Introduction to TensorFlowBabu Priyavrat
 
Working with tf.data (TF 2)
Working with tf.data (TF 2)Working with tf.data (TF 2)
Working with tf.data (TF 2)Oswald Campesato
 
What is TensorFlow and why do we use it
What is TensorFlow and why do we use itWhat is TensorFlow and why do we use it
What is TensorFlow and why do we use itRobert John
 
Can you fix the errors- It isn't working when I try to run import s.pdf
Can you fix the errors- It isn't working when I try to run    import s.pdfCan you fix the errors- It isn't working when I try to run    import s.pdf
Can you fix the errors- It isn't working when I try to run import s.pdfaksachdevahosymills
 
Baby Steps to Machine Learning at DevFest Lagos 2019
Baby Steps to Machine Learning at DevFest Lagos 2019Baby Steps to Machine Learning at DevFest Lagos 2019
Baby Steps to Machine Learning at DevFest Lagos 2019Robert John
 
Introduction to TensorFlow 2 and Keras
Introduction to TensorFlow 2 and KerasIntroduction to TensorFlow 2 and Keras
Introduction to TensorFlow 2 and KerasOswald Campesato
 
Implement the following sorting algorithms Bubble Sort Insertion S.pdf
Implement the following sorting algorithms  Bubble Sort  Insertion S.pdfImplement the following sorting algorithms  Bubble Sort  Insertion S.pdf
Implement the following sorting algorithms Bubble Sort Insertion S.pdfkesav24
 
From NumPy to PyTorch
From NumPy to PyTorchFrom NumPy to PyTorch
From NumPy to PyTorchMike Ruberry
 
Introduction to TensorFlow 2
Introduction to TensorFlow 2Introduction to TensorFlow 2
Introduction to TensorFlow 2Oswald Campesato
 
TensorFlow Dev Summit 2018 Extended: TensorFlow Eager Execution
TensorFlow Dev Summit 2018 Extended: TensorFlow Eager ExecutionTensorFlow Dev Summit 2018 Extended: TensorFlow Eager Execution
TensorFlow Dev Summit 2018 Extended: TensorFlow Eager ExecutionTaegyun Jeon
 
딥러닝 교육 자료 #2
딥러닝 교육 자료 #2딥러닝 교육 자료 #2
딥러닝 교육 자료 #2Ashal aka JOKER
 
TensorFlow for IITians
TensorFlow for IITiansTensorFlow for IITians
TensorFlow for IITiansAshish Bansal
 
From Tensorflow Graph to Tensorflow Eager
From Tensorflow Graph to Tensorflow EagerFrom Tensorflow Graph to Tensorflow Eager
From Tensorflow Graph to Tensorflow EagerGuy Hadash
 
Fourier project presentation
Fourier project  presentationFourier project  presentation
Fourier project presentation志璿 楊
 
GenServer in Action – Yurii Bodarev
GenServer in Action – Yurii Bodarev   GenServer in Action – Yurii Bodarev
GenServer in Action – Yurii Bodarev Elixir Club
 
goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdf
  goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdf  goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdf
goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdfANJALIENTERPRISES1
 

Similar to TensorFlow Tutorial (20)

Introduction to TensorFlow
Introduction to TensorFlowIntroduction to TensorFlow
Introduction to TensorFlow
 
Working with tf.data (TF 2)
Working with tf.data (TF 2)Working with tf.data (TF 2)
Working with tf.data (TF 2)
 
What is TensorFlow and why do we use it
What is TensorFlow and why do we use itWhat is TensorFlow and why do we use it
What is TensorFlow and why do we use it
 
Can you fix the errors- It isn't working when I try to run import s.pdf
Can you fix the errors- It isn't working when I try to run    import s.pdfCan you fix the errors- It isn't working when I try to run    import s.pdf
Can you fix the errors- It isn't working when I try to run import s.pdf
 
Baby Steps to Machine Learning at DevFest Lagos 2019
Baby Steps to Machine Learning at DevFest Lagos 2019Baby Steps to Machine Learning at DevFest Lagos 2019
Baby Steps to Machine Learning at DevFest Lagos 2019
 
Introduction to TensorFlow 2 and Keras
Introduction to TensorFlow 2 and KerasIntroduction to TensorFlow 2 and Keras
Introduction to TensorFlow 2 and Keras
 
Implement the following sorting algorithms Bubble Sort Insertion S.pdf
Implement the following sorting algorithms  Bubble Sort  Insertion S.pdfImplement the following sorting algorithms  Bubble Sort  Insertion S.pdf
Implement the following sorting algorithms Bubble Sort Insertion S.pdf
 
From NumPy to PyTorch
From NumPy to PyTorchFrom NumPy to PyTorch
From NumPy to PyTorch
 
Introduction to TensorFlow 2
Introduction to TensorFlow 2Introduction to TensorFlow 2
Introduction to TensorFlow 2
 
TensorFlow Dev Summit 2018 Extended: TensorFlow Eager Execution
TensorFlow Dev Summit 2018 Extended: TensorFlow Eager ExecutionTensorFlow Dev Summit 2018 Extended: TensorFlow Eager Execution
TensorFlow Dev Summit 2018 Extended: TensorFlow Eager Execution
 
TensorFlow Tutorial.pdf
TensorFlow Tutorial.pdfTensorFlow Tutorial.pdf
TensorFlow Tutorial.pdf
 
딥러닝 교육 자료 #2
딥러닝 교육 자료 #2딥러닝 교육 자료 #2
딥러닝 교육 자료 #2
 
Corona sdk
Corona sdkCorona sdk
Corona sdk
 
TensorFlow for IITians
TensorFlow for IITiansTensorFlow for IITians
TensorFlow for IITians
 
From Tensorflow Graph to Tensorflow Eager
From Tensorflow Graph to Tensorflow EagerFrom Tensorflow Graph to Tensorflow Eager
From Tensorflow Graph to Tensorflow Eager
 
Fourier project presentation
Fourier project  presentationFourier project  presentation
Fourier project presentation
 
Lab 1 izz
Lab 1 izzLab 1 izz
Lab 1 izz
 
GenServer in action
GenServer in actionGenServer in action
GenServer in action
 
GenServer in Action – Yurii Bodarev
GenServer in Action – Yurii Bodarev   GenServer in Action – Yurii Bodarev
GenServer in Action – Yurii Bodarev
 
goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdf
  goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdf  goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdf
goal_state = [1, 8, 7, 2, 0, 6, 3, 4, 5] #goal_state = [1, 0, 7, 2, .pdf
 

More from NamHyuk Ahn

Supporting Time-Sensitive Applications on a Commodity OS
Supporting Time-Sensitive Applications on a Commodity OSSupporting Time-Sensitive Applications on a Commodity OS
Supporting Time-Sensitive Applications on a Commodity OSNamHyuk Ahn
 
Generative Adversarial Network (+Laplacian Pyramid GAN)
Generative Adversarial Network (+Laplacian Pyramid GAN)Generative Adversarial Network (+Laplacian Pyramid GAN)
Generative Adversarial Network (+Laplacian Pyramid GAN)NamHyuk Ahn
 
Single Shot Multibox Detector
Single Shot Multibox DetectorSingle Shot Multibox Detector
Single Shot Multibox DetectorNamHyuk Ahn
 
Multimodal Residual Learning for Visual QA
Multimodal Residual Learning for Visual QAMultimodal Residual Learning for Visual QA
Multimodal Residual Learning for Visual QANamHyuk Ahn
 
Google's Multilingual Neural Machine Translation System
Google's Multilingual Neural Machine Translation SystemGoogle's Multilingual Neural Machine Translation System
Google's Multilingual Neural Machine Translation SystemNamHyuk Ahn
 
DeconvNet, DecoupledNet, TransferNet in Image Segmentation
DeconvNet, DecoupledNet, TransferNet in Image SegmentationDeconvNet, DecoupledNet, TransferNet in Image Segmentation
DeconvNet, DecoupledNet, TransferNet in Image SegmentationNamHyuk Ahn
 
Case Study of Convolutional Neural Network
Case Study of Convolutional Neural NetworkCase Study of Convolutional Neural Network
Case Study of Convolutional Neural NetworkNamHyuk Ahn
 

More from NamHyuk Ahn (7)

Supporting Time-Sensitive Applications on a Commodity OS
Supporting Time-Sensitive Applications on a Commodity OSSupporting Time-Sensitive Applications on a Commodity OS
Supporting Time-Sensitive Applications on a Commodity OS
 
Generative Adversarial Network (+Laplacian Pyramid GAN)
Generative Adversarial Network (+Laplacian Pyramid GAN)Generative Adversarial Network (+Laplacian Pyramid GAN)
Generative Adversarial Network (+Laplacian Pyramid GAN)
 
Single Shot Multibox Detector
Single Shot Multibox DetectorSingle Shot Multibox Detector
Single Shot Multibox Detector
 
Multimodal Residual Learning for Visual QA
Multimodal Residual Learning for Visual QAMultimodal Residual Learning for Visual QA
Multimodal Residual Learning for Visual QA
 
Google's Multilingual Neural Machine Translation System
Google's Multilingual Neural Machine Translation SystemGoogle's Multilingual Neural Machine Translation System
Google's Multilingual Neural Machine Translation System
 
DeconvNet, DecoupledNet, TransferNet in Image Segmentation
DeconvNet, DecoupledNet, TransferNet in Image SegmentationDeconvNet, DecoupledNet, TransferNet in Image Segmentation
DeconvNet, DecoupledNet, TransferNet in Image Segmentation
 
Case Study of Convolutional Neural Network
Case Study of Convolutional Neural NetworkCase Study of Convolutional Neural Network
Case Study of Convolutional Neural Network
 

Recently uploaded

IVE Industry Focused Event - Defence Sector 2024
IVE Industry Focused Event - Defence Sector 2024IVE Industry Focused Event - Defence Sector 2024
IVE Industry Focused Event - Defence Sector 2024Mark Billinghurst
 
Oxy acetylene welding presentation note.
Oxy acetylene welding presentation note.Oxy acetylene welding presentation note.
Oxy acetylene welding presentation note.eptoze12
 
8251 universal synchronous asynchronous receiver transmitter
8251 universal synchronous asynchronous receiver transmitter8251 universal synchronous asynchronous receiver transmitter
8251 universal synchronous asynchronous receiver transmitterShivangiSharma879191
 
CCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdf
CCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdfCCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdf
CCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdfAsst.prof M.Gokilavani
 
CCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdf
CCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdfCCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdf
CCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdfAsst.prof M.Gokilavani
 
Heart Disease Prediction using machine learning.pptx
Heart Disease Prediction using machine learning.pptxHeart Disease Prediction using machine learning.pptx
Heart Disease Prediction using machine learning.pptxPoojaBan
 
Comparative Analysis of Text Summarization Techniques
Comparative Analysis of Text Summarization TechniquesComparative Analysis of Text Summarization Techniques
Comparative Analysis of Text Summarization Techniquesugginaramesh
 
Concrete Mix Design - IS 10262-2019 - .pptx
Concrete Mix Design - IS 10262-2019 - .pptxConcrete Mix Design - IS 10262-2019 - .pptx
Concrete Mix Design - IS 10262-2019 - .pptxKartikeyaDwivedi3
 
Introduction-To-Agricultural-Surveillance-Rover.pptx
Introduction-To-Agricultural-Surveillance-Rover.pptxIntroduction-To-Agricultural-Surveillance-Rover.pptx
Introduction-To-Agricultural-Surveillance-Rover.pptxk795866
 
Architect Hassan Khalil Portfolio for 2024
Architect Hassan Khalil Portfolio for 2024Architect Hassan Khalil Portfolio for 2024
Architect Hassan Khalil Portfolio for 2024hassan khalil
 
Application of Residue Theorem to evaluate real integrations.pptx
Application of Residue Theorem to evaluate real integrations.pptxApplication of Residue Theorem to evaluate real integrations.pptx
Application of Residue Theorem to evaluate real integrations.pptx959SahilShah
 
Past, Present and Future of Generative AI
Past, Present and Future of Generative AIPast, Present and Future of Generative AI
Past, Present and Future of Generative AIabhishek36461
 
Correctly Loading Incremental Data at Scale
Correctly Loading Incremental Data at ScaleCorrectly Loading Incremental Data at Scale
Correctly Loading Incremental Data at ScaleAlluxio, Inc.
 
Electronically Controlled suspensions system .pdf
Electronically Controlled suspensions system .pdfElectronically Controlled suspensions system .pdf
Electronically Controlled suspensions system .pdfme23b1001
 
Instrumentation, measurement and control of bio process parameters ( Temperat...
Instrumentation, measurement and control of bio process parameters ( Temperat...Instrumentation, measurement and control of bio process parameters ( Temperat...
Instrumentation, measurement and control of bio process parameters ( Temperat...121011101441
 
Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...
Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...
Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...srsj9000
 
Risk Assessment For Installation of Drainage Pipes.pdf
Risk Assessment For Installation of Drainage Pipes.pdfRisk Assessment For Installation of Drainage Pipes.pdf
Risk Assessment For Installation of Drainage Pipes.pdfROCENODodongVILLACER
 

Recently uploaded (20)

young call girls in Rajiv Chowk🔝 9953056974 🔝 Delhi escort Service
young call girls in Rajiv Chowk🔝 9953056974 🔝 Delhi escort Serviceyoung call girls in Rajiv Chowk🔝 9953056974 🔝 Delhi escort Service
young call girls in Rajiv Chowk🔝 9953056974 🔝 Delhi escort Service
 
IVE Industry Focused Event - Defence Sector 2024
IVE Industry Focused Event - Defence Sector 2024IVE Industry Focused Event - Defence Sector 2024
IVE Industry Focused Event - Defence Sector 2024
 
Oxy acetylene welding presentation note.
Oxy acetylene welding presentation note.Oxy acetylene welding presentation note.
Oxy acetylene welding presentation note.
 
8251 universal synchronous asynchronous receiver transmitter
8251 universal synchronous asynchronous receiver transmitter8251 universal synchronous asynchronous receiver transmitter
8251 universal synchronous asynchronous receiver transmitter
 
CCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdf
CCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdfCCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdf
CCS355 Neural Network & Deep Learning UNIT III notes and Question bank .pdf
 
CCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdf
CCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdfCCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdf
CCS355 Neural Network & Deep Learning Unit II Notes with Question bank .pdf
 
Heart Disease Prediction using machine learning.pptx
Heart Disease Prediction using machine learning.pptxHeart Disease Prediction using machine learning.pptx
Heart Disease Prediction using machine learning.pptx
 
Comparative Analysis of Text Summarization Techniques
Comparative Analysis of Text Summarization TechniquesComparative Analysis of Text Summarization Techniques
Comparative Analysis of Text Summarization Techniques
 
Concrete Mix Design - IS 10262-2019 - .pptx
Concrete Mix Design - IS 10262-2019 - .pptxConcrete Mix Design - IS 10262-2019 - .pptx
Concrete Mix Design - IS 10262-2019 - .pptx
 
Introduction-To-Agricultural-Surveillance-Rover.pptx
Introduction-To-Agricultural-Surveillance-Rover.pptxIntroduction-To-Agricultural-Surveillance-Rover.pptx
Introduction-To-Agricultural-Surveillance-Rover.pptx
 
young call girls in Green Park🔝 9953056974 🔝 escort Service
young call girls in Green Park🔝 9953056974 🔝 escort Serviceyoung call girls in Green Park🔝 9953056974 🔝 escort Service
young call girls in Green Park🔝 9953056974 🔝 escort Service
 
Architect Hassan Khalil Portfolio for 2024
Architect Hassan Khalil Portfolio for 2024Architect Hassan Khalil Portfolio for 2024
Architect Hassan Khalil Portfolio for 2024
 
Application of Residue Theorem to evaluate real integrations.pptx
Application of Residue Theorem to evaluate real integrations.pptxApplication of Residue Theorem to evaluate real integrations.pptx
Application of Residue Theorem to evaluate real integrations.pptx
 
Past, Present and Future of Generative AI
Past, Present and Future of Generative AIPast, Present and Future of Generative AI
Past, Present and Future of Generative AI
 
Correctly Loading Incremental Data at Scale
Correctly Loading Incremental Data at ScaleCorrectly Loading Incremental Data at Scale
Correctly Loading Incremental Data at Scale
 
Exploring_Network_Security_with_JA3_by_Rakesh Seal.pptx
Exploring_Network_Security_with_JA3_by_Rakesh Seal.pptxExploring_Network_Security_with_JA3_by_Rakesh Seal.pptx
Exploring_Network_Security_with_JA3_by_Rakesh Seal.pptx
 
Electronically Controlled suspensions system .pdf
Electronically Controlled suspensions system .pdfElectronically Controlled suspensions system .pdf
Electronically Controlled suspensions system .pdf
 
Instrumentation, measurement and control of bio process parameters ( Temperat...
Instrumentation, measurement and control of bio process parameters ( Temperat...Instrumentation, measurement and control of bio process parameters ( Temperat...
Instrumentation, measurement and control of bio process parameters ( Temperat...
 
Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...
Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...
Gfe Mayur Vihar Call Girls Service WhatsApp -> 9999965857 Available 24x7 ^ De...
 
Risk Assessment For Installation of Drainage Pipes.pdf
Risk Assessment For Installation of Drainage Pipes.pdfRisk Assessment For Installation of Drainage Pipes.pdf
Risk Assessment For Installation of Drainage Pipes.pdf
 

TensorFlow Tutorial

  • 1.
  • 2.
  • 3.
  • 5. >>> import numpy as np >>> a = np.zeros((2, 2)); b = np.ones((2, 2)) >>> np.sum(b, axis=1) array([ 2., 2.]) >>> a.shape (2, 2) >>> np.reshape(a, (1, 4)) array([[ 0., 0., 0., 0.]])
  • 6. >>> import tensorflow as tf >>> sess = tf.Session() >>> a = tf.zeros((2, 2)); b = tf.ones((2, 2)) >>> sess.run(tf.reduce_sum(b, axis=1)) [ 2., 2.] >>> a.get_shape() (2, 2) >>> sess.run(tf.reshape(a, (1, 4))) [[ 0., 0., 0., 0.]]
  • 7. sess.run() >>> sess = tf.Session() >>> a = np.zeros((2, 2)); ta = tf.zeros((2, 2)) >>> print(a) [[ 0. 0.] [ 0. 0.]] >>> print(ta) Tensor("zeros:0", shape=(2, 2), dtype=float32) >>> print(sess.run(ta)) [[ 0. 0.] [ 0. 0.]]
  • 8. sess.run() >>> sess = tf.Session() >>> a = np.zeros((2, 2)); ta = tf.zeros((2, 2)) >>> print(a) [[ 0. 0.] [ 0. 0.]] >>> print(ta) Tensor("zeros:0", shape=(2, 2), dtype=float32) >>> print(sess.run(ta)) [[ 0. 0.] [ 0. 0.]] 
 

  • 9. Building a graph and evaluating a node; all execution goes through a tf.Session:
>>> a = tf.constant(5.0)
>>> b = tf.constant(6.0)
>>> c = a * b
>>> sess = tf.Session()
>>> print(sess.run(c))
30.0
  • 12. Two equivalent ways to run the tf.Session; inside a with block, c.eval() is shorthand for sess.run(c):
>>> sess = tf.Session()
>>> print(sess.run(c))
>>> with tf.Session() as sess:
>>> print(sess.run(c))
>>> print(c.eval())
  • 14. A tf.Variable must be initialized before it can be read; running this as-is raises FailedPreconditionError:
>>> w = tf.Variable(tf.zeros((2, 2)), name="weight")
>>> with tf.Session() as sess:
>>> print(sess.run(w))
  • 16. Running tf.global_variables_initializer() initializes the tf.Variable from its initializer (here, tf.random_normal):
>>> w = tf.Variable(tf.random_normal([5, 2], stddev=0.1),
name="weight")
>>> with tf.Session() as sess:
>>> sess.run(tf.global_variables_initializer())
>>> print(sess.run(w))
[[-0.10020355 -0.01114563]
[ 0.04050281 -0.15980773]
[-0.00628474 -0.02608337]
[ 0.16397022 0.02898547]
[ 0.04264377 0.04281621]]
  • 19.
>>> state = tf.Variable(0, name="counter")
>>> new_value = tf.add(state, tf.constant(1))
>>> update = tf.assign(state, new_value)
>>> with tf.Session() as sess:
>>> sess.run(tf.global_variables_initializer())
>>> print(sess.run(state))
>>> for _ in range(3):
>>> sess.run(update)
>>> print(sess.run(state))
0
1
2
3
  • 20. sess.run() accepts a list of fetches, so several nodes can be evaluated in one call: sess.run([var1, var2, ...]) instead of repeated sess.run(var). (tf.mul was renamed tf.multiply in TensorFlow 1.0.)
>>> x1 = tf.constant(1)
>>> x2 = tf.constant(2)
>>> x3 = tf.constant(3)
>>> temp = tf.add(x2, x3)
>>> mul = tf.multiply(x1, temp)
>>> with tf.Session() as sess:
>>> result1, result2 = sess.run([mul, temp])
>>> print(result1, result2)
5 5
  • 23. tf.placeholder declares an input node whose value is supplied at run time through feed_dict:
>>> a = tf.placeholder(tf.int16)
>>> b = tf.placeholder(tf.int16)
>>> add = tf.add(a, b)
>>> mul = tf.multiply(a, b)
>>> with tf.Session() as sess:
>>> print(sess.run(add, feed_dict={a: 2, b: 3}))
>>> print(sess.run(mul, feed_dict={a: 2, b: 3}))
5
6
  • 26.
# using tf.constant
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    result = sess.run(product)
    print(result)

# using placeholder
import numpy as np
matrix1 = tf.placeholder(tf.float32, [1, 2])
matrix2 = tf.placeholder(tf.float32, [2, 1])
product = tf.matmul(matrix1, matrix2)
with tf.Session() as sess:
    mv1 = np.array([[3., 3.]])
    mv2 = np.array([[2.], [2.]])
    result = sess.run(product, feed_dict={matrix1: mv1, matrix2: mv2})
    print(result)
  • 27.
import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

learning_rate = 0.001
max_steps = 15000
batch_size = 128

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
  • 28.
def MLP(inputs):
    W_1 = tf.Variable(tf.random_normal([784, 256]))
    b_1 = tf.Variable(tf.zeros([256]))
    W_2 = tf.Variable(tf.random_normal([256, 256]))
    b_2 = tf.Variable(tf.zeros([256]))
    W_out = tf.Variable(tf.random_normal([256, 10]))
    b_out = tf.Variable(tf.zeros([10]))
    h_1 = tf.add(tf.matmul(inputs, W_1), b_1)
    h_1 = tf.nn.relu(h_1)
    h_2 = tf.add(tf.matmul(h_1, W_2), b_2)
    h_2 = tf.nn.relu(h_2)
    out = tf.add(tf.matmul(h_2, W_out), b_out)
    return out

net = MLP(x)

# define loss and optimizer (keyword arguments are required from TF 1.0 on)
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=y))
opt = tf.train.AdamOptimizer(learning_rate).minimize(loss_op)
  • 29.
# initializing the variables
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)

# train model
for step in range(max_steps):
    batch_X, batch_y = mnist.train.next_batch(batch_size)
    _, loss = sess.run([opt, loss_op], feed_dict={x: batch_X, y: batch_y})
    if (step+1) % 1000 == 0:
        print("[{}/{}] loss:{:.3f}".format(step+1, max_steps, loss))
print("Optimization Finished!")

# test model
correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
# calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Train accuracy: {:.3f}".format(sess.run(accuracy,
    feed_dict={x: mnist.train.images, y: mnist.train.labels})))
print("Test accuracy: {:.3f}".format(sess.run(accuracy,
    feed_dict={x: mnist.test.images, y: mnist.test.labels})))
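Feeding the entire training set through sess.run in one call, as above, can exhaust memory on larger models. A minimal sketch of batched evaluation instead, assuming the accuracy op and placeholders defined above; batched_accuracy and the batch size of 1000 are illustrative choices (averaging per-batch accuracies is exact only when the batch size divides the set size):

import numpy as np

def batched_accuracy(sess, accuracy, x, y, images, labels, batch=1000):
    # evaluate accuracy over mini-batches and average the results
    accs = []
    for i in range(0, len(images), batch):
        accs.append(sess.run(accuracy,
                             feed_dict={x: images[i:i+batch],
                                        y: labels[i:i+batch]}))
    return np.mean(accs)

print("Test accuracy: {:.3f}".format(
    batched_accuracy(sess, accuracy, x, y,
                     mnist.test.images, mnist.test.labels)))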
  • 34. tf.variable_scope() prefixes the names of variables created inside it:
var1 = tf.Variable([1], name="var")
with tf.variable_scope("foo"):
    with tf.variable_scope("bar"):
        var2 = tf.Variable([1], name="var")
        var3 = tf.Variable([1], name="var")
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var_1:0
  • 35. Unlike tf.get_variable(), tf.Variable ignores reuse_variables(): var3 still becomes a new variable (foo/bar/var_1:0) instead of reusing foo/bar/var:0:
var1 = tf.Variable([1], name="var")
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var2 = tf.Variable([1], name="var")
        scp.reuse_variables()  # allow reuse of variables
        var3 = tf.Variable([1], name="var")
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var_1:0
  • 37. With tf.get_variable(), reuse_variables() makes the second lookup return the same variable:
var1 = tf.get_variable("var", [1])
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var2 = tf.get_variable("var", [1])
        scp.reuse_variables()  # allow reuse of variables
        var3 = tf.get_variable("var", [1])
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: var:0
var2: foo/bar/var:0
var3: foo/bar/var:0
  • 40. Reuse can also be requested when re-entering a scope with reuse=True:
with tf.variable_scope("foo"):
    with tf.variable_scope("bar") as scp:
        var1 = tf.get_variable("var", [1])
        scp.reuse_variables()
        var2 = tf.get_variable("var", [1])
    with tf.variable_scope("bar", reuse=True):
        var3 = tf.get_variable("var", [1])
print("var1: {}".format(var1.name))
print("var2: {}".format(var2.name))
print("var3: {}".format(var3.name))

var1: foo/bar/var:0
var2: foo/bar/var:0
var3: foo/bar/var:0
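The practical payoff of reuse is weight sharing: the same builder function can be applied to two inputs while creating its variables only once. A minimal sketch following the pattern above; shared_fc and the placeholder shapes are illustrative:

def shared_fc(x):
    # first call creates shared/W; second call (with reuse set) retrieves it
    W = tf.get_variable("W", [784, 10])
    return tf.matmul(x, W)

x1 = tf.placeholder(tf.float32, [None, 784])
x2 = tf.placeholder(tf.float32, [None, 784])
with tf.variable_scope("shared") as scp:
    out1 = shared_fc(x1)
    scp.reuse_variables()   # without this, the second call would raise an error
    out2 = shared_fc(x2)    # out1 and out2 now use the same weights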
  • 43. See the shape sketch below this list.
• tf.nn.conv2d(input, filter, strides, padding, name=None)
  1. input: 4-D input tensor, shaped [batch, height, width, channels]
  2. filter: 4-D filter tensor, shaped [filter_height, filter_width, in_channels, out_channels]
  3. strides: length-4 stride list, one entry per input dimension (usually [1, s, s, 1])
  4. padding: "SAME" (zero-pad so output size is ceil(input/stride)) or "VALID" (no padding)
• tf.nn.##_pool(value, ksize, strides, padding, name=None), where ## is max or avg
  1. value: 4-D input tensor, shaped [batch, height, width, channels]
  2. ksize: length-4 pooling window size, one entry per input dimension
  3. strides: length-4 stride list, as for conv2d
  4. padding: "SAME" or "VALID"
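A toy example of the shape contract above (all numbers arbitrary): a batch of 8 28x28 RGB images convolved with 16 5x5 filters at stride 2 under "SAME" padding gives spatial size ceil(28/2) = 14, and a 2x2 max pool halves it again:

import tensorflow as tf

images = tf.placeholder(tf.float32, [8, 28, 28, 3])   # [batch, H, W, in_ch]
filters = tf.get_variable("f", [5, 5, 3, 16])         # [kH, kW, in_ch, out_ch]
out = tf.nn.conv2d(images, filters, strides=[1, 2, 2, 1], padding="SAME")
print(out.get_shape())     # (8, 14, 14, 16)
pooled = tf.nn.max_pool(out, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding="SAME")
print(pooled.get_shape())  # (8, 7, 7, 16)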
  • 44.
from tensorflow.contrib.layers import variance_scaling_initializer
he_init = variance_scaling_initializer()

def conv(bottom, num_filter, ksize=3, stride=1, padding="SAME", scope=None):
    bottom_shape = bottom.get_shape().as_list()[3]  # input channel count
    with tf.variable_scope(scope or "conv"):
        W = tf.get_variable("W", [ksize, ksize, bottom_shape, num_filter],
                            initializer=he_init)
        b = tf.get_variable("b", [num_filter],
                            initializer=tf.constant_initializer(0))
        x = tf.nn.conv2d(bottom, W, strides=[1, stride, stride, 1],
                         padding=padding)
        x = tf.nn.relu(tf.nn.bias_add(x, b))
    return x
  • 47.
def maxpool(bottom, ksize=2, stride=2, padding="SAME", scope=None):
    with tf.variable_scope(scope or "maxpool"):
        pool = tf.nn.max_pool(bottom, ksize=[1, ksize, ksize, 1],
                              strides=[1, stride, stride, 1],
                              padding=padding)
    return pool
  • 48.
from functools import reduce  # reduce is not a builtin in Python 3

def fc(bottom, num_dims, scope=None):
    bottom_shape = bottom.get_shape().as_list()
    if len(bottom_shape) > 2:
        # flatten [batch, h, w, c] into [batch, h*w*c]
        bottom = tf.reshape(bottom,
                            [-1, reduce(lambda x, y: x*y, bottom_shape[1:])])
        bottom_shape = bottom.get_shape().as_list()
    with tf.variable_scope(scope or "fc"):
        W = tf.get_variable("W", [bottom_shape[1], num_dims],
                            initializer=he_init)
        b = tf.get_variable("b", [num_dims],
                            initializer=tf.constant_initializer(0))
        out = tf.nn.bias_add(tf.matmul(bottom, W), b)
    return out

def fc_relu(bottom, num_dims, scope=None):
    with tf.variable_scope(scope or "fc"):
        out = fc(bottom, num_dims, scope="fc")
        relu = tf.nn.relu(out)
    return relu
  • 50.
keep_prob = tf.placeholder(tf.float32, None)

def conv_net(x, keep_prob):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    conv1 = conv(x, 32, 5, scope="conv_1")
    conv1 = maxpool(conv1, scope="maxpool_1")
    conv2 = conv(conv1, 64, 5, scope="conv_2")
    conv2 = maxpool(conv2, scope="maxpool_2")
    fc1 = fc_relu(conv2, 1024, scope="fc_1")
    fc1 = tf.nn.dropout(fc1, keep_prob)
    out = fc(fc1, 10, scope="out")
    return out
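Wiring conv_net up mirrors the earlier MLP example; the only new element is feeding keep_prob, so dropout is on during training and off at test time. A sketch assuming the x, y, and learning_rate definitions from slide 27:

net = conv_net(x, keep_prob)
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=y))
opt = tf.train.AdamOptimizer(learning_rate).minimize(loss_op)

# train: keep 50% of activations; test: keep everything
sess.run([opt, loss_op],
         feed_dict={x: batch_X, y: batch_y, keep_prob: 0.5})
sess.run(accuracy,
         feed_dict={x: mnist.test.images, y: mnist.test.labels,
                    keep_prob: 1.0})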
  • 57. The same conv layer written with raw TensorFlow versus TF-Slim:
input = ...
with tf.name_scope('conv1_1') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128],
                                             dtype=tf.float32, stddev=1e-1),
                         name='weights')
    conv = tf.nn.conv2d(input, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope)

input = ...
net = slim.conv2d(input, 128, [3, 3], padding='SAME', scope='conv1_1')
  • 59.
# 1. simple network generation with slim
net = ...
net = slim.conv2d(net, 256, [3, 3], scope='conv3_1')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')

# 1. cleaner by repeat operation:
net = ...
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')

# 2. Verbose way:
x = slim.fully_connected(x, 32, scope='fc/fc_1')
x = slim.fully_connected(x, 64, scope='fc/fc_2')
x = slim.fully_connected(x, 128, scope='fc/fc_3')

# 2. Equivalent, TF-Slim way using slim.stack:
slim.stack(x, slim.fully_connected, [32, 64, 128], scope='fc')
  • 60. Common initializers and regularizers:
• tf.truncated_normal_initializer
• slim.xavier_initializer
• slim.variance_scaling_initializer
• slim.l1_regularizer
• slim.l2_regularizer
• …

net = slim.conv2d(inputs, 64, [11, 11], 4, padding='SAME',
                  weights_initializer=slim.xavier_initializer(),
                  weights_regularizer=slim.l2_regularizer(0.0005),
                  scope='conv1')
  • 61. slim.arg_scope sets shared defaults for a list of ops; individual calls can still override them (conv2 overrides weights_initializer, and the fully_connected layer overrides activation_fn):
he_init = slim.variance_scaling_initializer()
xavier_init = slim.xavier_initializer()
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    activation_fn=tf.nn.relu,
                    weights_initializer=he_init,
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME'):
        net = slim.conv2d(inputs, 64, [11, 11], 4, scope='conv1')
        net = slim.conv2d(net, 256, [5, 5],
                          weights_initializer=xavier_init, scope='conv2')
        net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc')
  • 64.
# Define the loss functions and get the total loss.
loss1 = slim.losses.softmax_cross_entropy(pred1, label1)
loss2 = slim.losses.mean_squared_error(pred2, label2)

# The following two lines have the same effect:
total_loss = loss1 + loss2
total_loss = slim.losses.get_total_loss(add_regularization_losses=False)

# If you want to add regularization loss
reg_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss = loss1 + loss2 + reg_loss
# or
total_loss = slim.losses.get_total_loss()
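TF-Slim can also bundle the optimizer step. A sketch under the assumption that slim.learning.create_train_op is available in your contrib.slim version (worth verifying there); it returns a single op that computes the total loss and applies the gradients:

total_loss = slim.losses.get_total_loss()
optimizer = tf.train.AdamOptimizer(0.001)
# one op: evaluates total_loss, applies gradients, returns the loss value
train_op = slim.learning.create_train_op(total_loss, optimizer)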
  • 70. A save helper built on tf.train.Saver; checkpoint files are written under dirname and suffixed with global_step:
def save(self, ckpt_dir, global_step=None):
    if self.config.get("saver") is None:
        self.config["saver"] = tf.train.Saver(max_to_keep=30)
    saver = self.config["saver"]
    sess = self.config["sess"]
    dirname = os.path.join(ckpt_dir, self.name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    saver.save(sess, dirname, global_step)
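Typical usage inside a training loop, assuming a model object exposing the save method above (the model name and checkpoint directory here are hypothetical):

for step in range(max_steps):
    # ... run one training step ...
    if (step + 1) % 1000 == 0:
        # writes checkpoint files like /tmp/checkpoints/<model.name>-1000
        model.save("/tmp/checkpoints", global_step=step + 1)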
  • 73. Loading the latest checkpoint; variables matched by exclude are skipped, and ignore_missing_vars=True silently skips variables absent from the checkpoint (with False, a missing variable raises an error):
def load_latest_checkpoint(self, ckpt_dir, exclude=None):
    path = tf.train.latest_checkpoint(ckpt_dir)
    if path is None:
        raise AssertionError("No ckpt exists in {0}.".format(ckpt_dir))
    print("Load {} save file".format(path))
    self._load(path, exclude)

def load_from_path(self, ckpt_path, exclude=None):
    self._load(ckpt_path, exclude)

def _load(self, ckpt_path, exclude):
    init_fn = slim.assign_from_checkpoint_fn(
        ckpt_path,
        slim.get_variables_to_restore(exclude=exclude),
        ignore_missing_vars=True)
    init_fn(self.config["sess"])
  • 78. Fine-tuning a pretrained VGG-16: the final layer fc8 is excluded when restoring weights (and layers you want frozen can be built with trainable=False):
X = tf.placeholder(tf.float32, [None, 224, 224, 3], name="X")
y = tf.placeholder(tf.int32, [None, 8], name="y")
is_training = tf.placeholder(tf.bool, name="is_training")

with slim.arg_scope(vgg.vgg_arg_scope()):
    net, end_pts = vgg.vgg_16(X, is_training=is_training,
                              num_classes=1000)

with tf.variable_scope("losses"):
    cls_loss = slim.losses.softmax_cross_entropy(net, y)
    reg_loss = tf.add_n(slim.losses.get_regularization_losses())
    loss_op = cls_loss + reg_loss

with tf.variable_scope("opt"):
    opt = tf.train.AdamOptimizer(0.001).minimize(loss_op)

self.load_from_path(ckpt_path=VGG_PATH, exclude=["vgg_16/fc8"])
...
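Besides excluding fc8 from the restore, fine-tuning often freezes the early layers. One way is to pass only the variables you want updated to minimize via var_list; a sketch assuming the vgg_16 variable naming used above (vgg_16/fc6, vgg_16/fc7, vgg_16/fc8):

trainable = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# update only the fully connected layers; conv weights stay frozen
to_train = [v for v in trainable if v.name.startswith("vgg_16/fc")]
opt = tf.train.AdamOptimizer(0.001).minimize(loss_op, var_list=to_train)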
  • 82.
import tensorflow as tf
slim = tf.contrib.slim

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

max_steps = 10000
batch_size = 128
lr = 0.001
keep_prob = 0.5
weight_decay = 0.0004
logs_path = "/tmp/tensorflow_logs/example"

def my_arg_scope(is_training, weight_decay):
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=slim.variance_scaling_initializer(),
                        biases_initializer=tf.zeros_initializer(),
                        stride=1, padding="SAME"):
        with slim.arg_scope([slim.dropout], is_training=is_training) as arg_sc:
            return arg_sc
  • 83.
def my_net(x, keep_prob, outputs_collections="my_net"):
    x = tf.reshape(x, shape=[-1, 28, 28, 1])
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=outputs_collections):
        net = slim.conv2d(x, 64, [3, 3], scope="conv1")
        net = slim.max_pool2d(net, [2, 2], scope="pool1")
        net = slim.conv2d(net, 128, [3, 3], scope="conv2")
        net = slim.max_pool2d(net, [2, 2], scope="pool2")
        net = slim.conv2d(net, 256, [3, 3], scope="conv3")
        # global average pooling
        net = tf.reduce_mean(net, [1, 2], name="pool3", keep_dims=True)
        net = slim.dropout(net, keep_prob, scope="dropout3")
        net = slim.conv2d(net, 1024, [1, 1], scope="fc4")
        net = slim.dropout(net, keep_prob, scope="dropout4")
        net = slim.conv2d(net, 10, [1, 1], activation_fn=None, scope="fc5")
        end_points = slim.utils.convert_collection_to_dict(outputs_collections)
    return tf.reshape(net, [-1, 10]), end_points
  • 85.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)

with slim.arg_scope(my_arg_scope(is_training, weight_decay)):
    net, end_pts = my_net(x, keep_prob)
    pred = slim.softmax(net, scope="prediction")

with tf.variable_scope("losses"):
    cls_loss = slim.losses.softmax_cross_entropy(net, y)
    reg_loss = tf.add_n(slim.losses.get_regularization_losses())
    loss_op = cls_loss + reg_loss

with tf.variable_scope("Adam"):
    opt = tf.train.AdamOptimizer(lr)
    # Op to calculate every variable gradient
    grads = tf.gradients(loss_op, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    # Op to update all variables according to their gradient
    apply_grads = opt.apply_gradients(grads_and_vars=grads)

with tf.variable_scope("accuracy"):
    correct_op = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
    acc_op = tf.reduce_mean(tf.cast(correct_op, tf.float32))
  • 87.
# Create a summary to monitor loss and accuracy
summ_loss = tf.summary.scalar("loss", loss_op)
summ_acc = tf.summary.scalar("accuracy_test", acc_op)

# Create summaries to visualize weights and grads
for var in tf.trainable_variables():
    tf.summary.histogram(var.name, var, collections=["my_summ"])
for grad, var in grads:
    tf.summary.histogram(var.name + "/gradient", grad,
                         collections=["my_summ"])
summ_wg = tf.summary.merge_all(key="my_summ")

sess = tf.Session()
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter(logs_path, graph=sess.graph)
  • 88.
for step in range(max_steps):
    batch_X, batch_y = mnist.train.next_batch(batch_size)
    _, loss, plot_loss, plot_wg = sess.run(
        [apply_grads, loss_op, summ_loss, summ_wg],
        feed_dict={x: batch_X, y: batch_y, is_training: True})
    summary_writer.add_summary(plot_loss, step)
    summary_writer.add_summary(plot_wg, step)
    if (step+1) % 100 == 0:
        plot_acc = sess.run(summ_acc,
                            feed_dict={x: mnist.test.images,
                                       y: mnist.test.labels,
                                       is_training: False})
        summary_writer.add_summary(plot_acc, step)
print("Optimization Finished!")

test_acc = sess.run(acc_op, feed_dict={x: mnist.test.images,
                                       y: mnist.test.labels,
                                       is_training: False})
print("Test accuracy: {:.3f}".format(test_acc))
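With the summaries written to logs_path, TensorBoard reads them from the shell:

$ tensorboard --logdir=/tmp/tensorflow_logs/example
# then open http://localhost:6006 in a browser to view the loss curve,
# accuracy, and the weight/gradient histograms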