
Neural Networks - TensorFlow

1. Installing TensorFlow

1. Installation

Version 1.12.0 is recommended:

pip install tensorflow==1.12.0

2. TensorFlow/NumPy version incompatibility

NumPy 1.18.0 is incompatible with TensorFlow 1.12.0; install NumPy 1.14.0 instead:

pip install numpy==1.14.0

3. Suppressing warnings

TensorFlow prints warning messages at runtime. Add the following lines to a program to suppress them (level '2' hides INFO and WARNING messages; for full effect, set the variable before importing tensorflow):

import os
# Suppress INFO and WARNING messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

2. Basics

1. A simple program: a + b

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

if __name__ == "__main__":
    a = tf.constant(10)
    b = tf.constant(20)
    c = a + b
    # Prints the Tensor object, not 30; evaluation happens inside a Session
    print(c)
    with tf.Session() as sess:
        value = sess.run(c)
        print("value:", value)

2. Inspecting the default graph

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

if __name__ == "__main__":
    # All operations and the session attach to the same default graph
    print(tf.get_default_graph())
    a = tf.constant(10)
    print(a.graph)
    b = tf.constant(20)
    print(b.graph)
    c = a + b
    print(c.graph)

    with tf.Session() as sess:
        c_value = sess.run(c)
        print(sess.graph)
        print(c_value)

3. Custom graphs

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def graph_demo():
    # Default graph
    a = tf.constant(10)
    b = tf.constant(20)
    c = a + b

    with tf.Session() as sess:
        c_value = sess.run(c)
        print('Default graph:', sess.graph)
        print(c_value)
    # Custom graph
    new_g = tf.Graph()
    with new_g.as_default():
        a_new = tf.constant(100)
        b_new = tf.constant(200)
        c_new = a_new + b_new
    # A session can only run operations from the graph it is bound to
    with tf.Session(graph=new_g) as sess:
        c_new_value = sess.run(c_new)
        print('Custom graph:', sess.graph)
        print(c_new_value)


if __name__ == "__main__":
    graph_demo()

4. Visualizing the graph

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def event_demo():
    a = tf.constant(10)
    b = tf.constant(20)
    c = a + b
    print(c)
    with tf.Session() as sess:
        c_value = sess.run(c)
        print(c_value)
        print(sess.graph)
        # Serialize the graph into an event file for TensorBoard
        writer = tf.summary.FileWriter(
            cur_path + '06图的可视化', graph=tf.get_default_graph())
        writer.close()


if __name__ == "__main__":
    event_demo()

Running the program generates an event file under the ./神经网络/TensorFlow教程/06图的可视化/ directory. Then run the following command in a terminal (TensorBoard serves at http://localhost:6006 by default):

tensorboard --logdir='./神经网络/TensorFlow教程/06图的可视化/'

5. Sessions

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def sess_demo():
    a = tf.constant(10)
    b = tf.constant(20)
    c = a + b
    print(c)
    # allow_soft_placement: fall back to another device when an op has
    # no kernel for the requested one
    # log_device_placement: log which device each op is assigned to
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=True)
    with tf.Session(config=config) as sess:
        # run() accepts a list of fetches and returns their values
        values = sess.run([a, b, c])
        print(values)
        # eval() evaluates a tensor in the default session
        c_value = c.eval()
        print(c_value)


if __name__ == "__main__":
    sess_demo()

6. Using feed_dict

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

if __name__ == "__main__":
    a = tf.placeholder(tf.float32)
    b = tf.placeholder('float32')
    c = a + b
    cc = tf.add(a, b)
    x = tf.placeholder(tf.float32, None)
    y = x * 20 + 100
    with tf.Session() as sess:
        # Placeholders receive their values through feed_dict at run time
        c_value = sess.run(c, feed_dict={a: 10, b: 20})
        cc_value = sess.run(cc, feed_dict={a: 10, b: 20})
        print(c_value)
        print(cc_value)
        y_value = sess.run(y, feed_dict={x: [10, 20, 30, 40]})
        print(y_value)
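
A placeholder must be fed for every fetch that depends on it, or the run fails. A minimal sketch of the failure case (the names here are illustrative, not from the tutorial files):

import tensorflow as tf

x = tf.placeholder(tf.float32)
y = x * 2.0
with tf.Session() as sess:
    try:
        sess.run(y)  # x was never fed, so the run raises an error
    except tf.errors.InvalidArgumentError as e:
        print('x must be fed:', e.message)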

7. Creating tensors

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def demo_1():
    print('demo 1:')
    tensor1 = tf.constant(5.0)
    tensor2 = tf.constant([1, 2, 3, 4, 5])
    tensor3 = tf.constant([[1, 2], [3, 4]])
    print(tensor1)
    print(tensor2)
    print(tensor3)
    with tf.Session() as sess:
        value_1 = sess.run(tensor1)
        value_2 = sess.run(tensor2)
        value_3 = sess.run(tensor3)
        print(value_1)
        print(value_2)
        print(value_3)


def demo_2():
    print('demo 2:')
    # Tensor of all ones
    tensor1 = tf.ones(shape=[1, 2, 3])
    # Tensor of all zeros
    tensor2 = tf.zeros(shape=[3, 4])
    # Uniform distribution on [0, 4)
    tensor3 = tf.random_uniform([2, 3], minval=0, maxval=4)
    # Normal distribution with mean 5 and standard deviation 4
    tensor4 = tf.random_normal([2, 3], mean=5, stddev=4)
    with tf.Session() as sess:
        value_1 = sess.run(tensor1)
        value_2 = sess.run(tensor2)
        value_3 = sess.run(tensor3)
        value_4 = sess.run(tensor4)
        print(value_1)
        print(value_2)
        print(value_3)
        print(value_4)


if __name__ == "__main__":
    demo_1()
    demo_2()

8. Changing tensor shape

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def demo_1():
    print('demo 1:')
    a = tf.placeholder(dtype=tf.float32, shape=[None, None])
    b = tf.placeholder(dtype=tf.float32, shape=[None, 5])
    print('Before:')
    print('a:', a)
    print('b:', b)
    # set_shape fills in statically unknown dimensions in place
    a.set_shape([2, 5])
    b.set_shape([5, 5])
    print('After:')
    print('a:', a)
    print('b:', b)


def demo_2():
    print('demo 2:')
    a = tf.placeholder(dtype=tf.float32, shape=[3, 4])
    print('Before:')
    print('a:', a)
    # reshape returns a new tensor; the element count must stay the same
    a = tf.reshape(a, shape=[2, 6])
    print('After:')
    print('a:', a)


if __name__ == "__main__":
    demo_1()
    demo_2()
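
The two demos differ in kind: set_shape only makes statically unknown dimensions concrete on the existing tensor, while tf.reshape adds a new op that may rearrange elements. Both reject impossible shapes at graph-construction time; a small sketch of the failure cases (illustrative, not part of the tutorial files):

import tensorflow as tf

a = tf.placeholder(dtype=tf.float32, shape=[None, 5])
try:
    a.set_shape([2, 4])  # 4 conflicts with the known dimension 5
except ValueError as e:
    print('set_shape failed:', e)

b = tf.placeholder(dtype=tf.float32, shape=[3, 4])
try:
    tf.reshape(b, shape=[5, 5])  # 12 elements cannot fill a 5x5 tensor
except ValueError as e:
    print('reshape failed:', e)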

9. Matrix operations

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

if __name__ == "__main__":
    sess = tf.Session()
    A = tf.random_uniform([3, 2])
    B = tf.fill([2, 4], 3.5)
    C = tf.random_normal([3, 4])
    # Note: each sess.run re-samples the random ops, so the values printed
    # below differ from one run call to the next
    print('A:\n', sess.run(A))
    print('B:\n', sess.run(B))
    print('C:\n', sess.run(C))
    # Matrix product: [3, 2] x [2, 4] -> [3, 4]
    print('A*B:\n', sess.run(tf.matmul(A, B)))
    print('A*B+C:\n', sess.run(tf.matmul(A, B) + C))
    sess.close()
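
Note that tf.matmul is a true matrix product, while the * operator on tensors is elementwise multiplication (with broadcasting). A quick sketch of the difference, using hypothetical 2x2 tensors:

import tensorflow as tf

with tf.Session() as sess:
    M = tf.fill([2, 2], 2.0)
    N = tf.fill([2, 2], 3.0)
    print(sess.run(M * N))            # elementwise: every entry is 6.0
    print(sess.run(tf.matmul(M, N)))  # matrix product: every entry is 12.0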

3. Case studies

1. Linear regression model

1. How linear regression works

1. Build the model

$y = w_1 x_1 + w_2 x_2 + \cdots + w_n x_n + b$

2. Loss function

Mean squared error.

3. Optimize the loss

Gradient descent.
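
For reference, the standard forms of both (writing $\hat{y}_i$ for the prediction on sample $i$, $n$ for the number of samples, and $\alpha$ for the learning rate):

$$L(w, b) = \frac{1}{n}\sum_{i=1}^{n}(\hat{y}_i - y_i)^2, \qquad w \leftarrow w - \alpha\frac{\partial L}{\partial w}, \qquad b \leftarrow b - \alpha\frac{\partial L}{\partial b}$$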

2. Design

1. Prepare the data

Generate 100 random points with a single feature, where x and y satisfy $y = kx + b$:

X.shape = (100, 1)

y.shape = (100, 1)

The data are generated from $y = 0.8x + 0.7$.

y_predict = tf.matmul(X, weight) + bias

2. Build the loss function

error = tf.reduce_mean(tf.square(y_predict - y))

3. Optimize the loss

tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(error)

3. Implementation

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/线性回归案例/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def linear_regression():
    # 1. Prepare the data
    X = tf.random_normal(shape=[100, 1])
    y = tf.matmul(X, [[0.8]]) + 0.7
    # 2. Build the model
    weight = tf.Variable(initial_value=tf.random_normal(shape=[1, 1]))
    bias = tf.Variable(initial_value=tf.random_normal(shape=[1]))
    y_predict = tf.matmul(X, weight) + bias
    # 3. Build the loss function
    error = tf.reduce_mean(tf.square(y_predict - y))
    # 4. Optimize the loss
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=0.01).minimize(error)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(init)
        print('Before training:\nweight: %f, bias: %f, loss: %f' %
              (weight.eval(), bias.eval(), error.eval()))
        # Start training
        for i in range(1000):
            sess.run(optimizer)
            if i % 100 == 0:
                print('After step %d:\nweight: %f, bias: %f, loss: %f' %
                      ((i + 1), weight.eval(), bias.eval(), error.eval()))


if __name__ == "__main__":
    linear_regression()

4. Adding variable summaries

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/线性回归案例/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def linear_regression():
    # 1. Prepare the data
    X = tf.random_normal(shape=[100, 1])
    y = tf.matmul(X, [[0.8]]) + 0.7
    # 2. Build the model
    weight = tf.Variable(initial_value=tf.random_normal(shape=[1, 1]))
    bias = tf.Variable(initial_value=tf.random_normal(shape=[1]))
    y_predict = tf.matmul(X, weight) + bias
    # 3. Build the loss function
    error = tf.reduce_mean(tf.square(y_predict - y))
    # 4. Optimize the loss
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=0.01).minimize(error)

    init = tf.global_variables_initializer()
    # Collect summaries
    tf.summary.scalar('error', error)
    tf.summary.histogram('weight', weight)
    tf.summary.histogram('bias', bias)
    # Merge all summaries
    merge = tf.summary.merge_all()

    with tf.Session() as sess:
        # Initialize the variables
        sess.run(init)
        print('Before training:\nweight: %f, bias: %f, loss: %f' %
              (weight.eval(), bias.eval(), error.eval()))

        # Create the event file
        fileWriter = tf.summary.FileWriter(
            cur_path + '14添加变量显示', graph=sess.graph)

        # Start training
        for i in range(1000):
            sess.run(optimizer)
            # Run the merged summary op
            summary = sess.run(merge)
            # Write the summaries to the event file
            fileWriter.add_summary(summary, i)
            if i % 100 == 0:
                print('After step %d:\nweight: %f, bias: %f, loss: %f' %
                      ((i + 1), weight.eval(), bias.eval(), error.eval()))


if __name__ == "__main__":
    linear_regression()

5. Adding namespaces

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/线性回归案例/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def linear_regression():
    # 1. Prepare the data
    with tf.variable_scope('prepare_data'):
        X = tf.random_normal(shape=[100, 1], name='feature')
        y = tf.matmul(X, [[0.8]]) + 0.7
    # 2. Build the model
    with tf.variable_scope('create_model'):
        weight = tf.Variable(initial_value=tf.random_normal(
            shape=[1, 1]), name='weight')
        bias = tf.Variable(
            initial_value=tf.random_normal(shape=[1]), name='bias')
        y_predict = tf.matmul(X, weight) + bias
    # 3. Build the loss function
    with tf.variable_scope('loss_function'):
        error = tf.reduce_mean(tf.square(y_predict - y))
    # 4. Optimize the loss
    with tf.variable_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=0.01).minimize(error)

    init = tf.global_variables_initializer()
    # Collect summaries
    tf.summary.scalar('error', error)
    tf.summary.histogram('weight', weight)
    tf.summary.histogram('bias', bias)
    # Merge all summaries
    merge = tf.summary.merge_all()

    with tf.Session() as sess:
        # Initialize the variables
        sess.run(init)
        print('Before training:\nweight: %f, bias: %f, loss: %f' %
              (weight.eval(), bias.eval(), error.eval()))

        # Create the event file
        fileWriter = tf.summary.FileWriter(
            cur_path + '15增加命名空间', graph=sess.graph)

        # Start training
        for i in range(1000):
            sess.run(optimizer)
            # Run the merged summary op
            summary = sess.run(merge)
            # Write the summaries to the event file
            fileWriter.add_summary(summary, i)
            if i % 100 == 0:
                print('After step %d:\nweight: %f, bias: %f, loss: %f' %
                      ((i + 1), weight.eval(), bias.eval(), error.eval()))


if __name__ == "__main__":
    linear_regression()

6. Saving the model

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/线性回归案例/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def linear_regression():
    # 1. Prepare the data
    with tf.variable_scope('prepare_data'):
        X = tf.random_normal(shape=[100, 1], name='feature')
        y = tf.matmul(X, [[0.8]]) + 0.7
    # 2. Build the model
    with tf.variable_scope('create_model'):
        weight = tf.Variable(initial_value=tf.random_normal(
            shape=[1, 1]), name='weight')
        bias = tf.Variable(
            initial_value=tf.random_normal(shape=[1]), name='bias')
        y_predict = tf.matmul(X, weight) + bias
    # 3. Build the loss function
    with tf.variable_scope('loss_function'):
        error = tf.reduce_mean(tf.square(y_predict - y))
    # 4. Optimize the loss
    with tf.variable_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=0.01).minimize(error)

    init = tf.global_variables_initializer()
    # Collect summaries
    tf.summary.scalar('error', error)
    tf.summary.histogram('weight', weight)
    tf.summary.histogram('bias', bias)
    # Merge all summaries
    merge = tf.summary.merge_all()

    # Create a saver for writing checkpoints
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(init)
        print('Before training:\nweight: %f, bias: %f, loss: %f' %
              (weight.eval(), bias.eval(), error.eval()))

        # Create the event file
        fileWriter = tf.summary.FileWriter(
            cur_path + '15增加命名空间', graph=sess.graph)

        # Start training
        for i in range(1000):
            sess.run(optimizer)
            # Run the merged summary op
            summary = sess.run(merge)
            # Write the summaries to the event file
            fileWriter.add_summary(summary, i)
            if i % 100 == 0:
                print('After step %d:\nweight: %f, bias: %f, loss: %f' %
                      ((i + 1), weight.eval(), bias.eval(), error.eval()))
                # Save a checkpoint every 100 steps
                saver.save(sess, cur_path + 'ckpt/linear_regression.ckpt')


if __name__ == "__main__":
    linear_regression()

7. Restoring the model

import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/线性回归案例/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def linear_regression():
    # 1. Prepare the data
    with tf.variable_scope('prepare_data'):
        X = tf.random_normal(shape=[100, 1], name='feature')
        y = tf.matmul(X, [[0.8]]) + 0.7
    # 2. Build the model
    with tf.variable_scope('create_model'):
        weight = tf.Variable(initial_value=tf.random_normal(
            shape=[1, 1]), name='weight')
        bias = tf.Variable(
            initial_value=tf.random_normal(shape=[1]), name='bias')
        y_predict = tf.matmul(X, weight) + bias
    # 3. Build the loss function
    with tf.variable_scope('loss_function'):
        error = tf.reduce_mean(tf.square(y_predict - y))
    # 4. Optimize the loss
    with tf.variable_scope('optimizer'):
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=0.01).minimize(error)

    init = tf.global_variables_initializer()
    # Collect summaries
    tf.summary.scalar('error', error)
    tf.summary.histogram('weight', weight)
    tf.summary.histogram('bias', bias)
    # Merge all summaries
    merge = tf.summary.merge_all()

    # Create a saver for reading checkpoints
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(init)
        print('Before restoring:\nweight: %f, bias: %f, loss: %f' %
              (weight.eval(), bias.eval(), error.eval()))

        # Create the event file
        fileWriter = tf.summary.FileWriter(
            cur_path + '15增加命名空间', graph=sess.graph)

        # Restore the model if a checkpoint exists
        ckpt = tf.train.get_checkpoint_state(cur_path + 'ckpt/')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, cur_path + 'ckpt/linear_regression.ckpt')
            print('After restoring:\nweight: %f, bias: %f, loss: %f' %
                  (weight.eval(), bias.eval(), error.eval()))


if __name__ == "__main__":
    linear_regression()
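
To check what a checkpoint actually contains before restoring it, tf.train.list_variables reads the variable names and shapes straight from a checkpoint directory. A minimal sketch, assuming the directory layout used above:

import tensorflow as tf

ckpt_dir = './神经网络/TensorFlow教程/线性回归案例/ckpt/'
# Prints (name, shape) for every variable stored in the latest checkpoint
for name, shape in tf.train.list_variables(ckpt_dir):
    print(name, shape)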

2. Handwritten digit recognition

1. Data preparation

Download the MNIST handwritten digit dataset.

from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/手写数字识别/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

if __name__ == "__main__":
    mnist = input_data.read_data_sets(cur_path + 'data/', one_hot=True)
    # Load the images
    train_X = mnist.train.images
    validation_X = mnist.validation.images
    test_X = mnist.test.images
    # Load the labels
    train_y = mnist.train.labels
    validation_y = mnist.validation.labels
    test_y = mnist.test.labels

    # Print the sizes of the training samples and labels
    print(train_X.shape, train_y.shape)
    # Inspect the image and label of the first training sample
    print(train_X[0])
    print(train_y[0])
    # Fetch a batch of 100 samples
    image, label = mnist.train.next_batch(100)
    print(image.shape, label.shape)

    # Visualize the first 20 training samples
    fig, ax = plt.subplots(nrows=4, ncols=5, sharex='all', sharey='all')
    ax = ax.flatten()
    for i in range(20):
        img = train_X[i].reshape(28, 28)
        ax[i].imshow(img, cmap='Greys')
    ax[0].set_xticks([])
    ax[0].set_yticks([])
    plt.tight_layout()
    plt.show()

2. Implementation

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import os

# Current file path
cur_path = './神经网络/TensorFlow教程/手写数字识别/'
# Suppress warning messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


class Mnist(object):
    def __init__(self):
        self.load_data()
        self.create_model()

    # Prepare the data
    def load_data(self):
        with tf.variable_scope('prepare_data'):
            self.mnist = input_data.read_data_sets(
                cur_path + 'data/', one_hot=True)
            self.X = tf.placeholder(dtype=tf.float32, shape=[
                None, 784], name='train_images')
            self.y = tf.placeholder(dtype=tf.float32, shape=[
                None, 10], name='train_labels')

    # Build the model
    def create_model(self):
        with tf.variable_scope('create_model'):
            self.weight = tf.Variable(initial_value=tf.random_normal(
                shape=[784, 10]), name='weight')
            self.bias = tf.Variable(
                initial_value=tf.random_normal(shape=[10]), name='bias')
            self.y_predict = tf.matmul(self.X, self.weight) + self.bias

        # Loss function: softmax activation with cross-entropy loss
        with tf.variable_scope('loss_function'):
            self.error = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.y,
                                                        logits=self.y_predict))

        # Optimizer
        with tf.variable_scope('optimizer'):
            self.optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=0.1).minimize(self.error)

        # Accuracy: fraction of samples whose predicted class matches the label
        predict = tf.equal(tf.argmax(self.y, 1),
                           tf.argmax(self.y_predict, 1))
        self.accurate = tf.reduce_mean(tf.cast(predict, tf.float32))

        # Initialize the model
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()

    # Train the model
    def train(self):
        for i in range(1000):
            images, labels = self.mnist.train.next_batch(100)
            _, loss = self.sess.run([self.optimizer, self.error], feed_dict={
                self.X: images, self.y: labels})
            # Save a checkpoint every 100 steps
            if i % 100 == 0:
                self.saver.save(self.sess, cur_path +
                                'ckpt/mnist_demo.ckpt')

    # Evaluate the model on the test set
    def test(self):
        images = self.mnist.test.images
        labels = self.mnist.test.labels
        acc = self.sess.run(self.accurate, feed_dict={
            self.X: images, self.y: labels})
        return acc

    # Save the model
    def save(self, path):
        self.saver.save(self.sess, path + 'mnist_demo.ckpt')

    # Restore the model
    def restore(self, path):
        ckpt = tf.train.get_checkpoint_state(path)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, path + 'mnist_demo.ckpt')

    def __del__(self):
        self.sess.close()


if __name__ == "__main__":
    path = cur_path + 'ckpt/'

    # Train and save the model
    # mnist_1 = Mnist()
    # mnist_1.train()
    # mnist_1.save(path)

    # Restore and evaluate the model
    mnist_2 = Mnist()
    mnist_2.restore(path)
    acc = mnist_2.test()
    print(acc)
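
For reference, the loss minimized above is softmax cross entropy: the logits $z_i$ are turned into class probabilities and compared against the one-hot labels $y_{ij}$,

$$\text{softmax}(z)_j = \frac{e^{z_j}}{\sum_{k} e^{z_k}}, \qquad L = -\frac{1}{N}\sum_{i=1}^{N}\sum_{j=1}^{10} y_{ij}\,\log \text{softmax}(z_i)_j$$

which is what tf.nn.softmax_cross_entropy_with_logits computes per sample, averaged here by tf.reduce_mean.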