add another folder for Python 3.5 and Tensorflow 1.1 #25
Merged: CreatCodeBuild merged 2 commits into CreatCodeBuild:master from whyscience:mater_python3 on Sep 12, 2017.
Binary file added (not shown): Season1_Tensorflow1.1_Python3.5/1-3/NVIDIA Autonomous Car.mp4
Season1_Tensorflow1.1_Python3.5/1-3/README.md (4 additions, 0 deletions)
@@ -0,0 +1,4 @@
# Sections 1 - 3
These sections cover the most basic usage of TensorFlow.

If you have already worked with TensorFlow, or have no trouble reading the English documentation, you can start directly with sections 4 - 6.
Season1_Tensorflow1.1_Python3.5/1-3/run.py (137 additions, 0 deletions)
@@ -0,0 +1,137 @@
# encoding: utf-8
# for Python 3 compatibility, in case you are running Python 2.7
from __future__ import print_function, division
import tensorflow as tf

print('Loaded TF version', tf.__version__, '\n\n')

# "Tensor" is the mathematical notion of a tensor
# scalar, vector, tensor

# A simple way to think about them:
# a scalar represents a value
# a vector represents a position (a point in space)
# a tensor represents the whole space

# a one-dimensional array is a vector
# a multi-dimensional array is a tensor; a matrix is also a tensor


# Four important types
# @Variable  a variable in the computational graph
# @Tensor    a multi-dimensional matrix with many methods
# @Graph     a computational graph
# @Session   used to run a computational graph


# Three important functions

# Variable
"""
tf.Variable.__init__(
    initial_value=None,  @Tensor
    trainable=True,
    collections=None,
    validate_shape=True,
    caching_device=None,
    name=None,
    variable_def=None,
    dtype=None)
"""

# Note: Variable is a class, and so is Tensor

# Constant
# tf.constant(value, dtype=None, shape=None, name='Const')
# return: a constant @Tensor

# Placeholder (a value that is filled in later)
# tf.placeholder(dtype, shape=None, name=None)
# return: a @Tensor that does not hold a value yet

# Let's use a computational graph to implement some simple functions:
# the four arithmetic operations + - * /
def basic_operation():
    v1 = tf.Variable(10)
    v2 = tf.Variable(5)
    addv = v1 + v2
    print(addv)
    print(type(addv))
    print(type(v1))

    c1 = tf.constant(10)
    c2 = tf.constant(5)
    addc = c1 + c2
    print(addc)
    print(type(addc))
    print(type(c1))

    # the object/instance that runs the computational graph
    # session is a runtime
    sess = tf.Session()

    # Variable -> initialization -> a Tensor holding a value
    # (tf.initialize_all_variables() is deprecated in TF 1.x; tf.global_variables_initializer() is the current name)
    tf.initialize_all_variables().run(session=sess)

    print('Variables must be initialized')
    print('addition(v1, v2) = ', addv.eval(session=sess))
    print('addition(v1, v2) = ', sess.run(addv))
    print('addition(c1, c2) = ', addc.eval(session=sess))
    print('\n\n')
    # This define-the-operations-first, run-them-later style is called "symbolic programming"

    # tf.Graph.__init__()
    # Creates a new, empty Graph.
    graph = tf.Graph()
    with graph.as_default():
        value1 = tf.constant([1, 2])
        value2 = tf.Variable([3, 4])
        mul = value1 / value2

    with tf.Session(graph=graph) as mySess:
        tf.initialize_all_variables().run()
        print('element-wise division(value1, value2) = ', mySess.run(mul))
        print('element-wise division(value1, value2) = ', mul.eval())

    # tensor.eval(session=sess)
    # sess.run(tensor)

    # Want to save memory? placeholder is the way to go
    # def use_placeholder():
    graph = tf.Graph()
    with graph.as_default():
        value1 = tf.placeholder(dtype=tf.float64)
        value2 = tf.Variable([3, 4], dtype=tf.float64)
        mul = value1 * value2

    with tf.Session(graph=graph) as mySess:
        tf.initialize_all_variables().run()
        # imagine this data is loaded from somewhere remote:
        # a file, the network
        # pretend it is 10 GB
        value = load_from_remote()
        for partialValue in load_partial(value, 2):
            runResult = mySess.run(mul, feed_dict={value1: partialValue})
            # evalResult = mul.eval(feed_dict={value1: partialValue})
            print('multiplication(value1, value2) = ', runResult)
        # cross validation


def load_from_remote():
    return [-x for x in range(1000)]


# a hand-written iterator
# yield, generator function
def load_partial(value, step):
    index = 0
    while index < len(value):
        yield value[index:index + step]
        index += step
    return


if __name__ == '__main__':
    basic_operation()
    # use_placeholder()
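run.py feeds the placeholder in small pieces through the load_partial generator, so only one chunk is passed to sess.run at a time. As a quick, TensorFlow-free illustration of what that generator yields (the short sample list here is made up for the example):

    def load_partial(value, step):
        index = 0
        while index < len(value):
            yield value[index:index + step]
            index += step

    # feed five elements through the generator, two at a time
    print(list(load_partial([0, -1, -2, -3, -4], 2)))
    # prints [[0, -1], [-2, -3], [-4]]: fixed-size chunks plus a shorter final one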
Binary file added (not shown): Season1_Tensorflow1.1_Python3.5/1-3/指南.pptx
Season1_Tensorflow1.1_Python3.5/10-11/.gitignore (2 additions, 0 deletions)
@@ -0,0 +1,2 @@
/board/
/fc1_weights/
Season1_Tensorflow1.1_Python3.5/10-11/README.md (2 additions, 0 deletions)
@@ -0,0 +1,2 @@
# TensorBoard: visualizing the computational graph
TensorBoard is the powerful visualization tool that ships with TF.
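dp.py below writes its histogram and scalar summaries to ./board through tf.summary.FileWriter. As a rough, minimal sketch of that same pattern on a toy graph (the names x and y and the 10-step loop are illustrative, not part of this PR):

    import tensorflow as tf

    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, name='x')
        y = tf.square(x, name='y')
        tf.summary.scalar('y', y)        # record y as a scalar curve
        merged = tf.summary.merge_all()  # one op that evaluates every summary

    with tf.Session(graph=graph) as sess:
        writer = tf.summary.FileWriter('./board', graph)  # same log dir dp.py uses
        for step in range(10):
            summary = sess.run(merged, feed_dict={x: float(step)})
            writer.add_summary(summary, step)
        writer.close()

Pointing TensorBoard at the log directory (tensorboard --logdir=./board) then serves the recorded curves and the graph structure in the browser.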
Season1_Tensorflow1.1_Python3.5/10-11/dp.py (202 additions, 0 deletions)
@@ -0,0 +1,202 @@
# for the Python 2 players
from __future__ import print_function, division

# third-party
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np

# our own module
import load

train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels

print('Training set', train_samples.shape, train_labels.shape)
print('    Test set', test_samples.shape, test_labels.shape)

image_size = load.image_size
num_labels = load.num_labels
num_channels = load.num_channels


def get_chunk(samples, labels, chunkSize):
    """
    Iterator/Generator: get a batch of data
    This function is an iterator/generator that yields only chunkSize items at a time;
    use it in a for loop, just like the range() function.
    """
    if len(samples) != len(labels):
        raise Exception('Length of samples and labels must equal')
    stepStart = 0  # initial step
    i = 0
    while stepStart < len(samples):
        stepEnd = stepStart + chunkSize
        # note: the trailing partial chunk (where stepEnd >= len(samples)) is never yielded
        if stepEnd < len(samples):
            yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd]
            i += 1
        stepStart = stepEnd


class Network():
    def __init__(self, num_hidden, batch_size):
        """
        @num_hidden: number of nodes in the hidden layer
        @batch_size: to save memory, the data is processed in batches; this is the size of one batch.
        """
        self.batch_size = batch_size
        self.test_batch_size = 500

        # Hyper Parameters
        self.num_hidden = num_hidden

        # Graph Related
        self.graph = tf.Graph()
        self.tf_train_samples = None
        self.tf_train_labels = None
        self.tf_test_samples = None
        self.tf_test_labels = None
        self.tf_test_prediction = None

        # statistics / summaries
        self.merged = None

        # initialization
        self.define_graph()
        self.session = tf.Session(graph=self.graph)
        self.writer = tf.summary.FileWriter('./board', self.graph)

    def define_graph(self):
        """
        Define the computational graph.
        """
        with self.graph.as_default():
            # here we only define the graph's variables
            with tf.name_scope('inputs'):
                self.tf_train_samples = tf.placeholder(
                    tf.float32, shape=(self.batch_size, image_size, image_size, num_channels), name='tf_train_samples'
                )
                self.tf_train_labels = tf.placeholder(
                    tf.float32, shape=(self.batch_size, num_labels), name='tf_train_labels'
                )
                self.tf_test_samples = tf.placeholder(
                    tf.float32, shape=(self.test_batch_size, image_size, image_size, num_channels),
                    name='tf_test_samples'
                )

            # fully connected layer 1
            with tf.name_scope('fc1'):
                # note: this weight shape assumes num_channels == 1, so the flattened input is image_size * image_size
                fc1_weights = tf.Variable(
                    tf.truncated_normal([image_size * image_size, self.num_hidden], stddev=0.1), name='fc1_weights'
                )
                fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]), name='fc1_biases')
                tf.summary.histogram('fc1_weights', fc1_weights)
                tf.summary.histogram('fc1_biases', fc1_biases)

            # fully connected layer 2 --> output layer
            with tf.name_scope('fc2'):
                fc2_weights = tf.Variable(
                    tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1), name='fc2_weights'
                )
                fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]), name='fc2_biases')
                tf.summary.histogram('fc2_weights', fc2_weights)
                tf.summary.histogram('fc2_biases', fc2_biases)

            # now define the graph's operations
            def model(data):
                # fully connected layer 1
                shape = data.get_shape().as_list()
                reshape = tf.reshape(data, [shape[0], shape[1] * shape[2] * shape[3]])

                with tf.name_scope('fc1_model'):
                    fc1_model = tf.matmul(reshape, fc1_weights) + fc1_biases
                    hidden = tf.nn.relu(fc1_model)

                # fully connected layer 2
                with tf.name_scope('fc2_model'):
                    return tf.matmul(hidden, fc2_weights) + fc2_biases

            # Training computation.
            logits = model(self.tf_train_samples)
            with tf.name_scope('loss'):
                self.loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.tf_train_labels)
                )
                tf.summary.scalar('Loss', self.loss)

            # Optimizer.
            with tf.name_scope('optimizer'):
                self.optimizer = tf.train.GradientDescentOptimizer(0.0001).minimize(self.loss)

            # Predictions for the training, validation, and test data.
            with tf.name_scope('predictions'):
                self.train_prediction = tf.nn.softmax(logits, name='train_prediction')
                self.test_prediction = tf.nn.softmax(model(self.tf_test_samples), name='test_prediction')

            self.merged = tf.summary.merge_all()

    def run(self):
        """
        Runs the graph with the Session.
        """

        # private function
        def print_confusion_matrix(confusionMatrix):
            print('Confusion Matrix:')
            for i, line in enumerate(confusionMatrix):
                print(line, line[i] / np.sum(line))
            a = 0
            for i, column in enumerate(np.transpose(confusionMatrix, (1, 0))):
                a += (column[i] / np.sum(column)) * (np.sum(column) / 26000)
                print(column[i] / np.sum(column), )
            print('\n', np.sum(confusionMatrix), a)

        with self.session as session:
            tf.initialize_all_variables().run()

            # training
            print('Start Training')
            # batch 1000
            for i, samples, labels in get_chunk(train_samples, train_labels, chunkSize=self.batch_size):
                _, l, predictions, summary = session.run(
                    [self.optimizer, self.loss, self.train_prediction, self.merged],
                    feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
                )
                self.writer.add_summary(summary, i)
                # labels is True Labels
                accuracy, _ = self.accuracy(predictions, labels)
                if i % 50 == 0:
                    print('Minibatch loss at step %d: %f' % (i, l))
                    print('Minibatch accuracy: %.1f%%' % accuracy)

            # testing
            accuracies = []
            confusionMatrices = []
            for i, samples, labels in get_chunk(test_samples, test_labels, chunkSize=self.test_batch_size):
                result = self.test_prediction.eval(feed_dict={self.tf_test_samples: samples})
                accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
                accuracies.append(accuracy)
                confusionMatrices.append(cm)
                print('Test Accuracy: %.1f%%' % accuracy)
            print(' Average  Accuracy:', np.average(accuracies))
            print('Standard Deviation:', np.std(accuracies))
            print_confusion_matrix(np.add.reduce(confusionMatrices))

    def accuracy(self, predictions, labels, need_confusion_matrix=False):
        """
        Compute the accuracy (and optionally the per-class recall via the confusion matrix) of the predictions.
        @return: accuracy and confusionMatrix as a tuple
        """
        _predictions = np.argmax(predictions, 1)
        _labels = np.argmax(labels, 1)
        cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
        # == is overloaded for numpy arrays
        accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0])
        return accuracy, cm


if __name__ == '__main__':
    net = Network(num_hidden=128, batch_size=100)
    net.run()
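dp.py imports a sibling load module that is not part of this diff, so the script is not runnable from these two commits alone. A hypothetical stand-in that satisfies the attributes dp.py actually reads (_train_samples, _train_labels, _test_samples, _test_labels, image_size, num_labels, num_channels) could look roughly like the sketch below; the sizes and the random data are invented for illustration and are not the repository's real loader:

    # load.py -- hypothetical stub, not the loader shipped with the tutorial
    import numpy as np

    image_size = 32     # assumed square image edge length
    num_labels = 10     # assumed number of classes
    num_channels = 1    # dp.py's fc1 weight shape expects a single channel

    def _random_data(n):
        samples = np.random.rand(n, image_size, image_size, num_channels).astype(np.float32)
        labels = np.zeros((n, num_labels), dtype=np.float32)
        labels[np.arange(n), np.random.randint(0, num_labels, n)] = 1.0  # one-hot rows
        return samples, labels

    _train_samples, _train_labels = _random_data(1000)
    _test_samples, _test_labels = _random_data(1000)

With such a stub in place, running python dp.py trains the two-layer network on random data and writes its summaries to ./board for TensorBoard.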