# 《深度学习原理与TensorFlow实战》 (Deep Learning Principles and TensorFlow in Practice), Chapter 05: RNNs that can talk
# Book source code: https://github.com/DeepVisionTeam/TensorFlowBook.git
# Video lectures: http://edu.csdn.net/course/detail/5222
# Environment: Windows 10, TensorFlow 1.2.0, Python 3.6.1
# CUDA v8.0, cudnn-8.0-windows10-x64-v5.1
# Local code location: D:\git\DeepLearning\TensorFlowBook\rnn\language_model
# https://github.com/DeepVisionTeam/TensorFlowBook/blob/master/rnn/language_model/tclm.py
# https://github.com/DeepVisionTeam/TensorFlowBook/blob/master/rnn/language_model/tclm_inference.py
# https://github.com/DeepVisionTeam/TensorFlowBook/blob/master/rnn/language_model/tclm_reader.py
# https://github.com/DeepVisionTeam/TensorFlowBook/blob/master/rnn/language_model/tensorflow_code_stat.py
# https://github.com/DeepVisionTeam/TensorFlowBook/blob/master/rnn/language_model/test_tclm_reader.py
# Minor modifications are needed to run on TensorFlow 1.2.0
# Note: be sure to pass the correct command-line arguments, e.g.:
# C:\Python36\python.exe D:/git/DeepLearning/TensorFlowBook/rnn/language_model/tclm.py --data_path=..\..\..\tensorflow --model=small --save_path=.\savepath --use_fp16=True
#
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
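For example, with the "small" config below (learning_rate=1.0, max_epoch=4,
lr_decay=0.5) the learning rate stays at 1.0 for the first 4 epochs and is
then halved once per epoch: epoch 5 runs at 0.5, epoch 6 at 0.25, and so on.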
The original example trains on the PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
This adaptation (tclm.py) instead trains a character-level model on the
TensorFlow Python sources; point --data_path at a checkout of the tensorflow
repository. To run:
$ python tclm.py --data_path=../../../tensorflow --model=small --save_path=./savepath
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tclm_reader
import tensorflow as tf
#from tensorflow.contrib.rnn.python.ops import core_rnn_cell
#from tensorflow.contrib.rnn.python.ops import lstm_ops
from tensorflow.contrib import rnn
flags = tf.flags
logging = tf.logging
flags.DEFINE_string("model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None,
"Where the training/test data is stored.")
flags.DEFINE_string("save_path", None, "Model output directory.")
flags.DEFINE_bool("use_fp16", False,
"Train using 16-bit floats instead of 32bit floats")
FLAGS = flags.FLAGS
def data_type():
return tf.float16 if FLAGS.use_fp16 else tf.float32
class PTBInput(object):
"""The input data."""
def __init__(self, config, data, name=None):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = tclm_reader.tensorflow_code_producer(
data, batch_size, num_steps, name=name)
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config, input_):
self._input = input_
batch_size = input_.batch_size
num_steps = input_.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
        # TensorFlow 1.2 requires each layer of MultiRNNCell to be a distinct
        # cell object, so the layers are built with a small factory instead of
        # repeating a single cell instance.
        def make_cell():
            basic_cell = rnn.BasicLSTMCell(num_units=size, state_is_tuple=True)
            if is_training and config.keep_prob < 1:
                basic_cell = tf.contrib.rnn.DropoutWrapper(
                    basic_cell, output_keep_prob=config.keep_prob)
            return basic_cell

        cell = rnn.MultiRNNCell(
            [make_cell() for _ in range(config.num_layers)],
            state_is_tuple=True)
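        # Note: since forget_bias is not passed above, BasicLSTMCell's default
        # of 1.0 is used; the upstream ptb_word_lm.py example passes
        # forget_bias=0.0 to match the hyperparameters reported in the paper.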
self._initial_state = cell.zero_state(batch_size, data_type())
with tf.device("/cpu:0"):
embedding = tf.get_variable(
"embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.concat(outputs, 1), [-1, size])
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size],
dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(input_.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=data_type())])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = tclm_reader.VOCAB_SIZE
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = tclm_reader.VOCAB_SIZE
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = tclm_reader.VOCAB_SIZE
class TestConfig(object):
"""Tiny config, for testing."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 2
hidden_size = 2
max_epoch = 1
max_max_epoch = 1
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = tclm_reader.VOCAB_SIZE
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, (c, h) in enumerate(model.initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
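# For intuition: run_epoch returns perplexity, i.e. exp(average per-step
# cross-entropy). A minimal sketch with made-up numbers (not from a real run):
#     costs = 120.0                       # summed batch costs over the epoch
#     iters = 20                          # total unrolled time steps processed
#     np.exp(costs / iters)               # = exp(6.0), roughly 403.4
# Lower perplexity means the model assigns higher probability to the data.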
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
elif FLAGS.model == "test":
return TestConfig()
else:
        raise ValueError("Invalid model: %s" % FLAGS.model)
def main(_):
if not FLAGS.data_path:
raise ValueError(
"Must set --data_path to tensorflow source code directory")
raw_data = tclm_reader.tensorflow_code_data(FLAGS.data_path)
train_data, valid_data, test_data = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
train_input = PTBInput(config=config, data=train_data,
name="TrainInput")
with tf.variable_scope("Model", reuse=None,
initializer=initializer):
m = PTBModel(is_training=True, config=config,
input_=train_input)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = PTBInput(config=config, data=valid_data,
name="ValidInput")
with tf.variable_scope("Model", reuse=True,
initializer=initializer):
mvalid = PTBModel(is_training=False, config=config,
input_=valid_input)
tf.summary.scalar("Validation Loss", mvalid.cost)
with tf.name_scope("Test"):
test_input = PTBInput(config=eval_config, data=test_data,
name="TestInput")
with tf.variable_scope("Model", reuse=True,
initializer=initializer):
mtest = PTBModel(is_training=False, config=eval_config,
input_=test_input)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (
i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, eval_op=m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (
i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (
i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest)
print("Test Perplexity: %.3f" % test_perplexity)
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path,
global_step=sv.global_step)
if __name__ == "__main__":
tf.app.run()
'''
...
Epoch: 1 Learning rate: 1.000
0.000 perplexity: 2.667 speed: 98 wps
...
'''
# Note: specify the argument --save_path=./tc.lm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tclm_reader
import tensorflow as tf
from tclm import *
def main(_):
# $ python tclm_inference.py --save_path=./tc.lm/
if not FLAGS.save_path:
raise ValueError("Must set --save_path to language model directory")
test_data = [tclm_reader.BOF] * 500
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default():
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.name_scope("Train"):
test_input = PTBInput(config=config, data=test_data,
name="TestInput")
with tf.variable_scope("Model", reuse=None,
initializer=initializer):
mtest = PTBModel(is_training=True, config=eval_config,
input_=test_input)
tf.summary.scalar("Training Loss", mtest.cost)
tf.summary.scalar("Learning Rate", mtest.lr)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
ckpt = tf.train.get_checkpoint_state(FLAGS.save_path)
if ckpt and ckpt.model_checkpoint_path:
sv.saver.restore(session, ckpt.model_checkpoint_path)
test_perplexity = run_epoch(session, mtest)
print("Test Perplexity: %.3f" % test_perplexity)
if __name__ == "__main__":
tf.app.run()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
MAX_CHAR = 256
BOF = MAX_CHAR + 1
EOF = MAX_CHAR + 2
VOCAB_SIZE = MAX_CHAR + 3
def get_file_suffix(file):
index = file.rfind('.')
return file[index + 1:] if index >= 0 else ''
def read_source_code_data(code_files):
data = []
for code_file in code_files:
file_r = open(code_file, 'r', encoding='UTF-8')
curr_data = []
curr_data.append(BOF)
for dataline in file_r:
for c in dataline:
curr_data.append(ord(c))
curr_data.append(EOF)
data.extend(curr_data)
file_r.close()
return data
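# A minimal sketch of the encoding above: a hypothetical file containing just
# the two characters "if" and a newline is read as
#     [BOF, ord('i'), ord('f'), ord('\n'), EOF] == [257, 105, 102, 10, 258]
# so every .py file becomes a character-level sequence bracketed by BOF/EOF.
# Caveat: characters with ord(c) > 255 (the corpus does contain some, as the
# tensorflow_code_stat.py listing below shows) are not covered by MAX_CHAR and
# would collide with the markers or overflow VOCAB_SIZE.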
def tensorflow_code_data(data_path=None):
# find all python source code
tensorflow_code_files = []
for root, dirs, files in os.walk(data_path):
for file in files:
code_file = os.path.join(root, file)
file_suffix = get_file_suffix(code_file)
if file_suffix == 'py':
tensorflow_code_files.append(code_file)
train_code_file_count = int(len(tensorflow_code_files) * 0.5)
valid_code_file_count = int(len(tensorflow_code_files) * 0.3)
# test_code_file_count = int(len(tensorflow_code_files) * 0.2)
train_data = read_source_code_data(
tensorflow_code_files[0: train_code_file_count])
valid_data = read_source_code_data(tensorflow_code_files[
train_code_file_count: train_code_file_count + valid_code_file_count])
test_data = read_source_code_data(
tensorflow_code_files[train_code_file_count + valid_code_file_count:])
return train_data, valid_data, test_data
def tensorflow_code_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps].
The second element of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "TensorflowCodeProducer",
[raw_data, batch_size, num_steps]):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data",
dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0: batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])
x.set_shape([batch_size, num_steps])
y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])
y.set_shape([batch_size, num_steps])
return x, y
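# A minimal worked example of the batching above (illustrative values only):
# with raw_data = [0, 1, ..., 11], batch_size = 2 and num_steps = 3 the data
# tensor is reshaped to
#     [[0, 1, 2, 3, 4, 5],
#      [6, 7, 8, 9, 10, 11]]
# epoch_size = (6 - 1) // 3 = 1, and the single batch produced is
#     x = [[0, 1, 2], [6, 7, 8]]    y = [[1, 2, 3], [7, 8, 9]]
# i.e. y holds the next-character targets for x, shifted by one position.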
# tensorflow_code_stat.py
import os
def get_file_suffix(file):
    index = file.rfind('.')
    return file[index + 1:] if index >= 0 else ''
def tensorflow_code_stat(data_path):
tensorflow_code_files = []
for root, dirs, files in os.walk(data_path):
for file in files:
code_file = os.path.join(root, file)
file_suffix = get_file_suffix(code_file)
            if file_suffix == 'py':
tensorflow_code_files.append(code_file)
dict_c = {}
for code_file in tensorflow_code_files:
file_r = open(code_file, 'r', encoding='UTF-8')
for data in file_r:
for c in data:
if c in dict_c:
dict_c[c] += 1
else:
dict_c[c] = 1
file_r.close()
for c, c_freq in dict_c.items():
print(c + '\t' + str(ord(c)) + '\t' + str(c_freq))
if __name__ == "__main__":
    # Note the relative location of this directory, or use an absolute path
    tensorflow_code_stat('../../../tensorflow/tensorflow/')
'''
32 22269644
C 67 195582
o 111 4482778
p 112 2767253
y 121 865716
r 114 4523279
i 105 4313868
g 103 1117762
h 104 1593713
t 116 6038934
2 50 356485
0 48 468573
1 49 448245
5 53 86463
T 84 453378
e 101 8656155
n 110 4455952
s 115 5930738
F 70 171095
l 108 3345283
w 119 594435
A 65 334059
u 117 2178558
. 46 2103344
R 82 262307
v 118 686076
d 100 2250758
> 62 21221
< 60 14353
j 106 61869
| 124 4892
\ 92 69468
J 74 3632
! 33 7608
Z 90 5844
$ 36 1699
` 96 408658
^ 94 4767
~ 126 1074
? 63 2574
Q 81 11146
9 1238
& 38 1267
Т 1058 12
е 1077 17
с 1089 27
т 1090 12
ф 1092 40
ы 1099 40
в 1074 40
你 20320 10
好 22909 10
怎 24590 10
么 20040 10
样 26679 10
а 1072 45
65279 5
Н 1053 5
к 1082 20
и 1080 20
д 1076 15
р 1088 20
ж 1078 5
ј 1112 15
н 1085 5
п 1087 5
о 1086 5
м 1084 5
爱 29233 10
要 35201 5
依 20381 5
法 27861 5
治 27835 5
国 22269 5
是 26159 10
赞 36190 5
美 32654 5
那 37027 5
些 20123 5
谁 35841 5
公 20844 5
义 20041 5
的 30340 5
和 21644 5
惩 24809 5
罚 32602 5
恶 24694 5
人 20154 5
。 12290 5
韩 38889 5
非 38750 5
у 1091 10
г 1075 10
з 1079 10
ñ 241 10
ë 235 10
â 226 10
ô 244 10
à 224 10
æ 230 10
ø 248 10
⚡ 9889 10