Skip to content

Commit 7cbedd5

Browse files
committed
remove useless comments
1 parent e984c3b commit 7cbedd5

File tree

1 file changed

+8
-13
lines changed

1 file changed

+8
-13
lines changed

main_simple_seq2seq.py

Lines changed: 8 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -49,16 +49,14 @@
4949

5050
unk_id = w2idx['unk'] # 1
5151
pad_id = w2idx['_'] # 0
52-
# print(idx2w[8001])
53-
# exit()
52+
5453
start_id = xvocab_size # 8002
5554
end_id = xvocab_size+1 # 8003
56-
# print(start_id, end_id)
57-
# exit()
55+
5856
w2idx.update({'start_id': start_id})
5957
w2idx.update({'end_id': end_id})
6058
idx2w = idx2w + ['start_id', 'end_id']
61-
# print(idx2w)
59+
6260
xvocab_size = yvocab_size = xvocab_size + 2
6361

6462
""" A data for Seq2Seq should look like this:
@@ -70,16 +68,14 @@
7068

7169
print("encode_seqs", [idx2w[id] for id in trainX[10]])
7270
target_seqs = tl.prepro.sequences_add_end_id([trainY[10]], end_id=end_id)[0]
73-
# target_seqs = tl.prepro.remove_pad_sequences([target_seqs], pad_id=pad_id)[0]
71+
# target_seqs = tl.prepro.remove_pad_sequences([target_seqs], pad_id=pad_id)[0]
7472
print("target_seqs", [idx2w[id] for id in target_seqs])
75-
# exit()
7673
decode_seqs = tl.prepro.sequences_add_start_id([trainY[10]], start_id=start_id, remove_last=False)[0]
77-
# decode_seqs = tl.prepro.remove_pad_sequences([decode_seqs], pad_id=pad_id)[0]
74+
# decode_seqs = tl.prepro.remove_pad_sequences([decode_seqs], pad_id=pad_id)[0]
7875
print("decode_seqs", [idx2w[id] for id in decode_seqs])
7976
target_mask = tl.prepro.sequences_get_mask([target_seqs])[0]
8077
print("target_mask", target_mask)
8178
print(len(target_seqs), len(decode_seqs), len(target_mask))
82-
# exit()
8379

8480
###============= model
8581
def model(encode_seqs, decode_seqs, is_train=True, reuse=False):
@@ -127,8 +123,8 @@ def model(encode_seqs, decode_seqs, is_train=True, reuse=False):
127123
y = tf.nn.softmax(net.outputs)
128124

129125
# loss for training
130-
# print(net_out.outputs) # (?, 8004)
131-
# print(target_seqs) # (32, ?)
126+
# print(net_out.outputs) # (?, 8004)
127+
# print(target_seqs) # (32, ?)
132128
# loss_weights = tf.ones_like(target_seqs, dtype=tf.float32)
133129
# loss = tf.contrib.legacy_seq2seq.sequence_loss(net_out.outputs, target_seqs, loss_weights, yvocab_size)
134130
loss = tl.cost.cross_entropy_seq_with_mask(logits=net_out.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost')
@@ -137,7 +133,7 @@ def model(encode_seqs, decode_seqs, is_train=True, reuse=False):
137133

138134
lr = 0.0001
139135
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
140-
# Truncated Backpropagation for training
136+
# Truncated Backpropagation for training (option)
141137
# max_grad_norm = 30
142138
# grads, _ = tf.clip_by_global_norm(tf.gradients(loss, net_out.all_params),max_grad_norm)
143139
# optimizer = tf.train.GradientDescentOptimizer(lr)
@@ -219,7 +215,6 @@ def model(encode_seqs, decode_seqs, is_train=True, reuse=False):
219215
break
220216
sentence = sentence + [w]
221217
print(" >", ' '.join(sentence))
222-
# exit()
223218

224219
print("Epoch[%d/%d] averaged loss:%f took:%.5fs" % (epoch, n_epoch, total_err/n_iter, time.time()-epoch_time))
225220

0 commit comments

Comments (0)