-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy path: train.sh
More file actions
66 lines (55 loc) · 2.91 KB
/
train.sh
File metadata and controls
66 lines (55 loc) · 2.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
#!/usr/bin/env bash
###########################################################
# The file is based on ./code2vec/preprocess.sh and ./code2vec/train.sh
###########################################################
# PREPROCESSING
###########################################################
# Fail fast: abort on command failure, on use of unset variables, and when
# any stage of a pipeline fails.
set -euo pipefail

# Load shared configuration (PYTHON, JAVA, EXTRACTOR_JAR, the *_DIR/*_FILE
# paths and vocab-size settings used below). Abort with a clear message if
# it is missing instead of continuing with unset variables.
[[ -f ./default.config ]] || { echo "error: ./default.config not found" >&2; exit 1; }
. ./default.config
# ---- Command-line argument parsing --------------------------------------
# Supported flags:
#   --nthreads N   override NUM_THREADS from default.config
# Any other argument is collected into POSITIONAL and restored afterwards.
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
    --nthreads)
    NUM_THREADS="$2"
    shift # past argument
    shift # past value
    ;;
    *)
    # BUGFIX: the original loop never shifted unrecognized arguments, so
    # passing anything other than --nthreads caused an infinite loop.
    POSITIONAL+=("$1")
    shift # past unrecognized argument
    ;;
esac
done
# Restore positional parameters for any later use. The '+' expansion guards
# against the empty-array-under-'set -u' error in bash < 4.4.
set -- ${POSITIONAL[@]+"${POSITIONAL[@]}"}
# ---- Path extraction ----------------------------------------------------
# Split the raw dataset chunks into per-sample code files, then run the
# extractor jar over the validation / test / train splits to produce
# path-context files.
# NOTE(review): ${PYTHON} and ${JAVA} are deliberately left unquoted — they
# may carry flags (e.g. "java -Xmx8g") and must word-split.
# NOTE(review): "JAR_OUTPUP_FOLDER_*" looks like a typo of "OUTPUT", but the
# names must match the variables declared in default.config, so they are
# kept as-is.
${PYTHON} dataset_to_codefiles.py --chunks_dir "${CHUNKS_DIR}"

# Recreate the output directory from scratch. ':?' aborts if DATA_DIR is
# empty or unset, so we can never 'rm -rf' an unintended path.
rm -rf "${DATA_DIR:?}"
mkdir -p "${DATA_DIR}"

echo "Extracting paths from validation set..."
${JAVA} -jar "${EXTRACTOR_JAR}" code2vec --lang py --project "${VAL_DIR}" --output "${JAR_OUTPUP_FOLDER_VAL}" \
  --maxL "${MAX_PATH_LENGTH}" --maxW "${MAX_PATH_WIDTH}" --maxContexts "${MAX_CONTEXTS}" --maxTokens "${WORD_VOCAB_SIZE}" \
  --maxPaths "${PATH_VOCAB_SIZE}"
echo "Finished extracting paths from validation set"

echo "Extracting paths from test set..."
${JAVA} -jar "${EXTRACTOR_JAR}" code2vec --lang py --project "${TEST_DIR}" --output "${JAR_OUTPUP_FOLDER_TEST}" \
  --maxL "${MAX_PATH_LENGTH}" --maxW "${MAX_PATH_WIDTH}" --maxContexts "${MAX_CONTEXTS}" --maxTokens "${WORD_VOCAB_SIZE}" \
  --maxPaths "${PATH_VOCAB_SIZE}"
echo "Finished extracting paths from test set"

echo "Extracting paths from training set..."
${JAVA} -jar "${EXTRACTOR_JAR}" code2vec --lang py --project "${TRAIN_DIR}" --output "${JAR_OUTPUP_FOLDER_TRAIN}" \
  --maxL "${MAX_PATH_LENGTH}" --maxW "${MAX_PATH_WIDTH}" --maxContexts "${MAX_CONTEXTS}" --maxTokens "${WORD_VOCAB_SIZE}" \
  --maxPaths "${PATH_VOCAB_SIZE}"
echo "Finished extracting paths from training set"
# ---- Target extraction and vocabulary histograms ------------------------
# Rewrite the extracted context files so the first field of each line is the
# prediction target.
${PYTHON} filepaths_to_targets.py "${VAL_DATA_FILE}" "${TEST_DATA_FILE}" "${TRAIN_DATA_FILE}"

echo "Creating histograms from the training data"
# Data format per line: <target> <ctx> <ctx> ...  where each <ctx> is a
# comma-separated triple: startToken,path,endToken.
# (Redirections replace the original "cat file | ..." — same output, one
# fewer process per pipeline.)
# Target histogram: occurrence count of the first space-separated field.
cut -d' ' -f1 < "${TRAIN_DATA_FILE}" \
    | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > "${TARGET_HISTOGRAM_FILE}"
# Token ("origin") histogram: fields 1 and 3 of every context triple.
cut -d' ' -f2- < "${TRAIN_DATA_FILE}" | tr ' ' '\n' | cut -d',' -f1,3 | tr ',' '\n' \
    | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > "${ORIGIN_HISTOGRAM_FILE}"
# Path histogram: field 2 (the path) of every context triple.
cut -d' ' -f2- < "${TRAIN_DATA_FILE}" | tr ' ' '\n' | cut -d',' -f2 \
    | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > "${PATH_HISTOGRAM_FILE}"

# Build the final train/val/test tensors and vocabularies from the raw
# context files and the histograms computed above.
${PYTHON} code2vec/preprocess.py --train_data "${TRAIN_DATA_FILE}" --val_data "${VAL_DATA_FILE}" \
  --max_contexts "${MAX_CONTEXTS}" --word_vocab_size "${WORD_VOCAB_SIZE}" --path_vocab_size "${PATH_VOCAB_SIZE}" \
  --target_vocab_size "${TARGET_VOCAB_SIZE}" --word_histogram "${ORIGIN_HISTOGRAM_FILE}" --output_name "${DATA_DIR}/data" \
  --path_histogram "${PATH_HISTOGRAM_FILE}" --target_histogram "${TARGET_HISTOGRAM_FILE}" --test_data "${TEST_DATA_FILE}"
echo "### Preprocessing is done ###"
##########################################################
# TRAINING
##########################################################
mkdir -p "${MODEL_DIR}" "${VECTORS_DIR}"
# NOTE(review): '--test' uses ${VAL_DATA} while the preprocessing above uses
# ${VAL_DATA_FILE}. Presumably VAL_DATA is the preprocessed *.val.c2v path
# defined in default.config — confirm it is actually set there, otherwise
# this argument expands empty.
${PYTHON} -u code2vec/code2vec.py --framework tensorflow --data "${DATA_DIR}/data" --test "${VAL_DATA}" \
  --save "${MODEL_DIR}/saved_model"