1. Python 程序部分
# Section 1: parse command-line arguments and copy them into TensorFlow's
# global FLAGS before starting office31 training.
#
# NOTE(review): this snippet assumes `tf` (TensorFlow 1.x), `office31_flags`,
# and `path` (a base directory string) are defined/imported by the surrounding
# program; they are not visible in this file — confirm against the full script.
import argparse
import os  # needed for the os.path.join fallbacks below (missing in original)

FLAGS = tf.app.flags.FLAGS
office31_flags.train()

parser = argparse.ArgumentParser()
# Data and checkpoint locations (default None so we can detect "not supplied").
parser.add_argument('--unlabeled_data_path', type=str, default=None)
parser.add_argument('--labeled_train_data_dir', type=str, default=None)
parser.add_argument('--train_dir_for_save', type=str, default=None)
parser.add_argument('--pretrained_checkpoint_path', type=str, default=None)
# Loss weights: d_* for the discriminator terms, g_* for the generator terms.
parser.add_argument('--d_cross_entropy', type=float, default=None)
parser.add_argument('--d_fake', type=float, default=None)
parser.add_argument('--d_unlabel', type=float, default=None)
parser.add_argument('--g_fc7_match', type=float, default=None)
parser.add_argument('--g_logical', type=float, default=None)
parser.add_argument('--g_image_match', type=float, default=None)
# BUG FIX: these five arguments were originally registered AFTER parse_args(),
# so they were silently never parsed. All add_argument calls must precede
# parse_args().
parser.add_argument('--dataset', type=str, default='CIFAR10',
                    choices=['MNIST', 'SVHN', 'CIFAR10'])
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--update_rate', type=int, default=5)
parser.add_argument('--lr_weight_decay', action='store_true', default=False)
parser.add_argument('--dump_result', action='store_true', default=False)
parser_parameters = parser.parse_args()

# Mirror the parsed values into the TensorFlow FLAGS object so the rest of the
# training code (which reads FLAGS) sees them.
FLAGS.unlabeled_data_path = parser_parameters.unlabeled_data_path
FLAGS.labeled_train_data_dir = parser_parameters.labeled_train_data_dir
FLAGS.train_dir_for_save = parser_parameters.train_dir_for_save
FLAGS.pretrained_checkpoint_path = parser_parameters.pretrained_checkpoint_path
FLAGS.d_cross_entropy = parser_parameters.d_cross_entropy
FLAGS.d_fake = parser_parameters.d_fake
FLAGS.d_unlabel = parser_parameters.d_unlabel
FLAGS.g_fc7_match = parser_parameters.g_fc7_match
FLAGS.g_logical = parser_parameters.g_logical
FLAGS.g_image_match = parser_parameters.g_image_match

# BUG FIX: the original unconditionally overwrote the four path flags with
# hard-coded locations immediately after copying the parsed values, which made
# the corresponding command-line options dead. Use the hard-coded locations
# only as fallbacks when the flag was not supplied on the command line.
if FLAGS.unlabeled_data_path is None:
    FLAGS.unlabeled_data_path = os.path.join(path, 'office_data/webcam')
if FLAGS.labeled_train_data_dir is None:
    FLAGS.labeled_train_data_dir = os.path.join(path, 'office_data/amazon')
if FLAGS.train_dir_for_save is None:
    FLAGS.train_dir_for_save = os.path.join(path, 'trained_dir')
if FLAGS.pretrained_checkpoint_path is None:
    FLAGS.pretrained_checkpoint_path = os.path.join(path, 'pretrained_model/bvlc_alexnet.npy')
2. Shell 脚本部分
#!/usr/bin/env sh
# Section 2: environment setup + srun launch of office31 multi-GPU training
# on the cluster (ROBOT partition, 4 GPUs).
export PATH=/mnt/lustre/share/anaconda2_bigvideo/bin:$PATH
export LD_LIBRARY_PATH=/mnt/lustre/share/cuda-8.0-cudnn6/lib64/:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/mnt/lustre/share/mvapich2-2.2b-cuda8.0/lib/:$LD_LIBRARY_PATH
#export LD_LIBRARY_PATH=/mnt/lustre/wuguangbin/wgb_software/tensorflow_object_detection_api/models_installed/research/slim:$LD_LIBRARY_PATH
#export PYTHONPATH=$PYTHONPATH:=/mnt/lustre/wuguangbin/Data_t1/cornell_ssd_code/tensorflow_object_detection_api_20180419forgrasp/research
#export PYTHONPATH="${PYTHONPATH}:/mnt/lustre/wuguangbin/wgb_software/tensorflow_object_detection_api/models_installed/research/slim"
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/cifar10
export PROJECT_ROOT=/mnt/lustre/wuguangbin/Data_t1/cornell_ssd_code/tensorflow_object_detection_api_20180419forgrasp/research/object_detection
# NOTE(review): `2>&1 &` below merges stderr into stdout but stdout itself is
# not redirected, so a backgrounded run still prints to the terminal; use
# e.g. `> train.log 2>&1 &` if the logs should be captured — confirm intent.
srun --partition=ROBOT --gres=gpu:4 -n1 --ntasks-per-node=2 python ${PROJECT_ROOT}/office31_multi_gpu_train.py \
--unlabeled_data_path=${PROJECT_ROOT}/office_data/webcam \
--labeled_train_data_dir=${PROJECT_ROOT}/office_data/amazon \
--train_dir_for_save=${PROJECT_ROOT}/trained_dir1 \
--pretrained_checkpoint_path=${PROJECT_ROOT}/pretrained_model/bvlc_alexnet.npy \
--d_cross_entropy=1.0 \
--d_fake=1.0 \
--d_unlabel=0.5 \
--g_fc7_match=1.0 \
--g_logical=1.0 \
--g_image_match=1.0 \
2>&1 &
# BUG FIX: the original line `--d_unlabel=0.5\` had no space before the
# continuation backslash, so the shell joined it with the next line and passed
# the single malformed argument `--d_unlabel=0.5--g_fc7_match=1.0`.
3. PyCharm 中 Run -> Edit Configurations -> Parameters 部分
--unlabeled_data_path=/Documents/GAN/tensorflow_cifiar_gan1/tutorials/image/cifar10/office_data/webcam
--labeled_train_data_dir=/Documents/GAN/tensorflow_cifiar_gan1/tutorials/image/cifar10/office_data/amazon
--train_dir_for_save=/Documents/GAN/tensorflow_cifiar_gan1/tutorials/image/cifar10/trained_dir1
--pretrained_checkpoint_path=/Documents/GAN/tensorflow_cifiar_gan1/tutorials/image/cifar10/pretrained_model/bvlc_alexnet.npy
--d_cross_entropy=1.0
--d_fake=1.0
--d_unlabel=0.5
--g_fc7_match=1.0
--g_logical=1.0
--g_image_match=1.0