configs.py

import logging
import multiprocessing
import random

import numpy as np
import torch

logger = logging.getLogger(__name__)


def add_args(parser):
    parser.add_argument("--task", type=str, required=True,
                        choices=['summarize', 'refine', 'translate', 'concode', 'clone', 'defect'])
    parser.add_argument("--sub_task", type=str, default='')
    parser.add_argument("--add_lang_ids", action='store_true')
    # plbart unfinished
    parser.add_argument("--model_name", default="roberta", type=str,
                        choices=['roberta', 'codebert', 'graphcodebert', 'bart', 'plbart', 't5', 'codet5'])
    parser.add_argument('--seed', type=int, default=1234,
                        help="random seed for initialization")  # previous one 42
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument("--cache_path", type=str, default='cache_data')
    parser.add_argument("--res_dir", type=str, default='results',
                        help='directory to save fine-tuning results')
    parser.add_argument("--res_fn", type=str, default='')
    parser.add_argument("--model_dir", type=str, default='saved_models',
                        help='directory to save fine-tuned models')
    parser.add_argument("--summary_dir", type=str, default='tensorboard',
                        help='directory to save tensorboard summary')
    parser.add_argument("--data_num", type=int, default=-1,
                        help='number of data instances to use, -1 for full data')
    parser.add_argument("--gpu", type=int, default=0,
                        help='index of the gpu to use in a cluster')
    parser.add_argument("--data_dir", default='data', type=str)
    parser.add_argument("--output_dir", default='outputs', type=str,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training on the train set.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test", action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--add_task_prefix", action='store_true',
                        help="Whether to add a task prefix for t5 and codet5")
    parser.add_argument("--save_last_checkpoints", action='store_true')
    parser.add_argument("--always_save_model", action='store_true')
    parser.add_argument("--do_eval_bleu", action='store_true',
                        help="Whether to evaluate BLEU on the dev set.")
    parser.add_argument("--start_epoch", default=0, type=int)
    parser.add_argument("--num_train_epochs", default=100, type=int)
    parser.add_argument("--patience", default=5, type=int)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--lr", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--beam_size", default=10, type=int,
                        help="beam size for beam search")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--warmup_steps", default=100, type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--batch_size", default=8, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--attention_batch_size", default=100, type=int,
                        help="Batch size per GPU/CPU for computing attention.")

    args = parser.parse_args()
    return args
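
# Usage sketch (not part of the original file): add_args expects a fresh
# argparse.ArgumentParser and returns the fully parsed namespace, so a driver
# script (the name main.py below is an assumption) would do roughly:
#
#     import argparse
#     args = add_args(argparse.ArgumentParser())
#
# invoked from the shell as, e.g.:
#
#     $ python main.py --task summarize --sub_task python --model_name codet5 --do_train --do_eval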


def set_dist(args):
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device(
            "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:
        # Setup for distributed data parallel
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    cpu_count = multiprocessing.cpu_count()
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, cpu count: %d",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), cpu_count)
    args.device = device
    args.cpu_count = cpu_count
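
# Note (an assumption about intended use, not stated in the original): with the
# default --local_rank of -1, set_dist picks plain single-node CUDA or CPU; for
# the distributed branch, a launcher supplies one rank per process, e.g. with
# the legacy PyTorch launcher:
#
#     $ python -m torch.distributed.launch --nproc_per_node=4 main.py --task summarize ...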


def set_seed(args):
    """set random seed."""
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
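
# Note (addition, not in the original): set_seed covers the Python, NumPy, and
# torch RNGs, but bitwise-deterministic GPU runs would additionally require
# pinning the cuDNN kernel selection, which this module does not do:
#
#     torch.backends.cudnn.deterministic = True
#     torch.backends.cudnn.benchmark = False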


def set_hyperparas(args):
    if args.task == 'summarize':
        args.adam_epsilon = 1e-8
        args.beam_size = 10
        args.gradient_accumulation_steps = 1
        args.lr = 5e-5
        args.max_source_length = 256
        args.max_target_length = 128
        args.num_train_epochs = 15
        args.patience = 2
        args.weight_decay = 0.0
        args.warmup_steps = 1000
        if args.model_name in ['roberta', 'codebert', 'graphcodebert']:
            args.batch_size = 48
        elif args.model_name in ['t5', 'codet5']:
            args.batch_size = 32
        elif args.model_name in ['bart', 'plbart']:
            args.batch_size = 48
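

# Minimal end-to-end sketch (assumed driver code, not part of this module):
# set_seed reads args.n_gpu, which set_dist fills in, so set_dist must run
# first; set_hyperparas then overrides the argparse defaults per task/model.
if __name__ == "__main__":
    import argparse

    args = add_args(argparse.ArgumentParser())
    set_dist(args)        # fills args.device, args.n_gpu, args.cpu_count
    set_seed(args)        # relies on args.n_gpu set by set_dist
    set_hyperparas(args)  # task- and model-specific lr/batch_size/etc.
    logger.info("task=%s device=%s batch_size=%d", args.task, args.device, args.batch_size)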