import copy
import json
import os
import sys
import time

import numpy as np
import torch
from tqdm import tqdm

from Models import Client, Server, SFL_ResNet18
from Models.ModelFactory import get_bottom_and_top_model
from Utils import DatasetUtil
from Utils.DatasetUtil import epsilon_normalize
from Utils.FlexLR import FlexLearningRate
from Utils.LogUtil import LogUtil
from Utils.PrintUtil import dirCheck, getNowString, Logger
from Utils.args import parser_args
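

# Reference only; never called in this script: a minimal sketch of what a
# plain FedAvg over client state_dicts classically looks like. It is an
# assumption about Server.fedAvg (called in the training loop below); the
# authoritative implementation lives in Models/Server.py.
def _fedavg_sketch(w_locals):
    """Hypothetical helper: element-wise average of a list of state_dicts."""
    w_avg = copy.deepcopy(w_locals[0])
    for key in w_avg.keys():
        # Sum the other clients' tensors, then divide by the client count.
        for w in w_locals[1:]:
            w_avg[key] += w[key]
        w_avg[key] = torch.div(w_avg[key], len(w_locals))
    return w_avg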


def clean_train(args):
    dataset_name = args.dataset
    start_time = time.time()
    now_str = getNowString()
    logUtil = LogUtil(now_str)
    num_users = args.num_clients
    epochs = args.global_epochs
    local_epochs = args.local_epochs
    frac = args.frac  # fraction of participating clients; 1 means every client joins each round in SFLV1
    lr = args.lr
    wm_engage = args.wm_engage
    backdoor_engage = args.bd_engage
    dp_epsilon = epsilon_normalize(args.epsilon)
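    # epsilon_normalize presumably rescales the CLI epsilon into the
    # per-client DP budget handed to each Client; a value of 0 means DP is
    # disabled (see the dp_epsilon != 0 check before logging below).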
    flex_lr_stages = args.lr_stages
    flex_lr_gamma = args.gamma
    noniid_mode = args.noniid
    noniid_beta = args.beta
    model_name = args.model
    split_point = args.split_point
    cos_annealing_multiplier = args.cos_annealing_multiplier

    if model_name == "ResNet18":
        with open(os.path.join('wm_conf', 'resnet18_wm_cfg.json'), "r") as f:
            wm_config = json.load(f)
    elif model_name == "MobileNet":
        with open(os.path.join('wm_conf', 'mobilenet_wm_cfg.json'), "r") as f:
            wm_config = json.load(f)
    elif model_name == "DenseNet":
        with open(os.path.join('wm_conf', 'densenet_wm_cfg.json'), "r") as f:
            wm_config = json.load(f)
    else:
        wm_config = None  # no watermark config ships for other models
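    # The wm_config JSON presumably parameterizes server-side watermark
    # embedding; Server below tolerates wm_config=None and only persists the
    # watermark kwargs when a config was actually loaded.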

    consoleLogDir = logUtil.getDir()
    log_file = os.path.join(consoleLogDir,
                            f"{dataset_name}_{now_str}_{epochs}_{wm_engage}_{backdoor_engage}.log")
    # redirect stdout to the log file
    sys.stdout = Logger(log_file, "a")

    train_dataset, test_dataset, num_classes = DatasetUtil.get_datasets(dataset_name=dataset_name,
                                                                        use_augmentation=True)
    bottom_model, top_model = get_bottom_and_top_model(model_name=model_name, split_point=split_point,
                                                       num_classes=num_classes)
    server = Server.Server(num_users=num_users, num_classes=num_classes, model_name=model_name, lr=lr,
                           wm_config=wm_config, log_dir=logUtil.getDir(), model=top_model)
    if wm_config is not None:
        server.save_wm_kwargs(logUtil.getDir())

    dict_users = DatasetUtil.dataset_iid(train_dataset, num_users)
    dict_users_test = DatasetUtil.dataset_iid(test_dataset, num_users)
    if noniid_mode:
        dict_users = DatasetUtil.dataset_noniid(num_classes=num_classes, dataset=train_dataset,
                                                num_users=num_users, beta=noniid_beta)
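    # Assumption about DatasetUtil.dataset_noniid: beta reads like the usual
    # Dirichlet concentration parameter for label-skew partitioning, i.e.
    # each client's class proportions are drawn from Dirichlet(beta), and a
    # smaller beta means a more heterogeneous split. The test indices stay
    # IID either way.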

    net_globe_client = bottom_model
    # net_globe_client = SFL_ResNet18.ResNet_Bottom()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net_globe_client.to(device)
    # if torch.cuda.device_count() > 1:
    #     net_globe_client = nn.DataParallel(net_globe_client)
    net_globe_client.train()
    w_globe_client = net_globe_client.state_dict()

    clients = []
    for idx in range(num_users):
        clients.append(Client.Client(net_client_model=net_globe_client, server=server, idx=idx, lr=lr,
                                     device=device, local_epoch=local_epochs, dp_epsilon=dp_epsilon,
                                     dataset_train=train_dataset, dataset_test=test_dataset,
                                     indices_train=dict_users[idx], indices_test=dict_users_test[idx],
                                     backdoor_flag=0))  # clean training: no client injects a backdoor

    flexLR = FlexLearningRate(total_epochs=epochs, initial_lr=lr, decline_multiplier=flex_lr_gamma,
                              num_lr_stages=flex_lr_stages, cos_annealing_multiplier=cos_annealing_multiplier)
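    # My reading of FlexLearningRate (Utils/FlexLR.py), stated as an
    # assumption: the schedule is cut into num_lr_stages stages whose base lr
    # shrinks by decline_multiplier, with cos_annealing_multiplier shaping the
    # within-stage cosine curve. get_lr(epoch) below is the only contract this
    # script relies on.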

    for epoch in range(epochs):
        m = max(int(frac * num_users), 1)
        idxs_users = np.random.choice(range(num_users), m, replace=False)
        w_locals_client = []
        print('', flush=True)
        current_lr = flexLR.get_lr(epoch)
        print("-----------------------------------------------------------")
        print("------ FedServer: Federation process at Client-Side ------- ")
        print(f"-------------------- Epoch: {epoch} ----------------------- ")
        print("-----------------------------------------------------------")
        for idx in tqdm(idxs_users, desc=f"Epoch: {epoch}, lr: {current_lr}"):
            clients[idx].adjust_learning_rate(current_lr)
            w_client = clients[idx].train(net=copy.deepcopy(net_globe_client))
            w_locals_client.append(copy.deepcopy(w_client))
            clients[idx].evaluate(net=copy.deepcopy(net_globe_client), ell=epoch)
        # Run FedAvg and update the clients' global bottom model once the
        # global epoch finishes.
        w_globe_client = Server.fedAvg(w_locals_client)
        net_globe_client.load_state_dict(w_globe_client)
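        # Each client trains on a deepcopy of net_globe_client, so the
        # averaged weights loaded here are what every participant starts
        # from in the next global epoch.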
print("Training and Evaluation completed!\n")
sys.stdout = sys.__stdout__
end_time = time.time()
backdoor_type_str = "No Backdoor Embedded"
server_time_cost = 0.0
client_time_cost = 0.0
for i in range(num_users):
server_time_cost += clients[i].get_server_time_cost()
client_time_cost += clients[i].get_client_time_cost()
server_time_cost /= num_users
client_time_cost /= num_users
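    # The loop above accumulates totals across all clients; the two divisions
    # turn them into per-client averages, and the /float(epochs) in the dict
    # below turns those into per-epoch figures.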
    main_task_acc = server.get_main_task_acc()

    hyperparameters_dict = {
        'model_name': model_name,
        'split_point': split_point,
        'dataset': dataset_name,
        'non-iid': noniid_mode,
        'num_users': num_users,
        'global_epochs': epochs,
        'local_epochs': local_epochs,
        'init_lr': lr,
        'lr_decay_stages': flex_lr_stages,
        'frac': frac,
        'console_log_file': log_file,
        'main_task_acc': main_task_acc,
        'running_time': end_time - start_time,
        'backdoor_type': backdoor_type_str,
        'server_time_cost_per_epoch': server_time_cost / float(epochs),
        'client_time_cost_per_epoch': client_time_cost / float(epochs),
    }

    if dp_epsilon != 0:
        hyperparameters_dict['dp_epsilon'] = args.epsilon
    hyperparameters_dict['global_bottom_model'] = logUtil.save_trained_model(net_globe_client,
                                                                             "global_bottom_model")
    hyperparameters_dict['global_top_model'] = logUtil.save_trained_model(server.get_server_global_top_model(),
                                                                          "global_top_model")
    if noniid_mode:
        hyperparameters_dict['non-iid_beta'] = noniid_beta
    main_task_result_file_name = server.save_main_task_result()
    main_task_detail_result_file_name = server.save_main_task_detail_result()
    hyperparameters_dict['main_task_brief_result'] = main_task_result_file_name
    hyperparameters_dict['main_task_detail_result'] = main_task_detail_result_file_name
    logUtil.saveHyperParameters(hyperparameters_dict)


if __name__ == '__main__':
    dirCheck('./logs')
    args = parser_args()
    clean_train(args)
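
# Example invocation (flag names are hypothetical; the real ones are defined
# in Utils/args.py):
#   python clean_train.py --dataset CIFAR10 --model ResNet18 --split_point 2 \
#       --num_clients 10 --global_epochs 100 --local_epochs 1 --frac 1.0 --lr 0.01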