SCAFFOLD 的原理请见:ICML 2020 | SCAFFOLD: Stochastic Controlled Averaging for Federated Learning(联邦学习的随机控制平均)。
联邦学习中存在多个客户端,每个客户端都有自己的数据集,这个数据集他们是不愿意共享的。
数据集为某城市十个地区的风电功率,我们假设这10个地区的电力部门不愿意共享自己的数据,但是他们又想得到一个由所有数据统一训练得到的全局模型。
客户端的模型为一个简单的四层神经网络模型:
# -*- coding:utf-8 -*-
"""
@Time: 2022/03/02 11:21
@Author: KI
@File: model.py
@Motto: Hungry And Humble
"""
from torch import nn
class ANN(nn.Module):
    """Four-layer fully-connected client model with sigmoid activations.

    Besides the network layers, each instance carries SCAFFOLD bookkeeping
    dictionaries keyed by parameter name -- ``control`` (c_i),
    ``delta_control`` and ``delta_y`` -- plus federated-learning metadata
    (client name, batch size B, local epochs E, learning rate lr).
    """

    def __init__(self, input_dim, name, B, E, lr):
        super(ANN, self).__init__()
        # federated-learning metadata
        self.name = name   # client identifier
        self.B = B         # local batch size
        self.E = E         # number of local epochs
        self.len = 0       # number of local batches; set during training
        self.lr = lr       # local learning rate
        self.loss = 0
        # SCAFFOLD control-variate bookkeeping; populated by the server
        self.control = {}
        self.delta_control = {}
        self.delta_y = {}
        # layers; relu and dropout are registered but not used in forward
        self.fc1 = nn.Linear(input_dim, 20)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.dropout = nn.Dropout()
        self.fc2 = nn.Linear(20, 20)
        self.fc3 = nn.Linear(20, 20)
        self.fc4 = nn.Linear(20, 1)

    def forward(self, data):
        """Sigmoid-activated forward pass; output lies in (0, 1)."""
        out = data
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4):
            out = self.sigmoid(layer(out))
        return out
在SCAFFOLD中,本地模型的更新公式为:
$$y_i \gets y_i - \eta_l\bigl(g_i(y_i) + c - c_i\bigr)$$

其中 $g_i(y_i)$ 为本地梯度,$c$ 为服务器控制变量,$c_i$ 为客户端控制变量,$\eta_l$ 为本地学习率。
因此,优化器定义如下:
# -*- coding:utf-8 -*-
"""
@Time: 2022/03/02 13:34
@Author: KI
@File: ScaffoldOptimizer.py
@Motto: Hungry And Humble
"""
from torch.optim import Optimizer
class ScaffoldOptimizer(Optimizer):
    """SGD-style optimizer implementing the SCAFFOLD local update

        y_i <- y_i - lr * (g_i(y_i) + c - c_i)

    where c / c_i are the server / client control variates.

    Fixes over the original: ``weight_decay`` was stored in the defaults but
    never applied, and the ``closure`` argument of the standard Optimizer
    API was silently ignored.
    """

    def __init__(self, params, lr, weight_decay):
        defaults = dict(lr=lr, weight_decay=weight_decay)
        super(ScaffoldOptimizer, self).__init__(params, defaults)

    def step(self, server_controls, client_controls, closure=None):
        """Perform one SCAFFOLD update step.

        Args:
            server_controls: dict of server control variates (c), in the
                same order as the model's named_parameters().
            client_controls: dict of client control variates (c_i), same order.
            closure: optional callable that re-evaluates the loss; its
                result is returned, per the torch.optim.Optimizer contract.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            lr = group['lr']
            weight_decay = group['weight_decay']
            for p, c, ci in zip(group['params'], server_controls.values(), client_controls.values()):
                if p.grad is None:
                    continue
                # g_i(y_i) + c - c_i, plus optional L2 penalty on the weights
                dp = p.grad.data + c.data - ci.data
                if weight_decay != 0:
                    dp = dp + weight_decay * p.data
                p.data = p.data - lr * dp
        return loss
核心代码为:
dp = p.grad.data + c.data - ci.data
p.data = p.data - dp.data * group['lr']
服务器端收发模型参数,并更新全局控制变量:
class Scaffold:
    """SCAFFOLD server: samples clients each round, dispatches the global
    model, triggers local training, and aggregates parameter / control
    deltas back into the global state.
    """

    def __init__(self, options):
        self.C = options['C']              # fraction of clients sampled per round
        self.E = options['E']              # local epochs
        self.B = options['B']              # local batch size
        self.K = options['K']              # total number of clients
        self.r = options['r']              # number of communication rounds
        self.input_dim = options['input_dim']
        self.lr = options['lr']
        self.clients = options['clients']  # client names
        self.nn = ANN(input_dim=self.input_dim, name='server', B=self.B, E=self.E, lr=self.lr).to(
            device)
        # global control variate c and delta accumulators, one zero tensor
        # per named parameter
        for k, v in self.nn.named_parameters():
            self.nn.control[k] = torch.zeros_like(v.data)
            self.nn.delta_control[k] = torch.zeros_like(v.data)
            self.nn.delta_y[k] = torch.zeros_like(v.data)
        # one local model per client, each starting from the global weights
        self.nns = []
        for i in range(self.K):
            temp = copy.deepcopy(self.nn)
            temp.name = self.clients[i]
            temp.control = copy.deepcopy(self.nn.control)  # c_i
            temp.delta_control = copy.deepcopy(self.nn.delta_control)
            temp.delta_y = copy.deepcopy(self.nn.delta_y)
            self.nns.append(temp)

    def server(self):
        """Run r communication rounds and return the global model."""
        for t in range(self.r):
            print('round', t + 1, ':')
            # sample m = max(C*K, 1) clients uniformly at random
            m = np.max([int(self.C * self.K), 1])
            index = random.sample(range(0, self.K), m)
            # push the current global weights to the sampled clients
            self.dispatch(index)
            # local SCAFFOLD updates on the sampled clients
            self.client_update(index)
            # fold delta_y / delta_control back into the global state
            self.aggregation(index)
        return self.nn

    def aggregation(self, index):
        """Average the sampled clients' parameter and control deltas and
        apply them to the global model (server learning rate = 1)."""
        s = 0.0
        for j in index:
            # total batch count across sampled clients (currently unused)
            s += self.nns[j].len
        # zero-initialized accumulators for the averaged deltas
        x = {}
        c = {}
        for k, v in self.nns[0].named_parameters():
            x[k] = torch.zeros_like(v.data)
            c[k] = torch.zeros_like(v.data)
        for j in index:
            for k in x:
                x[k] += self.nns[j].delta_y[k] / len(index)        # averaging
                c[k] += self.nns[j].delta_control[k] / len(index)  # averaging
        # x <- x + avg(delta_y);  c <- c + (|S|/K) * avg(delta_c)
        for k, v in self.nn.named_parameters():
            v.data += x[k].data  # lr=1
            self.nn.control[k].data += c[k].data * (len(index) / self.K)

    def dispatch(self, index):
        """Copy the global parameters into each sampled client model.

        Fix: the original assigned in the opposite direction
        (server <- client), which overwrote the global model with stale
        client weights instead of dispatching it.
        """
        for j in index:
            for client_p, server_p in zip(self.nns[j].parameters(), self.nn.parameters()):
                client_p.data = server_p.data.clone()

    def client_update(self, index):
        """Run local SCAFFOLD training on each sampled client."""
        for k in index:
            self.nns[k] = train(self.nns[k], self.nn)
客户端更新本地模型:
def train(ann, server):
    """SCAFFOLD local update for one client.

    Trains ``ann`` for E epochs with the ScaffoldOptimizer, then refreshes
    the client control variate c_i and records delta_y / delta_control
    against the pre-training snapshot. Returns the updated client model.
    """
    ann.train()
    train_loader, test_loader = nn_seq_wind(ann.name, ann.B)
    ann.len = len(train_loader)
    print('training...')
    criterion = nn.MSELoss().to(device)
    loss = 0
    # snapshot of the model (and c_i) before local training -- x in the paper
    snapshot = copy.deepcopy(ann)
    optimizer = ScaffoldOptimizer(ann.parameters(), lr=ann.lr, weight_decay=1e-4)
    for epoch in range(ann.E):
        for seq, label in train_loader:
            seq = seq.to(device)
            label = label.to(device)
            optimizer.zero_grad()
            loss = criterion(ann(seq), label)
            loss.backward()
            optimizer.step(server.control, ann.control)
        print('epoch', epoch, ':', loss.item())
    # c_i+ <- c_i - c + (x - y_i) / (E * lr)
    # NOTE(review): the paper's K is the number of local gradient steps
    # (E * batches); this code uses E only -- confirm intended.
    trained = {k: v.data.clone() for k, v in ann.named_parameters()}
    for k, v in snapshot.named_parameters():
        ann.control[k] = ann.control[k] - server.control[k] + (v.data - trained[k]) / (ann.E * ann.lr)
        ann.delta_y[k] = trained[k] - v.data
        ann.delta_control[k] = ann.control[k] - snapshot.control[k]
    return ann
模型更新结束后需要更新控制变量,控制变量的更新公式为:
$$c_i^+ \gets c_i - c + \frac{1}{K\eta_l}\,(x - y_i)$$
这里 $K$ 为本地更新的轮数,$\eta_l$ 为学习率。对应代码为:
ann.control[k] = ann.control[k] - server.control[k] + (v.data - temp[k]) / (ann.E * ann.lr)
控制变量更新结束后,计算模型和控制变量前后的差值:
ann.delta_y[k] = temp[k] - v.data
ann.delta_control[k] = ann.control[k] - x.control[k]
完整项目我放在了GitHub上,项目地址:Scaffold-Federated Learning,原创不易,下载后请随手点下Follow和Star,感谢!!