Skip to content

Commit 004a405

Browse files
committed
remove opacus and DP
1 parent 5012fce commit 004a405

7 files changed

Lines changed: 1 addition & 80 deletions

File tree

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ Figure 1: An Example for FedAvg. You can create a scenario using `generate_DATA.
2525

2626
The origin of the **statistical heterogeneity** phenomenon is the personalization of users, who generate non-IID (not Independent and Identically Distributed) and unbalanced data. With statistical heterogeneity existing in the FL scenario, a myriad of approaches have been proposed to crack this hard nut. In contrast, the personalized FL (pFL) may take advantage of the statistically heterogeneous data to learn the personalized model for each user.
2727

28-
Thanks to [@Stonesjtu](https://github.com/Stonesjtu/pytorch_memlab/blob/d590c489236ee25d157ff60ecd18433e8f9acbe3/pytorch_memlab/mem_reporter.py#L185), this library can also record the **GPU memory usage** for the model. By using the package [opacus](https://opacus.ai/), we introduce **DP (differential privacy)** into this library (please refer to `./system/flcore/clients/clientavg.py` for example). Following [FedCG](https://www.ijcai.org/proceedings/2022/0324.pdf), we also introduce the **[DLG (Deep Leakage from Gradients)](https://papers.nips.cc/paper_files/paper/2019/hash/60a6c4002cc7b29142def8871531281a-Abstract.html) attack** and **PSNR (Peak Signal-to-Noise Ratio) metric** to evaluate the privacy-preserving ability of tFL/pFL algorithms (please refer to `./system/flcore/servers/serveravg.py` for example). *Now we can train on some clients and evaluate performance on other new clients by setting `args.num_new_clients` in `./system/main.py`. Note that not all the tFL/pFL algorithms support this feature.*
28+
Thanks to [@Stonesjtu](https://github.com/Stonesjtu/pytorch_memlab/blob/d590c489236ee25d157ff60ecd18433e8f9acbe3/pytorch_memlab/mem_reporter.py#L185), this library can also record the **GPU memory usage** for the model. Following [FedCG](https://www.ijcai.org/proceedings/2022/0324.pdf), we also introduce the **[DLG (Deep Leakage from Gradients)](https://papers.nips.cc/paper_files/paper/2019/hash/60a6c4002cc7b29142def8871531281a-Abstract.html) attack** and **PSNR (Peak Signal-to-Noise Ratio) metric** to evaluate the privacy-preserving ability of tFL/pFL algorithms (please refer to `./system/flcore/servers/serveravg.py` for example). *Now we can train on some clients and evaluate performance on other new clients by setting `args.num_new_clients` in `./system/main.py`. Note that not all the tFL/pFL algorithms support this feature.*
2929

3030
**Citation**
3131

env_cuda_latest.yaml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@ dependencies:
2020
- torchvision
2121
- calmsize
2222
- memory-profiler
23-
- opacus
2423
- portalocker
2524
- cvxpy
2625
- higher

system/flcore/clients/clientavg.py

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,6 @@
2020
import numpy as np
2121
import time
2222
from flcore.clients.clientbase import Client
23-
from utils.privacy import *
2423

2524

2625
class clientAVG(Client):
@@ -31,12 +30,6 @@ def train(self):
3130
trainloader = self.load_train_data()
3231
# self.model.to(self.device)
3332
self.model.train()
34-
35-
# differential privacy
36-
if self.privacy:
37-
model_origin = copy.deepcopy(self.model)
38-
self.model, self.optimizer, trainloader, privacy_engine = \
39-
initialize_dp(self.model, self.optimizer, trainloader, self.dp_sigma)
4033

4134
start_time = time.time()
4235

@@ -66,12 +59,3 @@ def train(self):
6659

6760
self.train_time_cost['num_rounds'] += 1
6861
self.train_time_cost['total_cost'] += time.time() - start_time
69-
70-
if self.privacy:
71-
eps, DELTA = get_dp_params(privacy_engine)
72-
print(f"Client {self.id}", f"epsilon = {eps:.2f}, sigma = {DELTA}")
73-
74-
for param, param_dp in zip(model_origin.parameters(), self.model.parameters()):
75-
param.data = param_dp.data.clone()
76-
self.model = model_origin
77-
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)

system/flcore/clients/clientbase.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,9 +59,6 @@ def __init__(self, args, id, train_samples, test_samples, **kwargs):
5959
self.train_time_cost = {'num_rounds': 0, 'total_cost': 0.0}
6060
self.send_time_cost = {'num_rounds': 0, 'total_cost': 0.0}
6161

62-
self.privacy = args.privacy
63-
self.dp_sigma = args.dp_sigma
64-
6562
self.loss = nn.CrossEntropyLoss()
6663
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
6764
self.learning_rate_scheduler = torch.optim.lr_scheduler.ExponentialLR(

system/flcore/clients/clientntd.py

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222
import time
2323
import torch.nn.functional as F
2424
from flcore.clients.clientbase import Client
25-
from utils.privacy import *
2625

2726

2827
class clientNTD(Client):
@@ -39,12 +38,6 @@ def train(self):
3938
trainloader = self.load_train_data()
4039
# self.model.to(self.device)
4140
self.model.train()
42-
43-
# differential privacy
44-
if self.privacy:
45-
model_origin = copy.deepcopy(self.model)
46-
self.model, self.optimizer, trainloader, privacy_engine = \
47-
initialize_dp(self.model, self.optimizer, trainloader, self.dp_sigma)
4841

4942
start_time = time.time()
5043

@@ -76,15 +69,6 @@ def train(self):
7669

7770
self.train_time_cost['num_rounds'] += 1
7871
self.train_time_cost['total_cost'] += time.time() - start_time
79-
80-
if self.privacy:
81-
eps, DELTA = get_dp_params(privacy_engine)
82-
print(f"Client {self.id}", f"epsilon = {eps:.2f}, sigma = {DELTA}")
83-
84-
for param, param_dp in zip(model_origin.parameters(), self.model.parameters()):
85-
param.data = param_dp.data.clone()
86-
self.model = model_origin
87-
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
8872

8973
def set_parameters(self, model):
9074
for new_param, old_param in zip(model.parameters(), self.model.parameters()):

system/main.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -413,9 +413,6 @@ def run(args):
413413
help="Running times")
414414
parser.add_argument('-eg', "--eval_gap", type=int, default=1,
415415
help="Rounds gap for evaluation")
416-
parser.add_argument('-dp', "--privacy", type=bool, default=False,
417-
help="differential privacy")
418-
parser.add_argument('-dps', "--dp_sigma", type=float, default=0.0)
419416
parser.add_argument('-sfn', "--save_folder_name", type=str, default='items')
420417
parser.add_argument('-ab', "--auto_break", type=bool, default=False)
421418
parser.add_argument('-dlg', "--dlg_eval", type=bool, default=False)
@@ -515,9 +512,6 @@ def run(args):
515512
print("Number of classes: {}".format(args.num_classes))
516513
print("Backbone: {}".format(args.model))
517514
print("Using device: {}".format(args.device))
518-
print("Using DP: {}".format(args.privacy))
519-
if args.privacy:
520-
print("Sigma for DP: {}".format(args.dp_sigma))
521515
print("Auto break: {}".format(args.auto_break))
522516
if not args.auto_break:
523517
print("Global rounds: {}".format(args.global_rounds))

system/utils/privacy.py

Lines changed: 0 additions & 37 deletions
This file was deleted.

0 commit comments

Comments (0)