"""Data utilities for federated learning experiments on MNIST.

Provides dataset loading, a non-IID partition across clients, per-client
DataLoaders, and a helper that prints the resulting label distribution.
"""
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms
import numpy as np


def load_datasets():
    """Download MNIST and return (trainset, testset) with standard normalization."""
    transform = transforms.Compose([
        transforms.ToTensor(),
        # Standard MNIST mean/std.
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    trainset = datasets.MNIST('./data', train=True, download=True, transform=transform)
    testset = datasets.MNIST('./data', train=False, download=True, transform=transform)
    return trainset, testset

def create_non_iid_distribution(trainset, num_clients):
    """Partition the training set into a non-IID split across three clients.

    Client 0 specializes in classes 0-3, client 1 in classes 4-6, and
    client 2 in classes 7-9. Each client receives 70% of the samples of
    its specialty classes; the remaining 30% of every class is split
    evenly across all clients, so a specialist ends up with roughly 80%
    of its own classes and 10% of every other class.
    """
    if num_clients != 3:
        raise ValueError("create_non_iid_distribution only supports 3 clients")
    labels = np.array([trainset[i][1] for i in range(len(trainset))])
    client_indices = [[] for _ in range(num_clients)]
    for cls in range(10):
        cls_indices = np.where(labels == cls)[0]
        np.random.shuffle(cls_indices)
        if cls <= 3:
            specialist = 0
        elif cls <= 6:
            specialist = 1
        else:
            specialist = 2
        # 70% of this class goes to its specialist client...
        split_point = int(0.7 * len(cls_indices))
        client_indices[specialist].extend(cls_indices[:split_point])
        # ...and the remaining 30% is shared evenly among all clients.
        remaining = cls_indices[split_point:]
        for i, shard in enumerate(np.array_split(remaining, num_clients)):
            client_indices[i].extend(shard)
    for i in range(num_clients):
        np.random.shuffle(client_indices[i])
    return client_indices
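

# A small sanity-check helper, offered as a minimal sketch (an addition, not
# part of the original module): it asserts that the partition returned above
# is a disjoint, complete cover of the training set, i.e. every index lands
# in exactly one client's shard.
def validate_partition(client_indices, trainset):
    all_indices = np.concatenate([np.asarray(idx) for idx in client_indices])
    assert len(all_indices) == len(trainset), "partition does not cover the dataset"
    assert len(np.unique(all_indices)) == len(all_indices), "client shards overlap"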


def get_client_loaders(client_id, client_indices, trainset, testset, batch_size=32):
    """Build the train loader for one client's shard and a shared test loader."""
    train_subset = Subset(trainset, client_indices[client_id])
    trainloader = DataLoader(train_subset, batch_size=batch_size, shuffle=True)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False)
    return trainloader, testloader

def analyze_distribution(client_indices, trainset):
    """Print per-client sample counts and class distribution."""
    print("\n" + "=" * 60)
    print("CLIENT DATA DISTRIBUTION")
    print("=" * 60)
    for client_id, indices in enumerate(client_indices):
        labels = [trainset[i][1] for i in indices]
        unique, counts = np.unique(labels, return_counts=True)
        print(f"\nClient {client_id}:")
        print(f"  Total samples: {len(indices)}")
        print("  Class distribution:")
        for cls, count in zip(unique, counts):
            percentage = (count / len(indices)) * 100
            print(f"    Class {cls}: {count:4d} ({percentage:5.1f}%)")
    print("\n" + "=" * 60 + "\n")

def get_centralized_loaders(trainset, testset, batch_size=32):
    """Build loaders over the full training set, for a centralized baseline."""
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False)
    return trainloader, testloader
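

# Example usage, a minimal sketch wiring the helpers together for the
# 3-client split. The seed value is an assumption, chosen only to make
# the partition reproducible between runs.
if __name__ == "__main__":
    np.random.seed(42)  # assumed seed, not specified by the original module
    trainset, testset = load_datasets()
    client_indices = create_non_iid_distribution(trainset, num_clients=3)
    analyze_distribution(client_indices, trainset)
    trainloader, testloader = get_client_loaders(0, client_indices, trainset, testset)
    print(f"Client 0: {len(trainloader)} train batches, {len(testloader)} test batches")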