-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
178 lines (140 loc) · 6.76 KB
/
main.py
File metadata and controls
178 lines (140 loc) · 6.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
import os
# Project layout: every path below is derived from the working directory.
WORKING_DIR = "D:/Documents/Stages/S3R/paper/code"
# BUG FIX: the original `WORKING_DIR + "data/result"` dropped the separator
# and produced ".../codedata/result"; join with an explicit "/".
DATA_DIR = WORKING_DIR + "/data/result"
os.chdir(WORKING_DIR)
from Partition import *
from postprocessing import *
from dissimilarities import *
# public functions
def name(a):
    """Return the first module-global identifier bound to object *a*.

    The match is by identity (``is``), not equality. Raises ``IndexError``
    when no global is bound to *a*, exactly like the original lookup.
    """
    bound_identifiers = [identifier for identifier, obj in globals().items() if obj is a]
    return bound_identifiers[0]
def data_extraction(name_gamma_file, name_list_simu_file):
    """Load the reduced coordinates and split them per simulation.

    Parameters
    ----------
    name_gamma_file : str
        File name (inside DATA_DIR) of the matrix of reduced coordinates,
        such that Q = V_g . gamma where Q is the concatenation of the Q_i.
    name_list_simu_file : str
        File name (inside DATA_DIR) of the cumulative column indices that
        delimit each simulation inside gamma.

    Returns
    -------
    list of np.ndarray
        One matrix gamma_i per simulation, such that Q_i = V_g gamma_i.
    """
    print("Extraction...")
    # os.path.join keeps the loader portable (the original hard-coded "\\",
    # which only works on Windows).
    gamma = np.load(os.path.join(DATA_DIR, name_gamma_file))  # reduced coordinates
    L = np.load(os.path.join(DATA_DIR, name_list_simu_file))  # split indices
    print('Reduced Coordinates Nxm, G ', gamma.shape)
    print('Number of samples ', gamma.shape[1])
    print('Nb Simulations ', len(L))
    # Debug sample for the third simulation; guarded so short runs no longer crash.
    k = 3
    if len(L) >= k:
        print('Simulation data, number k=', k, ' ends at L[', k - 1, '] = ', L[k - 1])
    print("Nan in Gamma:" + str(np.isnan(gamma).any()))  # Looking for corrupted data
    # Slice gamma column-wise into [0, L[0]), [L[0], L[1]), ...
    return [gamma[:, :L[0]]] + [gamma[:, L[k - 1]:L[k]] for k in
                                range(1, len(L))]  # list of gamma_i matrices, such as Q_i = V_g gamma_i
# Main
# Loading
DIR_SAVE_PARTITION = "D:\\Documents\\stage\\S3R\\paper\\code\\Partitions"
Dir_save_figures = "D:\\Documents\\stage\\S3R\\paper\\code\\Figures"
"""
Data extraction
You can either extract the raw data and split it into train and test sets, or just load pre-split sets.
In order to compare metrics properly, we suggest using the same train and test sets when parallelizing the computation.
"""
gamma = data_extraction("dof_global_G.npy", "List_simu.npy")
gamma_train, gamma_test = train_test_split(gamma, test_size=0.2)
# Persist the split so every run / worker compares metrics on the same sets.
# (`with` closes the files itself; the original explicit close() was redundant.)
with open("test_set", 'wb') as fichier:
    pk.dump(gamma_test, fichier)
with open("train_set", 'wb') as fichier:
    pk.dump(gamma_train, fichier)
# OR reload a previously saved split.
# BUG FIX: the original opened these files with 'wb', which truncates the
# freshly written data and makes pk.load fail; loading requires 'rb'.
with open("test_set", 'rb') as fichier:
    gamma_test = pk.load(fichier)
with open("train_set", 'rb') as fichier:
    gamma_train = pk.load(fichier)
""" parameters """
K = [3, 5, 7, 9, 12, 18]
clustering_functions = ["k_medoids", "spectral"]
DBSCAN_parameters = [1]
OPTICS_parameters = [10]
distances = [biprojection, schubert, grassmann, binet_cauchy, chordal, fubini_study, martin, procrustes]
Nb_modes = 2
nb_iter = 200


def _empty_gain_table(parameters):
    """Return one fresh, empty result accumulator per clustering parameter.

    Keeps a reference to *parameters* under "parameter" and one empty list
    per parameter for each gain metric.
    """
    return {"parameter": parameters,
            'gain_DTAP': [[] for _ in parameters],
            'gain_DTM': [[] for _ in parameters],
            'gain_lowest_err': [[] for _ in parameters],
            'mean_of_local_gain': [[] for _ in parameters]}


### (re)-initialization
# One accumulator per (distance, clustering method); the four identical
# dict literals of the original are factored through _empty_gain_table.
Result_all = {distance: {"k_medoids": _empty_gain_table(K),
                         "spectral": _empty_gain_table(K),
                         "DBSCAN": _empty_gain_table(DBSCAN_parameters),
                         "OPTICS": _empty_gain_table(OPTICS_parameters)}
              for distance in distances}
# Cache of dissimilarity matrices: computed once per distance, then reused.
MATRICES_DIS = {key: None for key in distances}
average_result = []
# Computation and save of each combination required.
# First pass: also builds and caches the dissimilarity matrix per distance.
time_tour = time.time()
for dist in distances:
    for clustering, dic in Result_all[dist].items():
        for i in range(len(dic['parameter'])):
            parameter = dic['parameter'][i]
            print("Dissimilarity: " + name(dist) + ", Method :" + clustering + " with " + str(
                parameter) + " as clustering parameter")
            # The dissimilarity matrix depends only on `dist`: compute it once
            # and reuse it for every clustering method / parameter combination.
            if MATRICES_DIS[dist] is None:
                part = Partition(gamma_train, dist, clustering_parameter=parameter, clustering_method=clustering,
                                 bi_dim=False, nb_modes=Nb_modes)
                MATRICES_DIS[dist] = part.mat_dist
                # NOTE(review): listB is set once per distance (from the first
                # combination) and reused for all later ones — confirm that B
                # only depends on the distance, not on the clustering parameter.
                listB = part.B
            part = Partition(gamma_train, dist, clustering_parameter=parameter, clustering_method=clustering, B=listB,
                             precomputed_dissimilarity_matrix=MATRICES_DIS[dist],
                             nb_modes=Nb_modes)
            # Accumulate every gain metric on the test set for this combination.
            for key, value in part.gains(gamma_test).items():
                Result_all[dist][clustering][key][i].append(value)
            os.chdir(DIR_SAVE_PARTITION)
            # `with` closes the files itself; the original's explicit close()
            # calls were redundant no-ops and have been dropped.
            with open(name(dist) + "_" + clustering + "_" + str(parameter) + "_clusters.part", 'wb') as fichier:
                pk.dump(part, fichier)
            with open("Result_all", 'wb') as fichier:
                pk.dump(Result_all, fichier)
print(time.time() - time_tour, " s for the first loop. The partitions object have been saved.")
compt = 1
# Repeat the computation nb_iter - 1 more times so the results can be averaged.
# Relies on the first loop having filled MATRICES_DIS and listB.
while compt < nb_iter:
    compt += 1
    for dist in distances:
        for clustering, dic in Result_all[dist].items():
            for i in range(len(dic['parameter'])):
                parameter = dic['parameter'][i]
                print("Dissimilarity: " + name(dist) + ", Method :" + clustering + " with " + str(
                    parameter) + " as clustering parameter")
                # Defensive recomputation; after the first loop this branch
                # should never trigger since every matrix is already cached.
                if MATRICES_DIS[dist] is None:
                    part = Partition(gamma_train, dist, clustering_parameter=parameter, clustering_method=clustering,
                                     bi_dim=False, nb_modes=Nb_modes)
                    MATRICES_DIS[dist] = part.mat_dist
                listB = part.B if MATRICES_DIS[dist] is None else listB
                part = Partition(gamma_train, dist, clustering_parameter=parameter, clustering_method=clustering,
                                 B=listB,
                                 precomputed_dissimilarity_matrix=MATRICES_DIS[dist],
                                 nb_modes=Nb_modes)
                for key, value in part.gains(gamma_test).items():
                    Result_all[dist][clustering][key][i].append(value)
                # `with` closes the file itself; the redundant close() is dropped.
                # Saved relative to DIR_SAVE_PARTITION (cwd set by the first loop).
                with open("Result_all", 'wb') as fichier:
                    pk.dump(Result_all, fichier)
# Post-processing: reload the accumulated results and produce the comparison figures.
os.chdir(DIR_SAVE_PARTITION)
# `with` closes the file itself; the original's explicit close() was redundant.
with open("Result_all", 'rb') as fichier:
    Result_all = pk.load(fichier)
diss_correlation(DIR_SAVE_PARTITION)
comparison_avg(DIR_SAVE_PARTITION)
comparison_max(DIR_SAVE_PARTITION)
comparison_std(DIR_SAVE_PARTITION)