# data_analysis.py
import numpy as np
from numba import njit, prange
from scipy.odr import ODR, Model, RealData
#--------------------------------------------------------------------
@njit(cache=True, parallel=True)  # parallel=True lets prange run in parallel (otherwise it degrades to range)
def reduceIntoBin2(data):
    # One binning step: halve the array by averaging adjacent pairs.
    binnedSize = data.shape[0] // 2
    binnedData = np.zeros(binnedSize)
    for j in prange(binnedSize):
        binnedData[j] = data[2*j] + data[2*j + 1]
    return binnedData * 0.5
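# Example (a minimal sketch): one binning step averages adjacent pairs,
# so a four-element series collapses to two bin means:
#
#   reduceIntoBin2(np.array([1.0, 3.0, 2.0, 4.0]))  # -> array([2., 3.])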
def binningAnalysisSingle(data, printWarnings=False):
    # Binning analysis: bin repeatedly by a factor of 2 and use the bin size
    # where the error estimate peaks; if the peak sits at the last binning
    # step, the analysis has not converged and a warning is printed.
    N = data.shape[0]
    maxBinningSteps = int(np.log2(N))  # last binning level holds 2 elements; a single element gives no error estimate
    mean = np.zeros(maxBinningSteps)
    meanErrors = np.zeros(maxBinningSteps)
    stds = np.zeros(maxBinningSteps)
    # starting data (bin size 1)
    binnedData = data
    mean[0] = np.mean(data)
    meanErrors[0] = np.std(data) / np.sqrt(N)
    stds[0] = np.std(data)
    # binning up to maxBinningSteps
    for i in range(1, maxBinningSteps):
        # binning step i corresponds to bin size 2**i
        binnedData = reduceIntoBin2(binnedData)
        N = binnedData.shape[0]
        # new mean and error
        mean[i] = np.mean(binnedData)
        meanErrors[i] = np.std(binnedData) / np.sqrt(N)
        stds[i] = np.std(binnedData)
    #--------------------------------
    maxElement = np.argmax(stds)
    #maxElement = np.argmax(meanErrors)
    if (maxElement+1) == maxBinningSteps and printWarnings:
        info = " (elements at max-error binning step: " + str(data.shape[0] // 2**maxElement) + ")"
        print("binningAnalysisSingle: [NOT CONVERGED] increase dataset," + info)
    if maxElement == 0 and printWarnings:
        info = " (elements at max-error binning step: " + str(data.shape[0] // 2**maxElement) + ")"
        print("binningAnalysisSingle: [Already CONVERGED] first error is largest," + info)
    corrSize = 2**maxElement
    return mean[maxElement], meanErrors[maxElement], stds[maxElement], corrSize
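# Example usage (a minimal sketch with synthetic data; the AR(1) series below
# is illustrative and not part of this module). For correlated samples the
# binned error grows with bin size until it plateaus:
#
#   rng = np.random.default_rng(0)
#   noise = rng.normal(size=2**16)
#   series = np.empty_like(noise)
#   series[0] = noise[0]
#   for t in range(1, series.shape[0]):
#       series[t] = 0.9 * series[t - 1] + noise[t]
#   m, m_err, m_std, corrSize = binningAnalysisSingle(series, printWarnings=True)
#   # m_err is the autocorrelation-corrected error of the mean;
#   # corrSize is the selected bin size (roughly the correlation time in samples)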
def binningAnalysisTriple(data1, data2, data3, printWarnings=True):
    # Same binning analysis as binningAnalysisSingle, but the three data sets
    # share one bin size: the largest of the three max-error bin sizes is used.
    N = data1.shape[0]
    if N != data2.shape[0] or N != data3.shape[0]:
        print("binningAnalysisTriple: input arrays must have the same length")
    maxBinningSteps = int(np.log2(N))  # last binning level holds 2 elements; a single element gives no error estimate
    mean1 = np.zeros(maxBinningSteps)
    meanErrors1 = np.zeros(maxBinningSteps)
    stds1 = np.zeros(maxBinningSteps)
    mean2 = np.zeros(maxBinningSteps)
    meanErrors2 = np.zeros(maxBinningSteps)
    stds2 = np.zeros(maxBinningSteps)
    mean3 = np.zeros(maxBinningSteps)
    meanErrors3 = np.zeros(maxBinningSteps)
    stds3 = np.zeros(maxBinningSteps)
    # starting data (bin size 1)
    binnedData1 = data1
    binnedData2 = data2
    binnedData3 = data3
    mean1[0] = np.mean(data1)
    meanErrors1[0] = np.std(data1) / np.sqrt(N)
    stds1[0] = np.std(data1)
    mean2[0] = np.mean(data2)
    meanErrors2[0] = np.std(data2) / np.sqrt(N)
    stds2[0] = np.std(data2)
    mean3[0] = np.mean(data3)
    meanErrors3[0] = np.std(data3) / np.sqrt(N)
    stds3[0] = np.std(data3)
    # binning up to maxBinningSteps
    for i in range(1, maxBinningSteps):
        # binning step i corresponds to bin size 2**i
        binnedData1 = reduceIntoBin2(binnedData1)
        binnedData2 = reduceIntoBin2(binnedData2)
        binnedData3 = reduceIntoBin2(binnedData3)
        N = binnedData1.shape[0]
        # new means and errors
        mean1[i] = np.mean(binnedData1)
        meanErrors1[i] = np.std(binnedData1) / np.sqrt(N)
        stds1[i] = np.std(binnedData1)
        mean2[i] = np.mean(binnedData2)
        meanErrors2[i] = np.std(binnedData2) / np.sqrt(N)
        stds2[i] = np.std(binnedData2)
        mean3[i] = np.mean(binnedData3)
        meanErrors3[i] = np.std(binnedData3) / np.sqrt(N)
        stds3[i] = np.std(binnedData3)
    # take the bin size of the largest error; maxElement == 0 means the raw data is used
    #maxElement = max(np.argmax(stds1), np.argmax(stds2), np.argmax(stds3))
    maxElement = max(np.argmax(meanErrors1), np.argmax(meanErrors2), np.argmax(meanErrors3))
    if (maxElement+1) == maxBinningSteps and printWarnings:
        info = " (elements at max-error binning step: " + str(data1.shape[0] // 2**maxElement) + ")"
        print("binningAnalysisTriple: [NOT CONVERGED] increase dataset," + info)
    if maxElement == 0 and printWarnings:
        info = " (elements at max-error binning step: " + str(data1.shape[0] // 2**maxElement) + ")"
        print("binningAnalysisTriple: [Already CONVERGED] first error is largest," + info)
    # re-bin the raw data to the selected bin size (maxElement binning steps)
    binnedData1 = data1
    binnedData2 = data2
    binnedData3 = data3
    for i in range(maxElement):
        binnedData1 = reduceIntoBin2(binnedData1)
        binnedData2 = reduceIntoBin2(binnedData2)
        binnedData3 = reduceIntoBin2(binnedData3)
    corrSize = 2**maxElement
    return (mean1[maxElement], meanErrors1[maxElement], stds1[maxElement],
            mean2[maxElement], meanErrors2[maxElement], stds2[maxElement],
            mean3[maxElement], meanErrors3[maxElement], stds3[maxElement],
            corrSize, binnedData1, binnedData2, binnedData3)
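# Example usage (a minimal sketch; magAbs/mag2/mag4 stand for per-measurement
# |m|, m^2 and m^4 time series from a simulation and are hypothetical names):
#
#   res = binningAnalysisTriple(magAbs, mag2, mag4)
#   (mAbs, mAbsErr, mAbsStd,
#    m2, m2Err, m2Std,
#    m4, m4Err, m4Std,
#    corrSize, binAbs, bin2, bin4) = res
#   # binAbs/bin2/bin4 are the series re-binned to the common bin size,
#   # ready for the jackknife estimators below.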
#--------------------------------------------------------------------
@njit(cache=True)
def binderCumulant(m2, m4):
    # U = (3/2) * (1 - m4 / (3 * m2^2)); the 1e-15 guards against division by zero
    r2 = m4 / (m2**2 + 1e-15)
    return 1.5 * (1.0 - r2 / 3.0)
@njit(cache=True)
def mBinderCuJackknife(binnedMag2, binnedMag4):
    # Jackknife mean and error of the Binder cumulant over binned data.
    # Full-sample estimate U0
    size = binnedMag2.shape[0]
    arr = np.zeros(size)
    for i in range(size):
        arr[i] = binderCumulant(binnedMag2[i], binnedMag4[i])
    U0 = np.mean(arr)
    # leave-one-out estimates
    cutArr = np.zeros(size - 1)
    U = np.zeros(size)
    for skip in range(size):
        index = 0
        for i in range(size):
            if i == skip:
                continue
            cutArr[index] = binderCumulant(binnedMag2[i], binnedMag4[i])
            index += 1
        U[skip] = np.sum(cutArr) / (size - 1)
    Uq = np.mean(U)
    # bias-corrected mean and jackknife error
    mean = U0 - (size - 1) * (Uq - U0)
    error = np.std(U) * np.sqrt(size - 1)
    return mean, error
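# The jackknife used here: with n leave-one-out estimates U_k and the
# full-sample value U0, the bias-corrected mean is U0 - (n-1) * (mean(U_k) - U0)
# and the error is std(U_k) * sqrt(n-1). Example (continuing the hypothetical
# bin2/bin4 arrays from the binningAnalysisTriple sketch above):
#
#   U, U_err = mBinderCuJackknife(bin2, bin4)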
#--------------------------------------------------------------------
@njit(cache=True)
def magSusceptibility(mAbs, m2, N, T):
    # chi = (N/T) * (<m^2> - <|m|>^2)
    return (N/T) * (m2 - mAbs**2)
@njit(cache=True)
def mSuscJackknife(binnedMagAbs, binnedMag2, N, T):
    # Jackknife mean and error of the magnetic susceptibility over binned data.
    # Full-sample estimate U0
    size = binnedMagAbs.shape[0]
    arr = np.zeros(size)
    for i in range(size):
        arr[i] = magSusceptibility(binnedMagAbs[i], binnedMag2[i], N, T)
    U0 = np.mean(arr)
    # leave-one-out estimates
    cutArr = np.zeros(size - 1)
    U = np.zeros(size)
    for skip in range(size):
        index = 0
        for i in range(size):
            if i == skip:
                continue
            cutArr[index] = magSusceptibility(binnedMagAbs[i], binnedMag2[i], N, T)
            index += 1
        U[skip] = np.sum(cutArr) / (size - 1)
    Uq = np.mean(U)
    # bias-corrected mean and jackknife error
    mean = U0 - (size - 1) * (Uq - U0)
    error = np.std(U) * np.sqrt(size - 1)
    return mean, error
#--------------------------------------------------------------------
@njit(cache=True)
def k3(mAbs, m2, mAbs3, N, T):
    # third cumulant of |m|: (<|m|^3> - 3 <m^2><|m|> + 2 <|m|>^3) * (N/T)
    return (mAbs3 - 3.0 * m2 * mAbs + 2.0 * mAbs**3) * (N/T)
@njit(cache=True)
def mK3Jackknife(binnedMagAbs, binnedMag2, binnedMagAbs3, N, T):
    # Jackknife mean and error of the third cumulant k3 over binned data.
    # Full-sample estimate U0
    size = binnedMagAbs.shape[0]
    arr = np.zeros(size)
    for i in range(size):
        arr[i] = k3(binnedMagAbs[i], binnedMag2[i], binnedMagAbs3[i], N, T)
    U0 = np.mean(arr)
    # leave-one-out estimates
    cutArr = np.zeros(size - 1)
    U = np.zeros(size)
    for skip in range(size):
        index = 0
        for i in range(size):
            if i == skip:
                continue
            cutArr[index] = k3(binnedMagAbs[i], binnedMag2[i], binnedMagAbs3[i], N, T)
            index += 1
        U[skip] = np.sum(cutArr) / (size - 1)
    Uq = np.mean(U)
    # bias-corrected mean and jackknife error
    mean = U0 - (size - 1) * (Uq - U0)
    error = np.std(U) * np.sqrt(size - 1)
    return mean, error
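# Example usage (same sketch as above; N is the number of spins and T the
# temperature, both hypothetical here):
#
#   chi, chi_err = mSuscJackknife(binAbs, bin2, N, T)
#   k3_val, k3_err = mK3Jackknife(binAbs, bin2, binAbs3, N, T)
#   # binAbs3 would come from an analogous binning of the |m|^3 series.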
#--------------------------------------------------------------------
def fit_spin_spin_correlation(rs, gc_r):
    # Fit a * exp(-r/xi) to the correlation function via scipy.odr
    data = RealData(rs, gc_r)
    def f(beta, x):
        a, xi = beta
        return a * np.exp(-x/xi)  # optionally divide by x for a power-law prefactor
    model = Model(f)
    odr = ODR(data, model, beta0=[1.0, 1.0])
    odr.set_job(fit_type=2)  # fit_type=2: ordinary least squares
    output = odr.run()
    a, xi = output.beta
    a_err, xi_err = output.sd_beta
    return a, a_err, xi, xi_err, f
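# Example usage (a minimal sketch with synthetic data; the decay constant 4.0
# and noise level are illustrative):
#
#   rs = np.arange(0, 17, dtype=float)
#   gc = 0.8 * np.exp(-rs / 4.0) + 1e-3 * np.random.default_rng(1).normal(size=rs.size)
#   a, a_err, xi, xi_err, f = fit_spin_spin_correlation(rs, gc)
#   # xi should come out close to 4.0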
def calc_spin_spin_correlation(states, N):
    L = int(np.sqrt(N))
    s = np.reshape(states, (-1, L, L))
    sk = np.fft.fft2(s, axes=(-2, -1))
    gc_k = np.square(np.abs(sk)) / N
    # remove the m^2 offset (the k = 0 component)
    gc_k[:, 0, 0] = 0
    # average in k-space first to speed up the inverse FFT
    # (one N log N transform instead of one per sample)
    gc_k_mean = np.mean(gc_k, axis=0)
    # inverse FFT back to real space
    gc_rvec = np.fft.ifft2(gc_k_mean, axes=(-2, -1))
    # remove numerical imaginary parts
    gc_rvec = np.abs(gc_rvec)
    #gc_rvec = np.real(gc_rvec)
    # average over r = |rvec|; only the 4 axis directions are used
    # to keep the r evaluation trivial
    gc_r = np.zeros(L//2 + 1)
    for r in range(0, L//2 + 1):
        g1 = gc_rvec[0, r]
        g2 = gc_rvec[r, 0]
        g3 = gc_rvec[0, -r]
        g4 = gc_rvec[-r, 0]
        gc_r[r] = np.mean([g1, g2, g3, g4])
    # fit an exponential decay over (r, gc_r)
    rs = np.linspace(0, L//2, L//2 + 1)
    a, a_err, xi, xi_err, fit_func = fit_spin_spin_correlation(rs, gc_r)
    if 0:  # debug plot
        import data_visualization as dv
        dv.plot_correlation_fit(rs, gc_r, fit_func, (a, xi))
    return xi, xi_err
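# Example usage (a minimal sketch; random +-1 configurations are illustrative
# only and give a near-zero correlation length, so the fit quality is poor):
#
#   L = 32; N = L * L
#   states = np.random.default_rng(2).choice([-1.0, 1.0], size=(100, N))
#   xi, xi_err = calc_spin_spin_correlation(states, N)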
#--------------------------------------------------------------------