-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpearson_all_vs_all.py
More file actions
122 lines (98 loc) · 3.67 KB
/
pearson_all_vs_all.py
File metadata and controls
122 lines (98 loc) · 3.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
#!/usr/bin/env python3
import os
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
from itertools import combinations
import warnings
warnings.filterwarnings("ignore")
# =====================================================
# CONFIG
# =====================================================
# NOTE(review): everything below runs at import time. Under the "spawn"
# multiprocessing start method each worker would re-execute this module
# (re-reading the CSV) — presumably this runs on a fork-based platform;
# confirm.
INPUT_CSV = os.path.expanduser(
    "~/Projects/CHARM/intermediary_files/Charm.binding.filtered.both.csv"
)
# Output paths for the increasing / decreasing odds-ratio cohorts.
OUT_INC = os.path.expanduser(
    "~/Projects/CHARM/intermediary_files/Pearson_oddsratinc.csv"
)
OUT_DEC = os.path.expanduser(
    "~/Projects/CHARM/intermediary_files/Pearson_oddsratdec.csv"
)
# Leave a few cores free for the rest of the machine; never drop below 1.
NPROC = max(1, cpu_count() - 4)
# =====================================================
# LOAD DATA
# =====================================================
print("Loading oddsrat dataframe...")
# NOTE(review): the file carries a .csv name but is read tab-separated —
# confirm the upstream writer actually emits TSV.
df = pd.read_csv(INPUT_CSV, sep="\t")
# Per-position signal columns follow a "pos_<n>" naming convention.
pos_cols = [c for c in df.columns if c.startswith("pos_")]
# Ensure numeric
# Non-numeric entries become NaN so the worker's finite-mask can skip them.
df[pos_cols] = df[pos_cols].apply(pd.to_numeric, errors="coerce")
print(f"Loaded {len(df)} spectra with {len(pos_cols)} positions")
# =====================================================
# SPLIT INC / DEC
# =====================================================
# Partition spectra by direction label; indices are reset so row
# positions line up with the numpy matrices built below.
df_inc = df[df["Direction"] == "oddsratinc"].reset_index(drop=True)
df_dec = df[df["Direction"] == "oddsratdec"].reset_index(drop=True)
print(f"INC spectra: {len(df_inc)}")
print(f"DEC spectra: {len(df_dec)}")
def prepare_matrix(frame, cols=None):
    """Extract the per-position signal matrix and row metadata from *frame*.

    Parameters
    ----------
    frame : pd.DataFrame
        Must contain the position columns plus "RBP", "Target", "dPSI".
        (Renamed from ``df``, which shadowed the module-level dataframe.)
    cols : list[str] | None
        Position columns to extract. Defaults to the module-level
        ``pos_cols`` list, preserving the original call signature.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        A float32 matrix of shape (n_rows, len(cols)) and an object
        array with one (RBP, Target, dPSI) triple per row.
    """
    if cols is None:
        cols = pos_cols  # module-level default, as before
    mat = frame[cols].to_numpy(dtype=np.float32)
    meta = frame[["RBP", "Target", "dPSI"]].to_numpy()
    return mat, meta
# Build the (n_spectra, n_positions) matrices and row metadata once,
# up front, for each direction.
inc_mat, inc_meta = prepare_matrix(df_inc)
dec_mat, dec_meta = prepare_matrix(df_dec)
# =====================================================
# WORKER FUNCTION (PEARSON)
# =====================================================
def pearson_worker(args):
    """Correlate one pair of spectra rows.

    ``args`` is a tuple ``(i, j, mat, meta)``: two row indices into the
    signal matrix plus the matching metadata array. Returns a result
    dict for the pair, or ``None`` when fewer than 10 finite positions
    overlap or the correlation is undefined (e.g. a constant vector).
    """
    row_a, row_b, matrix, info = args
    a, b = matrix[row_a], matrix[row_b]
    # Keep only positions where both spectra have usable values.
    usable = np.isfinite(a) & np.isfinite(b)
    if usable.sum() < 10:
        return None
    r = np.corrcoef(a[usable], b[usable])[0, 1]
    if np.isnan(r):
        return None
    meta_a, meta_b = info[row_a], info[row_b]
    return {
        "RBP1": meta_a[0],
        "Target1": meta_a[1],
        "dPSI1": meta_a[2],
        "RBP2": meta_b[0],
        "Target2": meta_b[1],
        "dPSI2": meta_b[2],
        "PearsonR": r,
    }
# =====================================================
# RUN ALL-PAIR COMPARISONS
# =====================================================
def run_all_pairs(mat, meta, outfile):
    """Correlate every unordered pair of rows in *mat*, streaming to CSV.

    Parameters
    ----------
    mat : np.ndarray
        (n, n_positions) float32 spectra matrix.
    meta : np.ndarray
        Per-row (RBP, Target, dPSI) metadata, aligned with *mat*.
    outfile : str
        Destination path; overwritten with a header plus one row per
        pair that passed the worker's overlap/NaN checks.
    """
    n = mat.shape[0]
    pairs = combinations(range(n), 2)
    # BUG FIX: the separator here was a mojibake "?" (originally an arrow).
    print(f"Running {n*(n-1)//2:,} comparisons -> {outfile}")
    # NOTE(review): each task tuple carries the full mat/meta arrays, so
    # they get pickled with every chunk — fine for modest n, but consider
    # a pool initializer with shared globals if the matrices grow large.
    with open(outfile, "w") as f:
        f.write("RBP1,Target1,dPSI1,RBP2,Target2,dPSI2,PearsonR\n")
        with Pool(NPROC) as pool:
            for res in pool.imap_unordered(
                pearson_worker,
                ((i, j, mat, meta) for i, j in pairs),
                chunksize=500
            ):
                if res is None:
                    continue
                # NOTE(review): fields are not CSV-escaped; assumes RBP /
                # Target values never contain commas — confirm upstream.
                f.write(
                    f"{res['RBP1']},{res['Target1']},{res['dPSI1']},"
                    f"{res['RBP2']},{res['Target2']},{res['dPSI2']},"
                    f"{res['PearsonR']}\n"
                )
# =====================================================
# EXECUTION
# =====================================================
if __name__ == "__main__":
    # Run the INC and DEC cohorts sequentially; each call manages its
    # own process pool and output file.
    print(f"Using {NPROC} cores")
    run_all_pairs(inc_mat, inc_meta, OUT_INC)
    run_all_pairs(dec_mat, dec_meta, OUT_DEC)
    # BUG FIX: leading mojibake "?" removed from the completion message.
    print("All Pearson correlations completed.")