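# NOTE: this file is assumed to be include()'d from the package's top-level
# module, which brings the names used below into scope (GNNGraph, AnyCuArray,
# CUMAT_T, Flux, CUDA and CUDA.CUSPARSE's AbstractCuSparseMatrix, SparseArrays'
# SparseMatrixCSC / AbstractSparseMatrix / nnz, LinearAlgebra's Diagonal, and
# ChainRulesCore's @non_differentiable).
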
function check_num_nodes(g::GNNGraph, x::AbstractArray)
    @assert g.num_nodes == size(x, ndims(x)) "Got $(size(x, ndims(x))) as last dimension size instead of num_nodes=$(g.num_nodes)"
    return true
end

function check_num_nodes(g::GNNGraph, x::Union{Tuple, NamedTuple})
    map(x -> check_num_nodes(g, x), x)
    return true
end

check_num_nodes(::GNNGraph, ::Nothing) = true

function check_num_edges(g::GNNGraph, e::AbstractArray)
    @assert g.num_edges == size(e, ndims(e)) "Got $(size(e, ndims(e))) as last dimension size instead of num_edges=$(g.num_edges)"
    return true
end

function check_num_edges(g::GNNGraph, x::Union{Tuple, NamedTuple})
    map(x -> check_num_edges(g, x), x)
    return true
end

check_num_edges(::GNNGraph, ::Nothing) = true
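
# Example (a minimal sketch, assuming a graph built with this package's
# constructors, e.g. g = rand_graph(10, 30)):
#   check_num_nodes(g, rand(16, 10))  # true: last dimension matches num_nodes=10
#   check_num_edges(g, rand(16, 30))  # true: last dimension matches num_edges=30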
sort_edge_index(eindex::Tuple) = sort_edge_index(eindex...)

function sort_edge_index(u, v)
    uv = collect(zip(u, v))
    p = sortperm(uv) # isless is defined lexicographically for tuples
    return u[p], v[p]
end

function sort_edge_index(u::AnyCuArray, v::AnyCuArray)
    # TODO: a proper CUDA-friendly implementation; for now round-trip through the CPU
    sort_edge_index(u |> Flux.cpu, v |> Flux.cpu) |> Flux.gpu
end
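
# Example: edges are sorted lexicographically by (source, target):
#   sort_edge_index([2, 1, 2], [1, 3, 1])  # -> ([1, 2, 2], [3, 1, 1])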
cat_features(x1::Nothing, x2::Nothing) = nothing
cat_features(x1::AbstractArray, x2::AbstractArray) = cat(x1, x2, dims=ndims(x1))
cat_features(x1::Union{Number, AbstractVector}, x2::Union{Number, AbstractVector}) =
    cat(x1, x2, dims=1)

# Workaround for issues #98 and #104
cat_features(x1::NamedTuple{(), Tuple{}}, x2::NamedTuple{(), Tuple{}}) = (;)
cat_features(xs::AbstractVector{NamedTuple{(), Tuple{}}}) = (;)

function cat_features(x1::NamedTuple, x2::NamedTuple)
    sort(collect(keys(x1))) == sort(collect(keys(x2))) ||
        @error "cannot concatenate feature data with different keys"
    NamedTuple(k => cat_features(getfield(x1, k), getfield(x2, k)) for k in keys(x1))
end

function cat_features(xs::AbstractVector{<:AbstractArray{T, N}}) where {T <: Number, N}
    cat(xs...; dims=N)
end

cat_features(xs::AbstractVector{Nothing}) = nothing
cat_features(xs::AbstractVector{<:Number}) = xs

function cat_features(xs::AbstractVector{<:NamedTuple})
    symbols = [sort(collect(keys(x))) for x in xs]
    all(y -> y == symbols[1], symbols) ||
        @error "cannot concatenate feature data with different keys"
    length(xs) == 1 && return xs[1]
    # Concatenate key by key
    syms = symbols[1]
    NamedTuple(k => cat_features([x[k] for x in xs]) for k in syms)
end
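
# Examples:
#   cat_features((x = ones(2, 3),), (x = zeros(2, 2),))  # -> (x = 2×5 matrix,)
#   cat_features([ones(2, 3), zeros(2, 2)])              # -> 2×5 matrix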
# Turn a generic feature container into a NamedTuple
normalize_graphdata(data::Nothing; kws...) = NamedTuple()

normalize_graphdata(data; default_name::Symbol, kws...) =
    normalize_graphdata(NamedTuple{(default_name,)}((data,)); default_name, kws...)

function normalize_graphdata(data::NamedTuple; default_name, n, duplicate_if_needed=false)
    # This has to work around two Zygote bugs with NamedTuples:
    # https://github.com/FluxML/Zygote.jl/issues/1071
    # https://github.com/FluxML/Zygote.jl/issues/1072
    if n != 1
        @assert all(x -> x isa AbstractArray, data) "Non-array features provided."
    end

    if n == 1
        # If the last array dimension is not 1, add a new dimension.
        # This is mostly useful to reshape global feature vectors
        # of size D into D×1 matrices.
        unsqz_last(v::AbstractArray) = size(v)[end] != 1 ? reshape(v, size(v)..., 1) : v
        unsqz_last(v) = v
        data = map(unsqz_last, data)
    end

    ## Turn vectors into 1×n matrices (currently disabled).
    # unsqz_first(v::AbstractVector) = reshape(v, 1, length(v))
    # unsqz_first(v) = v
    # data = map(unsqz_first, data)

    if duplicate_if_needed
        # If an array covers only n÷2 elements in its last dimension,
        # duplicate it along that dimension to cover all n.
        function duplicate(v)
            if v isa AbstractArray && size(v)[end] == n ÷ 2
                v = cat(v, v, dims=ndims(v))
            end
            v
        end
        data = map(duplicate, data)
    end

    @assert all(x -> x isa AbstractArray ? size(x)[end] == n : true, data) "Wrong size in last dimension for feature array."
    return data
end
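
# Examples:
#   normalize_graphdata(rand(16, 10); default_name = :x, n = 10)
#     # -> (x = 16×10 array,)
#   normalize_graphdata(rand(16); default_name = :u, n = 1)
#     # -> (u = 16×1 matrix,): the trailing dimension is added
#   normalize_graphdata(rand(16, 5); default_name = :e, n = 10, duplicate_if_needed = true)
#     # -> (e = 16×10 array,): the features are duplicated along the last dimension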
ones_like(x::AbstractArray, T::Type, sz=size(x)) = fill!(similar(x, T, sz), 1)
ones_like(x::SparseMatrixCSC, T::Type, sz=size(x)) = ones(T, sz)
ones_like(x::CUMAT_T, T::Type, sz=size(x)) = CUDA.ones(T, sz)
ones_like(x, sz=size(x)) = ones_like(x, eltype(x), sz)
numnonzeros(a::AbstractSparseMatrix) = nnz(a)
numnonzeros(a::AbstractMatrix) = count(!=(0), a)
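
# Examples:
#   ones_like(rand(Float32, 2, 3))    # -> 2×3 Matrix{Float32} of ones
#   ones_like(rand(2, 2), Int, (4,))  # -> 4-element Vector{Int} of ones
#   numnonzeros([0 1; 2 0])           # -> 2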
# Each edge is encoded as a single integer:
# in 1:n^2 for directed graphs, in 1:n*(n+1)÷2 for undirected ones
# (self-loops allowed in both cases).
function edge_encoding(s, t, n; directed=true)
    if directed
        # Directed edges and self-loops allowed
        idx = (s .- 1) .* n .+ t
        maxid = n^2
    else
        # Undirected edges and self-loops allowed
        maxid = n * (n + 1) ÷ 2

        # Canonicalize each edge so that s <= t
        mask = s .> t
        snew = copy(s)
        tnew = copy(t)
        snew[mask] .= t[mask]
        tnew[mask] .= s[mask]
        s, t = snew, tnew

        # idx = ∑_{i' < i} ∑_{j'=i'}^{n} 1  +  ∑_{j'=i}^{j} 1
        #     = ∑_{i' < i} (n - i' + 1)  +  (j - i + 1)
        #     = (i - 1)*(2*(n+1) - i)÷2  +  (j - i + 1)
        idx = @. (s - 1) * (2 * (n + 1) - s) ÷ 2 + (t - s + 1)
    end
    return idx, maxid
end
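
# Examples with n = 3 nodes:
#   edge_encoding([1, 2], [2, 3], 3)                    # -> ([2, 6], 9)
#   edge_encoding([2, 3], [1, 3], 3; directed = false)  # -> ([2, 6], 6)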
# Inverse of edge_encoding: recover (s, t) from the integer indices.
function edge_decoding(idx, n; directed=true)
    if directed
        s = (idx .- 1) .÷ n .+ 1
        t = (idx .- 1) .% n .+ 1
    else
        # Substituting j = i in
        #   idx = (i - 1)*(2*(n+1) - i)÷2 + (j - i + 1)
        # gives the smallest index in row i:
        #   idx_min(i) = (i - 1)*(2*(n+1) - i)÷2 + 1.
        # Inverting this quadratic in i and taking the ceiling recovers s;
        # t then follows from the encoding formula.
        s = @. ceil(Int, -sqrt((n + 1 / 2)^2 - 2 * idx) + n + 1 / 2)
        t = @. idx - (s - 1) * (2 * (n + 1) - s) ÷ 2 - 1 + s
    end
    return s, t
end
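
# Examples, inverting the encodings above:
#   edge_decoding([2, 6], 3)                    # -> ([1, 2], [2, 3])
#   edge_decoding([2, 6], 3; directed = false)  # -> ([1, 3], [2, 3])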
# binarize(x) = map(>(0), x) # map is not supported by CuSparse types
binarize(x::AbstractArray) = >(0).(x)
binarize(x::Number, T::Type)::T = ifelse(x > 0, T(1), T(0))
binarize(x::AbstractArray, T) = T.(binarize(x)) # no better CuSparse-compatible implementation found

@non_differentiable binarize(x...)
@non_differentiable edge_encoding(x...)
@non_differentiable edge_decoding(x...)
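
# Examples:
#   binarize([-1.0, 0.0, 2.0])           # -> Bool[0, 0, 1]
#   binarize([-1.0, 0.0, 2.0], Float32)  # -> Float32[0.0, 0.0, 1.0]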
## TYPE PIRACY: these definitions should go in CUDA.jl.

# Workaround for https://github.com/JuliaGPU/CUDA.jl/issues/1406
Base.sum(x::AbstractCuSparseMatrix; dims=:) = cusparse_sum(x, Val(dims))

cusparse_sum(x, ::Val{:}) = sum(cusparse_sum(x, Val(1)))

function cusparse_sum(x::AbstractCuSparseMatrix, ::Val{1})
    m, n = size(x)
    v = ones_like(x, (1, m))
    return v * x   # a row vector of ones times x sums over the first dimension
end

function cusparse_sum(x::AbstractCuSparseMatrix, ::Val{2})
    m, n = size(x)
    v = ones_like(x, (n, 1))
    return x * v   # x times a column vector of ones sums over the second dimension
end
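
# Example (a sketch, requires a CUDA device; CuSparseMatrixCSR comes from
# CUDA.CUSPARSE, sprand from SparseArrays):
#   x = CuSparseMatrixCSR(sprand(Float32, 3, 4, 0.5))
#   sum(x; dims = 1)  # -> 1×4 CuMatrix
#   sum(x; dims = 2)  # -> 3×1 CuMatrix
#   sum(x)            # -> scalar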
# Workaround for https://github.com/JuliaGPU/CUDA.jl/issues/1664
function CUDA.CuMatrix{T}(x::AbstractCuSparseMatrix{T}) where {T <: Integer}
    return T.(CuMatrix(Float32.(x)))
end

# Right-multiplication by a Diagonal scales the columns of x
function Base.:(*)(x::AbstractCuSparseMatrix, d::Diagonal)
    return x .* d.diag'
end

# Left-multiplication by a Diagonal scales the rows; this falls back to a
# dense CuArray, as no sparse-preserving implementation was found.
function Base.:(*)(d::Diagonal, x::AbstractCuSparseMatrix)
    return d.diag .* CuArray(x)
end
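
# Example (a sketch, requires a CUDA device):
#   x = CuSparseMatrixCSR(sprand(Float32, 3, 3, 0.5))
#   d = Diagonal(CUDA.rand(Float32, 3))
#   x * d  # scales column j of x by d.diag[j]
#   d * x  # scales row i of x by d.diag[i] (densifies x)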