
Commit aa55cef

Sebastien Loisel and claude committed
Update for HPCBackend type parameter refactoring
- Adapt to HPCBackend{T,Ti,Device,Comm,Solver} API changes
- Update backend instance mapping for new parameterization
- Update all tests and tools for new backend constructor

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent 4c8541c commit aa55cef

48 files changed

Lines changed: 325 additions & 268 deletions
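
Only a handful of the 48 changed files are rendered below, and the hunks that are shown are mechanical renames: the package LinearAlgebraMPI becomes HPCLinearAlgebra, and the types VectorMPI, MatrixMPI, and SparseMatrixMPI become HPCVector, HPCMatrix, and HPCSparseMatrix. The HPCBackend{T,Ti,Device,Comm,Solver} constructor change described in the commit message lives in src/MultiGridBarrierMPI.jl, whose diff is not rendered on this page. As a quick orientation, a minimal sketch of the renamed imports and a vector construction, using only names that appear verbatim in the hunks below (this is not a sketch of the new backend constructor):

    using MultiGridBarrierMPI
    MultiGridBarrierMPI.Init()                       # init pattern used by the updated tests
    using HPCLinearAlgebra                           # was: using LinearAlgebraMPI
    using HPCLinearAlgebra: HPCVector, HPCMatrix, HPCSparseMatrix, io0
                                                     # were: VectorMPI, MatrixMPI, SparseMatrixMPI, io0
    v = HPCVector(ones(4))                           # was: VectorMPI(ones(4))
    println(io0(), "v partition: $(v.partition)")    # io0-based printing, unchanged by this commit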

Project.toml

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 
 [sources]
-HPCLinearAlgebra = {path = "../LinearAlgebraMPI.jl"}
+HPCLinearAlgebra = {path = "../HPCLinearAlgebra.jl"}
 
 [compat]
 BenchmarkTools = "1.6"

examples/basic_solve.jl

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ using MPI
 MPI.Init()
 
 using MultiGridBarrierMPI
-using LinearAlgebraMPI
+using HPCLinearAlgebra
 using MultiGridBarrier
 using LinearAlgebra
 
examples/roundtrip_conversion.jl

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ using MPI
 MPI.Init()
 
 using MultiGridBarrierMPI
-using LinearAlgebraMPI
+using HPCLinearAlgebra
 using MultiGridBarrier
 using LinearAlgebra
 using SparseArrays

src/MultiGridBarrierMPI.jl

Lines changed: 120 additions & 66 deletions
Large diffs are not rendered by default.

test/test_amg_structure.jl

Lines changed: 5 additions & 5 deletions
@@ -9,8 +9,8 @@ end
 using MultiGridBarrierMPI
 MultiGridBarrierMPI.Init()
 
-using LinearAlgebraMPI
-using LinearAlgebraMPI: VectorMPI, MatrixMPI, SparseMatrixMPI, io0
+using HPCLinearAlgebra
+using HPCLinearAlgebra: HPCVector, HPCMatrix, HPCSparseMatrix, io0
 using LinearAlgebra
 using SparseArrays
 using MultiGridBarrier
@@ -86,7 +86,7 @@ println(io0(), "[DEBUG] R_finest size: $(size(R_finest))")
 
 # Create a test vector with size ncols(R_finest)
 n_coarse = size(R_finest, 2)
-z_test = VectorMPI(ones(n_coarse))
+z_test = HPCVector(ones(n_coarse))
 println(io0(), "[DEBUG] z_test partition: $(z_test.partition)")
 
 # Compute R * z
@@ -105,7 +105,7 @@ println(io0(), "[DEBUG] \n--- Testing full Hessian assembly ---")
 # Let's simulate this
 w = g.w
 w_scaled = 2.0 .* Vector(w) # Example scaling
-W_diag = spdiagm(length(w), length(w), 0 => VectorMPI(w_scaled))
+W_diag = spdiagm(length(w), length(w), 0 => HPCVector(w_scaled))
 
 println(io0(), "[DEBUG] W_diag size: $(size(W_diag))")
 println(io0(), "[DEBUG] W_diag row_partition: $(W_diag.row_partition)")
@@ -148,7 +148,7 @@ end
 
 # Try factorization
 println(io0(), "[DEBUG] Attempting to factorize...")
-b_test = VectorMPI(ones(size(Rt_Dt_W_D_R, 1)))
+b_test = HPCVector(ones(size(Rt_Dt_W_D_R, 1)))
 try
     x_test = Rt_Dt_W_D_R \ b_test
     println(io0(), "[DEBUG] Factorization succeeded!")

test/test_apply_d.jl

Lines changed: 3 additions & 3 deletions
@@ -9,8 +9,8 @@ end
 using MultiGridBarrierMPI
 MultiGridBarrierMPI.Init()
 
-using LinearAlgebraMPI
-using LinearAlgebraMPI: VectorMPI, MatrixMPI, SparseMatrixMPI, io0
+using HPCLinearAlgebra
+using HPCLinearAlgebra: HPCVector, HPCMatrix, HPCSparseMatrix, io0
 using LinearAlgebra
 using SparseArrays
 using MultiGridBarrier
@@ -30,7 +30,7 @@ D_op = [g.operators[:dx], g.operators[:id]] # dx and id operators
 # Create a test vector (should be size 16)
 n = length(g.w)
 z_native = sin.(range(0, π, length=n))
-z_mpi = VectorMPI(z_native)
+z_mpi = HPCVector(z_native)
 
 println(io0(), "[DEBUG] z size: $(length(z_mpi))")
 println(io0(), "[DEBUG] z partition: $(z_mpi.partition)")

test/test_basic_ops.jl

Lines changed: 7 additions & 7 deletions
@@ -9,8 +9,8 @@ end
 using MultiGridBarrierMPI
 MultiGridBarrierMPI.Init()
 
-using LinearAlgebraMPI
-using LinearAlgebraMPI: VectorMPI, MatrixMPI, SparseMatrixMPI, io0
+using HPCLinearAlgebra
+using HPCLinearAlgebra: HPCVector, HPCMatrix, HPCSparseMatrix, io0
 using LinearAlgebra
 using SparseArrays
 
@@ -23,13 +23,13 @@ if rank == 0
     flush(stdout)
 end
 
-# Test: Two SparseMatrixMPI multiplication
+# Test: Two HPCSparseMatrix multiplication
 A_native = sparse([1.0 0.0; 2.0 3.0; 0.0 4.0]) # 3x2
 B_native = sparse([1.0 2.0 3.0; 4.0 5.0 6.0]) # 2x3
 C_expected = A_native * B_native # 3x3
 
-A_mpi = SparseMatrixMPI{Float64}(A_native)
-B_mpi = SparseMatrixMPI{Float64}(B_native)
+A_mpi = HPCSparseMatrix{Float64}(A_native)
+B_mpi = HPCSparseMatrix{Float64}(B_native)
 
 if rank == 0
     println("[DEBUG] A size: $(size(A_mpi)), B size: $(size(B_mpi))")
@@ -71,10 +71,10 @@ end
 n = size(AtA_expected, 1)
 reg = 0.01 * sparse(I, n, n)
 AtA_reg = AtA_expected + reg
-AtA_reg_mpi = SparseMatrixMPI{Float64}(AtA_reg)
+AtA_reg_mpi = HPCSparseMatrix{Float64}(AtA_reg)
 
 b = ones(n)
-b_mpi = VectorMPI(b)
+b_mpi = HPCVector(b)
 
 if rank == 0
     println("[DEBUG] Attempting to solve (A'A + reg) \\ b...")

test/test_column_extract.jl

Lines changed: 9 additions & 9 deletions
@@ -9,8 +9,8 @@ end
 using MultiGridBarrierMPI
 MultiGridBarrierMPI.Init()
 
-using LinearAlgebraMPI
-using LinearAlgebraMPI: VectorMPI, MatrixMPI, SparseMatrixMPI, io0
+using HPCLinearAlgebra
+using HPCLinearAlgebra: HPCVector, HPCMatrix, HPCSparseMatrix, io0
 using LinearAlgebra
 using SparseArrays
 using MultiGridBarrier
@@ -28,7 +28,7 @@ g = fem1d_mpi(Float64; L=2)
 D_op = [g.operators[:dx], g.operators[:id]]
 n = length(g.w)
 z_native = sin.(range(0, π, length=n))
-z_mpi = VectorMPI(z_native)
+z_mpi = HPCVector(z_native)
 
 # Apply D operators to get matrix
 Dz_mpi = hcat([D * z_mpi for D in D_op]...)
@@ -50,10 +50,10 @@ println(io0(), "[DEBUG] Testing column extraction y[:, 1]...")
 col1 = y_mpi[:, 1]
 println(io0(), "[DEBUG] col1 type: $(typeof(col1))")
 println(io0(), "[DEBUG] col1 size: $(size(col1))")
-if col1 isa VectorMPI
+if col1 isa HPCVector
     println(io0(), "[DEBUG] col1 partition: $(col1.partition)")
 else
-    println(io0(), "[DEBUG] col1 is NOT a VectorMPI!")
+    println(io0(), "[DEBUG] col1 is NOT a HPCVector!")
 end
 
 # Test element-wise multiplication
@@ -64,7 +64,7 @@ println(io0(), "[DEBUG] w partition: $(w.partition)")
 try
     w_col = w .* col1
     println(io0(), "[DEBUG] w .* col1 type: $(typeof(w_col))")
-    if w_col isa VectorMPI
+    if w_col isa HPCVector
         println(io0(), "[DEBUG] w .* col1 partition: $(w_col.partition)")
     end
     println(io0(), "[DEBUG] Element-wise multiplication succeeded!")
@@ -80,7 +80,7 @@ try
     foo = MultiGridBarrier.amgb_diag(D, w .* col1)
     println(io0(), "[DEBUG] amgb_diag result type: $(typeof(foo))")
    println(io0(), "[DEBUG] amgb_diag result size: $(size(foo))")
-    if foo isa SparseMatrixMPI
+    if foo isa HPCSparseMatrix
         println(io0(), "[DEBUG] amgb_diag row_partition: $(foo.row_partition)")
         println(io0(), "[DEBUG] amgb_diag col_partition: $(foo.col_partition)")
     end
@@ -99,7 +99,7 @@ try
     foo = MultiGridBarrier.amgb_diag(D, w .* col1)
     bar = D' * foo * D
     println(io0(), "[DEBUG] D'*foo*D size: $(size(bar))")
-    if bar isa SparseMatrixMPI
+    if bar isa HPCSparseMatrix
         println(io0(), "[DEBUG] D'*foo*D row_partition: $(bar.row_partition)")
         println(io0(), "[DEBUG] D'*foo*D col_partition: $(bar.col_partition)")
     end
@@ -132,7 +132,7 @@ if rank == 0
     println("[DEBUG] y difference (MPI vs native): $(norm(y_mpi_native - y_native))")
 
     col1_native = y_native[:, 1]
-    col1_mpi_native = col1 isa VectorMPI ? Vector(col1) : col1
+    col1_mpi_native = col1 isa HPCVector ? Vector(col1) : col1
     println("[DEBUG] col1 difference: $(norm(col1_mpi_native - col1_native))")
 end
 
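
The checks above exercise the indexing and broadcasting paths the barrier solver needs with the renamed types: apply each operator to a distributed vector, hcat the results, slice a column back out, and scale it by the quadrature weights. A condensed sketch of that flow, under the same assumptions as the test (a coarse mesh from fem1d_mpi, operators :dx and :id, weights taken from g.w, and MultiGridBarrier.amgb_diag used exactly as the test uses it; none of this is a documented public API):

    using MultiGridBarrierMPI
    MultiGridBarrierMPI.Init()
    using HPCLinearAlgebra: HPCVector, HPCSparseMatrix, io0
    using MultiGridBarrier, LinearAlgebra, SparseArrays

    g = fem1d_mpi(Float64; L=2)                       # small 1-D problem, as in the test
    D_op = [g.operators[:dx], g.operators[:id]]
    n = length(g.w)
    z = HPCVector(sin.(range(0, π, length=n)))        # was: VectorMPI(z_native)

    Dz = hcat([D * z for D in D_op]...)               # one column per operator
    col1 = Dz[:, 1]                                   # column extraction; expected to be an HPCVector
    w_col = g.w .* col1                               # element-wise scaling by the weights

    foo = MultiGridBarrier.amgb_diag(D_op[1], w_col)  # diagonal assembly helper used by the test
    bar = D_op[1]' * foo * D_op[1]                    # D' * diag(w .* y) * D product
    println(io0(), "size(bar) = $(size(bar))")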

test/test_d0_construction.jl

Lines changed: 6 additions & 6 deletions
@@ -9,8 +9,8 @@ end
 using MultiGridBarrierMPI
 MultiGridBarrierMPI.Init()
 
-using LinearAlgebraMPI
-using LinearAlgebraMPI: VectorMPI, MatrixMPI, SparseMatrixMPI, io0
+using HPCLinearAlgebra
+using HPCLinearAlgebra: HPCVector, HPCMatrix, HPCSparseMatrix, io0
 using LinearAlgebra
 using SparseArrays
 using MultiGridBarrier
@@ -37,7 +37,7 @@ n = size(D_dx_mpi, 1)
 println(io0(), "[DEBUG] Testing hcat of sparse matrices...")
 
 # Create zero matrix
-Z_mpi = SparseMatrixMPI{Float64}(spzeros(Float64, n, n))
+Z_mpi = HPCSparseMatrix{Float64}(spzeros(Float64, n, n))
 Z_native = spzeros(Float64, n, n)
 
 # Test 1: hcat(Z, D_dx) - like foo = [Z; D_dx] in amg_helper
@@ -113,15 +113,15 @@ y22 = ones(n) * 0.3
 # MPI version
 H_mpi = nothing
 # j=1: dx'*diag(w*y11)*dx
-foo = spdiagm(n, n, 0 => w_mpi .* VectorMPI(y11))
+foo = spdiagm(n, n, 0 => w_mpi .* HPCVector(y11))
 H_mpi = D0_dx_mpi' * foo * D0_dx_mpi
 
 # j=2: id'*diag(w*y22)*id
-foo = spdiagm(n, n, 0 => w_mpi .* VectorMPI(y22))
+foo = spdiagm(n, n, 0 => w_mpi .* HPCVector(y22))
 H_mpi = H_mpi + D0_id_mpi' * foo * D0_id_mpi
 
 # Cross term: dx'*diag(w*y12)*id + id'*diag(w*y12)*dx
-foo = spdiagm(n, n, 0 => w_mpi .* VectorMPI(y12))
+foo = spdiagm(n, n, 0 => w_mpi .* HPCVector(y12))
 H_mpi = H_mpi + D0_dx_mpi' * foo * D0_id_mpi + D0_id_mpi' * foo * D0_dx_mpi
 
 # Native version
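
The last hunk above is the Hessian-assembly pattern the solver builds level by level: H is a sum over operator pairs of D_i' * diag(w .* y_ij) * D_j, with the diagonal formed by the distributed spdiagm shown in the diff. A generic sketch of that accumulation; the helper name assemble_hessian and the y lookup are illustrative only, while spdiagm over an HPCVector pair and the D' * W * D products are taken from the hunk:

    using HPCLinearAlgebra: HPCVector
    using SparseArrays

    # D: vector of distributed operators (e.g. [dx, id]); w: HPCVector of quadrature weights;
    # y: matrix of native weight fields, each y[i, j] a vector of length n. Names are illustrative.
    function assemble_hessian(D, w, y, n)
        H = nothing
        for i in eachindex(D), j in eachindex(D)
            W = spdiagm(n, n, 0 => w .* HPCVector(y[i, j]))  # distributed diagonal, as in the diff
            term = D[i]' * W * D[j]
            H = H === nothing ? term : H + term
        end
        return H
    end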

test/test_debug_solve.jl

Lines changed: 3 additions & 3 deletions
@@ -9,8 +9,8 @@ end
 using MultiGridBarrierMPI
 MultiGridBarrierMPI.Init()
 
-using LinearAlgebraMPI
-using LinearAlgebraMPI: VectorMPI, MatrixMPI, SparseMatrixMPI, io0
+using HPCLinearAlgebra
+using HPCLinearAlgebra: HPCVector, HPCMatrix, HPCSparseMatrix, io0
 using LinearAlgebra
 using SparseArrays
 using MultiGridBarrier
@@ -26,7 +26,7 @@ end
 
 # Override solve to add diagnostics
 original_solve = MultiGridBarrier.solve
-function debug_solve(A::SparseMatrixMPI{T}, b::VectorMPI{T}) where T
+function debug_solve(A::HPCSparseMatrix{T}, b::HPCVector{T}) where T
     if rank == 0
         println("[DEBUG-SOLVE] Matrix size: $(size(A))")
         println("[DEBUG-SOLVE] Vector length: $(length(b))")
