Skip to content

Commit a16b702

Browse files
committed
Updating documentation to ensure running examples.
1 parent aa042d4 commit a16b702

8 files changed

Lines changed: 45 additions & 43 deletions

File tree

Project.toml

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,27 +7,24 @@ version = "0.1.0"
77
[deps]
88
AutoHashEquals = "15f4f7f2-30c1-5605-9d31-71845cf9641f"
99
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
10-
ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
1110
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
1211
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
1312
Mango = "5e49fdec-d473-4d14-b295-7bff2fcf1925"
1413
OSQP = "ab2f91bb-94b4-55e3-9ba0-7f65df51de79"
15-
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
1614
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
1715

1816
[compat]
1917
AutoHashEquals = "2.2.0"
2018
Distributions = "0.25.115"
21-
ECOS = "1.1.3"
2219
JuMP = "1.25.0"
2320
LinearAlgebra = "^1.9"
2421
Mango = "^0.5"
2522
OSQP = "0.8.1"
26-
Revise = "3.6.4"
2723
julia = "^1.9"
2824

2925
[extras]
3026
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
27+
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
3128
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
3229

3330
[targets]

docs/make.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,5 +44,5 @@ end
4444
deploydocs(
4545
repo="github.com/Digitalized-Energy-Systems/DistributedResourceOptimization.jl.git",
4646
push_preview=true,
47-
devbranch="main"
47+
devbranch="development"
4848
)

docs/src/algorithms/admm.md

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -128,9 +128,6 @@ The generic ADMM coordinator ([`ADMMGenericCoordinator`](@ref)) exposes several
128128
| `max_iters` | `1000` | Maximum number of ADMM iterations |
129129
| `abs_tol` | `1e-4` | Absolute primal/dual residual tolerance |
130130
| `rel_tol` | `1e-3` | Relative primal/dual residual tolerance |
131-
| `μ` | `10` | Residual ratio threshold for ρ adaptation |
132-
| `τ` | `2` | Multiplicative factor for ρ adaptation |
133-
| `slack_penalty` | `100` | Penalty for infeasibility slack variables |
134131

135132
## Complete Example — ADMM Sharing
136133

@@ -179,7 +176,7 @@ println(result(actor2))
179176
- Reducing `ρ` when primal residuals dominate
180177
- Increasing `ρ` when dual residuals dominate
181178
- Tightening `abs_tol` / `rel_tol` for higher precision
182-
- Increasing `max_iters` for complex problems
179+
- Increasing `max_iters` if the warning "reached max iterations" appears
183180

184181
## See Also
185182

src/algorithm/admm/core.jl

Lines changed: 6 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
export ADMMStart, ADMMAnswer, ADMMAnswer, ADMMGlobalActor, ADMMGlobalObjective, ADMMGenericCoordinator, create_admm_start
1+
export ADMMStart, ADMMAnswer, ADMMGlobalActor, ADMMGlobalObjective, ADMMGenericCoordinator, create_admm_start
22

33
struct ADMMStart
44
data::Any
@@ -52,13 +52,10 @@ end
5252

5353
@kwdef struct ADMMGenericCoordinator <: Coordinator
5454
global_actor::ADMMGlobalActor
55-
ρ::Float64 = 1.0
55+
ρ::Float64 = 1.0
5656
max_iters::Int64 = 1000
57-
slack_penalty::Int64 = 100
5857
abs_tol::Float64 = 1e-4
5958
rel_tol::Float64 = 1e-3
60-
μ::Real = 10
61-
τ::Real = 2
6259
end
6360

6461
# ADMM solver
@@ -68,8 +65,6 @@ function _start_coordinator(admm::ADMMGenericCoordinator, carrier::Carrier, inpu
6865
abs_tol = admm.abs_tol
6966
rel_tol = admm.rel_tol
7067
n = length(others(carrier, "coordinator"))
71-
μ = admm.μ
72-
τ = admm.τ
7368

7469
# Initialize
7570
x = [zeros(m) for i in 1:n]
@@ -80,7 +75,7 @@ function _start_coordinator(admm::ADMMGenericCoordinator, carrier::Carrier, inpu
8075
# 1. Local x-updates (in parallel)
8176
awaitables = []
8277
# send all async and get awaitable
83-
for (i,addr) in enumerate(others(carrier, "c"))
78+
for (i,addr) in enumerate(others(carrier, "coordinator"))
8479
push!(awaitables, send_awaitable(carrier, ADMMMessage(actor_correction(admm.global_actor, x, z, u, i), ρ), addr))
8580
end
8681
# await all awaitables and update x
@@ -103,21 +98,12 @@ function _start_coordinator(admm::ADMMGenericCoordinator, carrier::Carrier, inpu
10398
ϵ_pri = sqrt(m*n)*abs_tol + rel_tol*max(maximum(norm.(x)), maximum(norm.(z)))
10499
ϵ_dual = sqrt(m*n)*abs_tol + rel_tol*maximum(norm.(u))
105100
if r_norm < ϵ_pri && s_norm < ϵ_dual
106-
@warn "Converged in $k iterations."
101+
@debug "ADMM converged in $k iterations."
107102
break
108103
end
109-
110-
# Varying penalty parameter according to B. S. He, H. Yang, and S. L. Wang, “Alternating direction method with self
111-
# adaptive penalty parameters for monotone variational inequalities,”
112-
# if r_norm > μ * s_norm
113-
# ρ = ρ * τ
114-
# elseif s_norm > μ * r_norm
115-
# ρ = ρ / τ
116-
# end
117-
104+
118105
if k == max_iters
119-
@warn "Reached max iterations ($max_iters) without full convergence."
120-
throw("ADMM not converged $x, $u")
106+
@warn "ADMM reached max iterations ($max_iters) without full convergence (r=$r_norm, s=$s_norm)."
121107
end
122108
end
123109
return x, z, u

src/algorithm/consensus/averaging.jl

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11

2-
export ConsensusActor, NoConsensusActor, AveragingConsensusAlgorithm, AveragingConsensusMessage, create_averaging_consensus_participant, gradient_term
2+
export ConsensusActor, NoConsensusActor, AveragingConsensusAlgorithm, AveragingConsensusMessage, create_averaging_consensus_participant, create_averaging_consensus_start, gradient_term
33

44
abstract type ConsensusActor end
55

@@ -78,8 +78,19 @@ function on_exchange_message(algorithm_data::AveragingConsensusAlgorithm, carrie
7878
end
7979
end
8080

81-
function create_averaging_consensus_participant(finish_callback::Function, consensus_actor::ConsensusActor; initial_λ::Real=10, α::Real=0.3, max_iter::Int=50)
81+
function create_averaging_consensus_participant(finish_callback::Function, consensus_actor::ConsensusActor; initial_λ::Real=10, α::Real=0.3, max_iter::Int=50)
8282
appl_consensus_actor = isnothing(consensus_actor) ? NoConsensusActor() : consensus_actor
83-
83+
8484
return AveragingConsensusAlgorithm(finish_callback=finish_callback, initial_λ=initial_λ, α=α, actor=appl_consensus_actor, max_iter=max_iter)
85+
end
86+
87+
"""
88+
create_averaging_consensus_start(initial_λ::Real, data::Any=nothing) -> AveragingConsensusMessage
89+
90+
Create the initial start message for averaging consensus. `initial_λ` sets the starting
91+
price/signal value (scalar, broadcast to all dimensions), and `data` is any auxiliary
92+
payload forwarded unchanged to each participant's `gradient_term`.
93+
"""
94+
function create_averaging_consensus_start(initial_λ::Real, data::Any=nothing)
95+
return AveragingConsensusMessage([initial_λ], 0, data, true)
8596
end

src/algorithm/heuristic/cohda/core.jl

Lines changed: 14 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
export on_exchange_message, ScheduleSelection, SystemConfig, SolutionCandidate, TargetParams,
2-
WorkingMemory, COHDAAlgorithmData, create_cohda_start_message, create_cohda_participant
2+
WorkingMemory, COHDAAlgorithmData, create_cohda_start_message, create_cohda_participant, result
33

44
using AutoHashEquals
55

@@ -56,15 +56,15 @@ end
5656
end
5757

5858
function initial_schedule(local_decider::DefaultLocalDecider, memory::WorkingMemory)
59-
return local_decider.schedule_provider(memory)[0]
59+
return local_decider.schedule_provider(memory)[1]
6060
end
6161

6262
@kwdef mutable struct COHDAAlgorithmData <: DistributedAlgorithm
6363
participant_id::Int
6464
counter::Int = 0
6565
memory::WorkingMemory = WorkingMemory(nothing, SystemConfig(Dict()), nothing)
6666
performance_function::Function = cohda_default_performance
67-
decider::LocalDecider = DefaultLocalDecider()
67+
decider::Union{LocalDecider,Nothing} = nothing
6868
end
6969

7070
function decide(cohda_data::COHDAAlgorithmData, decider::LocalDecider, sysconfig::SystemConfig, candidate::SolutionCandidate)
@@ -211,7 +211,7 @@ function create_from_updated_sysconf(participant_id::Int, sysconfig::SystemConfi
211211
base_mat[id, :] = choice.schedule
212212
end
213213
base_mat[participant_id, :] = new_schedule
214-
return SolutionCandidate(participant_id, base_mat, nothing, keys(sysconfig.schedule_choices))
214+
return SolutionCandidate(participant_id, base_mat, nothing, Set{Int}(keys(sysconfig.schedule_choices)))
215215
end
216216

217217
function decide(cohda_data::COHDAAlgorithmData, decider::DefaultLocalDecider, sysconfig::SystemConfig, candidate::SolutionCandidate)
@@ -313,4 +313,14 @@ function create_cohda_participant_with_decider(participant_id::Int,
313313
decider=decider)
314314
end
315315

316+
"""
317+
result(actor::COHDAAlgorithmData) -> Vector{Float64}
318+
319+
Return the aggregate schedule chosen by the COHDA optimization — the column-wise sum of
320+
all participants' schedules in the best solution candidate known to this actor.
321+
"""
322+
function result(actor::COHDAAlgorithmData)
323+
return vec(sum(actor.memory.solution_candidate.schedules, dims=1))
324+
end
325+
316326

src/algorithm/heuristic/cohda/decider.jl

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,10 @@ using Distributions
33

44
@kwdef struct LocalSearchDecider <: LocalDecider
55
initial_schedule::Vector{Float64}
6-
corridors::Vector{Tuple{Float64}}
6+
corridors::Vector{Tuple{Float64,Float64}}
77
local_performance::Function
88
convergence_force_factor::Float64 = 0.1
9-
max_iterations = 10
9+
max_iterations::Int = 10
1010
sample_size_per_value::Int = 10
1111
distribution::Function = (low, up) -> Uniform(low, up)
1212
end
@@ -20,18 +20,19 @@ function local_performance_with_global_share(decider::LocalSearchDecider, schedu
2020
end
2121

2222
function find_new_value(decider::LocalSearchDecider, current_index::Int, current_best_schedule::Vector{Float64}, delta_to_target::Float64)
23-
possible_values = rand(decider.distribution, decider.sample_size_per_value)
23+
corridor = decider.corridors[current_index]
24+
possible_values = rand(decider.distribution(corridor[1], corridor[2]), decider.sample_size_per_value)
2425
current_value = current_best_schedule[current_index]
2526
new_value_performance_tuples::Vector{Tuple{Float64,Float64}} = []
2627
new_value = nothing
2728
iteration = 1
28-
while length(possible_values) > 0 || iteration > decider.max_iterations
29+
while length(possible_values) > 0 && iteration <= decider.max_iterations
2930
copy_bs = copy(current_best_schedule)
3031
random_index = ceil(Int, rand() * length(possible_values))
3132
new_value = possible_values[random_index]
3233
copy_bs[current_index] = new_value
3334

34-
# we calculate the performance with a variable local performance share and an adaptive global performance share
35
# we calculate the performance with a variable local performance share and an adaptive global performance share
3536
performance = local_performance_with_global_share(decider, copy_bs, new_value, current_value, delta_to_target)
3637

3738
push!(new_value_performance_tuples, (new_value, performance))
@@ -41,7 +42,7 @@ function find_new_value(decider::LocalSearchDecider, current_index::Int, current
4142
sort!(new_value_performance_tuples)
4243
first = new_value_performance_tuples[1][1]
4344
second = new_value_performance_tuples[2][1]
44-
third = new_value_performance_tuples[2][1]
45+
third = new_value_performance_tuples[3][1]
4546

4647
# cut out undesirable parts of the whole vector
4748
if first > second > third

test/runtests.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ function DistributedResourceOptimization.send_to_other(carrier::TestCarrier, con
1212
push!(message_buffer, content)
1313
end
1414

15-
function DistributedResourceOptimization.schedule_using(to_be_scheduled::Function, carrier::TestCarrier, delay_s::Float64)
15+
function DistributedResourceOptimization.schedule_using(carrier::TestCarrier, to_be_scheduled::Function, ::Float64)
1616
to_be_scheduled()
1717
end
1818

0 commit comments

Comments
 (0)