Skip to content

Commit b31ba90

Browse files
[fix] remove ASIC ports before replication cleanup on empty transition (#232)
When all members are removed from a multicast group, the empty transition path cleaned up replication table entries but skipped process_membership_changes, leaving stale ports in the ASIC groups. Subsequent re-adds failed with an "already contains port" error (HTTP 500). This fix calls process_membership_changes before cleanup_empty_group_replication so that mc_port_remove runs on every member port.
1 parent 375a0f2 commit b31ba90

File tree

2 files changed

+67
-10
lines changed

2 files changed

+67
-10
lines changed

dpd-client/tests/integration_tests/mcast.rs

Lines changed: 42 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4762,7 +4762,11 @@ async fn test_multicast_empty_then_add_members_ipv6() -> TestResult {
47624762
// Update the internal group to add members (2 external, 1 underlay)
47634763
// Meaning: two decap/port-bitmap members.
47644764
let update_entry = types::MulticastGroupUpdateUnderlayEntry {
4765-
members: vec![external_member1, external_member2, underlay_member],
4765+
members: vec![
4766+
external_member1.clone(),
4767+
external_member2.clone(),
4768+
underlay_member.clone(),
4769+
],
47664770
};
47674771

47684772
let ipv6_update = types::UnderlayMulticastIpv6(match internal_group_ip {
@@ -4945,6 +4949,43 @@ async fn test_multicast_empty_then_add_members_ipv6() -> TestResult {
49454949

49464950
switch.packet_test(vec![send_final], expected_final)?;
49474951

4952+
// Re-add members after removing all. This exercises the full
4953+
// empty -> members -> empty -> members cycle and verifies that
4954+
// ASIC port state is properly cleaned up during the transition
4955+
// to empty, so that re-adding does not fail with a duplicate
4956+
// port error.
4957+
let readd_entry = types::MulticastGroupUpdateUnderlayEntry {
4958+
members: vec![external_member1, external_member2, underlay_member],
4959+
};
4960+
4961+
switch
4962+
.client
4963+
.multicast_group_update_underlay(
4964+
&ipv6_update,
4965+
&make_tag(TEST_TAG),
4966+
&readd_entry,
4967+
)
4968+
.await
4969+
.expect("Should re-add members after removing all");
4970+
4971+
let groups_after_readd = switch
4972+
.client
4973+
.multicast_groups_list_stream(None)
4974+
.try_collect::<Vec<_>>()
4975+
.await
4976+
.expect("Should list groups after re-add");
4977+
4978+
let readd_internal = groups_after_readd
4979+
.iter()
4980+
.find(|g| get_group_ip(g) == internal_group_ip)
4981+
.expect("Should find the internal group");
4982+
4983+
assert_eq!(
4984+
get_members(readd_internal).map(|m| m.len()).unwrap_or(0),
4985+
3,
4986+
"Internal group should have 3 members after re-add"
4987+
);
4988+
49484989
cleanup_test_group(&switch, external_group_ip, TEST_TAG).await.unwrap();
49494990
cleanup_test_group(&switch, internal_group_ip, TEST_TAG).await
49504991
}

dpd/src/mcast/mod.rs

Lines changed: 25 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -829,25 +829,41 @@ pub(crate) fn modify_group_internal(
829829
// Configure replication based on member count transitions
830830
let replication_info = match (
831831
new_group_info.members.is_empty(),
832-
group_entry.replication_info.is_some(),
832+
&group_entry.replication_info,
833833
) {
834-
(true, true) => {
835-
// Transition from members to empty - cleanup tables
834+
(true, Some(repl_info)) => {
835+
// Transition from members to empty.
836+
//
837+
// First, remove ports from ASIC groups before cleaning up
838+
// replication table entries, otherwise stale ports cause subsequent
839+
// re-adds to fail with "already contains port".
840+
process_membership_changes(
841+
s,
842+
group_ip.into(),
843+
&new_group_info.members,
844+
&group_entry,
845+
repl_info,
846+
)
847+
.inspect_err(|_e| {
848+
mcast.groups.insert(group_ip.into(), group_entry.clone());
849+
})
850+
.map_err(|e| rollback_ctx.rollback_and_restore(e))?;
851+
836852
cleanup_empty_group_replication(s, group_ip.into(), &group_entry)
837853
.map_err(|e| rollback_ctx.rollback_and_restore(e))?;
838-
// Immediately clear replication_info to maintain consistency
854+
839855
group_entry.replication_info = None;
840856
None
841857
}
842-
(false, false) => {
858+
(false, None) => {
843859
// Transition from empty to members - configure replication
844860
Some(configure_replication(group_entry.external_group_id()))
845861
}
846-
(false, true) => {
862+
(false, Some(_)) => {
847863
// Already has members and replication - keep existing
848864
group_entry.replication_info.clone()
849865
}
850-
(true, false) => {
866+
(true, None) => {
851867
// Already empty and no replication - keep none
852868
None
853869
}
@@ -870,7 +886,7 @@ pub(crate) fn modify_group_internal(
870886
s,
871887
group_ip.into(),
872888
&new_group_info.members,
873-
&mut group_entry,
889+
&group_entry,
874890
repl_info,
875891
)
876892
.inspect_err(|_e| {
@@ -1489,7 +1505,7 @@ fn process_membership_changes(
14891505
s: &Switch,
14901506
group_ip: IpAddr,
14911507
new_members: &[MulticastGroupMember],
1492-
group_entry: &mut MulticastGroup,
1508+
group_entry: &MulticastGroup,
14931509
replication_info: &MulticastReplicationInfo,
14941510
) -> DpdResult<(Vec<MulticastGroupMember>, Vec<MulticastGroupMember>)> {
14951511
// First validate that IPv4 doesn't have underlay members

0 commit comments

Comments (0)