Skip to content

Commit c1c47d6

Browse files
ptalgulk01claude
and committed
Migrate MCO tests from openshift-tests-private
Migrated 17 test cases from openshift-tests-private/test/extended/mco/ to machine-config-operator/test/extended-priv/: - mco_prune.go: 3 tests for prune renderedmachineconfigs functionality (73148, 73155, 74606) - mco_machineconfigpool.go: 10 tests for MCP operations (43048, 43064, 56131, 77354, 42390, 45318, 52373, 56123, 70125, 72007, 75149, 76108, 85073) - mco_drain.go: 4 tests for node drain behavior (43245, 51381, 49568, 49672) Added supporting helper functions: - MachineConfig: NewMachineConfigList, GetRenderedMachineConfigForMaster, GetRenderedMachineConfigForMasterOrFail, GetMCPRenderedMachineConfigsOrFail - MachineConfigPool: SetMaxUnavailable, RemoveMaxUnavailable, GetSortedUpdatedNodes, IsOCL, GetAllApplicableExtensionsToMCPOrFail - Controller: GetLogsAsList, GetFilteredLogsAsList - Node: FilterSchedulableNodesOrFail - Util: IsSNO, IsExecShellError, UnwrapExecCode, getTimeDifferenceInMinute, filterTimestampFromLogs, AddToAllMachineSets, checkUpdatedLists Added template files: - change-worker-ign-version.yaml - pod-disruption-budget.yaml - create-pod.yaml - add-mc-to-trigger-node-drain.yaml All tests build successfully and appear in test listing. Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
1 parent 89245a0 commit c1c47d6

13 files changed

Lines changed: 1621 additions & 0 deletions

test/extended-priv/const.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,8 @@ const (
5151
VspherePlatform = "vsphere"
5252
// BaremetalPlatform value used to identify BareMetal infrastructure
5353
BaremetalPlatform = "baremetal"
54+
// NonePlatform value used to identify a None Platform value
55+
NonePlatform = "none"
5456

5557
// ExpirationDockerfileLabel Expiration label in Dockerfile
5658
ExpirationDockerfileLabel = `LABEL maintainer="mco-qe-team" quay.expires-after=24h`

test/extended-priv/controller.go

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -191,3 +191,36 @@ func checkMCCPanic(oc *exutil.CLI) {
191191

192192
logger.Infof("OK!\n")
193193
}
194+
195+
// GetLogsAsList returns the MCO controller logs as a list strings. One string per line
196+
func (mcc *Controller) GetLogsAsList() ([]string, error) {
197+
logs, err := mcc.GetLogs()
198+
if err != nil {
199+
return nil, err
200+
}
201+
202+
return strings.Split(logs, "\n"), nil
203+
}
204+
205+
// GetFilteredLogsAsList returns the filtered logs as a lit of strings, one string per line.
206+
func (mcc *Controller) GetFilteredLogsAsList(regex string) ([]string, error) {
207+
logs, err := mcc.GetLogsAsList()
208+
if err != nil {
209+
return nil, err
210+
}
211+
212+
filteredLogs := []string{}
213+
for _, line := range logs {
214+
match, err := regexp.MatchString(regex, line)
215+
if err != nil {
216+
logger.Errorf("Error filtering log lines. Error: %s", err)
217+
return nil, err
218+
}
219+
220+
if match {
221+
filteredLogs = append(filteredLogs, line)
222+
}
223+
}
224+
225+
return filteredLogs, nil
226+
}

test/extended-priv/machineconfig.go

Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,11 @@ type MachineConfigList struct {
1717
ResourceList
1818
}
1919

20+
// NewMachineConfigList construct a new node list struct to handle all existing nodes
21+
func NewMachineConfigList(oc *exutil.CLI) *MachineConfigList {
22+
return &MachineConfigList{*NewResourceList(oc, "mc")}
23+
}
24+
2025
// MachineConfig struct is used to handle MachineConfig resources in OCP
2126
type MachineConfig struct {
2227
Resource
@@ -149,3 +154,36 @@ func DisableSkew(machineConfiguration *MachineConfiguration) {
149154
o.Eventually(machineConfiguration.IsGenerationUpToDate, "2m", "10s").Should(o.BeTrue(), "MachineConfiguration observedGeneration did not catch up to generation")
150155
logger.Infof("Skew functionality has been disabled")
151156
}
157+
158+
// GetRenderedMachineConfigForMaster returns a list with all the MCs whose name starts with "render-master"
159+
func (mcl *MachineConfigList) GetRenderedMachineConfigForMaster() ([]*MachineConfig, error) {
160+
mcl.SetItemsFilter(`?(@.metadata.ownerReferences[0].name=="master")`)
161+
allMCs, err := mcl.GetAll()
162+
if err != nil {
163+
return nil, err
164+
}
165+
166+
returnMCs := []*MachineConfig{}
167+
168+
for _, mc := range allMCs {
169+
if strings.HasPrefix(mc.GetName(), "rendered-master") {
170+
returnMCs = append(returnMCs, &MachineConfig{Resource: *mc})
171+
}
172+
}
173+
174+
return returnMCs, nil
175+
}
176+
177+
func (mcl *MachineConfigList) GetRenderedMachineConfigForMasterOrFail() []*MachineConfig {
178+
renderedMcMasterList, err := mcl.GetRenderedMachineConfigForMaster()
179+
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
180+
return renderedMcMasterList
181+
182+
}
183+
184+
// GetMachineConfigCreatedByMCPs returns a list of the machineconfigs that were created by a MCP
185+
func (mcl *MachineConfigList) GetMCPRenderedMachineConfigsOrFail() []*MachineConfig {
186+
renderedMcList, err := mcl.GetRenderedMachineConfigForMaster()
187+
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of the machineconfigs that were created by a MCP ")
188+
return renderedMcList
189+
}

test/extended-priv/machineconfigpool.go

Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,20 @@ func (mcp *MachineConfigPool) pause(enable bool) {
7171
o.Expect(err).NotTo(o.HaveOccurred())
7272
}
7373

74+
// SetMaxUnavailable sets the value for maxUnavailable
75+
func (mcp *MachineConfigPool) SetMaxUnavailable(maxUnavailable int) {
76+
logger.Infof("patch mcp %v, change spec.maxUnavailable to %d", mcp.name, maxUnavailable)
77+
err := mcp.Patch("merge", fmt.Sprintf(`{"spec":{"maxUnavailable": %d}}`, maxUnavailable))
78+
o.Expect(err).NotTo(o.HaveOccurred())
79+
}
80+
81+
// RemoveMaxUnavailable removes spec.maxUnavailable attribute from the pool config
82+
func (mcp *MachineConfigPool) RemoveMaxUnavailable() {
83+
logger.Infof("patch mcp %v, removing spec.maxUnavailable", mcp.name)
84+
err := mcp.Patch("json", `[{ "op": "remove", "path": "/spec/maxUnavailable" }]`)
85+
o.Expect(err).NotTo(o.HaveOccurred())
86+
}
87+
7488
// IsPaused return true is mcp is paused
7589
func (mcp *MachineConfigPool) IsPaused() bool {
7690
return IsTrue(mcp.GetOrFail(`{.spec.paused}`))
@@ -1492,6 +1506,23 @@ func FilterExtensions(extensions map[string][]string, hasARM64, fips bool, osIma
14921506
return filteredExtensions, extensionNames, packages
14931507
}
14941508

1509+
// GetAllApplicableExtensionsToMCPOrFail returns all the extensions that are supported for the given MCP, and all the packages that will install those extensions
1510+
func GetAllApplicableExtensionsToMCPOrFail(mcp *MachineConfigPool) (extensions, packages []string) {
1511+
fips := isFIPSEnabledInClusterConfig(mcp.GetOC().AsAdmin())
1512+
1513+
armNodes, err := mcp.GetNodesByArchitecture(architecture.ARM64)
1514+
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting the list of ARM nodes in %s", mcp)
1515+
1516+
osImageStream, err := GetEffectiveOsImageStream(mcp)
1517+
o.Expect(err).NotTo(o.HaveOccurred(), "Error getting effective osImageStream from MCP %s", mcp.GetName())
1518+
1519+
_, extensions, packages = FilterExtensions(AllExtenstions, len(armNodes) > 0, fips, osImageStream)
1520+
1521+
logger.Infof("All extensions that can be applied to %s: %s", mcp, extensions)
1522+
logger.Infof("All packages that will be installed with those extensions: %s", packages)
1523+
return extensions, packages
1524+
}
1525+
14951526
func (mcp *MachineConfigPool) GetNodesWithoutArchitecture(arch architecture.Architecture, archs ...architecture.Architecture) ([]*Node, error) {
14961527
archsList := arch.String()
14971528
for _, itemArch := range archs {
@@ -1548,3 +1579,97 @@ func GetPoolWithArchDifferentFromOrFail(oc *exutil.CLI, arch architecture.Archit
15481579
e2e.Failf("Something went wrong. There is no suitable pool to execute the test case. There is no pool with nodes using an architecture different from %s", arch)
15491580
return nil
15501581
}
1582+
1583+
// GetSortedUpdatedNodes returns a list of nodes in the order that they are being updated by the MCO.
// If maxUnavailable>0, then the function will fail if more than maxUnavailable nodes are being updated at the same time.
// The function polls until every node in the pool has started updating (or the estimated wait duration expires),
// appending each node to the result the first time it is observed in the "updating" state.
func (mcp *MachineConfigPool) GetSortedUpdatedNodes(maxUnavailable int) []*Node {
	timeToWait := mcp.estimateWaitDuration()
	logger.Infof("Waiting %s in pool %s for all nodes to start updating.", timeToWait, mcp.name)

	poolNodes, errget := mcp.GetNodes()
	o.Expect(errget).NotTo(o.HaveOccurred(), fmt.Sprintf("Cannot get nodes in pool %s", mcp.GetName()))

	// pendingNodes and updatedNodes are mutated by the poll closure across
	// iterations: nodes move from pending to updated as they start updating.
	pendingNodes := poolNodes
	updatedNodes := []*Node{}
	immediate := false
	err := wait.PollUntilContextTimeout(context.TODO(), 20*time.Second, timeToWait, immediate, func(_ context.Context) (bool, error) {
		// If there are degraded machines, stop polling and fail the test immediately.
		degradedstdout, degradederr := mcp.getDegradedMachineCount()
		if degradederr != nil {
			// Transient error reading the degraded count: retry on the next poll round.
			logger.Errorf("the err:%v, and try next round", degradederr)
			return false, nil
		}

		if degradedstdout != 0 {
			logger.Errorf("Degraded MC:\n%s", mcp.PrettyString())
			exutil.AssertWaitPollNoErr(fmt.Errorf("Degraded machines"), fmt.Sprintf("mcp %s has degraded %d machines", mcp.name, degradedstdout))
		}

		// Check that there aren't more than maxUnavailable nodes updating at the same time.
		// The count is taken over ALL pool nodes (not just pending ones) so that every
		// currently-updating node contributes to the limit.
		if maxUnavailable > 0 {
			totalUpdating := 0
			for _, node := range poolNodes {
				isUpdating, err := node.IsUpdating()
				if err != nil {
					// A node state error aborts the poll (returned as the poll error).
					logger.Errorf("Error getting IsUpdating state for node %s: %v", node.GetName(), err)
					return false, err
				}
				if isUpdating {
					totalUpdating++
				}
			}
			if totalUpdating > maxUnavailable {
				// print nodes for debug
				// NOTE(review): the error returned by Execute() is deliberately ignored
				// here — this call is best-effort debug output only.
				mcp.oc.Run("get").Args("nodes").Execute()
				exutil.AssertWaitPollNoErr(fmt.Errorf("maxUnavailable Not Honored. Pool %s, error: %d nodes were updating at the same time. Only %d nodes should be updating at the same time", mcp.GetName(), totalUpdating, maxUnavailable), "")
			}
		}

		// Move every pending node that has started updating into updatedNodes,
		// preserving the order in which the updates were first observed.
		remainingNodes := []*Node{}
		for _, node := range pendingNodes {
			isUpdating, err := node.IsUpdating()
			if err != nil {
				logger.Errorf("Error getting IsUpdating state for node %s: %v", node.GetName(), err)
				return false, err
			}
			if isUpdating {
				logger.Infof("Node %s is UPDATING", node.GetName())
				updatedNodes = append(updatedNodes, node)
			} else {
				remainingNodes = append(remainingNodes, node)
			}
		}

		if len(remainingNodes) == 0 {
			logger.Infof("All nodes have started to be updated on mcp %s", mcp.name)
			return true, nil

		}
		logger.Infof(" %d remaining nodes", len(remainingNodes))
		pendingNodes = remainingNodes
		return false, nil
	})

	exutil.AssertWaitPollNoErr(err, fmt.Sprintf("Could not get the list of updated nodes on mcp %s", mcp.name))
	return updatedNodes
}
1656+
1657+
// IsOCL returns true if the pool is using On Cluster Layering functionality
1658+
func (mcp MachineConfigPool) IsOCL() (bool, error) {
1659+
isOCLEnabled, err := IsFeaturegateEnabled(mcp.GetOC(), "OnClusterBuild")
1660+
if err != nil {
1661+
return false, err
1662+
}
1663+
if !isOCLEnabled {
1664+
logger.Infof("IS pool %s OCL: false", mcp.GetName())
1665+
return false, nil
1666+
}
1667+
1668+
mosc, err := mcp.GetMOSC()
1669+
if err != nil {
1670+
return false, err
1671+
}
1672+
isOCL := mosc != nil
1673+
logger.Infof("IS pool %s OCL: %t", mcp.GetName(), isOCL)
1674+
return isOCL, err
1675+
}

0 commit comments

Comments
 (0)