diff --git a/CHANGELOG/release-notes-v2.0.0.md b/CHANGELOG/release-notes-v2.0.0.md new file mode 100644 index 0000000000..86f9f71a29 --- /dev/null +++ b/CHANGELOG/release-notes-v2.0.0.md @@ -0,0 +1,31 @@ +## v2.0.0 + +## Enhancements +- feat: Rollout 5.2.0 (#6889) +- feat: Added support for tcp in virtual service and changed the apiVersion for externalSecrets (#6892) +- feat: add helm_take_ownership and helm_redeployment_request columns to user_deployment_request table (#6888) +- feat: Revamped Devtron UI with multiple dashboards (#6884) +- feat: Added support to override container name (#6880) +- feat: Increase max length for TeamRequest name field (#6876) +- feat: Added namespace support for virtualService and destinationRule (#6868) +- feat: feature flag for encryption (#6856) +- feat: encryption for db credentials (#6852) +## Bugs +- fix: migrate proxy chart dependencies and refactor related functions (#6899) +- fix: enhance validation and error handling in cluster update process (#6887) +- fix: Invalid type casting error for custom charts (#6883) +- fix: validation on team name (#6872) +- fix: sql injection (#6861) +- fix: user manager fix (#6854) +## Others +- misc: Add support for migrating plugin metadata to parent metadata (#6902) +- misc: update UserDeploymentRequestWithAdditionalFields struct to include tableName for PostgreSQL compatibility (#6896) +- chore: rename SQL migration files for consistency (#6885) +- misc: Vc empty ns fix (#6871) +- misc: added validation on create environment (#6859) +- misc: migration unique constraint on mpc (#6851) +- misc: helm app details API spec (#6850) +- misc: api Spec Added for draft (#6849) +- misc: api Specs added for lock config (#6847) + + diff --git a/README.md b/README.md index cae02ab980..01653f1cde 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,28 @@ Note: We have restructured the readme. If you are looking for Devtron's CI/CD capabilities, please [click here](#devtron-platform) # - +

-

The Kubernetes Platform That Eliminates Operational Chaos

+

AI-Native Kubernetes Management Platform

+

Kubernetes is powerful but complex. Devtron unifies app and infrastructure management with an AI teammate to simplify operations and accelerate delivery.


-«Explore Documentation» || -«Try Devtron Demo» +Explore Documentation +· +Try Devtron SaaS +

Website · Blogs · Join Discord channel -· -Twitter . YouTube @@ -123,7 +124,7 @@ The Devtron Platform provides a complete solution for all DevOps requirements, h With Devtron Platform, you can accelerate your application delivery lifecycle by leveraging advanced features like canary deployments, A/B testing, and automated rollbacks. Devtron Platform helps you achieve faster time-to-market and improved application reliability by simplifying Kubernetes operations and streamlining development workflows. -

+

Devtron deeply integrates with products across the lifecycle of microservices,i.e., CI, CD, security, cost, debugging, and observability via an intuitive web interface and helps you deploy, observe, manage & debug applications across all your Kubernetes clusters. diff --git a/Wire.go b/Wire.go index e3da203bd6..0ea78f820a 100644 --- a/Wire.go +++ b/Wire.go @@ -154,6 +154,7 @@ import ( "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs" repository7 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" "github.com/devtron-labs/devtron/pkg/notifier" + "github.com/devtron-labs/devtron/pkg/overview" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/draftAwareConfigService" "github.com/devtron-labs/devtron/pkg/pipeline/executors" @@ -984,6 +985,20 @@ func InitializeApp() (*App, error) { acdConfig.NewArgoCDConfigGetter, wire.Bind(new(acdConfig.ArgoCDConfigGetter), new(*acdConfig.ArgoCDConfigGetterImpl)), + + // overview starts + overview.OverviewWireSet, + restHandler.NewOverviewRestHandlerImpl, + wire.Bind(new(restHandler.OverviewRestHandler), new(*restHandler.OverviewRestHandlerImpl)), + + router.NewOverviewRouterImpl, + wire.Bind(new(router.OverviewRouter), new(*router.OverviewRouterImpl)), + + restHandler.NewInfraOverviewRestHandlerImpl, + wire.Bind(new(restHandler.InfraOverviewRestHandler), new(*restHandler.InfraOverviewRestHandlerImpl)), + + router.NewInfraOverviewRouterImpl, + wire.Bind(new(router.InfraOverviewRouter), new(*router.InfraOverviewRouterImpl)), ) return &App{}, nil } diff --git a/api/k8s/capacity/k8sCapacityRestHandler.go b/api/k8s/capacity/k8sCapacityRestHandler.go index 75f9254392..dcdcee6582 100644 --- a/api/k8s/capacity/k8sCapacityRestHandler.go +++ b/api/k8s/capacity/k8sCapacityRestHandler.go @@ -21,11 +21,13 @@ import ( "errors" "fmt" "github.com/devtron-labs/common-lib/utils" - bean2 "github.com/devtron-labs/devtron/pkg/cluster/bean" + clusterBean 
"github.com/devtron-labs/devtron/pkg/cluster/bean" "github.com/devtron-labs/devtron/pkg/cluster/environment" "github.com/devtron-labs/devtron/pkg/cluster/rbac" "github.com/devtron-labs/devtron/pkg/cluster/read" bean3 "github.com/devtron-labs/devtron/pkg/k8s/bean" + overviewBean "github.com/devtron-labs/devtron/pkg/overview/bean" + overviewCache "github.com/devtron-labs/devtron/pkg/overview/cache" "gopkg.in/go-playground/validator.v9" "net/http" "strconv" @@ -53,15 +55,16 @@ type K8sCapacityRestHandler interface { EditNodeTaints(w http.ResponseWriter, r *http.Request) } type K8sCapacityRestHandlerImpl struct { - logger *zap.SugaredLogger - k8sCapacityService capacity.K8sCapacityService - userService user.UserService - enforcer casbin.Enforcer - clusterService cluster.ClusterService - environmentService environment.EnvironmentService - clusterRbacService rbac.ClusterRbacService - clusterReadService read.ClusterReadService - validator *validator.Validate + logger *zap.SugaredLogger + k8sCapacityService capacity.K8sCapacityService + userService user.UserService + enforcer casbin.Enforcer + clusterService cluster.ClusterService + environmentService environment.EnvironmentService + clusterRbacService rbac.ClusterRbacService + clusterReadService read.ClusterReadService + validator *validator.Validate + clusterCacheService overviewCache.ClusterCacheService } func NewK8sCapacityRestHandlerImpl(logger *zap.SugaredLogger, @@ -70,17 +73,21 @@ func NewK8sCapacityRestHandlerImpl(logger *zap.SugaredLogger, clusterService cluster.ClusterService, environmentService environment.EnvironmentService, clusterRbacService rbac.ClusterRbacService, - clusterReadService read.ClusterReadService, validator *validator.Validate) *K8sCapacityRestHandlerImpl { + clusterReadService read.ClusterReadService, + validator *validator.Validate, + clusterCacheService overviewCache.ClusterCacheService, +) *K8sCapacityRestHandlerImpl { return &K8sCapacityRestHandlerImpl{ - logger: logger, - 
k8sCapacityService: k8sCapacityService, - userService: userService, - enforcer: enforcer, - clusterService: clusterService, - environmentService: environmentService, - clusterRbacService: clusterRbacService, - clusterReadService: clusterReadService, - validator: validator, + logger: logger, + k8sCapacityService: k8sCapacityService, + userService: userService, + enforcer: enforcer, + clusterService: clusterService, + environmentService: environmentService, + clusterRbacService: clusterRbacService, + clusterReadService: clusterReadService, + validator: validator, + clusterCacheService: clusterCacheService, } } @@ -98,7 +105,7 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListRaw(w http.ResponseWrit return } // RBAC enforcer applying - var authenticatedClusters []*bean2.ClusterBean + var authenticatedClusters []*clusterBean.ClusterBean var clusterDetailList []*bean.ClusterCapacityDetail for _, cluster := range clusters { authenticated, err := handler.clusterRbacService.CheckAuthorization(cluster.ClusterName, cluster.Id, token, userId, true) @@ -140,7 +147,7 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListWithDetail(w http.Respo return } // RBAC enforcer applying - var authenticatedClusters []*bean2.ClusterBean + var authenticatedClusters []*clusterBean.ClusterBean for _, cluster := range clusters { authenticated, err := handler.clusterRbacService.CheckAuthorization(cluster.ClusterName, cluster.Id, token, userId, true) if err != nil { @@ -156,11 +163,21 @@ func (handler *K8sCapacityRestHandlerImpl) GetClusterListWithDetail(w http.Respo common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) return } - clusterDetailList, err := handler.k8sCapacityService.GetClusterCapacityDetailList(r.Context(), authenticatedClusters) - if err != nil { - handler.logger.Errorw("error in getting cluster capacity detail list", "err", err) - common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) - return + // Try to get data from cache 
if available + var clusterDetailList []*bean.ClusterCapacityDetail + cachedOverview, cacheFound := handler.clusterCacheService.GetClusterOverview() + if cacheFound { + handler.logger.Infow("serving cluster capacity details from cache", "totalClusters", cachedOverview.TotalClusters) + // Convert ClusterOverviewResponse to RawClusterCapacityDetails list and filter by RBAC + clusterDetailList = handler.filterAuthorizedClusterDetails(cachedOverview, authenticatedClusters) + } else { + handler.logger.Infow("cache not available, fetching cluster capacity details from k8s API") + clusterDetailList, err = handler.k8sCapacityService.GetClusterCapacityDetailList(r.Context(), authenticatedClusters) + if err != nil { + handler.logger.Errorw("error in getting cluster capacity detail list", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } } common.WriteJsonResp(w, nil, clusterDetailList, http.StatusOK) } @@ -473,3 +490,38 @@ func (handler *K8sCapacityRestHandlerImpl) EditNodeTaints(w http.ResponseWriter, } common.WriteJsonResp(w, nil, resp, http.StatusOK) } + +// filterAuthorizedClusterDetails converts ClusterOverviewResponse to RawClusterCapacityDetails list +// and filters based on authenticated clusters. It includes: +// 1. Clusters from cache (healthy clusters with capacity data) +// 2. Virtual clusters (from database, not in cache) +// 3. 
Clusters with connection errors (from database, not in cache) +func (handler *K8sCapacityRestHandlerImpl) filterAuthorizedClusterDetails( + cachedOverview *overviewBean.ClusterOverviewResponse, + authenticatedClusters []*clusterBean.ClusterBean, +) []*bean.ClusterCapacityDetail { + // Create maps for quick lookup + authenticatedClusterIds := make(map[int]bool) + for _, authenticatedCluster := range authenticatedClusters { + authenticatedClusterIds[authenticatedCluster.Id] = true + } + + // Authenticated cluster details + clusterDetailList := make([]*bean.ClusterCapacityDetail, 0, len(authenticatedClusters)) + + // Add clusters from cache + for _, capacityDetail := range cachedOverview.RawClusterCapacityDetails { + // Only include authenticated clusters + if !authenticatedClusterIds[capacityDetail.Id] { + continue + } + clusterDetailList = append(clusterDetailList, capacityDetail) + } + + handler.logger.Debugw("converted and filtered cluster details from cache", + "totalCached", len(cachedOverview.RawClusterCapacityDetails), + "authenticated", len(authenticatedClusters), + "converted", len(clusterDetailList)) + + return clusterDetailList +} diff --git a/api/restHandler/AttributesRestHandlder.go b/api/restHandler/AttributesRestHandlder.go index 767bc2d1ff..5d5da856f7 100644 --- a/api/restHandler/AttributesRestHandlder.go +++ b/api/restHandler/AttributesRestHandlder.go @@ -19,14 +19,16 @@ package restHandler import ( "encoding/json" "errors" - "github.com/devtron-labs/devtron/pkg/attributes/bean" + "fmt" "net/http" "strconv" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/pkg/attributes" + "github.com/devtron-labs/devtron/pkg/attributes/bean" "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" "github.com/devtron-labs/devtron/pkg/auth/user" + "github.com/devtron-labs/devtron/util/sliceUtil" "github.com/gorilla/mux" "go.uber.org/zap" ) @@ -57,6 +59,20 @@ func NewAttributesRestHandlerImpl(logger *zap.SugaredLogger, 
enforcer casbin.Enf } return userAuthHandler } + +// isInternalOnlyKey checks if the given key is internal-only and should not be exposed +func (handler AttributesRestHandlerImpl) isInternalOnlyKey(key string) bool { + return bean.InternalOnlyKeys[key] +} + +// filterInternalAttributes removes internal-only attributes from the list +func (handler AttributesRestHandlerImpl) filterInternalAttributes(attributes []*bean.AttributesDto) []*bean.AttributesDto { + filtered := make([]*bean.AttributesDto, 0, len(attributes)) + return sliceUtil.Filter(filtered, attributes, func(attr *bean.AttributesDto) bool { + return !handler.isInternalOnlyKey(attr.Key) + }) +} + func (handler AttributesRestHandlerImpl) AddAttributes(w http.ResponseWriter, r *http.Request) { userId, err := handler.userService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -78,6 +94,13 @@ func (handler AttributesRestHandlerImpl) AddAttributes(w http.ResponseWriter, r return } + // Check if the key is internal-only (not allowed to be created via API) + if handler.isInternalOnlyKey(dto.Key) { + handler.logger.Warnw("attempt to create internal-only attribute", "key", dto.Key, "userId", userId) + common.WriteJsonResp(w, fmt.Errorf("forbidden: cannot create attribute with key: %q", dto.Key), nil, http.StatusForbidden) + return + } + handler.logger.Infow("request payload, AddAttributes", "payload", dto) resp, err := handler.attributesService.AddAttributes(&dto) if err != nil { @@ -105,6 +128,14 @@ func (handler AttributesRestHandlerImpl) UpdateAttributes(w http.ResponseWriter, } token := r.Header.Get("token") + + // Check if the key is internal-only (not allowed to be created via API) + if handler.isInternalOnlyKey(dto.Key) { + handler.logger.Warnw("attempt to create internal-only attribute", "key", dto.Key, "userId", userId) + common.WriteJsonResp(w, fmt.Errorf("forbidden: cannot edit attribute with key: %q", dto.Key), nil, http.StatusForbidden) + return + } + if ok := handler.enforcer.Enforce(token, 
casbin.ResourceGlobal, casbin.ActionUpdate, "*"); !ok { common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) return @@ -145,6 +176,14 @@ func (handler AttributesRestHandlerImpl) GetAttributesById(w http.ResponseWriter common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } + + // Filter out internal-only attributes + if res != nil && handler.isInternalOnlyKey(res.Key) { + handler.logger.Warnw("attempt to read internal-only attribute", "key", res.Key, "userId", userId) + common.WriteJsonResp(w, fmt.Errorf("forbidden: cannot read attribute with key: %q", res.Key), nil, http.StatusForbidden) + return + } + common.WriteJsonResp(w, nil, res, http.StatusOK) } @@ -167,7 +206,10 @@ func (handler AttributesRestHandlerImpl) GetAttributesActiveList(w http.Response common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } - common.WriteJsonResp(w, nil, res, http.StatusOK) + + // Filter out internal-only attributes from the list + filteredRes := handler.filterInternalAttributes(res) + common.WriteJsonResp(w, nil, filteredRes, http.StatusOK) } func (handler AttributesRestHandlerImpl) GetAttributesByKey(w http.ResponseWriter, r *http.Request) { @@ -185,9 +227,17 @@ func (handler AttributesRestHandlerImpl) GetAttributesByKey(w http.ResponseWrite vars := mux.Vars(r) key := vars["key"] + + // Check if the key is internal-only (not allowed to be read via API) + if handler.isInternalOnlyKey(key) { + handler.logger.Warnw("attempt to read internal-only attribute by key", "key", key, "userId", userId) + common.WriteJsonResp(w, fmt.Errorf("forbidden: cannot read attribute with key: %q", key), nil, http.StatusForbidden) + return + } + res, err := handler.attributesService.GetByKey(key) if err != nil { - handler.logger.Errorw("service err, GetAttributesById", "err", err) + handler.logger.Errorw("service err, GetAttributesByKey", "key", key, "err", err) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } @@ 
-204,21 +254,27 @@ func (handler AttributesRestHandlerImpl) AddDeploymentEnforcementConfig(w http.R var dto bean.AttributesDto err = decoder.Decode(&dto) if err != nil { - handler.logger.Errorw("request err, AddAttributes", "err", err, "payload", dto) + handler.logger.Errorw("request err, AddDeploymentEnforcementConfig", "err", err, "payload", dto) common.WriteJsonResp(w, err, nil, http.StatusBadRequest) return } + // Check if the key is enforce deployment type config + if dto.Key != bean.ENFORCE_DEPLOYMENT_TYPE_CONFIG { + common.WriteJsonResp(w, fmt.Errorf("invalid key: %q", dto.Key), nil, http.StatusBadRequest) + return + } + token := r.Header.Get("token") if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionCreate, "*"); !ok { common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) return } - handler.logger.Infow("request payload, AddAttributes", "payload", dto) + handler.logger.Infow("request payload, AddDeploymentEnforcementConfig", "payload", dto) resp, err := handler.attributesService.AddDeploymentEnforcementConfig(&dto) if err != nil { - handler.logger.Errorw("service err, AddAttributes", "err", err, "payload", dto) + handler.logger.Errorw("service err, AddDeploymentEnforcementConfig", "err", err, "payload", dto) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } diff --git a/api/restHandler/ImageScanRestHandler.go b/api/restHandler/ImageScanRestHandler.go index e28c286cee..3c0c2d528b 100644 --- a/api/restHandler/ImageScanRestHandler.go +++ b/api/restHandler/ImageScanRestHandler.go @@ -19,13 +19,16 @@ package restHandler import ( "encoding/json" "fmt" + "net/http" + "strconv" + "github.com/devtron-labs/devtron/pkg/cluster/environment" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning" securityBean "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/bean" security2 
"github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository" + "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean" "github.com/devtron-labs/devtron/util/sliceUtil" - "net/http" - "strconv" + "go.opentelemetry.io/otel" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/internal/util" @@ -46,6 +49,8 @@ type ImageScanRestHandler interface { FetchExecutionDetail(w http.ResponseWriter, r *http.Request) FetchMinScanResultByAppIdAndEnvId(w http.ResponseWriter, r *http.Request) VulnerabilityExposure(w http.ResponseWriter, r *http.Request) + VulnerabilitySummary(w http.ResponseWriter, r *http.Request) + VulnerabilityListing(w http.ResponseWriter, r *http.Request) } type ImageScanRestHandlerImpl struct { @@ -402,3 +407,201 @@ func (impl ImageScanRestHandlerImpl) VulnerabilityExposure(w http.ResponseWriter results.VulnerabilityExposure = vulnerabilityExposure common.WriteJsonResp(w, err, results, http.StatusOK) } + +func (impl ImageScanRestHandlerImpl) VulnerabilitySummary(w http.ResponseWriter, r *http.Request) { + ctx, span := otel.Tracer("imageScanRestHandler").Start(r.Context(), "VulnerabilitySummary") + defer span.End() + + userId, err := impl.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.HandleUnauthorized(w, r) + return + } + + // Parse request body with filters + decoder := json.NewDecoder(r.Body) + var summaryRequest *securityBean.VulnerabilitySummaryRequest + err = decoder.Decode(&summaryRequest) + if err != nil { + impl.logger.Errorw("request err, VulnerabilitySummary", "err", err, "payload", r.Body) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Check if user is super admin first - this determines the optimization path + _, rbacSpan := otel.Tracer("imageScanRestHandler").Start(ctx, "RBACCheck") + token := r.Header.Get("token") + isSuperAdmin := impl.enforcer.Enforce(token, casbin.ResourceGlobal, 
casbin.ActionGet, "*") + rbacSpan.End() + + var ids []int + + if isSuperAdmin { + // OPTIMIZATION: For super-admin users, skip deploy info fetching and filtering entirely + // The GetVulnerabilityRawData query already handles all filtering (env, cluster, app) at DB level + // When ids is empty, it doesn't apply RBAC filtering, which is correct for super-admins + ids = nil + } else { + // OPTIMIZATION: For non-super-admin users, use optimized single query + _, fetchSpan := otel.Tracer("imageScanRestHandler").Start(ctx, "FetchScannedDeployInfo") + filteredDeployInfoList, err := impl.imageScanService.FetchScannedDeployInfoWithFilters(ctx, summaryRequest.EnvironmentIds, summaryRequest.ClusterIds) + fetchSpan.End() + if err != nil { + impl.logger.Errorw("service err, VulnerabilitySummary", "err", err) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, nil, impl.getEmptyVulnerabilitySummary(), http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + + // Apply RBAC filtering + _, rbacProcessSpan := otel.Tracer("imageScanRestHandler").Start(ctx, "RBACProcessing") + ids, err = impl.getAuthorisedImageScanDeployInfoIds(token, filteredDeployInfoList) + rbacProcessSpan.End() + if err != nil { + impl.logger.Errorw("error in getting authorised image scan deploy info ids", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + if len(ids) == 0 { + common.WriteJsonResp(w, nil, impl.getEmptyVulnerabilitySummary(), http.StatusOK) + return + } + } + + summary, err := impl.imageScanService.FetchVulnerabilitySummary(ctx, summaryRequest, ids) + if err != nil { + impl.logger.Errorw("service err, VulnerabilitySummary", "err", err) + if util.IsErrNoRows(err) { + common.WriteJsonResp(w, nil, impl.getEmptyVulnerabilitySummary(), http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + common.WriteJsonResp(w, err, summary, http.StatusOK) 
+} + +// getEmptyVulnerabilitySummary returns an empty vulnerability summary response +func (impl ImageScanRestHandlerImpl) getEmptyVulnerabilitySummary() *securityBean.VulnerabilitySummary { + return &securityBean.VulnerabilitySummary{ + TotalVulnerabilities: 0, + SeverityCount: &securityBean.SeverityCount{ + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + }, + FixableVulnerabilities: 0, + NotFixableVulnerabilities: 0, + } +} + +func (impl ImageScanRestHandlerImpl) VulnerabilityListing(w http.ResponseWriter, r *http.Request) { + ctx, span := otel.Tracer("imageScanRestHandler").Start(r.Context(), "VulnerabilityListing") + defer span.End() + + userId, err := impl.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.HandleUnauthorized(w, r) + return + } + + // Parse request body + decoder := json.NewDecoder(r.Body) + var request *securityBean.VulnerabilityListingRequest + err = decoder.Decode(&request) + if err != nil { + impl.logger.Errorw("request err, VulnerabilityListing", "err", err, "payload", r.Body) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Fetch all deploy info to apply RBAC + deployInfoRequest := &securityBean.ImageScanRequest{ + ImageScanFilter: bean.ImageScanFilter{ + EnvironmentIds: request.EnvironmentIds, + ClusterIds: request.ClusterIds, + }, + } + + deployInfoList, err := impl.imageScanService.FetchAllDeployInfo(deployInfoRequest) + if err != nil { + impl.logger.Errorw("service err, VulnerabilityListing", "err", err) + if util.IsErrNoRows(err) { + emptyResponse := &securityBean.VulnerabilityListingResponse{ + Offset: request.Offset, + Size: request.Size, + Total: 0, + Vulnerabilities: []*securityBean.VulnerabilityDetail{}, + } + common.WriteJsonResp(w, nil, emptyResponse, http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + + filteredDeployInfoList, err := 
impl.imageScanService.FilterDeployInfoByScannedArtifactsDeployedInEnv(deployInfoList) + if err != nil { + impl.logger.Errorw("request err, FilterDeployInfoListForScannedArtifacts", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + // Apply RBAC + _, rbacSpan := otel.Tracer("imageScanRestHandler").Start(ctx, "RBACProcessing") + token := r.Header.Get("token") + isSuperAdmin := false + if ok := impl.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*"); ok { + isSuperAdmin = true + } + var ids []int + if isSuperAdmin { + ids = sliceUtil.NewSliceFromFuncExec(filteredDeployInfoList, func(item *security2.ImageScanDeployInfo) int { + return item.Id + }) + } else { + ids, err = impl.getAuthorisedImageScanDeployInfoIds(token, filteredDeployInfoList) + if err != nil { + impl.logger.Errorw("error in getting authorised image scan deploy info ids", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + } + rbacSpan.End() + + if len(ids) == 0 { + emptyResponse := &securityBean.VulnerabilityListingResponse{ + Offset: request.Offset, + Size: request.Size, + Total: 0, + Vulnerabilities: []*securityBean.VulnerabilityDetail{}, + } + common.WriteJsonResp(w, nil, emptyResponse, http.StatusOK) + return + } + + // Fetch vulnerability listing + listing, err := impl.imageScanService.FetchVulnerabilityListing(ctx, request, ids) + if err != nil { + impl.logger.Errorw("service err, VulnerabilityListing", "err", err) + if util.IsErrNoRows(err) { + emptyResponse := &securityBean.VulnerabilityListingResponse{ + Offset: request.Offset, + Size: request.Size, + Total: 0, + Vulnerabilities: []*securityBean.VulnerabilityDetail{}, + } + common.WriteJsonResp(w, nil, emptyResponse, http.StatusOK) + } else { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + } + return + } + common.WriteJsonResp(w, err, listing, http.StatusOK) +} diff --git 
a/api/restHandler/InfraOverviewRestHandler.go b/api/restHandler/InfraOverviewRestHandler.go new file mode 100644 index 0000000000..1417842226 --- /dev/null +++ b/api/restHandler/InfraOverviewRestHandler.go @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package restHandler + +import ( + "net/http" + + "github.com/devtron-labs/devtron/api/restHandler/common" + "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" + "github.com/devtron-labs/devtron/pkg/auth/user" + "github.com/devtron-labs/devtron/pkg/overview" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/cache" + "github.com/gorilla/schema" + "go.uber.org/zap" + "gopkg.in/go-playground/validator.v9" +) + +type InfraOverviewRestHandler interface { + GetClusterOverview(w http.ResponseWriter, r *http.Request) + DeleteClusterOverviewCache(w http.ResponseWriter, r *http.Request) + RefreshClusterOverviewCache(w http.ResponseWriter, r *http.Request) + GetClusterOverviewDetailedNodeInfo(w http.ResponseWriter, r *http.Request) +} + +type InfraOverviewRestHandlerImpl struct { + logger *zap.SugaredLogger + clusterOverviewService overview.ClusterOverviewService + clusterCacheService cache.ClusterCacheService + userService user.UserService + validator *validator.Validate + enforcer casbin.Enforcer +} + +func NewInfraOverviewRestHandlerImpl( + logger *zap.SugaredLogger, + clusterOverviewService overview.ClusterOverviewService, + clusterCacheService cache.ClusterCacheService, + userService user.UserService, + validator *validator.Validate, + enforcer casbin.Enforcer, +) *InfraOverviewRestHandlerImpl { + return &InfraOverviewRestHandlerImpl{ + logger: logger, + clusterOverviewService: clusterOverviewService, + clusterCacheService: clusterCacheService, + userService: userService, + validator: validator, + enforcer: enforcer, + } +} + +// GetClusterOverview handles cluster management overview requests +func (handler *InfraOverviewRestHandlerImpl) 
GetClusterOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + result, err := handler.clusterOverviewService.GetClusterOverview(r.Context()) + if err != nil { + handler.logger.Errorw("error in getting cluster overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +// DeleteClusterOverviewCache handles cluster overview cache deletion requests +func (handler *InfraOverviewRestHandlerImpl) DeleteClusterOverviewCache(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + handler.clusterCacheService.InvalidateClusterOverview() + handler.logger.Infow("cluster overview cache deleted successfully", "userId", userId) + common.WriteJsonResp(w, nil, map[string]string{"message": "Cluster overview cache deleted successfully"}, http.StatusOK) +} + +// RefreshClusterOverviewCache handles cluster overview cache refresh requests +func (handler *InfraOverviewRestHandlerImpl) RefreshClusterOverviewCache(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + err = handler.clusterOverviewService.RefreshClusterOverviewCache(r.Context()) + if err != nil { + handler.logger.Errorw("error in refreshing cluster overview cache", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + handler.logger.Infow("cluster overview cache refreshed successfully", "userId", userId) + common.WriteJsonResp(w, nil, nil, http.StatusOK) +} + +func 
(handler *InfraOverviewRestHandlerImpl) GetClusterOverviewDetailedNodeInfo(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + var request bean.ClusterOverviewDetailRequest + decoder := schema.NewDecoder() + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Validate the request + if err := handler.validator.Struct(request); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.clusterOverviewService.GetClusterOverviewDetailedNodeInfo(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting cluster overview detail", "err", err, "groupBy", request.GroupBy) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} diff --git a/api/restHandler/OverviewRestHandler.go b/api/restHandler/OverviewRestHandler.go new file mode 100644 index 0000000000..8830392036 --- /dev/null +++ b/api/restHandler/OverviewRestHandler.go @@ -0,0 +1,643 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package restHandler + +import ( + "fmt" + "net/http" + + "github.com/devtron-labs/devtron/api/restHandler/common" + "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" + "github.com/devtron-labs/devtron/pkg/auth/user" + "github.com/devtron-labs/devtron/pkg/overview" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + "github.com/devtron-labs/devtron/pkg/overview/util" + "github.com/gorilla/schema" + "go.uber.org/zap" + "gopkg.in/go-playground/validator.v9" +) + +type OverviewRestHandler interface { + GetAppsOverview(w http.ResponseWriter, r *http.Request) + GetWorkflowOverview(w http.ResponseWriter, r *http.Request) + GetBuildDeploymentActivity(w http.ResponseWriter, r *http.Request) + GetBuildDeploymentActivityDetailed(w http.ResponseWriter, r *http.Request) + GetDoraMetrics(w http.ResponseWriter, r *http.Request) + GetInsights(w http.ResponseWriter, r *http.Request) + + // Cluster Management Overview + GetClusterOverview(w http.ResponseWriter, r *http.Request) + DeleteClusterOverviewCache(w http.ResponseWriter, r *http.Request) + RefreshClusterOverviewCache(w http.ResponseWriter, r *http.Request) + + // Cluster Overview Detailed Drill-down API (unified endpoint) + GetClusterOverviewDetailedNodeInfo(w http.ResponseWriter, r *http.Request) + + // Security Overview APIs + GetSecurityOverview(w http.ResponseWriter, r *http.Request) + GetSeverityInsights(w http.ResponseWriter, r *http.Request) + GetDeploymentSecurityStatus(w http.ResponseWriter, r *http.Request) + GetVulnerabilityTrend(w http.ResponseWriter, r *http.Request) + GetBlockedDeploymentsTrend(w http.ResponseWriter, r *http.Request) +} + +type OverviewRestHandlerImpl struct { + logger *zap.SugaredLogger + overviewService overview.OverviewService + userService user.UserService + validator *validator.Validate + enforcer casbin.Enforcer +} + +func NewOverviewRestHandlerImpl( + logger *zap.SugaredLogger, + overviewService 
overview.OverviewService, + userService user.UserService, + validator *validator.Validate, + enforcer casbin.Enforcer, +) *OverviewRestHandlerImpl { + return &OverviewRestHandlerImpl{ + logger: logger, + overviewService: overviewService, + userService: userService, + validator: validator, + enforcer: enforcer, + } +} + +// validateTimeParameters validates that either timeWindow is provided or both from and to are provided +// Returns error if validation fails +func validateTimeParameters(timeWindow, from, to string) error { + hasTimeWindow := len(timeWindow) > 0 + hasFromTo := len(from) > 0 && len(to) > 0 + + if !hasTimeWindow && !hasFromTo { + return fmt.Errorf("either timeWindow or both from/to parameters must be provided") + } + + return nil +} + +func (handler *OverviewRestHandlerImpl) GetAppsOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + result, err := handler.overviewService.GetAppsOverview(r.Context()) + if err != nil { + handler.logger.Errorw("error in getting apps overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetWorkflowOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + result, err := handler.overviewService.GetWorkflowOverview(r.Context()) + if err != nil { + handler.logger.Errorw("error in getting workflow overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) 
GetBuildDeploymentActivity(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Parse from and to parameters + request, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + buildDeploymentRequest := &bean.BuildDeploymentActivityRequest{ + From: request.From, + To: request.To, + } + + if err := handler.validator.Struct(buildDeploymentRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetBuildDeploymentActivity(r.Context(), buildDeploymentRequest) + if err != nil { + handler.logger.Errorw("error in getting build deployment activity", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetBuildDeploymentActivityDetailed(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") 
+ to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + activityKind := r.URL.Query().Get("activityKind") + if activityKind == "" { + handler.logger.Errorw("activityKind query parameter is required") + common.WriteJsonResp(w, fmt.Errorf("activityKind query parameter is required"), nil, http.StatusBadRequest) + return + } + + request, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + aggregationType := constants.GetAggregationType(constants.TimePeriod(timeWindow)) + + buildDeploymentDetailedRequest := &bean.BuildDeploymentActivityDetailedRequest{ + ActivityKind: bean.ActivityKind(activityKind), + AggregationType: aggregationType, + From: request.From, + To: request.To, + } + + if err := handler.validator.Struct(buildDeploymentDetailedRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetBuildDeploymentActivityDetailed(r.Context(), buildDeploymentDetailedRequest) + if err != nil { + handler.logger.Errorw("error in getting build deployment activity detailed", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetDoraMetrics(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + 
timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Get both current and previous time ranges + currentTimeWindow, prevTimeWindow, err := util.GetCurrentAndPreviousTimeRangeBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing time periods", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + doraRequest := &bean.DoraMetricsRequest{ + TimeRangeRequest: currentTimeWindow, + PrevFrom: prevTimeWindow.From, + PrevTo: prevTimeWindow.To, + } + + if err := handler.validator.Struct(doraRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetDoraMetrics(r.Context(), doraRequest) + if err != nil { + handler.logger.Errorw("error in getting DORA metrics", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetInsights(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Extract query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, 
http.StatusBadRequest) + return + } + + request, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Extract new query parameters + pipelineTypeStr := r.URL.Query().Get("pipelineType") + if pipelineTypeStr == "" { + handler.logger.Errorw("pipelineType parameter is required") + common.WriteJsonResp(w, fmt.Errorf("pipelineType parameter is required"), nil, http.StatusBadRequest) + return + } + + // Validate pipelineType + var pipelineType bean.PipelineType + switch pipelineTypeStr { + case string(bean.BuildPipelines): + pipelineType = bean.BuildPipelines + case string(bean.DeploymentPipelines): + pipelineType = bean.DeploymentPipelines + default: + handler.logger.Errorw("invalid pipelineType parameter", "pipelineType", pipelineTypeStr) + common.WriteJsonResp(w, fmt.Errorf("invalid pipelineType parameter. Must be 'buildPipelines' or 'deploymentPipelines'"), nil, http.StatusBadRequest) + return + } + + sortOrderStr := r.URL.Query().Get("sortOrder") + if sortOrderStr == "" { + sortOrderStr = string(bean.DESC) // Default to DESC + } + + // Validate sortOrder + var sortOrder bean.SortOrder + switch sortOrderStr { + case string(bean.ASC): + sortOrder = bean.ASC + case string(bean.DESC): + sortOrder = bean.DESC + default: + handler.logger.Errorw("invalid sortOrder parameter", "sortOrder", sortOrderStr) + common.WriteJsonResp(w, fmt.Errorf("invalid sortOrder parameter. 
Must be 'ASC' or 'DESC'"), nil, http.StatusBadRequest) + return + } + + limit, err := common.ExtractIntQueryParam(w, r, "limit", 10) + if err != nil { + handler.logger.Errorw("error in parsing limit parameter", "err", err) + return + } + + offset, err := common.ExtractIntQueryParam(w, r, "offset", 0) + if err != nil { + handler.logger.Errorw("error in parsing offset parameter", "err", err) + return + } + + insightsRequest := &bean.InsightsRequest{ + TimeRangeRequest: request, + PipelineType: pipelineType, + SortOrder: sortOrder, + Limit: limit, + Offset: offset, + } + + if err := handler.validator.Struct(insightsRequest); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetInsights(r.Context(), insightsRequest) + if err != nil { + handler.logger.Errorw("error in getting insights", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +// GetClusterOverview handles cluster management overview requests +func (handler *OverviewRestHandlerImpl) GetClusterOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + result, err := handler.overviewService.GetClusterOverview(r.Context()) + if err != nil { + handler.logger.Errorw("error in getting cluster overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +// DeleteClusterOverviewCache handles cluster overview cache deletion requests +func (handler *OverviewRestHandlerImpl) DeleteClusterOverviewCache(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 
0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + err = handler.overviewService.DeleteClusterOverviewCache(r.Context()) + if err != nil { + handler.logger.Errorw("error in deleting cluster overview cache", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + handler.logger.Infow("cluster overview cache deleted successfully", "userId", userId) + common.WriteJsonResp(w, nil, map[string]string{"message": "Cluster overview cache deleted successfully"}, http.StatusOK) +} + +// RefreshClusterOverviewCache handles cluster overview cache refresh requests +func (handler *OverviewRestHandlerImpl) RefreshClusterOverviewCache(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + err = handler.overviewService.RefreshClusterOverviewCache(r.Context()) + if err != nil { + handler.logger.Errorw("error in refreshing cluster overview cache", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + handler.logger.Infow("cluster overview cache refreshed successfully", "userId", userId) + common.WriteJsonResp(w, nil, nil, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetClusterOverviewDetailedNodeInfo(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + var request bean.ClusterOverviewDetailRequest + decoder := schema.NewDecoder() + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Validate the request + if 
err := handler.validator.Struct(request); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetClusterOverviewDetailedNodeInfo(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting cluster overview detail", "err", err, "groupBy", request.GroupBy) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +// ============================================================================ +// Security Overview APIs +// ============================================================================ + +func (handler *OverviewRestHandlerImpl) GetSecurityOverview(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + decoder := schema.NewDecoder() + decoder.IgnoreUnknownKeys(true) + var request bean.SecurityOverviewRequest + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetSecurityOverview(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting security overview", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetSeverityInsights(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters 
+ decoder := schema.NewDecoder() + decoder.IgnoreUnknownKeys(true) + var request bean.SeverityInsightsRequest + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Validate the request + if err := handler.validator.Struct(request); err != nil { + handler.logger.Errorw("validation error", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetSeverityInsights(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting severity insights", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetDeploymentSecurityStatus(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + decoder := schema.NewDecoder() + decoder.IgnoreUnknownKeys(true) + var request bean.DeploymentSecurityStatusRequest + if err := decoder.Decode(&request, r.URL.Query()); err != nil { + handler.logger.Errorw("error in decoding request", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + result, err := handler.overviewService.GetDeploymentSecurityStatus(r.Context(), &request) + if err != nil { + handler.logger.Errorw("error in getting deployment security status", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetVulnerabilityTrend(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId 
== 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + envType := r.URL.Query().Get("envType") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Validate envType + if envType != string(bean.EnvTypeProd) && envType != string(bean.EnvTypeNonProd) && envType != string(bean.EnvTypeAll) { + handler.logger.Errorw("invalid envType", "envType", envType) + common.WriteJsonResp(w, fmt.Errorf("envType must be 'prod', 'non-prod' or 'all'"), nil, http.StatusBadRequest) + return + } + + // Get both current and previous time ranges + currentTimeWindow, _, err := util.GetCurrentAndPreviousTimeRangeBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing time periods", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Determine aggregation type based on time range + timePeriod := util.GetTimePeriodFromTimeRange(currentTimeWindow.From, currentTimeWindow.To) + aggregationType := constants.GetAggregationType(timePeriod) + + result, err := handler.overviewService.GetVulnerabilityTrend(r.Context(), currentTimeWindow, bean.EnvType(envType), aggregationType) + if err != nil { + handler.logger.Errorw("error in getting vulnerability trend", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} + +func (handler *OverviewRestHandlerImpl) GetBlockedDeploymentsTrend(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + 
common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + + // Parse query parameters + timeWindow := r.URL.Query().Get("timeWindow") + from := r.URL.Query().Get("from") + to := r.URL.Query().Get("to") + + // Validate time parameters + if err := validateTimeParameters(timeWindow, from, to); err != nil { + handler.logger.Errorw("validation error for time parameters", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Get current time range + currentTimeWindow, err := util.GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + handler.logger.Errorw("error in parsing time period", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + // Determine aggregation type based on time range + timePeriod := util.GetTimePeriodFromTimeRange(currentTimeWindow.From, currentTimeWindow.To) + aggregationType := constants.GetAggregationType(timePeriod) + + result, err := handler.overviewService.GetBlockedDeploymentsTrend(r.Context(), currentTimeWindow, aggregationType) + if err != nil { + handler.logger.Errorw("error in getting blocked deployments trend", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, result, http.StatusOK) +} diff --git a/api/router/ImageScanRouter.go b/api/router/ImageScanRouter.go index 7feabb13a7..b67133eb65 100644 --- a/api/router/ImageScanRouter.go +++ b/api/router/ImageScanRouter.go @@ -34,6 +34,8 @@ func NewImageScanRouterImpl(imageScanRestHandler restHandler.ImageScanRestHandle func (impl ImageScanRouterImpl) InitImageScanRouter(configRouter *mux.Router) { configRouter.Path("/list").HandlerFunc(impl.imageScanRestHandler.ScanExecutionList).Methods("POST") + configRouter.Path("/summary").HandlerFunc(impl.imageScanRestHandler.VulnerabilitySummary).Methods("POST") + 
configRouter.Path("/vulnerabilities").HandlerFunc(impl.imageScanRestHandler.VulnerabilityListing).Methods("POST") //image=image:abc&envId=3&appId=100&artifactId=100&executionId=100 configRouter.Path("/executionDetail").HandlerFunc(impl.imageScanRestHandler.FetchExecutionDetail).Methods("GET") diff --git a/api/router/InfraOverviewRouter.go b/api/router/InfraOverviewRouter.go new file mode 100644 index 0000000000..038d6487bb --- /dev/null +++ b/api/router/InfraOverviewRouter.go @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package router + +import ( + "github.com/devtron-labs/devtron/api/restHandler" + "github.com/gorilla/mux" +) + +type InfraOverviewRouter interface { + InitInfraOverviewRouter(infraOverviewRouter *mux.Router) +} + +type InfraOverviewRouterImpl struct { + infraOverviewRestHandler restHandler.InfraOverviewRestHandler +} + +func NewInfraOverviewRouterImpl(infraOverviewRestHandler restHandler.InfraOverviewRestHandler) *InfraOverviewRouterImpl { + return &InfraOverviewRouterImpl{ + infraOverviewRestHandler: infraOverviewRestHandler, + } +} + +func (router InfraOverviewRouterImpl) InitInfraOverviewRouter(infraOverviewRouter *mux.Router) { + // Cluster Management Overview + infraOverviewRouter.Path(""). + HandlerFunc(router.infraOverviewRestHandler.GetClusterOverview). + Methods("GET") + + // Delete Cluster Overview Cache + infraOverviewRouter.Path("/cache"). + HandlerFunc(router.infraOverviewRestHandler.DeleteClusterOverviewCache). + Methods("DELETE") + + // Refresh Cluster Overview Cache + infraOverviewRouter.Path("/refresh"). + HandlerFunc(router.infraOverviewRestHandler.RefreshClusterOverviewCache). + Methods("GET") + + // Cluster Overview Detailed Node Info + infraOverviewRouter.Path("/node-list"). + HandlerFunc(router.infraOverviewRestHandler.GetClusterOverviewDetailedNodeInfo). 
+ Methods("GET") +} diff --git a/api/router/OverviewRouter.go b/api/router/OverviewRouter.go new file mode 100644 index 0000000000..6a0c1acc0d --- /dev/null +++ b/api/router/OverviewRouter.go @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package router + +import ( + "github.com/devtron-labs/devtron/api/restHandler" + "github.com/gorilla/mux" +) + +type OverviewRouter interface { + InitOverviewRouter(overviewRouter *mux.Router) +} + +type OverviewRouterImpl struct { + overviewRestHandler restHandler.OverviewRestHandler + infraOverviewRouter InfraOverviewRouter +} + +func NewOverviewRouterImpl(overviewRestHandler restHandler.OverviewRestHandler, + infraOverviewRouter InfraOverviewRouter) *OverviewRouterImpl { + return &OverviewRouterImpl{ + overviewRestHandler: overviewRestHandler, + infraOverviewRouter: infraOverviewRouter, + } +} + +func (router OverviewRouterImpl) InitOverviewRouter(overviewRouter *mux.Router) { + // New Apps Overview API + overviewRouter.Path("/apps-overview"). + HandlerFunc(router.overviewRestHandler.GetAppsOverview). + Methods("GET") + + // New Workflow Overview API + overviewRouter.Path("/workflow-overview"). + HandlerFunc(router.overviewRestHandler.GetWorkflowOverview). + Methods("GET") + + // Build and Deployment Activity + overviewRouter.Path("/build-deployment-activity"). + HandlerFunc(router.overviewRestHandler.GetBuildDeploymentActivity). + Methods("GET") + + // Build and Deployment Activity Detailed + overviewRouter.Path("/build-deployment-activity/detailed"). + HandlerFunc(router.overviewRestHandler.GetBuildDeploymentActivityDetailed). + Methods("GET") + + // DORA Metrics + overviewRouter.Path("/dora-metrics"). + HandlerFunc(router.overviewRestHandler.GetDoraMetrics). + Methods("GET") + + // Pipeline Insights + overviewRouter.Path("/pipeline-insights"). + HandlerFunc(router.overviewRestHandler.GetInsights). 
+ Methods("GET") + + // Infra Overview Subrouter + + infraOverviewRouter := overviewRouter.PathPrefix("/infra").Subrouter() + router.infraOverviewRouter.InitInfraOverviewRouter(infraOverviewRouter) + + // Cluster Management Overview + + // Security Overview Subrouter + securityOverviewRouter := overviewRouter.PathPrefix("/security").Subrouter() + + // Security Overview - "At a Glance" metrics (organization-wide) + securityOverviewRouter.Path("/security-glance"). + HandlerFunc(router.overviewRestHandler.GetSecurityOverview). + Methods("GET") + + // Severity Insights - With prod/non-prod filtering + securityOverviewRouter.Path("/severity-insights"). + HandlerFunc(router.overviewRestHandler.GetSeverityInsights). + Methods("GET") + + // Deployment Security Status + securityOverviewRouter.Path("/deployment-security-status"). + HandlerFunc(router.overviewRestHandler.GetDeploymentSecurityStatus). + Methods("GET") + + // Vulnerability Trend - Time-series with prod/non-prod filtering + securityOverviewRouter.Path("/vulnerability-trend"). + HandlerFunc(router.overviewRestHandler.GetVulnerabilityTrend). + Methods("GET") + + // Blocked Deployments Trend - Organization-wide + securityOverviewRouter.Path("/blocked-deployments-trend"). + HandlerFunc(router.overviewRestHandler.GetBlockedDeploymentsTrend). 
+ Methods("GET") + +} diff --git a/api/router/router.go b/api/router/router.go index 0ea41a35a8..a1a7a05662 100644 --- a/api/router/router.go +++ b/api/router/router.go @@ -18,6 +18,8 @@ package router import ( "encoding/json" + "net/http" + "github.com/devtron-labs/devtron/api/apiToken" "github.com/devtron-labs/devtron/api/appStore" "github.com/devtron-labs/devtron/api/appStore/chartGroup" @@ -55,7 +57,6 @@ import ( "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" - "net/http" ) type MuxRouter struct { @@ -124,6 +125,7 @@ type MuxRouter struct { devtronResourceRouter devtronResource.DevtronResourceRouter scanningResultRouter resourceScan.ScanningResultRouter userResourceRouter userResource.Router + overviewRouter OverviewRouter } func NewMuxRouter(logger *zap.SugaredLogger, @@ -159,6 +161,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, fluxApplicationRouter fluxApplication2.FluxApplicationRouter, scanningResultRouter resourceScan.ScanningResultRouter, userResourceRouter userResource.Router, + overviewRouter OverviewRouter, ) *MuxRouter { r := &MuxRouter{ Router: mux.NewRouter(), @@ -226,6 +229,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, fluxApplicationRouter: fluxApplicationRouter, scanningResultRouter: scanningResultRouter, userResourceRouter: userResourceRouter, + overviewRouter: overviewRouter, } return r } @@ -444,4 +448,6 @@ func (r MuxRouter) Init() { fluxApplicationRouter := r.Router.PathPrefix("/orchestrator/flux-application").Subrouter() r.fluxApplicationRouter.InitFluxApplicationRouter(fluxApplicationRouter) + overviewRouter := r.Router.PathPrefix("/orchestrator/overview").Subrouter() + r.overviewRouter.InitOverviewRouter(overviewRouter) } diff --git a/api/util/logger.go b/api/util/logger.go index d00d4f62f5..defd9aa71b 100644 --- a/api/util/logger.go +++ b/api/util/logger.go @@ -25,6 +25,7 @@ import ( "github.com/devtron-labs/devtron/internal/middleware" 
"github.com/devtron-labs/devtron/pkg/auth/user" + "github.com/devtron-labs/devtron/util" ) type AuditLoggerDTO struct { @@ -36,6 +37,7 @@ type AuditLoggerDTO struct { RequestPayload []byte `json:"requestPayload"` RequestMethod string `json:"requestMethod"` ResponseTime time.Duration `json:"responseTime"` + ClientIp string `json:"clientIp"` } type LoggingMiddlewareImpl struct { @@ -56,13 +58,12 @@ type LoggingMiddleware interface { func (impl LoggingMiddlewareImpl) LoggingMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := middleware.NewDelegator(w, nil) - token := r.Header.Get("token") userEmail, err := impl.userService.GetEmailFromToken(token) if err != nil { log.Printf("AUDIT_LOG: user does not exists") } - + clientIp := util.GetClientIP(r) // Read the request body into a buffer var bodyBuffer bytes.Buffer _, err = io.Copy(&bodyBuffer, r.Body) @@ -83,6 +84,7 @@ func (impl LoggingMiddlewareImpl) LoggingMiddleware(next http.Handler) http.Hand QueryParams: r.URL.Query().Encode(), RequestPayload: bodyBuffer.Bytes(), RequestMethod: r.Method, + ClientIp: clientIp, } // Call the next handler in the chain. 
next.ServeHTTP(d, r) @@ -95,5 +97,5 @@ func (impl LoggingMiddlewareImpl) LoggingMiddleware(next http.Handler) http.Hand } func LogRequest(auditLogDto *AuditLoggerDTO) { - log.Printf("AUDIT_LOG: requestMethod: %s, urlPath: %s, queryParams: %s, updatedBy: %s, updatedOn: %s, apiResponseCode: %d, responseTime: %s, requestPayload: %s", auditLogDto.RequestMethod, auditLogDto.UrlPath, auditLogDto.QueryParams, auditLogDto.UserEmail, auditLogDto.UpdatedOn, auditLogDto.ApiResponseCode, auditLogDto.ResponseTime, auditLogDto.RequestPayload) + log.Printf("AUDIT_LOG: clientIp: %s, requestMethod: %s, urlPath: %s, queryParams: %s, updatedBy: %s, updatedOn: %s, apiResponseCode: %d, responseTime: %s, requestPayload: %s", auditLogDto.ClientIp, auditLogDto.RequestMethod, auditLogDto.UrlPath, auditLogDto.QueryParams, auditLogDto.UserEmail, auditLogDto.UpdatedOn, auditLogDto.ApiResponseCode, auditLogDto.ResponseTime, auditLogDto.RequestPayload) } diff --git a/assets/devtron-hero-image.webp b/assets/devtron-hero-image.webp new file mode 100644 index 0000000000..8499e122af Binary files /dev/null and b/assets/devtron-hero-image.webp differ diff --git a/charts/devtron/Chart.yaml b/charts/devtron/Chart.yaml index c0f7d3ef19..585c4b96d0 100644 --- a/charts/devtron/Chart.yaml +++ b/charts/devtron/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: devtron-operator -appVersion: 1.8.2 +appVersion: 2.0.0 description: Chart to configure and install Devtron. Devtron is a Kubernetes Orchestration system. 
keywords: - Devtron @@ -11,7 +11,7 @@ keywords: - argocd - Hyperion engine: gotpl -version: 0.22.98 +version: 0.22.99 sources: - https://github.com/devtron-labs/charts dependencies: diff --git a/charts/devtron/devtron-bom.yaml b/charts/devtron/devtron-bom.yaml index 27f17384f2..fe719fad27 100644 --- a/charts/devtron/devtron-bom.yaml +++ b/charts/devtron/devtron-bom.yaml @@ -15,7 +15,7 @@ global: PG_DATABASE: orchestrator extraManifests: [] installer: - release: "v1.8.2" + release: "v2.0.0" registry: "" image: "inception" tag: "473deaa4-185-21582" @@ -41,13 +41,13 @@ components: FEATURE_CODE_MIRROR_ENABLE: "true" FEATURE_GROUPED_APP_LIST_FILTERS_ENABLE: "true" registry: "" - image: "dashboard:b00aa204-690-36533" + image: "dashboard:b48d0910-690-38228" imagePullPolicy: IfNotPresent healthPort: 8080 devtron: registry: "" - image: "hyperion:261df88d-280-36531" - cicdImage: "devtron:261df88d-434-36530" + image: "hyperion:f0c18f20-280-38148" + cicdImage: "devtron:f0c18f20-434-38146" imagePullPolicy: IfNotPresent customOverrides: {} podSecurityContext: @@ -61,7 +61,7 @@ components: healthPort: 8080 ciRunner: registry: "" - image: "ci-runner:880420ac-138-36030" + image: "ci-runner:6b408df4-138-38163" argocdDexServer: registry: "" image: "dex:v2.30.2" @@ -70,7 +70,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:880420ac-564-36036" + image: "kubelink:6b408df4-564-38159" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" @@ -93,10 +93,11 @@ components: healthPort: 50052 kubewatch: registry: "" - image: "kubewatch:880420ac-419-36026" + image: "kubewatch:6b408df4-419-38172" imagePullPolicy: IfNotPresent healthPort: 8080 configs: + VELERO_INFORMER: "false" devtroncd_NAMESPACE: "devtron-ci" USE_CUSTOM_HTTP_TRANSPORT: "true" CI_INFORMER: "true" @@ -117,7 +118,7 @@ components: image: postgres_exporter:v0.10.1 gitsensor: registry: "" - image: "git-sensor:b82f5fdb-200-36532" + image: 
"git-sensor:6b408df4-200-38174" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -135,7 +136,7 @@ components: # Values for lens lens: registry: "" - image: "lens:880420ac-333-36029" + image: "lens:6b408df4-333-38167" imagePullPolicy: IfNotPresent configs: GIT_SENSOR_PROTOCOL: GRPC @@ -170,7 +171,7 @@ components: entMigratorImage: "devtron-utils:geni-v1.1.4" chartSync: registry: "" - image: chart-sync:880420ac-836-36037 + image: chart-sync:6b408df4-836-38155 schedule: "0 19 * * *" podSecurityContext: fsGroup: 1001 @@ -208,7 +209,7 @@ workflowController: IMDSv1ExecutorImage: "argoexec:v3.0.7" security: imageScanner: - image: "image-scanner:f21e02cb-141-34534" + image: "image-scanner:6b408df4-141-38158" healthPort: 8080 configs: TRIVY_DB_REPOSITORY: mirror.gcr.io/aquasec/trivy-db @@ -219,7 +220,7 @@ security: tag: 4.3.6 # Values for notifier integration notifier: - image: "notifier:00f17215-372-36041" + image: "notifier:5c4b5b3a-372-38153" healthPort: 3000 minio: image: "minio:RELEASE.2021-02-14T04-01-33Z" @@ -241,6 +242,15 @@ monitoring: imagePullPolicy: IfNotPresent devtronEnterprise: enabled: false + finops: + enabled: false + costSync: + image: "cost-sync:46ed7c67-1159-38183" + schedule: "0 * * * *" + timeZone: UTC + timescale: + image: "timescaledb-ha:pg18" + volumeSize: 5Gi casbin: registry: "" image: "casbin:f6ff5f74-064b67e5-462-30822" diff --git a/charts/devtron/templates/NOTES.txt b/charts/devtron/templates/NOTES.txt index 3399e5f0e1..9a68ec31f9 100644 --- a/charts/devtron/templates/NOTES.txt +++ b/charts/devtron/templates/NOTES.txt @@ -2,7 +2,7 @@ {{- $liveCm := lookup "v1" "ConfigMap" "devtroncd" "devtron-custom-cm" }} {{- $currentValue := pluck "POSTGRES_MIGRATED" $liveCm.data | first | default "" }} {{- if ne $currentValue "14" }} - {{- fail "Upgrade Failed Please ensure that you have completed the pre-requisites mentioned in https://docs.devtron.ai/upgrade/devtron-upgrade-1.5.0" }} + {{- fail "Upgrade Failed Please ensure that you have 
completed the pre-requisites mentioned in https://docs.devtron.ai/docs/devtron/v1.7/setup/upgrade/devtron-upgrade-1.5.0" }} {{- end }} {{- end }} diff --git a/charts/devtron/templates/_helpers.tpl b/charts/devtron/templates/_helpers.tpl index 1d4879a9ea..bcbbac81ef 100644 --- a/charts/devtron/templates/_helpers.tpl +++ b/charts/devtron/templates/_helpers.tpl @@ -115,4 +115,4 @@ securityContext: securityContext: {{ toYaml .global.containerSecurityContext | indent 2 }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/configmap-secret.yaml b/charts/devtron/templates/configmap-secret.yaml index 9a8f55af38..33692460c6 100644 --- a/charts/devtron/templates/configmap-secret.yaml +++ b/charts/devtron/templates/configmap-secret.yaml @@ -8,6 +8,8 @@ {{- $DEX_CSTOREKEY := include "getOrGeneratePass" (dict "Namespace" "devtroncd" "Kind" "Secret" "Name" "devtron-secret" "Key" "DEX_CSTOREKEY") }} {{- $postgresPwd := include "getOrGeneratePass" (dict "Namespace" "devtroncd" "Kind" "Secret" "Name" "postgresql-postgresql" "Key" "postgresql-password") }} {{- $WEBHOOK_TOKEN := include "getOrGeneratePass" (dict "Namespace" "devtroncd" "Kind" "Secret" "Name" "devtron-secret" "Key" "WEBHOOK_TOKEN") }} +{{- $TIMESCALE_PASSWORD := include "getOrGeneratePass" (dict "Namespace" "devtroncd" "Kind" "Secret" "Name" "timescaledb-cluster-pg15-superuser" "Key" "password") }} + {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} apiVersion: v1 @@ -230,8 +232,13 @@ kind: Secret data: {{- if and $.Values.global.externalPostgres $.Values.global.externalPostgres.enabled }} DB_PASSWORD: {{ $.Values.global.externalPostgres.PG_PASSWORD | b64enc }} + PG_PASSWORD: {{ $.Values.global.externalPostgres.PG_PASSWORD | b64enc }} {{- else }} DB_PASSWORD: {{ $postgresPwd }} + PG_PASSWORD: {{ $postgresPwd }} +{{- end }} +{{- if $.Values.devtronEnterprise.finops.enabled }} + TIMESCALE_PASSWORD: {{ $TIMESCALE_PASSWORD }} {{- end }} 
metadata: name: postgresql-migrator @@ -240,7 +247,7 @@ metadata: chart: postgresql-8.6.4 release: "devtron" annotations: - "helm.sh/hook": pre-install + "helm.sh/hook": pre-install,pre-upgrade "helm.sh/hook-weight": "-4" "helm.sh/resource-policy": keep type: Opaque @@ -282,6 +289,9 @@ metadata: "helm.sh/hook": pre-install, pre-upgrade data: ORCH_TOKEN: {{ $ORCH_TOKEN }} + {{- if .Values.devtronEnterprise.finops.enabled}} + TIMESCALE_PASSWORD: {{ $TIMESCALE_PASSWORD }} + {{- end}} {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} {{- if and ($.Values.minio.enabled) (not $.Values.configs.BLOB_STORAGE_PROVIDER) }} @@ -402,4 +412,38 @@ metadata: annotations: "helm.sh/hook": pre-install {{- end }} +{{- if $.Values.devtronEnterprise.finops.enabled }} +--- +apiVersion: v1 +kind: Secret +data: + POSTGRES_USER: {{ "postgres" | b64enc }} + POSTGRES_PASSWORD: {{ $TIMESCALE_PASSWORD }} + POSTGRES_DB: {{ "finops" | b64enc }} +metadata: + name: timescaledb-secret + namespace: devtroncd +type: Opaque +--- +apiVersion: v1 +data: + JOB_INTERVAL_MINUTES: "60" + LOG_LEVEL: info + MAX_INTERVALS_PER_RUN: "6" + PG_ADDR: {{ $.Values.global.dbConfig.PG_ADDR }} + PG_DATABASE: {{ $.Values.global.dbConfig.PG_DATABASE }} + PG_USER: {{ $.Values.global.dbConfig.PG_USER }} + PROMETHEUS_DELAY_MINUTES: "5" + TIMESCALE_ADDR: timescaledb-service.devtroncd + TIMESCALE_DATABASE: finops + TIMESCALE_HOST: timescaledb-service.devtroncd + TIMESCALE_NAME: finops + TIMESCALE_PORT: "5432" + TIMESCALE_SSL_MODE: disable + TIMESCALE_USER: postgres +kind: ConfigMap +metadata: + name: cost-sync-cm + namespace: devtroncd +{{- end }} {{- end }} diff --git a/charts/devtron/templates/cost-sync-job.yaml b/charts/devtron/templates/cost-sync-job.yaml new file mode 100644 index 0000000000..9026a67464 --- /dev/null +++ b/charts/devtron/templates/cost-sync-job.yaml @@ -0,0 +1,48 @@ +{{- if and $.Values.devtronEnterprise.enabled $.Values.devtronEnterprise.finops.enabled }} +{{- if 
$.Capabilities.APIVersions.Has "batch/v1/Job" }} +apiVersion: batch/v1 +{{- else }} +apiVersion: batch/v1beta1 +{{- end }} +kind: CronJob +metadata: + name: cost-sync-job + namespace: devtroncd +spec: + concurrencyPolicy: Allow + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + activeDeadlineSeconds: 3600 + template: + metadata: + labels: + app: cost-sync-job + spec: + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.devtronEnterprise.finops.nodeSelector "tolerations" $.Values.devtronEnterprise.finops.tolerations "imagePullSecrets" $.Values.devtronEnterprise.finops.imagePullSecrets "global" $.Values.global) | indent 10 }} + {{- include "common.podSecurityContext" (dict "podSecurityContext" $.Values.devtronEnterprise.finops.podSecurityContext "global" $.Values.global) | indent 10 }} + restartPolicy: OnFailure + containers: + - envFrom: + - configMapRef: + name: cost-sync-cm + - secretRef: + name: postgresql-migrator + - configMapRef: + name: devtron-common-cm + image: {{ include "common.image" (dict "component" $.Values.devtronEnterprise.finops.costSync "global" $.Values.global ) }} + imagePullPolicy: IfNotPresent + name: cost-sync-job + ports: + - containerPort: 8080 + name: app + protocol: TCP + resources: {} + terminationGracePeriodSeconds: 30 + schedule: {{ $.Values.devtronEnterprise.finops.costSync.schedule | quote }} + startingDeadlineSeconds: 100 + successfulJobsHistoryLimit: 3 + suspend: false + timeZone: {{ $.Values.devtronEnterprise.finops.costSync.timeZone }} +{{- end }} diff --git a/charts/devtron/templates/dashboard.yaml b/charts/devtron/templates/dashboard.yaml index 5f033101f2..31bccf38ed 100644 --- a/charts/devtron/templates/dashboard.yaml +++ b/charts/devtron/templates/dashboard.yaml @@ -20,6 +20,9 @@ metadata: data: {{- if $.Values.devtronEnterprise.enabled }} GATEKEEPER_URL: "https://license.devtron.ai/dashboard" + FEATURE_STORAGE_ENABLE: "true" + FEATURE_RESOURCE_WATCHER_ENABLE: "true" 
+ FEATURE_SOFTWARE_DISTRIBUTION_HUB_ENABLE: "true" {{- end }} {{- if .config }} GA_ENABLED: {{ .config.analytics | default "false" | quote }} diff --git a/charts/devtron/templates/devtron-scc.yaml b/charts/devtron/templates/devtron-scc.yaml index 1f5f10d03e..144cd2935a 100644 --- a/charts/devtron/templates/devtron-scc.yaml +++ b/charts/devtron/templates/devtron-scc.yaml @@ -37,4 +37,4 @@ users: volumes: - '*' {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/devtron/templates/devtron.yaml b/charts/devtron/templates/devtron.yaml index 14ab283a11..d4664939ba 100644 --- a/charts/devtron/templates/devtron.yaml +++ b/charts/devtron/templates/devtron.yaml @@ -25,6 +25,9 @@ data: SCOOP_CLUSTER_CONFIG: '{"1":{"serviceName":"scoop-service","passKey":"qhihdidhwid","namespace":"devtroncd","port":"80"}}' {{- end }} {{- if $.Values.devtronEnterprise.enabled }} + TIMESCALE_ADDR: timescaledb-service.devtroncd + TIMESCALE_DATABASE: finops + TIMESCALE_USER: postgres CASBIN_CLIENT_URL: casbin-service.devtroncd:9000 CD_WORKFLOW_EXECUTOR_TYPE: SYSTEM CI_WORKFLOW_EXECUTOR_TYPE: SYSTEM diff --git a/charts/devtron/templates/migrator.yaml b/charts/devtron/templates/migrator.yaml index c95d8b7212..92da8fea60 100644 --- a/charts/devtron/templates/migrator.yaml +++ b/charts/devtron/templates/migrator.yaml @@ -1,4 +1,97 @@ {{- with .Values.components.migrator }} +{{- if and $.Values.devtronEnterprise.enabled $.Values.devtronEnterprise.finops.enabled}} +{{- if $.Capabilities.APIVersions.Has "batch/v1/Job" }} +apiVersion: batch/v1 +{{- else }} +apiVersion: batch/v1beta1 +{{- end }} +kind: Job +metadata: + name: postgresql-migrate-finops-{{ randAlphaNum 5 | lower }} + annotations: {} +spec: + template: + spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 
6 }} + serviceAccountName: devtron + {{- if and $.Values.global $.Values.global.podSecurityContext }} + securityContext: +{{- toYaml $.Values.global.podSecurityContext | nindent 8 }} + {{- end }} + initContainers: + - command: + - /bin/sh + - -c + - cp -r /scripts/. /shared/ + {{- if $.Values.installer.modules }} + {{- if (has "cicd" $.Values.installer.modules) }} + image: {{ include "common.image" (dict "component" $.Values.components.devtron "global" $.Values.global "extraImage" $.Values.components.devtron.cicdImage ) }} + {{- else }} + image: {{ include "common.image" (dict "component" $.Values.components.devtron "global" $.Values.global) }} + {{- end }} + {{- else }} + image: {{ include "common.image" (dict "component" $.Values.components.devtron "global" $.Values.global) }} + {{- end }} + name: init-devtron + {{- if and $.Values.global $.Values.global.containerSecurityContext }} + securityContext: +{{- toYaml $.Values.global.containerSecurityContext | nindent 12 }} + {{- end }} + envFrom: + - configMapRef: + name: devtron-common-cm + volumeMounts: + - mountPath: /shared + name: shared-volume + containers: + - command: + - /bin/sh + - -c + - 'if [ $(MIGRATE_TO_VERSION) -eq "0" ]; then migrate -path $(SCRIPT_LOCATION) -database postgres://$(TIMESCALE_USER):$(TIMESCALE_PASSWORD)@$(TIMESCALE_ADDR):$(PG_PORT)/$(TIMESCALE_DATABASE)?sslmode=disable up; else echo $(MIGRATE_TO_VERSION); migrate -path $(SCRIPT_LOCATION) -database postgres://$(TIMESCALE_USER):$(TIMESCALE_PASSWORD)@$(TIMESCALE_ADDR):$(PG_PORT)/$(TIMESCALE_DATABASE)?sslmode=disable goto $(MIGRATE_TO_VERSION); fi ' + env: + - name: SCRIPT_LOCATION + value: /shared/timescale/ + - name: DB_TYPE + value: postgres + - name: MIGRATE_TO_VERSION + value: "0" + - name: PG_DATABASE + value: finops + envFrom: + - secretRef: + name: postgresql-migrator + - configMapRef: + name: devtron-cm + - configMapRef: + name: devtron-custom-cm + - configMapRef: + name: devtron-common-cm + {{- if .image }} + image: {{ include 
"common.image" (dict "component" $.Values.components.migrator "global" $.Values.global) }} + {{- else }} + image: migrate/migrate + {{- end }} + {{- if and $.Values.global $.Values.global.containerSecurityContext }} + securityContext: +{{- toYaml $.Values.global.containerSecurityContext | nindent 12 }} + {{- end }} + {{- if .finops }} + {{- if .finops.initContainer.resources }} + resources: +{{- toYaml .finops.initContainer.resources | nindent 12 }} + {{- end }} + {{- end }} + name: postgresql-migrate-finops + volumeMounts: + - mountPath: /shared + name: shared-volume + restartPolicy: OnFailure + volumes: + - emptyDir: {} + name: shared-volume + backoffLimit: 20 + activeDeadlineSeconds: 1500 +{{- end }} --- {{- if $.Capabilities.APIVersions.Has "batch/v1/Job" }} apiVersion: batch/v1 diff --git a/charts/devtron/templates/timescale-db.yaml b/charts/devtron/templates/timescale-db.yaml new file mode 100644 index 0000000000..9def4e068f --- /dev/null +++ b/charts/devtron/templates/timescale-db.yaml @@ -0,0 +1,67 @@ +{{- if and $.Values.devtronEnterprise.enabled $.Values.devtronEnterprise.finops.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: timescaledb + labels: + release: devtron + app: timescaledb +spec: + serviceName: timescaledb-service + replicas: 1 + selector: + matchLabels: + app: timescaledb + template: + metadata: + labels: + app: timescaledb + spec: + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" $.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + securityContext: + fsGroup: 999 + containers: + - name: timescaledb + image: {{ include "common.image" (dict "component" $.Values.devtronEnterprise.finops.timescale "global" $.Values.global ) }} + securityContext: {} + envFrom: + - secretRef: + name: timescaledb-secret + - configMapRef: + name: 
devtron-common-cm + env: + - name: PGDATA + value: /var/lib/postgresql/data/pgdata + ports: + - containerPort: 5432 + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: timescale-vol + volumeClaimTemplates: + - metadata: + name: timescale-vol + spec: + accessModes: [ "ReadWriteOnce" ] + {{- include "common.storageclass" $ | indent 8 }} + resources: + requests: + storage: {{ $.Values.devtronEnterprise.finops.timescale.volumeSize | default "5Gi" }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: timescaledb-service + labels: + release: devtron + app: timescaledb +spec: + selector: + app: timescaledb + ports: + - protocol: TCP + port: 5432 + targetPort: 5432 + type: ClusterIP +{{- end }} diff --git a/charts/devtron/values.yaml b/charts/devtron/values.yaml index 6762ba1eec..1aabe83375 100644 --- a/charts/devtron/values.yaml +++ b/charts/devtron/values.yaml @@ -42,7 +42,7 @@ nfs: extraManifests: [] installer: repo: "devtron-labs/devtron" - release: "v1.8.2" + release: "v2.0.0" registry: "" image: inception tag: 473deaa4-185-21582 @@ -97,13 +97,13 @@ components: FEATURE_CODE_MIRROR_ENABLE: "true" FEATURE_GROUPED_APP_LIST_FILTERS_ENABLE: "true" registry: "" - image: "dashboard:b00aa204-690-36533" + image: "dashboard:b48d0910-690-38228" imagePullPolicy: IfNotPresent healthPort: 8080 devtron: registry: "" - image: "hyperion:261df88d-280-36531" - cicdImage: "devtron:261df88d-434-36530" + image: "hyperion:f0c18f20-280-38148" + cicdImage: "devtron:f0c18f20-434-38146" imagePullPolicy: IfNotPresent customOverrides: {} healthPort: 8080 @@ -140,7 +140,7 @@ components: # - devtron.example.com ciRunner: registry: "" - image: "ci-runner:880420ac-138-36030" + image: "ci-runner:6b408df4-138-38163" # Add annotations for ci-runner & cd-runner serviceAccount. 
annotations: {} argocdDexServer: @@ -151,7 +151,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:880420ac-564-36036" + image: "kubelink:6b408df4-564-38159" imagePullPolicy: IfNotPresent healthPort: 50052 podSecurityContext: @@ -174,10 +174,11 @@ components: keyName: postgresql-password kubewatch: registry: "" - image: "kubewatch:880420ac-419-36026" + image: "kubewatch:6b408df4-419-38172" imagePullPolicy: IfNotPresent healthPort: 8080 configs: + VELERO_INFORMER: "false" devtroncd_NAMESPACE: "devtron-ci" USE_CUSTOM_HTTP_TRANSPORT: "true" CLUSTER_ARGO_CD_TYPE: "ALL_CLUSTER" @@ -200,7 +201,7 @@ components: volumeSize: "20Gi" gitsensor: registry: "" - image: "git-sensor:b82f5fdb-200-36532" + image: "git-sensor:6b408df4-200-38174" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -218,7 +219,7 @@ components: # Values for lens lens: registry: "" - image: "lens:880420ac-333-36029" + image: "lens:6b408df4-333-38167" imagePullPolicy: IfNotPresent secrets: {} resources: {} @@ -255,7 +256,7 @@ components: entMigratorImage: "devtron-utils:geni-v1.1.4" chartSync: registry: "" - image: chart-sync:880420ac-836-36037 + image: chart-sync:6b408df4-836-38155 schedule: "0 19 * * *" extraConfigs: {} podSecurityContext: @@ -411,7 +412,7 @@ argo-cd: security: enabled: false imageScanner: - image: "image-scanner:f21e02cb-141-34534" + image: "image-scanner:6b408df4-141-38158" healthPort: 8080 configs: TRIVY_DB_REPOSITORY: mirror.gcr.io/aquasec/trivy-db @@ -430,7 +431,7 @@ security: notifier: enabled: false imagePullPolicy: IfNotPresent - image: "notifier:00f17215-372-36041" + image: "notifier:5c4b5b3a-372-38153" configs: CD_ENVIRONMENT: PROD secrets: {} @@ -482,6 +483,15 @@ monitoring: # Change these values for Devtron-Enterprise devtronEnterprise: enabled: false + finops: + enabled: false + costSync: + image: "cost-sync:46ed7c67-1159-38183" + schedule: "0 * * * *" + timeZone: UTC + timescale: + image: 
"timescaledb-ha:pg18" + volumeSize: 5Gi casbin: registry: "" image: "casbin:f6ff5f74-064b67e5-462-30822" diff --git a/client/lens/LensClient.go b/client/lens/LensClient.go index 784c14598d..4084f710e4 100644 --- a/client/lens/LensClient.go +++ b/client/lens/LensClient.go @@ -19,14 +19,16 @@ package lens import ( "bytes" "encoding/json" - "github.com/caarlos0/env" - "go.uber.org/zap" + "fmt" "io" "io/ioutil" "net/http" "net/url" "strconv" "time" + + "github.com/caarlos0/env" + "go.uber.org/zap" ) type LensConfig struct { @@ -41,6 +43,7 @@ func (code StatusCode) IsSuccess() bool { type LensClient interface { GetAppMetrics(metricRequest *MetricRequest) (resBody []byte, resCode *StatusCode, err error) + GetBulkAppMetrics(bulkRequest *BulkMetricRequest) (*LensResponse, *StatusCode, error) } type LensClientImpl struct { httpClient *http.Client @@ -66,7 +69,21 @@ func NewLensClientImpl(config *LensConfig, logger *zap.SugaredLogger) (*LensClie type ClientRequest struct { Method string Path string - RequestBody *MetricRequest + RequestBody interface{} +} + +type LensResponse struct { + Code int `json:"code,omitempty"` + Status string `json:"status,omitempty"` + Result json.RawMessage `json:"result,omitempty"` + Errors []*LensApiError `json:"errors,omitempty"` +} +type LensApiError struct { + HttpStatusCode int `json:"-"` + Code string `json:"code,omitempty"` + InternalMessage string `json:"internalMessage,omitempty"` + UserMessage string `json:"userMessage,omitempty"` + UserDetailMessage string `json:"userDetailMessage,omitempty"` } func (session *LensClientImpl) doRequest(clientRequest *ClientRequest) (resBody []byte, resCode *StatusCode, err error) { @@ -109,6 +126,50 @@ type MetricRequest struct { To string `json:"to"` } +type AppEnvPair struct { + AppId int `json:"appId"` + EnvId int `json:"envId"` +} + +type BulkMetricRequest struct { + AppEnvPairs []AppEnvPair `json:"appEnvPairs"` + From *time.Time `json:"from"` + To *time.Time `json:"to"` +} + +type Metrics struct { 
+ AverageCycleTime float64 `json:"average_cycle_time"` + AverageLeadTime float64 `json:"average_lead_time"` + ChangeFailureRate float64 `json:"change_failure_rate"` + AverageRecoveryTime float64 `json:"average_recovery_time"` + AverageDeploymentSize float32 `json:"average_deployment_size"` + AverageLineAdded float32 `json:"average_line_added"` + AverageLineDeleted float32 `json:"average_line_deleted"` + LastFailedTime string `json:"last_failed_time"` + RecoveryTimeLastFailed float64 `json:"recovery_time_last_failed"` +} + +type AppEnvMetrics struct { + AppId int `json:"appId"` + EnvId int `json:"envId"` + Metrics *Metrics `json:"metrics"` + Error string `json:"error,omitempty"` +} + +type BulkMetricsResponse struct { + Results []AppEnvMetrics `json:"results"` +} + +// DoraMetrics represents the new response structure from Lens API +type DoraMetrics struct { + AppId int `json:"app_id"` + EnvId int `json:"env_id"` + DeploymentFrequency float64 `json:"deployment_frequency"` // Deployments per day + ChangeFailureRate float64 `json:"change_failure_rate"` // Percentage + MeanLeadTimeForChanges float64 `json:"mean_lead_time_for_changes"` // Minutes + MeanTimeToRecovery float64 `json:"mean_time_to_recovery"` // Minutes +} + func (session *LensClientImpl) GetAppMetrics(metricRequest *MetricRequest) (resBody []byte, resCode *StatusCode, err error) { params := url.Values{} params.Add("app_id", strconv.Itoa(metricRequest.AppId)) @@ -128,3 +189,30 @@ func (session *LensClientImpl) GetAppMetrics(metricRequest *MetricRequest) (resB resBody, resCode, err = session.doRequest(req) return resBody, resCode, err } + +func (session *LensClientImpl) GetBulkAppMetrics(bulkRequest *BulkMetricRequest) (*LensResponse, *StatusCode, error) { + u, err := url.Parse("deployment-metrics/bulk") + if err != nil { + return nil, nil, err + } + req := &ClientRequest{ + Method: "GET", + Path: u.String(), + RequestBody: bulkRequest, + } + session.logger.Infow("lens bulk req", "req", req) + resBody, 
resCode, err := session.doRequest(req) + if err != nil { + return nil, resCode, err + } + if resCode.IsSuccess() { + apiRes := &LensResponse{} + err = json.Unmarshal(resBody, apiRes) + if err != nil { + return nil, resCode, err + } + return apiRes, resCode, nil + } + session.logger.Errorw("api err in git sensor response", "res", string(resBody)) + return nil, resCode, fmt.Errorf("res not success, Statuscode: %v", resCode) +} diff --git a/cmd/external-app/router.go b/cmd/external-app/router.go index f19c440ea0..01c0af8546 100644 --- a/cmd/external-app/router.go +++ b/cmd/external-app/router.go @@ -87,6 +87,7 @@ type MuxRouter struct { argoApplicationRouter argoApplication.ArgoApplicationRouter fluxApplicationRouter fluxApplication.FluxApplicationRouter userResourceRouter userResource.Router + infraOverviewRouter router.InfraOverviewRouter } func NewMuxRouter( @@ -121,6 +122,7 @@ func NewMuxRouter( appRouter app.AppRouterEAMode, rbacRoleRouter user.RbacRoleRouter, argoApplicationRouter argoApplication.ArgoApplicationRouter, fluxApplicationRouter fluxApplication.FluxApplicationRouter, userResourceRouter userResource.Router, + infraOverviewRouter router.InfraOverviewRouter, ) *MuxRouter { r := &MuxRouter{ Router: mux.NewRouter(), @@ -158,6 +160,7 @@ func NewMuxRouter( argoApplicationRouter: argoApplicationRouter, fluxApplicationRouter: fluxApplicationRouter, userResourceRouter: userResourceRouter, + infraOverviewRouter: infraOverviewRouter, } return r } @@ -301,4 +304,8 @@ func (r *MuxRouter) Init() { userResourcesRouter := r.Router.PathPrefix("/orchestrator/user/resource").Subrouter() r.userResourceRouter.InitUserResourceRouter(userResourcesRouter) + + infraOverviewRouter := r.Router.PathPrefix("/orchestrator/overview/infra").Subrouter() + r.infraOverviewRouter.InitInfraOverviewRouter(infraOverviewRouter) + } diff --git a/cmd/external-app/wire.go b/cmd/external-app/wire.go index 27acccd70e..18331bc4df 100644 --- a/cmd/external-app/wire.go +++ 
b/cmd/external-app/wire.go @@ -85,6 +85,9 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/providerConfig" "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs" repository2 "github.com/devtron-labs/devtron/pkg/kubernetesResourceAuditLogs/repository" + "github.com/devtron-labs/devtron/pkg/overview" + "github.com/devtron-labs/devtron/pkg/overview/cache" + config2 "github.com/devtron-labs/devtron/pkg/overview/config" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool" security2 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool/repository" @@ -299,6 +302,21 @@ func InitializeApp() (*App, error) { chartConfig.NewEnvConfigOverrideRepository, wire.Bind(new(chartConfig.EnvConfigOverrideRepository), new(*chartConfig.EnvConfigOverrideRepositoryImpl)), + + restHandler.NewInfraOverviewRestHandlerImpl, + wire.Bind(new(restHandler.InfraOverviewRestHandler), new(*restHandler.InfraOverviewRestHandlerImpl)), + + router.NewInfraOverviewRouterImpl, + wire.Bind(new(router.InfraOverviewRouter), new(*router.InfraOverviewRouterImpl)), + + // Cluster overview service (uses background refresh worker) + overview.NewClusterOverviewServiceImpl, + wire.Bind(new(overview.ClusterOverviewService), new(*overview.ClusterOverviewServiceImpl)), + + cache.NewClusterCacheServiceImpl, + wire.Bind(new(cache.ClusterCacheService), new(*cache.ClusterCacheServiceImpl)), + + config2.GetClusterOverviewConfig, ) return &App{}, nil } diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index f9a8452e36..d36dd1fa90 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -121,6 +121,9 @@ import ( read7 "github.com/devtron-labs/devtron/pkg/module/read" "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/module/store" + "github.com/devtron-labs/devtron/pkg/overview" + "github.com/devtron-labs/devtron/pkg/overview/cache" + 
config4 "github.com/devtron-labs/devtron/pkg/overview/config" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool" repository11 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool/repository" @@ -502,7 +505,15 @@ func InitializeApp() (*App, error) { userResourceServiceImpl := userResource.NewUserResourceServiceImpl(sugaredLogger, teamServiceImpl, environmentServiceImpl, clusterServiceImpl, k8sApplicationServiceImpl, enforcerUtilImpl, commonEnforcementUtilImpl, enforcerImpl, appCrudOperationServiceImpl) restHandlerImpl := userResource2.NewUserResourceRestHandler(sugaredLogger, userServiceImpl, userResourceServiceImpl) routerImpl := userResource2.NewUserResourceRouterImpl(restHandlerImpl) - muxRouter := NewMuxRouter(sugaredLogger, ssoLoginRouterImpl, teamRouterImpl, userAuthRouterImpl, userRouterImpl, commonRouterImpl, clusterRouterImpl, dashboardRouterImpl, helmAppRouterImpl, environmentRouterImpl, k8sApplicationRouterImpl, chartRepositoryRouterImpl, appStoreDiscoverRouterImpl, appStoreValuesRouterImpl, appStoreDeploymentRouterImpl, chartProviderRouterImpl, dockerRegRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, userAttributesRouterImpl, telemetryRouterImpl, userTerminalAccessRouterImpl, attributesRouterImpl, appRouterEAModeImpl, rbacRoleRouterImpl, argoApplicationRouterImpl, fluxApplicationRouterImpl, routerImpl) + clusterCacheServiceImpl := cache.NewClusterCacheServiceImpl(sugaredLogger) + clusterOverviewConfig, err := config4.GetClusterOverviewConfig() + if err != nil { + return nil, err + } + clusterOverviewServiceImpl := overview.NewClusterOverviewServiceImpl(sugaredLogger, clusterServiceImpl, k8sCapacityServiceImpl, clusterCacheServiceImpl, k8sCommonServiceImpl, enforcerImpl, clusterOverviewConfig) + infraOverviewRestHandlerImpl 
:= restHandler.NewInfraOverviewRestHandlerImpl(sugaredLogger, clusterOverviewServiceImpl, clusterCacheServiceImpl, userServiceImpl, validate, enforcerImpl) + infraOverviewRouterImpl := router.NewInfraOverviewRouterImpl(infraOverviewRestHandlerImpl) + muxRouter := NewMuxRouter(sugaredLogger, ssoLoginRouterImpl, teamRouterImpl, userAuthRouterImpl, userRouterImpl, commonRouterImpl, clusterRouterImpl, dashboardRouterImpl, helmAppRouterImpl, environmentRouterImpl, k8sApplicationRouterImpl, chartRepositoryRouterImpl, appStoreDiscoverRouterImpl, appStoreValuesRouterImpl, appStoreDeploymentRouterImpl, chartProviderRouterImpl, dockerRegRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, userAttributesRouterImpl, telemetryRouterImpl, userTerminalAccessRouterImpl, attributesRouterImpl, appRouterEAModeImpl, rbacRoleRouterImpl, argoApplicationRouterImpl, fluxApplicationRouterImpl, routerImpl, infraOverviewRouterImpl) mainApp := NewApp(db, sessionManager, muxRouter, telemetryEventClientImpl, posthogClient, sugaredLogger, userServiceImpl) return mainApp, nil } diff --git a/devtron-images.txt.source b/devtron-images.txt.source index 2cd1633f26..568b10a494 100644 --- a/devtron-images.txt.source +++ b/devtron-images.txt.source @@ -7,26 +7,26 @@ quay.io/devtron/authenticator:e414faff-393-13273 quay.io/devtron/bats:v1.4.1 quay.io/devtron/busybox:1.31.1 quay.io/devtron/centos-k8s-utils:latest -quay.io/devtron/chart-sync:880420ac-836-36037 -quay.io/devtron/ci-runner:880420ac-138-36030 +quay.io/devtron/chart-sync:6b408df4-836-38155 +quay.io/devtron/ci-runner:6b408df4-138-38163 quay.io/devtron/clair:4.3.6 quay.io/devtron/curl:7.73.0 -quay.io/devtron/dashboard:b00aa204-690-36533 +quay.io/devtron/dashboard:b48d0910-690-38228 quay.io/devtron/devtron-utils:dup-chart-repo-v1.1.0 -quay.io/devtron/devtron:261df88d-434-36530 
+quay.io/devtron/devtron:f0c18f20-434-38146 quay.io/devtron/dex:v2.30.2 -quay.io/devtron/git-sensor:b82f5fdb-200-36532 +quay.io/devtron/git-sensor:6b408df4-200-38174 quay.io/devtron/grafana:7.3.1 -quay.io/devtron/hyperion:261df88d-280-36531 -quay.io/devtron/image-scanner:f21e02cb-141-34534 +quay.io/devtron/hyperion:f0c18f20-280-38148 +quay.io/devtron/image-scanner:6b408df4-141-38158 quay.io/devtron/inception:473deaa4-185-21582 quay.io/devtron/k8s-sidecar:1.1.0 quay.io/devtron/k8s-utils:tutum-curl quay.io/devtron/k9s-k8s-utils:latest quay.io/devtron/kubectl:latest -quay.io/devtron/kubelink:880420ac-564-36036 -quay.io/devtron/kubewatch:880420ac-419-36026 -quay.io/devtron/lens:880420ac-333-36029 +quay.io/devtron/kubelink:6b408df4-564-38159 +quay.io/devtron/kubewatch:6b408df4-419-38172 +quay.io/devtron/lens:6b408df4-333-38167 quay.io/devtron/migrator:v4.16.2 quay.io/devtron/minideb:latest quay.io/devtron/minio-mc:RELEASE.2021-02-14T04-28-06Z @@ -34,9 +34,11 @@ quay.io/devtron/minio:RELEASE.2021-02-14T04-01-33Z quay.io/devtron/nats-box quay.io/devtron/nats-server-config-reloader:0.6.2 quay.io/devtron/nats:2.9.3-alpine -quay.io/devtron/notifier:00f17215-372-36041 +quay.io/devtron/notifier:5c4b5b3a-372-38153 quay.io/devtron/postgres:14.9 quay.io/devtron/postgres_exporter:v0.10.1 quay.io/devtron/postgres_exporter:v0.4.7 quay.io/devtron/prometheus-nats-exporter:0.9.0 quay.io/devtron/ubuntu-k8s-utils:latest +quay.io/devtron/cost-sync:46ed7c67-1159-38183 +quay.io/devtron/timescaledb-ha:pg18 diff --git a/env_gen.json b/env_gen.json index 99df280700..2694919f4a 100644 --- a/env_gen.json +++ b/env_gen.json @@ -1 +1 @@ -[{"Category":"CD","Fields":[{"Env":"ARGO_APP_MANUAL_SYNC_TIME","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"CD_FLUX_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * 
*","EnvDescription":"Cron time to check the pipeline status for flux cd pipeline","Example":"","Deprecated":"false"},{"Env":"CD_HELM_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time to check the pipeline status ","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time for CD pipeline status","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_TIMEOUT_DURATION","EnvType":"string","EnvValue":"20","EnvDescription":"Timeout for CD pipeline to get healthy","Example":"","Deprecated":"false"},{"Env":"DEPLOY_STATUS_CRON_GET_PIPELINE_DEPLOYED_WITHIN_HOURS","EnvType":"int","EnvValue":"12","EnvDescription":"This flag is used to fetch the deployment status of the application. It retrieves the status of deployments that occurred between 12 hours and 10 minutes prior to the current time. It fetches non-terminal statuses.","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"1","EnvDescription":"Context timeout for gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"6","EnvDescription":"Context timeout for no gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CD_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable migration of external argocd application to devtron pipeline","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_FLUX_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable flux application 
services","Example":"","Deprecated":"false"},{"Env":"FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking flux app status periodically and update in db, value is in seconds., default is 120, if wfr is updated within configured time i.e. FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking helm app status periodically and update in db, value is in seconds., default is 120, if wfr is updated within configured time i.e. HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"IS_INTERNAL_USE","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled then cd pipeline and helm apps will not need the deployment app type mandatorily. Couple this flag with HIDE_GITOPS_OR_HELM_OPTION (in Dashborad) and if gitops is configured and allowed for the env, pipeline/ helm app will gitops else no-gitops.","Example":"","Deprecated":"false"},{"Env":"MIGRATE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"migrate deployment config data from charts table to deployment_config table","Example":"","Deprecated":"false"},{"Env":"PIPELINE_DEGRADED_TIME","EnvType":"string","EnvValue":"10","EnvDescription":"Time to mark a pipeline degraded if not healthy in defined time","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_DEVTRON_APP","EnvType":"int","EnvValue":"1","EnvDescription":"Count for devtron application rivision history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_EXTERNAL_HELM_APP","EnvType":"int","EnvValue":"0","EnvDescription":"Count for external helm application rivision 
history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_HELM_APP","EnvType":"int","EnvValue":"1","EnvDescription":"To set the history limit for the helm app being deployed through devtron","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_LINKED_HELM_APP","EnvType":"int","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOULD_CHECK_NAMESPACE_ON_CLONE","EnvType":"bool","EnvValue":"false","EnvDescription":"should we check if namespace exists or not while cloning app","Example":"","Deprecated":"false"},{"Env":"USE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"use deployment config data from deployment_config table","Example":"","Deprecated":"true"},{"Env":"VALIDATE_EXT_APP_CHART_TYPE","EnvType":"bool","EnvValue":"false","EnvDescription":"validate external flux app chart","Example":"","Deprecated":"false"}]},{"Category":"CI_BUILDX","Fields":[{"Env":"ASYNC_BUILDX_CACHE_EXPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async container image cache export","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_MODE_MIN","EnvType":"bool","EnvValue":"false","EnvDescription":"To set build cache mode to minimum in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_INTERRUPTION_MAX_RETRY","EnvType":"int","EnvValue":"3","EnvDescription":"Maximum number of retries for buildx builder interruption","Example":"","Deprecated":"false"}]},{"Category":"CI_RUNNER","Fields":[{"Env":"AZURE_ACCOUNT_KEY","EnvType":"string","EnvValue":"","EnvDescription":"If blob storage is being used of azure then pass the secret key to access the bucket","Example":"","Deprecated":"false"},{"Env":"AZURE_ACCOUNT_NAME","EnvType":"string","EnvValue":"","EnvDescription":"Account name for azure blob 
storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_CACHE","EnvType":"string","EnvValue":"","EnvDescription":"Cache bucket name for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_LOG","EnvType":"string","EnvValue":"","EnvDescription":"Log bucket for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_CONNECTION_INSECURE","EnvType":"bool","EnvValue":"true","EnvDescription":"Azure gateway connection allows insecure if true","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_URL","EnvType":"string","EnvValue":"http://devtron-minio.devtroncd:9000","EnvDescription":"Sent to CI runner for blob","Example":"","Deprecated":"false"},{"Env":"BASE_LOG_LOCATION_PATH","EnvType":"string","EnvValue":"/home/devtron/","EnvDescription":"Used to store, download logs of ci workflow, artifact","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_GCP_CREDENTIALS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"GCP cred json for GCS blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_PROVIDER","EnvType":"","EnvValue":"S3","EnvDescription":"Blob storage provider name(AWS/GCP/Azure)","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ACCESS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"S3 access key for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_BUCKET_VERSIONED","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable buctet versioning for blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT","EnvType":"string","EnvValue":"","EnvDescription":"S3 endpoint URL for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT_INSECURE","EnvType":"bool","EnvValue":"false","EnvDescription":"To use insecure s3 endpoint","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_SECRET_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Secret key for s3 blob 
storage","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/devtron/buildx","EnvDescription":"Path for the buildx cache","Example":"","Deprecated":"false"},{"Env":"BUILDX_K8S_DRIVER_OPTIONS","EnvType":"string","EnvValue":"","EnvDescription":"To enable the k8s driver and pass args for k8s driver in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_PROVENANCE_MODE","EnvType":"string","EnvValue":"","EnvDescription":"provinance is set to true by default by docker. this will add some build related data in generated build manifest.it also adds some unknown:unknown key:value pair which may not be compatible by some container registries. with buildx k8s driver , provinenance=true is causing issue when push manifest to quay registry, so setting it to false","Example":"","Deprecated":"false"},{"Env":"BUILD_LOG_TTL_VALUE_IN_SECS","EnvType":"int","EnvValue":"3600","EnvDescription":"This is the time that the pods of ci/pre-cd/post-cd live after completion state.","Example":"","Deprecated":"false"},{"Env":"CACHE_LIMIT","EnvType":"int64","EnvValue":"5000000000","EnvDescription":"Cache limit.","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for Pre/Post cd ","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for Pre/Post 
CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"Toleration key for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"Toleration value for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Rquest Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Rquest Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for Pre/Post CD(AWF,System)","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"cd-runner","EnvDescription":"Service account to be used in Pre/Post CD pod","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for CI","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CI","Example":"","Deprecated":"false"},{"Env":"CI_IGNORE_DOCKER_CACHE","EnvType":"bool","EnvValue":"","EnvDescription":"Ignoring docker cache ","Example":"","Deprecated":"false"},{"Env":"CI_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for build logs","Example":"","Deprecated":"false"},{"Env":"CI_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Toleration key for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"","EnvDescription":"Toleration value for 
CI","Example":"","Deprecated":"false"},{"Env":"CI_RUNNER_DOCKER_MTU_VALUE","EnvType":"int","EnvValue":"-1","EnvDescription":"this is to control the bytes of inofrmation passed in a network packet in ci-runner. default is -1 (defaults to the underlying node mtu value)","Example":"","Deprecated":"false"},{"Env":"CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"this is to control the no of linked pipelines should be hanled in one go when a ci-success event of an parent ci is received","Example":"","Deprecated":"false"},{"Env":"CI_VOLUME_MOUNTS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"additional volume mount data for CI and JOB","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for CI(AWF,System)","Example":"","Deprecated":"false"},{"Env":"DEFAULT_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"arsenal-v1/ci-artifacts","EnvDescription":"Key location for artifacts being created","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_BUCKET","EnvType":"string","EnvValue":"devtron-pro-ci-logs","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"arsenal-v1","EnvDescription":"Bucket prefix for build logs","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET","EnvType":"string","EnvValue":"ci-caching","EnvDescription":"Bucket name for build cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"Build Cache bucket region","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"","EnvDescription":"Bucket prefix for build 
cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_LOGS_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"Namespace for devtron stack","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_TIMEOUT","EnvType":"int64","EnvValue":"3600","EnvDescription":"Timeout for Pre/Post-Cd to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CI_IMAGE","EnvType":"string","EnvValue":"686244538589.dkr.ecr.us-east-2.amazonaws.com/cirunner:47","EnvDescription":"To pass the ci-runner image","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TARGET_PLATFORM","EnvType":"string","EnvValue":"","EnvDescription":"Default architecture for buildx","Example":"","Deprecated":"false"},{"Env":"DOCKER_BUILD_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/docker","EnvDescription":"Path to store cache of docker build (/var/lib/docker-\u003e for legacy docker build, /var/lib/devtron-\u003e for buildx)","Example":"","Deprecated":"false"},{"Env":"ENABLE_BUILD_CONTEXT","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable build context in Devtron.","Example":"","Deprecated":"false"},{"Env":"ENABLE_WORKFLOW_EXECUTION_STAGE","EnvType":"bool","EnvValue":"true","EnvDescription":"if enabled then we will display build stages separately for CI/Job/Pre-Post CD","Example":"true","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_CM_NAME","EnvType":"string","EnvValue":"blob-storage-cm","EnvDescription":"name of the config map(contains bucket name, etc.) 
in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_SECRET_NAME","EnvType":"string","EnvValue":"blob-storage-secret","EnvDescription":"name of the secret(contains password, accessId,passKeys, etc.) in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"This is an array of strings used when submitting a workflow for pre or post-CD execution. If the ","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_API_SECRET","EnvType":"string","EnvValue":"devtroncd-secret","EnvDescription":"External CI API secret.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_PAYLOAD","EnvType":"string","EnvValue":"{\"ciProjectDetails\":[{\"gitRepository\":\"https://github.com/vikram1601/getting-started-nodejs.git\",\"checkoutPath\":\"./abc\",\"commitHash\":\"239077135f8cdeeccb7857e2851348f558cb53d3\",\"commitTime\":\"2022-10-30T20:00:00\",\"branch\":\"master\",\"message\":\"Update README.md\",\"author\":\"User Name \"}],\"dockerImage\":\"445808685819.dkr.ecr.us-east-2.amazonaws.com/orch:23907713-2\"}","EnvDescription":"External CI payload with project 
details.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_WEB_HOOK_URL","EnvType":"string","EnvValue":"","EnvDescription":"default is {{HOST_URL}}/orchestrator/webhook/ext-ci. It is used for external ci.","Example":"","Deprecated":"false"},{"Env":"IGNORE_CM_CS_IN_CI_JOB","EnvType":"bool","EnvValue":"false","EnvDescription":"Ignore CM/CS in CI-pipeline as Job","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_COUNT","EnvType":"int","EnvValue":"0","EnvDescription":"push artifact(image) in ci retry count ","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_INTERVAL","EnvType":"int","EnvValue":"5","EnvDescription":"image retry interval takes value in seconds","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCANNER_ENDPOINT","EnvType":"string","EnvValue":"http://image-scanner-new-demo-devtroncd-service.devtroncd:80","EnvDescription":"Image-scanner micro-service URL","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_MAX_RETRIES","EnvType":"int","EnvValue":"3","EnvDescription":"Max retry count for image-scanning","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay for the image-scaning to start","Example":"","Deprecated":"false"},{"Env":"IN_APP_LOGGING_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"Used in case of argo workflow is enabled. 
If enabled logs push will be managed by us, else will be managed by argo workflow.","Example":"","Deprecated":"false"},{"Env":"MAX_CD_WORKFLOW_RUNNER_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum time pre/post-cd-workflow create pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MAX_CI_WORKFLOW_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum time CI-workflow create pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MODE","EnvType":"string","EnvValue":"DEV","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_SERVER_HOST","EnvType":"string","EnvValue":"localhost:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_HOST","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd/webhook/msg/nats","EnvDescription":"Orchestrator micro-service URL ","Example":"","Deprecated":"false"},{"Env":"ORCH_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"Orchestrator token","Example":"","Deprecated":"false"},{"Env":"PRE_CI_CACHE_PATH","EnvType":"string","EnvValue":"/devtroncd-cache","EnvDescription":"Cache path for Pre CI tasks","Example":"","Deprecated":"false"},{"Env":"SHOW_DOCKER_BUILD_ARGS","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable showing the args passed for CI in build logs","Example":"","Deprecated":"false"},{"Env":"SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL","EnvType":"bool","EnvValue":"false","EnvDescription":"To skip cache Push/Pull for ci job","Example":"","Deprecated":"false"},{"Env":"SKIP_CREATING_ECR_REPO","EnvType":"bool","EnvValue":"false","EnvDescription":"By disabling this ECR repo won't get created if it's not available on ECR from build configuration","Example":"","Deprecated":"false"},{"Env":"TERMINATION_GRACE_PERIOD_SECS","EnvType":"int","EnvValue":"180","EnvDescription":"this is the time given to workflow pods to shutdown. 
(grace full termination time)","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_QUERY_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 query for listing artifacts","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CD_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post cd","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CI_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post ci","Example":"","Deprecated":"false"},{"Env":"USE_BUILDX","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable buildx feature globally","Example":"","Deprecated":"false"},{"Env":"USE_DOCKER_API_TO_GET_DIGEST","EnvType":"bool","EnvValue":"false","EnvDescription":"when user do not pass the digest then this flag controls , finding the image digest using docker API or not. if set to true we get the digest from docker API call else use docker pull command. [logic in ci-runner]","Example":"","Deprecated":"false"},{"Env":"USE_EXTERNAL_NODE","EnvType":"bool","EnvValue":"false","EnvDescription":"It is used in case of Pre/ Post Cd with run in application mode. 
If enabled the node lebels are read from EXTERNAL_CD_NODE_LABEL_SELECTOR else from CD_NODE_LABEL_SELECTOR MODE: if the vale is DEV, it will read the local kube config file or else from the cluser location.","Example":"","Deprecated":"false"},{"Env":"USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD","EnvType":"bool","EnvValue":"false","EnvDescription":"To use the same tag in container image as that of git tag","Example":"","Deprecated":"false"},{"Env":"WF_CONTROLLER_INSTANCE_ID","EnvType":"string","EnvValue":"devtron-runner","EnvDescription":"Workflow controller instance ID.","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_CACHE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"flag is used to configure how Docker caches are handled during a CI/CD ","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"ci-runner","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"-","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ADDITIONAL_NODE_GROUP_LABELS","EnvType":"","EnvValue":"","EnvDescription":"Add comma separated list of additional node group labels to default labels","Example":"karpenter.sh/nodepool,cloud.google.com/gke-nodepool","Deprecated":"false"},{"Env":"APP_SYNC_IMAGE","EnvType":"string","EnvValue":"quay.io/devtron/chart-sync:1227622d-132-3775","EnvDescription":"For the app sync image, this image will be used in app-manual sync job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_JOB_RESOURCES_OBJ","EnvType":"string","EnvValue":"","EnvDescription":"To pass the resource of app sync","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"chart-sync","EnvDescription":"Service account to be used in app sync 
Job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SHUTDOWN_WAIT_DURATION","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_AUTO_SYNC_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled all argocd application will have auto sync enabled","Example":"true","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_COUNT_ON_CONFLICT","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_DELAY_ON_CONFLICT","EnvType":"int","EnvValue":"1","EnvDescription":"Delay on retrying the maifest commit the on gitops","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_COUNT","EnvType":"int","EnvValue":"4","EnvDescription":"Retry count for registering a GitOps repository to ArgoCD","Example":"3","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay (in Seconds) between the retries for registering a GitOps repository to ArgoCD","Example":"5","Deprecated":"false"},{"Env":"BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"there is feature to get URL's of services/ingresses. so to extract those, we need to parse all the servcie and ingress objects of the application. 
this BATCH_SIZE flag controls the no of these objects get parsed in one go.","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host for the devtron stack","Example":"","Deprecated":"false"},{"Env":"CD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PORT","EnvType":"string","EnvValue":"8000","EnvDescription":"Port for pre/post-cd","Example":"","Deprecated":"false"},{"Env":"CExpirationTime","EnvType":"int","EnvValue":"600","EnvDescription":"Caching expiration time.","Example":"","Deprecated":"false"},{"Env":"CI_TRIGGER_CRON_TIME","EnvType":"int","EnvValue":"2","EnvDescription":"For image poll plugin","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_STATUS_UPDATE_CRON","EnvType":"string","EnvValue":"*/5 * * * *","EnvDescription":"Cron schedule for CI pipeline status","Example":"","Deprecated":"false"},{"Env":"CLI_CMD_TIMEOUT_GLOBAL_SECONDS","EnvType":"int","EnvValue":"0","EnvDescription":"Used in git cli opeartion timeout","Example":"","Deprecated":"false"},{"Env":"CLUSTER_STATUS_CRON_TIME","EnvType":"int","EnvValue":"15","EnvDescription":"Cron schedule for cluster status on resource browser","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TIMEOUT","EnvType":"float64","EnvValue":"3600","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEVTRON_BOM_URL","EnvType":"string","EnvValue":"https://raw.githubusercontent.com/devtron-labs/devtron/%s/charts/devtron/devtron-bom.yaml","EnvDescription":"Path to devtron-bom.yaml of devtron charts, 
used for module installation and devtron upgrade","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEX_SECRET_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of dex secret","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_CHART_NAME","EnvType":"string","EnvValue":"devtron-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Name of the Devtron Helm release. ","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of the Devtron Helm release","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Is used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_URL","EnvType":"string","EnvValue":"https://helm.devtron.ai","EnvDescription":"Is used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLATION_TYPE","EnvType":"string","EnvValue":"","EnvDescription":"Devtron Installation type(EA/Full)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_MODULES_PATH","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"Path to devtron installer modules, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_RELEASE_PATH","EnvType":"string","EnvValue":"installer.release","EnvDescription":"Path to devtron installer release, used to find the helm charts and values 
files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_MODULES_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_OPERATOR_BASE_PATH","EnvType":"string","EnvValue":"","EnvDescription":"Base path for devtron operator, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_SECRET_NAME","EnvType":"string","EnvValue":"devtron-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_VERSION_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.release","EnvDescription":"devtron operator version identifier in helm values yaml","Example":"","Deprecated":"false"},{"Env":"DEX_CID","EnvType":"string","EnvValue":"example-app","EnvDescription":"dex client id ","Example":"","Deprecated":"false"},{"Env":"DEX_CLIENT_ID","EnvType":"string","EnvValue":"argo-cd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CSTOREKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX CSTOREKEY.","Example":"","Deprecated":"false"},{"Env":"DEX_JWTKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX JWT key. 
","Example":"","Deprecated":"false"},{"Env":"DEX_RURL","EnvType":"string","EnvValue":"http://127.0.0.1:8080/callback","EnvDescription":"Dex redirect URL(http://argocd-dex-server.devtroncd:8080/callback)","Example":"","Deprecated":"false"},{"Env":"DEX_SCOPES","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_SECRET","EnvType":"string","EnvValue":"","EnvDescription":"Dex secret","Example":"","Deprecated":"false"},{"Env":"DEX_URL","EnvType":"string","EnvValue":"","EnvDescription":"Dex service endpoint with dex path(http://argocd-dex-server.devtroncd:5556/dex)","Example":"","Deprecated":"false"},{"Env":"ECR_REPO_NAME_PREFIX","EnvType":"string","EnvValue":"test/","EnvDescription":"Prefix for ECR repo to be created in does not exist","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of no-gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_LINKED_CI_ARTIFACT_COPY","EnvType":"bool","EnvValue":"false","EnvDescription":"Enable copying artifacts from parent CI pipeline to linked CI pipeline during creation","Example":"","Deprecated":"false"},{"Env":"ENABLE_PASSWORD_ENCRYPTION","EnvType":"bool","EnvValue":"true","EnvDescription":"enable password encryption","Example":"","Deprecated":"false"},{"Env":"EPHEMERAL_SERVER_VERSION_REGEX","EnvType":"string","EnvValue":"v[1-9]\\.\\b(2[3-9]\\|[3-9][0-9])\\b.*","EnvDescription":"ephemeral containers support version regex that is compared with k8sServerVersion","Example":"","Deprecated":"false"},{"Env":"EVENT_URL","EnvType":"string","EnvValue":"http://localhost:3000/notify","EnvDescription":"Notifier service 
url","Example":"","Deprecated":"false"},{"Env":"EXECUTE_WIRE_NIL_CHECKER","EnvType":"bool","EnvValue":"false","EnvDescription":"checks for any nil pointer in wire.go","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CI_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"To expose CI metrics","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"restart workload retrieval batch size ","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_WORKER_POOL_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"restart workload retrieval pool size","Example":"","Deprecated":"false"},{"Env":"FORCE_SECURITY_SCANNING","EnvType":"bool","EnvValue":"false","EnvDescription":"By enabling this no one can disable image scaning on ci-pipeline from UI","Example":"","Deprecated":"false"},{"Env":"GITHUB_ORG_NAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_USERNAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_REPO_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for Gitops repo being creation for argocd application","Example":"","Deprecated":"false"},{"Env":"GO_RUNTIME_ENV","EnvType":"string","EnvValue":"production","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host URL for the grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace for grafana","Example":"","Deprecated":"false"},{"Env":"GRAFANA_ORG_ID","EnvType":"int","EnvValue":"2","EnvDescription":"Org ID for grafana for application 
metrics","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PASSWORD","EnvType":"string","EnvValue":"prom-operator","EnvDescription":"Password for grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PORT","EnvType":"string","EnvValue":"8090","EnvDescription":"Port for grafana micro-service","Example":"","Deprecated":"false"},{"Env":"GRAFANA_URL","EnvType":"string","EnvValue":"","EnvDescription":"Host URL for the grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"Username for grafana ","Example":"","Deprecated":"false"},{"Env":"HIDE_API_TOKENS","EnvType":"bool","EnvValue":"false","EnvDescription":"Boolean flag for should the api tokens generated be hidden from the UI","Example":"","Deprecated":"false"},{"Env":"HIDE_IMAGE_TAGGING_HARD_DELETE","EnvType":"bool","EnvValue":"false","EnvDescription":"Flag to hide the hard delete option in the image tagging service","Example":"","Deprecated":"false"},{"Env":"IGNORE_AUTOCOMPLETE_AUTH_CHECK","EnvType":"bool","EnvValue":"false","EnvDescription":"flag for ignoring auth check in autocomplete apis.","Example":"","Deprecated":"false"},{"Env":"INSTALLED_MODULES","EnvType":"","EnvValue":"","EnvDescription":"List of installed modules given in helm values/yaml are written in cm and used by devtron to know which modules are given","Example":"security.trivy,security.clair","Deprecated":"false"},{"Env":"INSTALLER_CRD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"namespace where Custom Resource Definitions get installed","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_GROUP_NAME","EnvType":"string","EnvValue":"installer.devtron.ai","EnvDescription":"Devtron installer CRD group name, partially deprecated.","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_RESOURCE","EnvType":"string","EnvValue":"installers","EnvDescription":"Devtron installer CRD resource name, partially 
deprecated","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_VERSION","EnvType":"string","EnvValue":"v1alpha1","EnvDescription":"version of the CRDs. default is v1alpha1","Example":"","Deprecated":"false"},{"Env":"IS_AIR_GAP_ENVIRONMENT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"JwtExpirationTime","EnvType":"int","EnvValue":"120","EnvDescription":"JWT expiration time.","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Lens microservice timeout.","Example":"","Deprecated":"false"},{"Env":"LENS_URL","EnvType":"string","EnvValue":"http://lens-milandevtron-service:80","EnvDescription":"Lens micro-service URL","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LINKED_CI_ARTIFACT_COPY_LIMIT","EnvType":"int","EnvValue":"10","EnvDescription":"Maximum number of artifacts to copy from parent CI pipeline to linked CI pipeline","Example":"","Deprecated":"false"},{"Env":"LOGGER_DEV_MODE","EnvType":"bool","EnvValue":"false","EnvDescription":"Enables a different logger 
theme.","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_SESSION_PER_USER","EnvType":"int","EnvValue":"5","EnvDescription":"max no of cluster terminal pods can be created by an user","Example":"","Deprecated":"false"},{"Env":"MODULE_METADATA_API_URL","EnvType":"string","EnvValue":"https://api.devtron.ai/module?name=%s","EnvDescription":"Modules list and meta info will be fetched from this server, that is central api server of devtron.","Example":"","Deprecated":"false"},{"Env":"MODULE_STATUS_HANDLING_CRON_DURATION_MIN","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NOTIFICATION_MEDIUM","EnvType":"NotificationMedium","EnvValue":"rest","EnvDescription":"notification medium","Example":"","Deprecated":"false"},{"Env":"OTEL_COLLECTOR_URL","EnvType":"string","EnvValue":"","EnvDescription":"Opentelemetry URL ","Example":"","Deprecated":"false"},{"Env":"PARALLELISM_LIMIT_FOR_TAG_PROCESSING","EnvType":"int","EnvValue":"","EnvDescription":"App manual sync job parallel tag processing 
count.","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PLUGIN_NAME","EnvType":"string","EnvValue":"Pull images from container repository","EnvDescription":"Handles image retrieval from a container repository and triggers subsequent CI processes upon detecting new images.Current default plugin name: Pull Images from Container Repository.","Example":"","Deprecated":"false"},{"Env":"PROPAGATE_EXTRA_LABELS","EnvType":"bool","EnvValue":"false","EnvDescription":"Add additional propagate labels like api.devtron.ai/appName, api.devtron.ai/envName, api.devtron.ai/project along with the user defined ones.","Example":"","Deprecated":"false"},{"Env":"PROXY_SERVICE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"Proxy configuration for micro-service to be accessible on orhcestrator ingress","Example":"","Deprecated":"false"},{"Env":"REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESTRICT_TERMINAL_ACCESS_FOR_NON_SUPER_USER","EnvType":"bool","EnvValue":"false","EnvDescription":"To restrict the cluster terminal from user having non-super admin 
acceess","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"LocalDevMode","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable scoped variable option","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_FORMAT","EnvType":"string","EnvValue":"@{{%s}}","EnvDescription":"Its a scope format for varialbe name.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_HANDLE_PRIMITIVES","EnvType":"bool","EnvValue":"false","EnvDescription":"This describe should we handle primitives or not in scoped variable template parsing.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_NAME_REGEX","EnvType":"string","EnvValue":"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$","EnvDescription":"Regex for scoped variable name that must passed this regex.","Example":"","Deprecated":"false"},{"Env":"SOCKET_DISCONNECT_DELAY_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"The server closes a session when a client receiving connection have not been seen for a while.This delay is configured by this setting. By default the session is closed when a receiving connection wasn't seen for 5 seconds.","Example":"","Deprecated":"false"},{"Env":"SOCKET_HEARTBEAT_SECONDS","EnvType":"int","EnvValue":"25","EnvDescription":"In order to keep proxies and load balancers from closing long running http requests we need to pretend that the connection is active and send a heartbeat packet once in a while. This setting controls how often this is done. 
By default a heartbeat packet is sent every 25 seconds.","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SYSTEM_VAR_PREFIX","EnvType":"string","EnvValue":"DEVTRON_","EnvDescription":"Scoped variable prefix, variable name must have this prefix.","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"default","EnvDescription":"Cluster terminal default namespace","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_INACTIVE_DURATION_IN_MINS","EnvType":"int","EnvValue":"10","EnvDescription":"Timeout for cluster terminal to be inactive","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_STATUS_SYNC_In_SECS","EnvType":"int","EnvValue":"600","EnvDescription":"this is the time interval at which the status of the cluster terminal pod","Example":"","Deprecated":"false"},{"Env":"TEST_APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_LOG_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PASSWORD","EnvType":"string","EnvValue":"postgrespw","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PORT","EnvType":"string","EnvValue":"55000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_FOR_FAILED_CI_BUILD","EnvType":"string","EnvValue":"15","EnvDescription":"Timeout for Failed CI build 
","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_IN_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"timeout to compute the urls from services and ingress objects of an application","Example":"","Deprecated":"false"},{"Env":"USER_SESSION_DURATION_SECONDS","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_API_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 API for listing artifacts in Listing the images in pipeline","Example":"","Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_GIT_CLI","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable git cli","Example":"","Deprecated":"false"},{"Env":"USE_RBAC_CREATION_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 for RBAC creation","Example":"","Deprecated":"false"},{"Env":"VARIABLE_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"This is used to control caching of all the scope variables defined in the system.","Example":"","Deprecated":"false"},{"Env":"VARIABLE_EXPRESSION_REGEX","EnvType":"string","EnvValue":"@{{([^}]+)}}","EnvDescription":"Scoped variable expression regex","Example":"","Deprecated":"false"},{"Env":"WEBHOOK_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"If you want to continue using jenkins for CI then please provide this for authentication of requests","Example":"","Deprecated":"false"}]},{"Category":"GITOPS","Fields":[{"Env":"ACD_CM","EnvType":"string","EnvValue":"argocd-cm","EnvDescription":"Name of the argocd CM","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"To pass the argocd namespace","Example":"","Deprecated":"false"},{"Env":"ACD_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"Password for the Argocd 
(deprecated)","Example":"","Deprecated":"false"},{"Env":"ACD_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"User name for argocd","Example":"","Deprecated":"false"},{"Env":"GITOPS_SECRET_NAME","EnvType":"string","EnvValue":"devtron-gitops-secret","EnvDescription":"devtron-gitops-secret","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS","EnvType":"string","EnvValue":"Deployment,Rollout,StatefulSet,ReplicaSet","EnvDescription":"this holds the list of k8s resource names which support replicas key. this list used in hibernate/un hibernate process","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS_BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"this the batch size to control no of above resources can be parsed in one go to determine hibernate status","Example":"","Deprecated":"false"}]},{"Category":"INFRA_SETUP","Fields":[{"Env":"DASHBOARD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Dashboard micro-service URL","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Dashboard micro-service namespace","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_PORT","EnvType":"string","EnvValue":"3000","EnvDescription":"Port for dashboard micro-service","Example":"","Deprecated":"false"},{"Env":"DEX_HOST","EnvType":"string","EnvValue":"http://localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_PORT","EnvType":"string","EnvValue":"5556","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_PROTOCOL","EnvType":"string","EnvValue":"REST","EnvDescription":"Protocol to connect with git-sensor micro-service","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"pick_first\"}","EnvDescription":"git-sensor grpc service 
config","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Timeout for getting response from the git-sensor","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_URL","EnvType":"string","EnvValue":"127.0.0.1:7070","EnvDescription":"git-sensor micro-service url ","Example":"","Deprecated":"false"},{"Env":"HELM_CLIENT_URL","EnvType":"string","EnvValue":"127.0.0.1:50051","EnvDescription":"Kubelink micro-service url ","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_RECEIVE_MSG_SIZE","EnvType":"int","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_SEND_MSG_SIZE","EnvType":"int","EnvValue":"4","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"round_robin\"}","EnvDescription":"kubelink grpc service config","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"Application name","Example":"","Deprecated":"false"},{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"Database for casbin","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"address of postgres service","Example":"postgresql-postgresql.devtroncd","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"postgres database to be made connection with","Example":"orchestrator, casbin, git_sensor, lens","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"{password}","EnvDescription":"password for postgres, associated with PG_USER","Example":"confidential ;)","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"port of postgresql 
service","Example":"5432","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for read operation in postgres","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"user for postgres","Example":"postgres","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for write operation in postgres","Example":"","Deprecated":"false"}]},{"Category":"RBAC","Fields":[{"Env":"ENFORCER_CACHE","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable enforcer cache.","Example":"","Deprecated":"false"},{"Env":"ENFORCER_CACHE_EXPIRATION_IN_SEC","EnvType":"int","EnvValue":"86400","EnvDescription":"Expiration time (in seconds) for enforcer cache. ","Example":"","Deprecated":"false"},{"Env":"ENFORCER_MAX_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"Maximum batch size for the enforcer.","Example":"","Deprecated":"false"},{"Env":"USE_CASBIN_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable casbin V2 API","Example":"","Deprecated":"false"}]}] \ No newline at end of file +[{"Category":"CD","Fields":[{"Env":"ARGO_APP_MANUAL_SYNC_TIME","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"CD_FLUX_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time to check the pipeline status for flux cd pipeline","Example":"","Deprecated":"false"},{"Env":"CD_HELM_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time to check the pipeline status ","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_CRON_TIME","EnvType":"string","EnvValue":"*/2 * * * *","EnvDescription":"Cron time for CD pipeline 
status","Example":"","Deprecated":"false"},{"Env":"CD_PIPELINE_STATUS_TIMEOUT_DURATION","EnvType":"string","EnvValue":"20","EnvDescription":"Timeout for CD pipeline to get healthy","Example":"","Deprecated":"false"},{"Env":"DEPLOY_STATUS_CRON_GET_PIPELINE_DEPLOYED_WITHIN_HOURS","EnvType":"int","EnvValue":"12","EnvDescription":"This flag is used to fetch the deployment status of the application. It retrieves the status of deployments that occurred between 12 hours and 10 minutes prior to the current time. It fetches non-terminal statuses.","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"1","EnvDescription":"Context timeout for gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT","EnvType":"int","EnvValue":"6","EnvDescription":"Context timeout for no gitops concurrent async deployments","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CD_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable migration of external argocd application to devtron pipeline","Example":"","Deprecated":"false"},{"Env":"FEATURE_MIGRATE_FLUX_APPLICATION_ENABLE","EnvType":"bool","EnvValue":"false","EnvDescription":"enable flux application services","Example":"","Deprecated":"false"},{"Env":"FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking flux app status periodically and update in db, value is in seconds., default is 120, if wfr is updated within configured time i.e. 
FLUX_CD_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME","EnvType":"string","EnvValue":"120","EnvDescription":"eligible time for checking helm app status periodically and update in db, value is in seconds., default is 120, if wfr is updated within configured time i.e. HELM_PIPELINE_STATUS_CHECK_ELIGIBLE_TIME then do not include for this cron cycle.","Example":"","Deprecated":"false"},{"Env":"IS_INTERNAL_USE","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled then cd pipeline and helm apps will not need the deployment app type mandatorily. Couple this flag with HIDE_GITOPS_OR_HELM_OPTION (in Dashborad) and if gitops is configured and allowed for the env, pipeline/ helm app will gitops else no-gitops.","Example":"","Deprecated":"false"},{"Env":"MIGRATE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"migrate deployment config data from charts table to deployment_config table","Example":"","Deprecated":"false"},{"Env":"PIPELINE_DEGRADED_TIME","EnvType":"string","EnvValue":"10","EnvDescription":"Time to mark a pipeline degraded if not healthy in defined time","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_DEVTRON_APP","EnvType":"int","EnvValue":"1","EnvDescription":"Count for devtron application rivision history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_EXTERNAL_HELM_APP","EnvType":"int","EnvValue":"0","EnvDescription":"Count for external helm application rivision history","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_HELM_APP","EnvType":"int","EnvValue":"1","EnvDescription":"To set the history limit for the helm app being deployed through 
devtron","Example":"","Deprecated":"false"},{"Env":"REVISION_HISTORY_LIMIT_LINKED_HELM_APP","EnvType":"int","EnvValue":"15","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SHOULD_CHECK_NAMESPACE_ON_CLONE","EnvType":"bool","EnvValue":"false","EnvDescription":"should we check if namespace exists or not while cloning app","Example":"","Deprecated":"false"},{"Env":"USE_DEPLOYMENT_CONFIG_DATA","EnvType":"bool","EnvValue":"false","EnvDescription":"use deployment config data from deployment_config table","Example":"","Deprecated":"true"},{"Env":"VALIDATE_EXT_APP_CHART_TYPE","EnvType":"bool","EnvValue":"false","EnvDescription":"validate external flux app chart","Example":"","Deprecated":"false"}]},{"Category":"CI_BUILDX","Fields":[{"Env":"ASYNC_BUILDX_CACHE_EXPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async container image cache export","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_MODE_MIN","EnvType":"bool","EnvValue":"false","EnvDescription":"To set build cache mode to minimum in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_INTERRUPTION_MAX_RETRY","EnvType":"int","EnvValue":"3","EnvDescription":"Maximum number of retries for buildx builder interruption","Example":"","Deprecated":"false"}]},{"Category":"CI_RUNNER","Fields":[{"Env":"AZURE_ACCOUNT_KEY","EnvType":"string","EnvValue":"","EnvDescription":"If blob storage is being used of azure then pass the secret key to access the bucket","Example":"","Deprecated":"false"},{"Env":"AZURE_ACCOUNT_NAME","EnvType":"string","EnvValue":"","EnvDescription":"Account name for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_CACHE","EnvType":"string","EnvValue":"","EnvDescription":"Cache bucket name for azure blob 
storage","Example":"","Deprecated":"false"},{"Env":"AZURE_BLOB_CONTAINER_CI_LOG","EnvType":"string","EnvValue":"","EnvDescription":"Log bucket for azure blob storage","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_CONNECTION_INSECURE","EnvType":"bool","EnvValue":"true","EnvDescription":"Azure gateway connection allows insecure if true","Example":"","Deprecated":"false"},{"Env":"AZURE_GATEWAY_URL","EnvType":"string","EnvValue":"http://devtron-minio.devtroncd:9000","EnvDescription":"Sent to CI runner for blob","Example":"","Deprecated":"false"},{"Env":"BASE_LOG_LOCATION_PATH","EnvType":"string","EnvValue":"/home/devtron/","EnvDescription":"Used to store, download logs of ci workflow, artifact","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_GCP_CREDENTIALS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"GCP cred json for GCS blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_PROVIDER","EnvType":"","EnvValue":"S3","EnvDescription":"Blob storage provider name(AWS/GCP/Azure)","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ACCESS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"S3 access key for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_BUCKET_VERSIONED","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable buctet versioning for blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT","EnvType":"string","EnvValue":"","EnvDescription":"S3 endpoint URL for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_ENDPOINT_INSECURE","EnvType":"bool","EnvValue":"false","EnvDescription":"To use insecure s3 endpoint","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_S3_SECRET_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Secret key for s3 blob storage","Example":"","Deprecated":"false"},{"Env":"BUILDX_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/devtron/buildx","EnvDescription":"Path for the buildx 
cache","Example":"","Deprecated":"false"},{"Env":"BUILDX_K8S_DRIVER_OPTIONS","EnvType":"string","EnvValue":"","EnvDescription":"To enable the k8s driver and pass args for k8s driver in buildx","Example":"","Deprecated":"false"},{"Env":"BUILDX_PROVENANCE_MODE","EnvType":"string","EnvValue":"","EnvDescription":"provinance is set to true by default by docker. this will add some build related data in generated build manifest.it also adds some unknown:unknown key:value pair which may not be compatible by some container registries. with buildx k8s driver , provinenance=true is causing issue when push manifest to quay registry, so setting it to false","Example":"","Deprecated":"false"},{"Env":"BUILD_LOG_TTL_VALUE_IN_SECS","EnvType":"int","EnvValue":"3600","EnvDescription":"This is the time that the pods of ci/pre-cd/post-cd live after completion state.","Example":"","Deprecated":"false"},{"Env":"CACHE_LIMIT","EnvType":"int64","EnvValue":"5000000000","EnvDescription":"Cache limit.","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for Pre/Post cd ","Example":"","Deprecated":"false"},{"Env":"CD_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Limit Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"Toleration key for Pre/Post 
CD","Example":"","Deprecated":"false"},{"Env":"CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"Toleration value for Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"CPU Resource Rquest Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"Memory Resource Rquest Pre/Post CD","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for Pre/Post CD(AWF,System)","Example":"","Deprecated":"false"},{"Env":"CD_WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"cd-runner","EnvDescription":"Service account to be used in Pre/Post CD pod","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_BASE_CIDR","EnvType":"string","EnvValue":"","EnvDescription":"To pass the IP cidr for CI","Example":"","Deprecated":"false"},{"Env":"CI_DEFAULT_ADDRESS_POOL_SIZE","EnvType":"int","EnvValue":"","EnvDescription":"The subnet size to allocate from the base pool for CI","Example":"","Deprecated":"false"},{"Env":"CI_IGNORE_DOCKER_CACHE","EnvType":"bool","EnvValue":"","EnvDescription":"Ignoring docker cache ","Example":"","Deprecated":"false"},{"Env":"CI_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for build logs","Example":"","Deprecated":"false"},{"Env":"CI_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"Node label selector for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"","EnvDescription":"Toleration key for CI","Example":"","Deprecated":"false"},{"Env":"CI_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"","EnvDescription":"Toleration value for CI","Example":"","Deprecated":"false"},{"Env":"CI_RUNNER_DOCKER_MTU_VALUE","EnvType":"int","EnvValue":"-1","EnvDescription":"this is to control the bytes of inofrmation passed in a 
network packet in ci-runner. default is -1 (defaults to the underlying node mtu value)","Example":"","Deprecated":"false"},{"Env":"CI_SUCCESS_AUTO_TRIGGER_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"this is to control the no of linked pipelines should be hanled in one go when a ci-success event of an parent ci is received","Example":"","Deprecated":"false"},{"Env":"CI_VOLUME_MOUNTS_JSON","EnvType":"string","EnvValue":"","EnvDescription":"additional volume mount data for CI and JOB","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_EXECUTOR_TYPE","EnvType":"","EnvValue":"AWF","EnvDescription":"Executor type for CI(AWF,System)","Example":"","Deprecated":"false"},{"Env":"DEFAULT_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"arsenal-v1/ci-artifacts","EnvDescription":"Key location for artifacts being created","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_BUCKET","EnvType":"string","EnvValue":"devtron-pro-ci-logs","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_BUILD_LOGS_KEY_PREFIX","EnvType":"string","EnvValue":"arsenal-v1","EnvDescription":"Bucket prefix for build logs","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET","EnvType":"string","EnvValue":"ci-caching","EnvDescription":"Bucket name for build cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CACHE_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"Build Cache bucket region","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_ARTIFACT_KEY_LOCATION","EnvType":"string","EnvValue":"","EnvDescription":"Bucket prefix for build cache","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_LOGS_BUCKET_REGION","EnvType":"string","EnvValue":"us-east-2","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_NAMESPACE","EnvType":"string","EnvValue":"","EnvDescription":"Namespace for devtron 
stack","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CD_TIMEOUT","EnvType":"int64","EnvValue":"3600","EnvDescription":"Timeout for Pre/Post-Cd to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_CI_IMAGE","EnvType":"string","EnvValue":"686244538589.dkr.ecr.us-east-2.amazonaws.com/cirunner:47","EnvDescription":"To pass the ci-runner image","Example":"","Deprecated":"false"},{"Env":"DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtron-ci","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TARGET_PLATFORM","EnvType":"string","EnvValue":"","EnvDescription":"Default architecture for buildx","Example":"","Deprecated":"false"},{"Env":"DOCKER_BUILD_CACHE_PATH","EnvType":"string","EnvValue":"/var/lib/docker","EnvDescription":"Path to store cache of docker build (/var/lib/docker-\u003e for legacy docker build, /var/lib/devtron-\u003e for buildx)","Example":"","Deprecated":"false"},{"Env":"ENABLE_BUILD_CONTEXT","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable build context in Devtron.","Example":"","Deprecated":"false"},{"Env":"ENABLE_WORKFLOW_EXECUTION_STAGE","EnvType":"bool","EnvValue":"true","EnvDescription":"if enabled then we will display build stages separately for CI/Job/Pre-Post CD","Example":"true","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_CM_NAME","EnvType":"string","EnvValue":"blob-storage-cm","EnvDescription":"name of the config map(contains bucket name, etc.) in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_BLOB_STORAGE_SECRET_NAME","EnvType":"string","EnvValue":"blob-storage-secret","EnvDescription":"name of the secret(contains password, accessId,passKeys, etc.) 
in external cluster when there is some operation related to external cluster, for example:-downloading cd artifact pushed in external cluster's env and we need to download from there, downloads ci logs pushed in external cluster's blob","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_LABEL_SELECTOR","EnvType":"","EnvValue":"","EnvDescription":"This is an array of strings used when submitting a workflow for pre or post-CD execution. If the ","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_KEY","EnvType":"string","EnvValue":"dedicated","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CD_NODE_TAINTS_VALUE","EnvType":"string","EnvValue":"ci","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_API_SECRET","EnvType":"string","EnvValue":"devtroncd-secret","EnvDescription":"External CI API secret.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_PAYLOAD","EnvType":"string","EnvValue":"{\"ciProjectDetails\":[{\"gitRepository\":\"https://github.com/vikram1601/getting-started-nodejs.git\",\"checkoutPath\":\"./abc\",\"commitHash\":\"239077135f8cdeeccb7857e2851348f558cb53d3\",\"commitTime\":\"2022-10-30T20:00:00\",\"branch\":\"master\",\"message\":\"Update README.md\",\"author\":\"User Name \"}],\"dockerImage\":\"445808685819.dkr.ecr.us-east-2.amazonaws.com/orch:23907713-2\"}","EnvDescription":"External CI payload with project details.","Example":"","Deprecated":"false"},{"Env":"EXTERNAL_CI_WEB_HOOK_URL","EnvType":"string","EnvValue":"","EnvDescription":"default is {{HOST_URL}}/orchestrator/webhook/ext-ci. 
It is used for external ci.","Example":"","Deprecated":"false"},{"Env":"IGNORE_CM_CS_IN_CI_JOB","EnvType":"bool","EnvValue":"false","EnvDescription":"Ignore CM/CS in CI-pipeline as Job","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_COUNT","EnvType":"int","EnvValue":"0","EnvDescription":"push artifact(image) in ci retry count ","Example":"","Deprecated":"false"},{"Env":"IMAGE_RETRY_INTERVAL","EnvType":"int","EnvValue":"5","EnvDescription":"image retry interval takes value in seconds","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCANNER_ENDPOINT","EnvType":"string","EnvValue":"http://image-scanner-new-demo-devtroncd-service.devtroncd:80","EnvDescription":"Image-scanner micro-service URL","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_MAX_RETRIES","EnvType":"int","EnvValue":"3","EnvDescription":"Max retry count for image-scanning","Example":"","Deprecated":"false"},{"Env":"IMAGE_SCAN_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay for the image-scaning to start","Example":"","Deprecated":"false"},{"Env":"IN_APP_LOGGING_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"Used in case of argo workflow is enabled. 
If enabled logs push will be managed by us, else will be managed by argo workflow.","Example":"","Deprecated":"false"},{"Env":"MAX_CD_WORKFLOW_RUNNER_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum time pre/post-cd-workflow create pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MAX_CI_WORKFLOW_RETRIES","EnvType":"int","EnvValue":"0","EnvDescription":"Maximum time CI-workflow create pod if it fails to complete","Example":"","Deprecated":"false"},{"Env":"MODE","EnvType":"string","EnvValue":"DEV","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_SERVER_HOST","EnvType":"string","EnvValue":"localhost:4222","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ORCH_HOST","EnvType":"string","EnvValue":"http://devtroncd-orchestrator-service-prod.devtroncd/webhook/msg/nats","EnvDescription":"Orchestrator micro-service URL ","Example":"","Deprecated":"false"},{"Env":"ORCH_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"Orchestrator token","Example":"","Deprecated":"false"},{"Env":"PRE_CI_CACHE_PATH","EnvType":"string","EnvValue":"/devtroncd-cache","EnvDescription":"Cache path for Pre CI tasks","Example":"","Deprecated":"false"},{"Env":"SHOW_DOCKER_BUILD_ARGS","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable showing the args passed for CI in build logs","Example":"","Deprecated":"false"},{"Env":"SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL","EnvType":"bool","EnvValue":"false","EnvDescription":"To skip cache Push/Pull for ci job","Example":"","Deprecated":"false"},{"Env":"SKIP_CREATING_ECR_REPO","EnvType":"bool","EnvValue":"false","EnvDescription":"By disabling this ECR repo won't get created if it's not available on ECR from build configuration","Example":"","Deprecated":"false"},{"Env":"TERMINATION_GRACE_PERIOD_SECS","EnvType":"int","EnvValue":"180","EnvDescription":"this is the time given to workflow pods to shutdown. 
(grace full termination time)","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_QUERY_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 query for listing artifacts","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CD_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post cd","Example":"","Deprecated":"false"},{"Env":"USE_BLOB_STORAGE_CONFIG_IN_CI_WORKFLOW","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable blob storage in pre and post ci","Example":"","Deprecated":"false"},{"Env":"USE_BUILDX","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable buildx feature globally","Example":"","Deprecated":"false"},{"Env":"USE_DOCKER_API_TO_GET_DIGEST","EnvType":"bool","EnvValue":"false","EnvDescription":"when user do not pass the digest then this flag controls , finding the image digest using docker API or not. if set to true we get the digest from docker API call else use docker pull command. [logic in ci-runner]","Example":"","Deprecated":"false"},{"Env":"USE_EXTERNAL_NODE","EnvType":"bool","EnvValue":"false","EnvDescription":"It is used in case of Pre/ Post Cd with run in application mode. 
If enabled the node lebels are read from EXTERNAL_CD_NODE_LABEL_SELECTOR else from CD_NODE_LABEL_SELECTOR MODE: if the vale is DEV, it will read the local kube config file or else from the cluser location.","Example":"","Deprecated":"false"},{"Env":"USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD","EnvType":"bool","EnvValue":"false","EnvDescription":"To use the same tag in container image as that of git tag","Example":"","Deprecated":"false"},{"Env":"WF_CONTROLLER_INSTANCE_ID","EnvType":"string","EnvValue":"devtron-runner","EnvDescription":"Workflow controller instance ID.","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_CACHE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"flag is used to configure how Docker caches are handled during a CI/CD ","Example":"","Deprecated":"false"},{"Env":"WORKFLOW_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"ci-runner","EnvDescription":"","Example":"","Deprecated":"false"}]},{"Category":"DEVTRON","Fields":[{"Env":"-","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ADDITIONAL_NODE_GROUP_LABELS","EnvType":"","EnvValue":"","EnvDescription":"Add comma separated list of additional node group labels to default labels","Example":"karpenter.sh/nodepool,cloud.google.com/gke-nodepool","Deprecated":"false"},{"Env":"APP_SYNC_IMAGE","EnvType":"string","EnvValue":"quay.io/devtron/chart-sync:1227622d-132-3775","EnvDescription":"For the app sync image, this image will be used in app-manual sync job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_JOB_RESOURCES_OBJ","EnvType":"string","EnvValue":"","EnvDescription":"To pass the resource of app sync","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SERVICE_ACCOUNT","EnvType":"string","EnvValue":"chart-sync","EnvDescription":"Service account to be used in app sync 
Job","Example":"","Deprecated":"false"},{"Env":"APP_SYNC_SHUTDOWN_WAIT_DURATION","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"ARGO_AUTO_SYNC_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"If enabled all argocd application will have auto sync enabled","Example":"true","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_COUNT_ON_CONFLICT","EnvType":"int","EnvValue":"3","EnvDescription":"retry argocd app manual sync if the timeline is stuck in ARGOCD_SYNC_INITIATED state for more than this defined time (in mins)","Example":"","Deprecated":"false"},{"Env":"ARGO_GIT_COMMIT_RETRY_DELAY_ON_CONFLICT","EnvType":"int","EnvValue":"1","EnvDescription":"Delay on retrying the maifest commit the on gitops","Example":"","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_COUNT","EnvType":"int","EnvValue":"4","EnvDescription":"Retry count for registering a GitOps repository to ArgoCD","Example":"3","Deprecated":"false"},{"Env":"ARGO_REPO_REGISTER_RETRY_DELAY","EnvType":"int","EnvValue":"5","EnvDescription":"Delay (in Seconds) between the retries for registering a GitOps repository to ArgoCD","Example":"5","Deprecated":"false"},{"Env":"BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"there is feature to get URL's of services/ingresses. so to extract those, we need to parse all the servcie and ingress objects of the application. 
this BATCH_SIZE flag controls the no of these objects get parsed in one go.","Example":"","Deprecated":"false"},{"Env":"BLOB_STORAGE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host for the devtron stack","Example":"","Deprecated":"false"},{"Env":"CD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"CD_PORT","EnvType":"string","EnvValue":"8000","EnvDescription":"Port for pre/post-cd","Example":"","Deprecated":"false"},{"Env":"CExpirationTime","EnvType":"int","EnvValue":"600","EnvDescription":"Caching expiration time.","Example":"","Deprecated":"false"},{"Env":"CI_TRIGGER_CRON_TIME","EnvType":"int","EnvValue":"2","EnvDescription":"For image poll plugin","Example":"","Deprecated":"false"},{"Env":"CI_WORKFLOW_STATUS_UPDATE_CRON","EnvType":"string","EnvValue":"*/5 * * * *","EnvDescription":"Cron schedule for CI pipeline status","Example":"","Deprecated":"false"},{"Env":"CLI_CMD_TIMEOUT_GLOBAL_SECONDS","EnvType":"int","EnvValue":"0","EnvDescription":"Used in git cli opeartion timeout","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_BACKGROUND_REFRESH_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"Enable background refresh of cluster overview cache","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"Enable caching for cluster overview data","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_MAX_PARALLEL_CLUSTERS","EnvType":"int","EnvValue":"15","EnvDescription":"Maximum number of clusters to fetch in parallel during refresh","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_MAX_STALE_DATA_SECONDS","EnvType":"int","EnvValue":"30","EnvDescription":"Maximum age of cached data in seconds before 
warning","Example":"","Deprecated":"false"},{"Env":"CLUSTER_OVERVIEW_REFRESH_INTERVAL_SECONDS","EnvType":"int","EnvValue":"15","EnvDescription":"Background cache refresh interval in seconds","Example":"","Deprecated":"false"},{"Env":"CLUSTER_STATUS_CRON_TIME","EnvType":"int","EnvValue":"15","EnvDescription":"Cron schedule for cluster status on resource browser","Example":"","Deprecated":"false"},{"Env":"CONSUMER_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_LOG_TIME_LIMIT","EnvType":"int64","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEFAULT_TIMEOUT","EnvType":"float64","EnvValue":"3600","EnvDescription":"Timeout for CI to be completed","Example":"","Deprecated":"false"},{"Env":"DEVTRON_BOM_URL","EnvType":"string","EnvValue":"https://raw.githubusercontent.com/devtron-labs/devtron/%s/charts/devtron/devtron-bom.yaml","EnvDescription":"Path to devtron-bom.yaml of devtron charts, used for module installation and devtron upgrade","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_DEX_SECRET_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of dex secret","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_CHART_NAME","EnvType":"string","EnvValue":"devtron-operator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Name of the Devtron Helm release. 
","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_RELEASE_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace of the Devtron Helm release","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_NAME","EnvType":"string","EnvValue":"devtron","EnvDescription":"Is used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_HELM_REPO_URL","EnvType":"string","EnvValue":"https://helm.devtron.ai","EnvDescription":"Is used to install modules (stack manager)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLATION_TYPE","EnvType":"string","EnvValue":"","EnvDescription":"Devtron Installation type(EA/Full)","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_MODULES_PATH","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"Path to devtron installer modules, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_INSTALLER_RELEASE_PATH","EnvType":"string","EnvValue":"installer.release","EnvDescription":"Path to devtron installer release, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_MODULES_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.modules","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_OPERATOR_BASE_PATH","EnvType":"string","EnvValue":"","EnvDescription":"Base path for devtron operator, used to find the helm charts and values files","Example":"","Deprecated":"false"},{"Env":"DEVTRON_SECRET_NAME","EnvType":"string","EnvValue":"devtron-secret","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEVTRON_VERSION_IDENTIFIER_IN_HELM_VALUES","EnvType":"string","EnvValue":"installer.release","EnvDescription":"devtron operator version identifier in helm values yaml","Example":"","Deprecated":"false"},{"Env":"DEX_CID","EnvType":"string","EnvValue":"example-app","EnvDescription":"dex client id 
","Example":"","Deprecated":"false"},{"Env":"DEX_CLIENT_ID","EnvType":"string","EnvValue":"argo-cd","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_CSTOREKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX CSTOREKEY.","Example":"","Deprecated":"false"},{"Env":"DEX_JWTKEY","EnvType":"string","EnvValue":"","EnvDescription":"DEX JWT key. ","Example":"","Deprecated":"false"},{"Env":"DEX_RURL","EnvType":"string","EnvValue":"http://127.0.0.1:8080/callback","EnvDescription":"Dex redirect URL(http://argocd-dex-server.devtroncd:8080/callback)","Example":"","Deprecated":"false"},{"Env":"DEX_SCOPES","EnvType":"","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_SECRET","EnvType":"string","EnvValue":"","EnvDescription":"Dex secret","Example":"","Deprecated":"false"},{"Env":"DEX_URL","EnvType":"string","EnvValue":"","EnvDescription":"Dex service endpoint with dex path(http://argocd-dex-server.devtroncd:5556/dex)","Example":"","Deprecated":"false"},{"Env":"ECR_REPO_NAME_PREFIX","EnvType":"string","EnvValue":"test/","EnvDescription":"Prefix for ECR repo to be created in does not exist","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_ASYNC_INSTALL_DEVTRON_CHART","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable async installation of no-gitops application","Example":"","Deprecated":"false"},{"Env":"ENABLE_LINKED_CI_ARTIFACT_COPY","EnvType":"bool","EnvValue":"false","EnvDescription":"Enable copying artifacts from parent CI pipeline to linked CI pipeline during creation","Example":"","Deprecated":"false"},{"Env":"ENABLE_PASSWORD_ENCRYPTION","EnvType":"bool","EnvValue":"true","EnvDescription":"enable password 
encryption","Example":"","Deprecated":"false"},{"Env":"EPHEMERAL_SERVER_VERSION_REGEX","EnvType":"string","EnvValue":"v[1-9]\\.\\b(2[3-9]\\|[3-9][0-9])\\b.*","EnvDescription":"ephemeral containers support version regex that is compared with k8sServerVersion","Example":"","Deprecated":"false"},{"Env":"EVENT_URL","EnvType":"string","EnvValue":"http://localhost:3000/notify","EnvDescription":"Notifier service url","Example":"","Deprecated":"false"},{"Env":"EXECUTE_WIRE_NIL_CHECKER","EnvType":"bool","EnvValue":"false","EnvDescription":"checks for any nil pointer in wire.go","Example":"","Deprecated":"false"},{"Env":"EXPOSE_CI_METRICS","EnvType":"bool","EnvValue":"false","EnvDescription":"To expose CI metrics","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"restart workload retrieval batch size ","Example":"","Deprecated":"false"},{"Env":"FEATURE_RESTART_WORKLOAD_WORKER_POOL_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"restart workload retrieval pool size","Example":"","Deprecated":"false"},{"Env":"FORCE_SECURITY_SCANNING","EnvType":"bool","EnvValue":"false","EnvDescription":"By enabling this no one can disable image scaning on ci-pipeline from UI","Example":"","Deprecated":"false"},{"Env":"GITHUB_ORG_NAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITHUB_USERNAME","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GITOPS_REPO_PREFIX","EnvType":"string","EnvValue":"","EnvDescription":"Prefix for Gitops repo being creation for argocd 
application","Example":"","Deprecated":"false"},{"Env":"GO_RUNTIME_ENV","EnvType":"string","EnvValue":"production","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GRAFANA_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Host URL for the grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Namespace for grafana","Example":"","Deprecated":"false"},{"Env":"GRAFANA_ORG_ID","EnvType":"int","EnvValue":"2","EnvDescription":"Org ID for grafana for application metrics","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PASSWORD","EnvType":"string","EnvValue":"prom-operator","EnvDescription":"Password for grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_PORT","EnvType":"string","EnvValue":"8090","EnvDescription":"Port for grafana micro-service","Example":"","Deprecated":"false"},{"Env":"GRAFANA_URL","EnvType":"string","EnvValue":"","EnvDescription":"Host URL for the grafana dashboard","Example":"","Deprecated":"false"},{"Env":"GRAFANA_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"Username for grafana ","Example":"","Deprecated":"false"},{"Env":"HIDE_API_TOKENS","EnvType":"bool","EnvValue":"false","EnvDescription":"Boolean flag for should the api tokens generated be hidden from the UI","Example":"","Deprecated":"false"},{"Env":"HIDE_IMAGE_TAGGING_HARD_DELETE","EnvType":"bool","EnvValue":"false","EnvDescription":"Flag to hide the hard delete option in the image tagging service","Example":"","Deprecated":"false"},{"Env":"IGNORE_AUTOCOMPLETE_AUTH_CHECK","EnvType":"bool","EnvValue":"false","EnvDescription":"flag for ignoring auth check in autocomplete apis.","Example":"","Deprecated":"false"},{"Env":"INSTALLED_MODULES","EnvType":"","EnvValue":"","EnvDescription":"List of installed modules given in helm values/yaml are written in cm and used by devtron to know which modules are 
given","Example":"security.trivy,security.clair","Deprecated":"false"},{"Env":"INSTALLER_CRD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"namespace where Custom Resource Definitions get installed","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_GROUP_NAME","EnvType":"string","EnvValue":"installer.devtron.ai","EnvDescription":"Devtron installer CRD group name, partially deprecated.","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_RESOURCE","EnvType":"string","EnvValue":"installers","EnvDescription":"Devtron installer CRD resource name, partially deprecated","Example":"","Deprecated":"false"},{"Env":"INSTALLER_CRD_OBJECT_VERSION","EnvType":"string","EnvValue":"v1alpha1","EnvDescription":"version of the CRDs. default is v1alpha1","Example":"","Deprecated":"false"},{"Env":"IS_AIR_GAP_ENVIRONMENT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"JwtExpirationTime","EnvType":"int","EnvValue":"120","EnvDescription":"JWT expiration time.","Example":"","Deprecated":"false"},{"Env":"K8s_CLIENT_MAX_IDLE_CONNS_PER_HOST","EnvType":"int","EnvValue":"25","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_IDLE_CONN_TIMEOUT","EnvType":"int","EnvValue":"300","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_KEEPALIVE","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TCP_TIMEOUT","EnvType":"int","EnvValue":"30","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"K8s_TLS_HANDSHAKE_TIMEOUT","EnvType":"int","EnvValue":"10","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LENS_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Lens microservice timeout.","Example":"","Deprecated":"false"},{"Env":"LENS_URL","EnvType":"string","EnvValue":"http://lens-milandevtron-service:80","EnvDescription":"Lens micro-service 
URL","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LIMIT_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"LINKED_CI_ARTIFACT_COPY_LIMIT","EnvType":"int","EnvValue":"10","EnvDescription":"Maximum number of artifacts to copy from parent CI pipeline to linked CI pipeline","Example":"","Deprecated":"false"},{"Env":"LOGGER_DEV_MODE","EnvType":"bool","EnvValue":"false","EnvDescription":"Enables a different logger theme.","Example":"","Deprecated":"false"},{"Env":"LOG_LEVEL","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"MAX_SESSION_PER_USER","EnvType":"int","EnvValue":"5","EnvDescription":"max no of cluster terminal pods can be created by an user","Example":"","Deprecated":"false"},{"Env":"MODULE_METADATA_API_URL","EnvType":"string","EnvValue":"https://api.devtron.ai/module?name=%s","EnvDescription":"Modules list and meta info will be fetched from this server, that is central api server of devtron.","Example":"","Deprecated":"false"},{"Env":"MODULE_STATUS_HANDLING_CRON_DURATION_MIN","EnvType":"int","EnvValue":"3","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_ACK_WAIT_IN_SECS","EnvType":"int","EnvValue":"120","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_BUFFER_SIZE","EnvType":"int","EnvValue":"-1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_MAX_AGE","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_PROCESSING_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NATS_MSG_REPLICAS","EnvType":"int","EnvValue":"0","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"NOTIFICATION_MEDIUM","EnvType":"NotificationMedium","EnvValue":"rest","EnvDescription":"notification 
medium","Example":"","Deprecated":"false"},{"Env":"OTEL_COLLECTOR_URL","EnvType":"string","EnvValue":"","EnvDescription":"Opentelemetry URL ","Example":"","Deprecated":"false"},{"Env":"PARALLELISM_LIMIT_FOR_TAG_PROCESSING","EnvType":"int","EnvValue":"","EnvDescription":"App manual sync job parallel tag processing count.","Example":"","Deprecated":"false"},{"Env":"PG_EXPORT_PROM_METRICS","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_FAILURE_QUERIES","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_ALL_QUERY","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_LOG_SLOW_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PG_QUERY_DUR_THRESHOLD","EnvType":"int64","EnvValue":"5000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"PLUGIN_NAME","EnvType":"string","EnvValue":"Pull images from container repository","EnvDescription":"Handles image retrieval from a container repository and triggers subsequent CI processes upon detecting new images.Current default plugin name: Pull Images from Container Repository.","Example":"","Deprecated":"false"},{"Env":"PROPAGATE_EXTRA_LABELS","EnvType":"bool","EnvValue":"false","EnvDescription":"Add additional propagate labels like api.devtron.ai/appName, api.devtron.ai/envName, api.devtron.ai/project along with the user defined ones.","Example":"","Deprecated":"false"},{"Env":"PROXY_SERVICE_CONFIG","EnvType":"string","EnvValue":"{}","EnvDescription":"Proxy configuration for micro-service to be accessible on orhcestrator 
ingress","Example":"","Deprecated":"false"},{"Env":"REQ_CI_CPU","EnvType":"string","EnvValue":"0.5","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"REQ_CI_MEM","EnvType":"string","EnvValue":"3G","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"RESTRICT_TERMINAL_ACCESS_FOR_NON_SUPER_USER","EnvType":"bool","EnvValue":"false","EnvDescription":"To restrict the cluster terminal from user having non-super admin acceess","Example":"","Deprecated":"false"},{"Env":"RUNTIME_CONFIG_LOCAL_DEV","EnvType":"LocalDevMode","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_ENABLED","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable scoped variable option","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_FORMAT","EnvType":"string","EnvValue":"@{{%s}}","EnvDescription":"Its a scope format for varialbe name.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_HANDLE_PRIMITIVES","EnvType":"bool","EnvValue":"false","EnvDescription":"This describe should we handle primitives or not in scoped variable template parsing.","Example":"","Deprecated":"false"},{"Env":"SCOPED_VARIABLE_NAME_REGEX","EnvType":"string","EnvValue":"^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$","EnvDescription":"Regex for scoped variable name that must passed this regex.","Example":"","Deprecated":"false"},{"Env":"SOCKET_DISCONNECT_DELAY_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"The server closes a session when a client receiving connection have not been seen for a while.This delay is configured by this setting. By default the session is closed when a receiving connection wasn't seen for 5 seconds.","Example":"","Deprecated":"false"},{"Env":"SOCKET_HEARTBEAT_SECONDS","EnvType":"int","EnvValue":"25","EnvDescription":"In order to keep proxies and load balancers from closing long running http requests we need to pretend that the connection is active and send a heartbeat packet once in a while. 
This setting controls how often this is done. By default a heartbeat packet is sent every 25 seconds.","Example":"","Deprecated":"false"},{"Env":"STREAM_CONFIG_JSON","EnvType":"string","EnvValue":"","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"SYSTEM_VAR_PREFIX","EnvType":"string","EnvValue":"DEVTRON_","EnvDescription":"Scoped variable prefix, variable name must have this prefix.","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_DEFAULT_NAMESPACE","EnvType":"string","EnvValue":"default","EnvDescription":"Cluster terminal default namespace","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_INACTIVE_DURATION_IN_MINS","EnvType":"int","EnvValue":"10","EnvDescription":"Timeout for cluster terminal to be inactive","Example":"","Deprecated":"false"},{"Env":"TERMINAL_POD_STATUS_SYNC_In_SECS","EnvType":"int","EnvValue":"600","EnvDescription":"this is the time interval at which the status of the cluster terminal pod","Example":"","Deprecated":"false"},{"Env":"TEST_APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_LOG_QUERY","EnvType":"bool","EnvValue":"true","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PASSWORD","EnvType":"string","EnvValue":"postgrespw","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_PORT","EnvType":"string","EnvValue":"55000","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TEST_PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_FOR_FAILED_CI_BUILD","EnvType":"string","EnvValue":"15","EnvDescription":"Timeout for Failed CI build 
","Example":"","Deprecated":"false"},{"Env":"TIMEOUT_IN_SECONDS","EnvType":"int","EnvValue":"5","EnvDescription":"timeout to compute the urls from services and ingress objects of an application","Example":"","Deprecated":"false"},{"Env":"USER_SESSION_DURATION_SECONDS","EnvType":"int","EnvValue":"86400","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_ARTIFACT_LISTING_API_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 API for listing artifacts in Listing the images in pipeline","Example":"","Deprecated":"false"},{"Env":"USE_CUSTOM_HTTP_TRANSPORT","EnvType":"bool","EnvValue":"false","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"USE_GIT_CLI","EnvType":"bool","EnvValue":"false","EnvDescription":"To enable git cli","Example":"","Deprecated":"false"},{"Env":"USE_RBAC_CREATION_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To use the V2 for RBAC creation","Example":"","Deprecated":"false"},{"Env":"VARIABLE_CACHE_ENABLED","EnvType":"bool","EnvValue":"true","EnvDescription":"This is used to control caching of all the scope variables defined in the system.","Example":"","Deprecated":"false"},{"Env":"VARIABLE_EXPRESSION_REGEX","EnvType":"string","EnvValue":"@{{([^}]+)}}","EnvDescription":"Scoped variable expression regex","Example":"","Deprecated":"false"},{"Env":"WEBHOOK_TOKEN","EnvType":"string","EnvValue":"","EnvDescription":"If you want to continue using jenkins for CI then please provide this for authentication of requests","Example":"","Deprecated":"false"}]},{"Category":"GITOPS","Fields":[{"Env":"ACD_CM","EnvType":"string","EnvValue":"argocd-cm","EnvDescription":"Name of the argocd CM","Example":"","Deprecated":"false"},{"Env":"ACD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"To pass the argocd namespace","Example":"","Deprecated":"false"},{"Env":"ACD_PASSWORD","EnvType":"string","EnvValue":"","EnvDescription":"Password for the Argocd 
(deprecated)","Example":"","Deprecated":"false"},{"Env":"ACD_USERNAME","EnvType":"string","EnvValue":"admin","EnvDescription":"User name for argocd","Example":"","Deprecated":"false"},{"Env":"GITOPS_SECRET_NAME","EnvType":"string","EnvValue":"devtron-gitops-secret","EnvDescription":"devtron-gitops-secret","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS","EnvType":"string","EnvValue":"Deployment,Rollout,StatefulSet,ReplicaSet","EnvDescription":"this holds the list of k8s resource names which support replicas key. this list used in hibernate/un hibernate process","Example":"","Deprecated":"false"},{"Env":"RESOURCE_LIST_FOR_REPLICAS_BATCH_SIZE","EnvType":"int","EnvValue":"5","EnvDescription":"this the batch size to control no of above resources can be parsed in one go to determine hibernate status","Example":"","Deprecated":"false"}]},{"Category":"INFRA_SETUP","Fields":[{"Env":"DASHBOARD_HOST","EnvType":"string","EnvValue":"localhost","EnvDescription":"Dashboard micro-service URL","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_NAMESPACE","EnvType":"string","EnvValue":"devtroncd","EnvDescription":"Dashboard micro-service namespace","Example":"","Deprecated":"false"},{"Env":"DASHBOARD_PORT","EnvType":"string","EnvValue":"3000","EnvDescription":"Port for dashboard micro-service","Example":"","Deprecated":"false"},{"Env":"DEX_HOST","EnvType":"string","EnvValue":"http://localhost","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"DEX_PORT","EnvType":"string","EnvValue":"5556","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_PROTOCOL","EnvType":"string","EnvValue":"REST","EnvDescription":"Protocol to connect with git-sensor micro-service","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"pick_first\"}","EnvDescription":"git-sensor grpc service 
config","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_TIMEOUT","EnvType":"int","EnvValue":"0","EnvDescription":"Timeout for getting response from the git-sensor","Example":"","Deprecated":"false"},{"Env":"GIT_SENSOR_URL","EnvType":"string","EnvValue":"127.0.0.1:7070","EnvDescription":"git-sensor micro-service url ","Example":"","Deprecated":"false"},{"Env":"HELM_CLIENT_URL","EnvType":"string","EnvValue":"127.0.0.1:50051","EnvDescription":"Kubelink micro-service url ","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_RECEIVE_MSG_SIZE","EnvType":"int","EnvValue":"20","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_MAX_SEND_MSG_SIZE","EnvType":"int","EnvValue":"4","EnvDescription":"","Example":"","Deprecated":"false"},{"Env":"KUBELINK_GRPC_SERVICE_CONFIG","EnvType":"string","EnvValue":"{\"loadBalancingPolicy\":\"round_robin\"}","EnvDescription":"kubelink grpc service config","Example":"","Deprecated":"false"}]},{"Category":"POSTGRES","Fields":[{"Env":"APP","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"Application name","Example":"","Deprecated":"false"},{"Env":"CASBIN_DATABASE","EnvType":"string","EnvValue":"casbin","EnvDescription":"Database for casbin","Example":"","Deprecated":"false"},{"Env":"PG_ADDR","EnvType":"string","EnvValue":"127.0.0.1","EnvDescription":"address of postgres service","Example":"postgresql-postgresql.devtroncd","Deprecated":"false"},{"Env":"PG_DATABASE","EnvType":"string","EnvValue":"orchestrator","EnvDescription":"postgres database to be made connection with","Example":"orchestrator, casbin, git_sensor, lens","Deprecated":"false"},{"Env":"PG_PASSWORD","EnvType":"string","EnvValue":"{password}","EnvDescription":"password for postgres, associated with PG_USER","Example":"confidential ;)","Deprecated":"false"},{"Env":"PG_PORT","EnvType":"string","EnvValue":"5432","EnvDescription":"port of postgresql 
service","Example":"5432","Deprecated":"false"},{"Env":"PG_READ_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for read operation in postgres","Example":"","Deprecated":"false"},{"Env":"PG_USER","EnvType":"string","EnvValue":"postgres","EnvDescription":"user for postgres","Example":"postgres","Deprecated":"false"},{"Env":"PG_WRITE_TIMEOUT","EnvType":"int64","EnvValue":"30","EnvDescription":"Time out for write operation in postgres","Example":"","Deprecated":"false"}]},{"Category":"RBAC","Fields":[{"Env":"ENFORCER_CACHE","EnvType":"bool","EnvValue":"false","EnvDescription":"To Enable enforcer cache.","Example":"","Deprecated":"false"},{"Env":"ENFORCER_CACHE_EXPIRATION_IN_SEC","EnvType":"int","EnvValue":"86400","EnvDescription":"Expiration time (in seconds) for enforcer cache. ","Example":"","Deprecated":"false"},{"Env":"ENFORCER_MAX_BATCH_SIZE","EnvType":"int","EnvValue":"1","EnvDescription":"Maximum batch size for the enforcer.","Example":"","Deprecated":"false"},{"Env":"USE_CASBIN_V2","EnvType":"bool","EnvValue":"true","EnvDescription":"To enable casbin V2 API","Example":"","Deprecated":"false"}]}] \ No newline at end of file diff --git a/env_gen.md b/env_gen.md index a328621194..539bb63e5f 100644 --- a/env_gen.md +++ b/env_gen.md @@ -157,6 +157,11 @@ | CI_TRIGGER_CRON_TIME | int |2 | For image poll plugin | | false | | CI_WORKFLOW_STATUS_UPDATE_CRON | string |*/5 * * * * | Cron schedule for CI pipeline status | | false | | CLI_CMD_TIMEOUT_GLOBAL_SECONDS | int |0 | Used in git cli opeartion timeout | | false | + | CLUSTER_OVERVIEW_BACKGROUND_REFRESH_ENABLED | bool |true | Enable background refresh of cluster overview cache | | false | + | CLUSTER_OVERVIEW_CACHE_ENABLED | bool |true | Enable caching for cluster overview data | | false | + | CLUSTER_OVERVIEW_MAX_PARALLEL_CLUSTERS | int |15 | Maximum number of clusters to fetch in parallel during refresh | | false | + | CLUSTER_OVERVIEW_MAX_STALE_DATA_SECONDS | int |30 | Maximum age of cached 
data in seconds before warning | | false | + | CLUSTER_OVERVIEW_REFRESH_INTERVAL_SECONDS | int |15 | Background cache refresh interval in seconds | | false | | CLUSTER_STATUS_CRON_TIME | int |15 | Cron schedule for cluster status on resource browser | | false | | CONSUMER_CONFIG_JSON | string | | | | false | | DEFAULT_LOG_TIME_LIMIT | int64 |1 | | | false | diff --git a/go.mod b/go.mod index 83df112d14..a509f3e0fa 100644 --- a/go.mod +++ b/go.mod @@ -122,7 +122,7 @@ require ( github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect + github.com/blang/semver/v4 v4.0.0 github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect github.com/bombsimon/logrusr/v2 v2.0.1 // indirect github.com/bradleyfalzon/ghinstallation/v2 v2.12.0 // indirect @@ -338,7 +338,7 @@ require ( replace ( github.com/argoproj/argo-workflows/v3 v3.5.13 => github.com/devtron-labs/argo-workflows/v3 v3.5.13 github.com/cyphar/filepath-securejoin v0.4.1 => github.com/cyphar/filepath-securejoin v0.3.6 // indirect - github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251208113217-e733437afcfe - github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251208113217-e733437afcfe + github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251217072923-a2f0562a9b35 + github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251217072923-a2f0562a9b35 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 ) diff --git a/go.sum b/go.sum index 515c7c9bcf..d000d25122 100644 --- a/go.sum +++ b/go.sum @@ -237,10 +237,10 @@ github.com/denisenkom/go-mssqldb 
v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzq github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devtron-labs/argo-workflows/v3 v3.5.13 h1:3pINq0gXOSeTw2z/vYe+j80lRpSN5Rp/8mfQORh8SmU= github.com/devtron-labs/argo-workflows/v3 v3.5.13/go.mod h1:/vqxcovDPT4zqr4DjR5v7CF8ggpY1l3TSa2CIG3jmjA= -github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251208113217-e733437afcfe h1:o1NsU3J1Qmbt+JLMZ/nPXNY8wzn+BnV2gf/RMaXw/lo= -github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251208113217-e733437afcfe/go.mod h1:9LCkYfiWaEKIBkmxw9jX1GujvEMyHwmDtVsatffAkeU= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251208113217-e733437afcfe h1:KUis7ii4fQ4v8Y1B2KmkhvhaCrOn3fuLJCNf1mNE+5w= -github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251208113217-e733437afcfe/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= +github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251217072923-a2f0562a9b35 h1:RUzuJvbT4d8bE94GTl9TbP5h3H5iA2RAPJJt9ip0n1c= +github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251217072923-a2f0562a9b35/go.mod h1:9LCkYfiWaEKIBkmxw9jX1GujvEMyHwmDtVsatffAkeU= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251217072923-a2f0562a9b35 h1:hEVPoB3ndscqPxZUVGDj9PU8AMjYrCTiie9YnsMNRnE= +github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251217072923-a2f0562a9b35/go.mod h1:CqHnvltrng2O40hNGVl56qcAhv7tiXK3SKx47LKyE/A= github.com/devtron-labs/go-bitbucket v0.9.60-beta h1:VEx1jvDgdtDPS6A1uUFoaEi0l1/oLhbr+90xOwr6sDU= github.com/devtron-labs/go-bitbucket v0.9.60-beta/go.mod h1:GnuiCesvh8xyHeMCb+twm8lBR/kQzJYSKL28ZfObp1Y= github.com/devtron-labs/protos v0.0.3-0.20250323220609-ecf8a0f7305e h1:U6UdYbW8a7xn5IzFPd8cywjVVPfutGJCudjePAfL/Hs= diff --git a/internal/sql/repository/app/AppRepository.go b/internal/sql/repository/app/AppRepository.go index 93710da779..d237ec8f7a 100644 --- 
a/internal/sql/repository/app/AppRepository.go +++ b/internal/sql/repository/app/AppRepository.go @@ -18,13 +18,14 @@ package app import ( "fmt" + "time" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/pkg/team/repository" "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" - "time" ) type App struct { @@ -96,6 +97,11 @@ type AppRepository interface { FindJobCount() (int, error) UpdateAppOfferingModeForAppIds(successAppIds []*int, appOfferingMode string, userId int32) error + + // Overview methods + FindAllChartStoreApps() ([]*App, error) + FindAllActiveDevtronAppsInTimeRange(from, to *time.Time) ([]*App, error) + FindAllActiveChartStoreAppsInTimeRange(from, to *time.Time) ([]*App, error) } const DevtronApp = "DevtronApp" @@ -542,3 +548,52 @@ func (repo AppRepositoryImpl) UpdateAppOfferingModeForAppIds(successAppIds []*in _, err := repo.dbConnection.Query(app, query, appOfferingMode, userId, time.Now(), pg.In(successAppIds)) return err } + +// Overview methods implementation +func (repo AppRepositoryImpl) FindAllChartStoreApps() ([]*App, error) { + var apps []*App + err := repo.dbConnection.Model(&apps).Where("active = ?", true).Where("app_type = ?", helper.ChartStoreApp).Select() + return apps, err +} + +func (repo AppRepositoryImpl) FindAllActiveDevtronAppsInTimeRange(from, to *time.Time) ([]*App, error) { + var apps []*App + query := repo.dbConnection.Model(&apps). + Where("active = ?", true). 
+ Where("app_type = ?", helper.CustomApp) // Only normal CI/CD apps (appType = 0) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + err := query.Select() + if err != nil { + repo.logger.Errorw("error getting apps in time range", "from", from, "to", to, "err", err) + return nil, err + } + return apps, nil +} + +func (repo AppRepositoryImpl) FindAllActiveChartStoreAppsInTimeRange(from, to *time.Time) ([]*App, error) { + var apps []*App + query := repo.dbConnection.Model(&apps). + Where("active = ?", true). + Where("app_type = ?", helper.ChartStoreApp) // Only chart store apps (appType = 1) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + err := query.Select() + if err != nil { + repo.logger.Errorw("error getting chart store apps in time range", "from", from, "to", to, "err", err) + return nil, err + } + return apps, nil +} diff --git a/internal/sql/repository/deploymentConfig/repository.go b/internal/sql/repository/deploymentConfig/repository.go index 578205287e..be987fa8ef 100644 --- a/internal/sql/repository/deploymentConfig/repository.go +++ b/internal/sql/repository/deploymentConfig/repository.go @@ -18,6 +18,7 @@ package deploymentConfig import ( "fmt" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" @@ -66,6 +67,8 @@ type Repository interface { GetAllConfigsForActiveApps() ([]*DeploymentConfig, error) GetAllEnvLevelConfigsWithReleaseMode(releaseMode string) ([]*DeploymentConfig, error) GetDeploymentAppTypeForChartStoreAppByAppId(appId int) (string, error) + // GitOps count methods + GetGitOpsEnabledPipelineCount() (int, error) } type RepositoryImpl struct { @@ -298,3 +301,37 @@ func (impl *RepositoryImpl) GetDeploymentAppTypeForChartStoreAppByAppId(appId in Select() return result.DeploymentAppType, 
err } + +// GetGitOpsEnabledPipelineCount returns count of GitOps enabled pipelines +// This handles lazy migration from pipeline table to deployment_config table +func (impl *RepositoryImpl) GetGitOpsEnabledPipelineCount() (int, error) { + var count int + // Complex query to handle lazy migration: + // 1. Count pipelines that have deployment_config entry with argo_cd + // 2. Count pipelines that don't have deployment_config entry but have argo_cd in pipeline table + query := ` + SELECT COUNT(DISTINCT p.id) + FROM pipeline p + JOIN environment e ON p.environment_id = e.id + JOIN app a ON p.app_id = a.id + LEFT JOIN deployment_config dc ON dc.app_id = p.app_id + AND dc.environment_id = p.environment_id + AND dc.active = true + WHERE p.deleted = false + AND e.active = true + AND a.active = true + AND ( + -- Case 1: deployment_config exists and is argo_cd + dc.deployment_app_type = 'argo_cd' + OR + -- Case 2: no deployment_config entry, fallback to pipeline table + (dc.id IS NULL AND p.deployment_app_type = 'argo_cd') + ) + ` + + _, err := impl.dbConnection.Query(&count, query) + if err != nil { + return 0, fmt.Errorf("error getting GitOps enabled pipeline count: %w", err) + } + return count, nil +} diff --git a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go index 6b6001a8dd..452e1f336f 100644 --- a/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CdWorfkflowRepository.go @@ -19,16 +19,19 @@ package pipelineConfig import ( "context" "errors" + "fmt" + "time" + apiBean "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/constants" + 
bean2 "github.com/devtron-labs/devtron/pkg/overview/bean" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "go.opentelemetry.io/otel" "go.uber.org/zap" - "time" ) type CdWorkflowRepository interface { @@ -80,6 +83,12 @@ type CdWorkflowRepository interface { MigrateIsArtifactUploaded(wfrId int, isArtifactUploaded bool) MigrateCdArtifactLocation(wfrId int, cdArtifactLocation string) FindDeployedCdWorkflowRunnersByPipelineId(pipelineId int) ([]*CdWorkflowRunner, error) + + // Overview methods + GetDeploymentCountInTimeRange(from, to *time.Time) (int, error) + GetDeploymentWorkflowsForStatusTrend(from, to *time.Time) ([]DeploymentStatusData, error) + GetBlockedDeploymentsForTrend(from, to *time.Time) ([]BlockedDeploymentData, error) + GetTriggeredCDPipelines(from, to *time.Time, sortOrder bean2.SortOrder, limit, offset int) ([]PipelineUsageData, int, error) } type CdWorkflowRepositoryImpl struct { @@ -182,6 +191,15 @@ type AppDeploymentStatus struct { WfrId int `json:"wfrId,omitempty"` } +type DeploymentStatusData struct { + StartedOn time.Time `db:"started_on"` + Status string `db:"status"` +} + +type BlockedDeploymentData struct { + StartedOn time.Time `db:"started_on"` +} + func NewCdWorkflowRepositoryImpl(dbConnection *pg.DB, logger *zap.SugaredLogger) *CdWorkflowRepositoryImpl { return &CdWorkflowRepositoryImpl{ dbConnection: dbConnection, @@ -811,3 +829,143 @@ SELECT "app_id","env_id","ci_artifact_id","parent_ci_artifact","scanned" FROM Ra } return runners, nil } + +// Overview methods implementation +func (impl *CdWorkflowRepositoryImpl) GetDeploymentCountInTimeRange(from, to *time.Time) (int, error) { + var count int + query := impl.dbConnection.Model((*CdWorkflowRunner)(nil)). + Where("created_on >= ? AND created_on <= ?", from, to). 
+ Where("workflow_type = ?", "DEPLOY") + + count, err := query.Count() + if err != nil { + impl.logger.Errorw("error getting deployment count in time range", "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +// GetDeploymentWorkflowsForStatusTrend returns all deployment workflows in the date range including deployments of deleted pipelines +func (impl *CdWorkflowRepositoryImpl) GetDeploymentWorkflowsForStatusTrend(from, to *time.Time) ([]DeploymentStatusData, error) { + var deployments []DeploymentStatusData + + query := ` + SELECT cwr.started_on, cwr.status + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cwr.cd_workflow_id = cw.id + INNER JOIN pipeline p ON cw.pipeline_id = p.id + INNER JOIN app a ON p.app_id = a.id + WHERE cwr.started_on >= ? AND cwr.started_on <= ? + AND cwr.workflow_type = 'DEPLOY' + ORDER BY cwr.started_on + ` + + _, err := impl.dbConnection.Query(&deployments, query, from, to) + if err != nil { + impl.logger.Errorw("error fetching deployment workflows for status trend", "from", from, "to", to, "err", err) + return nil, err + } + + return deployments, nil +} + +// GetBlockedDeploymentsForTrend returns all deployment attempts that were blocked by security scan policy +// This includes: +// 1. Deployments blocked BEFORE workflow creation (tracked in resource_filter_evaluation_audit with filter_type=6) +// 2. Deployments that started but failed due to vulnerability (tracked in cd_workflow_runner with message) +func (impl *CdWorkflowRepositoryImpl) GetBlockedDeploymentsForTrend(from, to *time.Time) ([]BlockedDeploymentData, error) { + var blockedDeployments []BlockedDeploymentData + + // Query to get blocked deployments from two sources: + // 1. resource_filter_evaluation_audit: Deployments blocked by security scan policy BEFORE workflow creation + // - filter_type = 6 (SECURITY_SCAN_POLICY) + // - subject_type = 0 (Artifact) + // - reference_type = 1 (PipelineStage) + // 2. 
cd_workflow_runner: Deployments that started but failed due to vulnerability + // - status = 'Failed' + // - message contains 'Found vulnerability on image' + query := ` + SELECT created_on as started_on + FROM resource_filter_evaluation_audit + WHERE created_on >= ? AND created_on <= ? + AND filter_type = 6 + AND subject_type = 0 + AND reference_type = 1 + UNION ALL + SELECT cwr.started_on + FROM cd_workflow_runner cwr + WHERE cwr.started_on >= ? AND cwr.started_on <= ? + AND cwr.workflow_type = 'DEPLOY' + AND cwr.status = 'Failed' + AND cwr.message LIKE '%Found vulnerability on image%' + ORDER BY started_on + ` + + _, err := impl.dbConnection.Query(&blockedDeployments, query, from, to, from, to) + if err != nil { + impl.logger.Errorw("error fetching blocked deployments for trend", "from", from, "to", to, "err", err) + return nil, err + } + + return blockedDeployments, nil +} + +func (impl *CdWorkflowRepositoryImpl) GetTriggeredCDPipelines(from, to *time.Time, sortOrder bean2.SortOrder, limit, offset int) ([]PipelineUsageData, int, error) { + var results []PipelineUsageData + var totalCount int + + // First get the total count + countQuery := ` + SELECT COUNT(DISTINCT p.id) + FROM pipeline p + INNER JOIN app a ON p.app_id = a.id + INNER JOIN environment e ON p.environment_id = e.id + LEFT JOIN cd_workflow cw ON p.id = cw.pipeline_id + LEFT JOIN cd_workflow_runner cwr ON cw.id = cwr.cd_workflow_id + AND cwr.created_on >= ? AND cwr.created_on <= ? 
+ AND cwr.workflow_type = 'DEPLOY' AND a.app_type = 0 + WHERE p.deleted = false AND a.active = true + ` + + _, err := impl.dbConnection.Query(&totalCount, countQuery, from, to) + if err != nil { + impl.logger.Errorw("error getting total count of CD pipelines", "from", from, "to", to, "err", err) + return nil, 0, err + } + + // Build the main query with sorting and pagination + orderClause := "ORDER BY trigger_count DESC, p.id DESC" + if sortOrder == bean2.ASC { + orderClause = "ORDER BY trigger_count ASC, p.id ASC" + } + + query := fmt.Sprintf(` + SELECT + a.id as app_id, + e.id as env_id, + p.id as pipeline_id, + p.pipeline_name as pipeline_name, + a.app_name, + e.environment_name as env_name, + COALESCE(COUNT(cwr.id), 0) as trigger_count + FROM pipeline p + INNER JOIN app a ON p.app_id = a.id + INNER JOIN environment e ON p.environment_id = e.id + LEFT JOIN cd_workflow cw ON p.id = cw.pipeline_id + LEFT JOIN cd_workflow_runner cwr ON cw.id = cwr.cd_workflow_id + AND cwr.created_on >= ? AND cwr.created_on <= ? + AND cwr.workflow_type = 'DEPLOY' AND a.app_type = 0 + WHERE p.deleted = false AND a.active = true + GROUP BY a.id, e.id, p.id, p.pipeline_name, a.app_name, e.environment_name + %s + LIMIT ? OFFSET ? 
+ `, orderClause) + + _, err = impl.dbConnection.Query(&results, query, from, to, limit, offset) + if err != nil { + impl.logger.Errorw("error getting triggered CD pipelines", "from", from, "to", to, "sortOrder", sortOrder, "limit", limit, "offset", offset, "err", err) + return nil, 0, err + } + + return results, totalCount, nil +} diff --git a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go index c6adb8b7a8..2920f991e8 100644 --- a/internal/sql/repository/pipelineConfig/CiPipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/CiPipelineRepository.go @@ -19,8 +19,14 @@ package pipelineConfig import ( "context" "fmt" + "strconv" + "time" + "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" + "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/ciPipeline" + workflowConstants "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/constants" buildCommonBean "github.com/devtron-labs/devtron/pkg/build/pipeline/bean/common" repository2 "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" "github.com/devtron-labs/devtron/pkg/sql" @@ -29,8 +35,6 @@ import ( "github.com/go-pg/pg/orm" "go.opentelemetry.io/otel" "go.uber.org/zap" - "strconv" - "time" ) type CiPipeline struct { @@ -88,6 +92,18 @@ type CiPipelineScript struct { sql.AuditLog } +// WorkflowWithAppEnvDetails represents a workflow with app and environment details for security enablement +type WorkflowWithAppEnvDetails struct { + WorkflowId int `pg:"workflow_id"` + WorkflowName string `pg:"workflow_name"` + AppId int `pg:"app_id"` + AppName string `pg:"app_name"` + Environments string `pg:"environments"` // Comma-separated environment names + CiPipelineIds string `pg:"ci_pipeline_ids"` // Comma-separated CI pipeline IDs 
(for RBAC) + ScanEnabled bool `pg:"scan_enabled"` + CiPipelineType string `pg:"ci_pipeline_type"` // CI pipeline type (CI_BUILD, LINKED, EXTERNAL, CI_JOB, LINKED_CD) +} + // CiPipelineRepository : // use config.CiPipelineConfigReadService instead of directly using CiPipelineRepository type CiPipelineRepository interface { @@ -146,6 +162,19 @@ type CiPipelineRepository interface { GetLinkedCiPipelines(ctx context.Context, ciPipelineId int) ([]*CiPipeline, error) GetDownStreamInfo(ctx context.Context, sourceCiPipelineId int, appNameMatch, envNameMatch string, req *pagination.RepositoryRequest) ([]ciPipeline.LinkedCIDetails, int, error) + GetActiveCiPipelineCount() (int, error) + GetActiveCiPipelineCountInTimeRange(from, to *time.Time) (int, error) + GetScanEnabledCiPipelineCount() (int, error) + GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd() (int, error) + // Security Enablement methods + FindAppWorkflowsWithScanDetails(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string, sortBy, sortOrder string, offset, size int) ([]*WorkflowWithAppEnvDetails, int, error) + FindAllAppWorkflowIdsByFilters(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string) ([]int, error) + FindAppWorkflowsByIds(workflowIds []int) ([]*WorkflowWithAppEnvDetails, error) + BulkUpdateScanEnabled(workflowIds []int, scanEnabled bool, userId int32) error + + // External CI count methods for performance optimization + GetActiveExternalCiPipelineCount() (int, error) + GetActiveExternalCiPipelineCountInTimeRange(from, to *time.Time) (int, error) } type CiPipelineRepositoryImpl struct { @@ -726,3 +755,458 @@ func (impl *CiPipelineRepositoryImpl) GetDownStreamInfo(ctx context.Context, sou } return linkedCIDetails, totalCount, err } + +// Count methods implementation for performance optimization +func (impl *CiPipelineRepositoryImpl) GetActiveCiPipelineCount() (int, error) { + count, err := impl.dbConnection.Model((*CiPipeline)(nil)). 
+ Where("active = ?", true). + Where("deleted = ?", false). + Where("ci_pipeline_type = ?", buildCommonBean.CI_BUILD.ToString()). + Count() + + if err != nil { + impl.logger.Errorw("error getting active CI pipeline count", "err", err) + return 0, err + } + return count, nil +} + +func (impl *CiPipelineRepositoryImpl) GetActiveCiPipelineCountInTimeRange(from, to *time.Time) (int, error) { + query := impl.dbConnection.Model((*CiPipeline)(nil)). + Where("active = ?", true). + Where("deleted = ?", false) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + count, err := query.Count() + if err != nil { + impl.logger.Errorw("error getting active CI pipeline count in time range", "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +func (impl *CiPipelineRepositoryImpl) GetScanEnabledCiPipelineCount() (int, error) { + count, err := impl.dbConnection.Model((*CiPipeline)(nil)). + Where("active = ?", true). + Where("deleted = ?", false). + Where("scan_enabled = ?", true). + Where("ci_pipeline_type = ?", buildCommonBean.CI_BUILD.ToString()). + Count() + + if err != nil { + impl.logger.Errorw("error getting scan enabled CI pipeline count", "err", err) + return 0, err + } + return count, nil +} + +// FindAppWorkflowsWithScanDetails fetches app workflows with scan enablement details for security enablement page +func (impl *CiPipelineRepositoryImpl) FindAppWorkflowsWithScanDetails(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string, sortBy, sortOrder string, offset, size int) ([]*WorkflowWithAppEnvDetails, int, error) { + var results []*WorkflowWithAppEnvDetails + + // Optimized query strategy: + // 1. Single CTE to filter and aggregate in one pass + // 2. Avoid redundant joins by reusing data from first join + // 3. Use window function for total count to avoid separate CTE + // 4. 
Filter early to reduce rows processed in aggregation + // 5. Dynamic sorting based on sortBy and sortOrder parameters + query := ` + WITH workflow_data AS ( + SELECT + aw.id as workflow_id, + aw.name as workflow_name, + aw.app_id, + a.app_name, + cp.id as ci_pipeline_id, + cp.scan_enabled, + cp.ci_pipeline_type, + e.environment_name + FROM app_workflow aw + INNER JOIN app a ON a.id = aw.app_id AND a.active = true AND a.app_type != ? + INNER JOIN app_workflow_mapping awm ON awm.app_workflow_id = aw.id AND awm.active = true AND awm.type = ? + INNER JOIN ci_pipeline cp ON cp.id = awm.component_id AND cp.active = true AND cp.deleted = false + LEFT JOIN pipeline p ON p.ci_pipeline_id = cp.id AND p.deleted = false + LEFT JOIN environment e ON e.id = p.environment_id + WHERE aw.active = true + ` + + // Initialize query params with Job type constant and CI_PIPELINE constant + queryParams := []interface{}{helper.Job, appWorkflow.CIPIPELINE} + + // Apply app filter early + if len(appIds) > 0 { + query += " AND aw.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Apply search filter early + if searchQuery != "" { + query += " AND aw.name ILIKE ?" 
+ queryParams = append(queryParams, "%"+searchQuery+"%") + } + + // Apply scan enablement filter early using constants + // Empty string means fetch all workflows + if scanEnablement == string(workflowConstants.ScanEnabled) { + query += " AND cp.scan_enabled = true" + } else if scanEnablement == string(workflowConstants.ScanNotEnabled) { + query += " AND cp.scan_enabled = false" + } + // If scanEnablement is empty or any other value, no filter is applied (fetch all) + + // Apply cluster/env filters early using direct join instead of EXISTS subquery + if len(clusterIds) > 0 || len(envIds) > 0 { + // Add filter condition - at least one pipeline must match + // We'll handle this by ensuring the workflow has matching pipelines + query += ` + AND EXISTS ( + SELECT 1 FROM pipeline p2 + INNER JOIN environment e2 ON e2.id = p2.environment_id + WHERE p2.ci_pipeline_id = cp.id + AND p2.deleted = false + ` + if len(clusterIds) > 0 { + query += " AND e2.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + if len(envIds) > 0 { + query += " AND e2.id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + query += ` + ) + ` + } + + query += ` + ) + SELECT + workflow_id, + workflow_name, + app_id, + app_name, + MAX(scan_enabled::int)::boolean as scan_enabled, + STRING_AGG(DISTINCT environment_name, ', ' ORDER BY environment_name) FILTER (WHERE environment_name IS NOT NULL) as environments, + STRING_AGG(DISTINCT ci_pipeline_id::text, ',' ORDER BY ci_pipeline_id::text) as ci_pipeline_ids, + MAX(ci_pipeline_type) as ci_pipeline_type + FROM workflow_data + GROUP BY workflow_id, workflow_name, app_id, app_name + ` + + // Add dynamic sorting + orderByClause := impl.buildWorkflowSortClause(sortBy, sortOrder) + query += orderByClause + + // Fetch all results (no pagination in SQL) + var allResults []*WorkflowWithAppEnvDetails + _, err := impl.dbConnection.Query(&allResults, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error fetching workflows with app and env details", "err", err, "query", query, "params", queryParams) + return nil, 0, err + } + + // Calculate total count + totalCount := len(allResults) + + // Apply pagination in code + start := offset + end := offset + size + + // Handle edge cases + if start > totalCount { + start = totalCount + } + if end > totalCount { + end = totalCount + } + if start < 0 { + start = 0 + } + + // Slice the results for pagination + results = allResults[start:end] + + return results, totalCount, nil +} + +// buildWorkflowSortClause builds the ORDER BY clause for workflow listing based on sortBy and sortOrder +func (impl *CiPipelineRepositoryImpl) buildWorkflowSortClause(sortBy, sortOrder string) string { + // Default sorting + orderByClause := " ORDER BY app_name ASC, workflow_name ASC" + + // Validate and sanitize sortOrder using constants + validSortOrder := string(workflowConstants.SortOrderAsc) + if sortOrder == string(workflowConstants.SortOrderDesc) { + validSortOrder = string(workflowConstants.SortOrderDesc) + } + + // Build ORDER BY based on sortBy field using constants + switch sortBy { + case string(workflowConstants.WorkflowSortByWorkflowName): + orderByClause = " ORDER BY workflow_name " + validSortOrder + ", app_name " + validSortOrder + case string(workflowConstants.WorkflowSortByAppName): + orderByClause = " ORDER BY app_name " + validSortOrder + ", workflow_name " + validSortOrder + case string(workflowConstants.WorkflowSortByScanEnabled): + orderByClause = " ORDER BY scan_enabled " + validSortOrder + ", app_name " + validSortOrder + ", workflow_name " + validSortOrder + default: + // Default: sort by app_name, then workflow_name + orderByClause = " ORDER BY app_name " + validSortOrder + ", workflow_name " + validSortOrder + } + + return orderByClause +} + +// FindAllAppWorkflowIdsByFilters fetches ALL app workflow IDs matching the filters (no pagination) +// This is used for bulk 
operations where we need to get all matching workflow IDs +func (impl *CiPipelineRepositoryImpl) FindAllAppWorkflowIdsByFilters(appIds, clusterIds, envIds []int, searchQuery string, scanEnablement string) ([]int, error) { + query := ` + WITH workflow_data AS ( + SELECT + aw.id as workflow_id, + cp.scan_enabled + FROM app_workflow aw + INNER JOIN app a ON a.id = aw.app_id AND a.active = true AND a.app_type != ? + INNER JOIN app_workflow_mapping awm ON awm.app_workflow_id = aw.id AND awm.active = true AND awm.type = ? + INNER JOIN ci_pipeline cp ON cp.id = awm.component_id AND cp.active = true AND cp.deleted = false + LEFT JOIN pipeline p ON p.ci_pipeline_id = cp.id AND p.deleted = false + LEFT JOIN environment e ON e.id = p.environment_id + WHERE aw.active = true + ` + + // Initialize query params with Job type constant and CI_PIPELINE constant + queryParams := []interface{}{helper.Job, appWorkflow.CIPIPELINE} + + // Apply filters + if len(appIds) > 0 { + query += " AND aw.app_id IN (?)" + queryParams = append(queryParams, pg.In(appIds)) + } + + if len(clusterIds) > 0 { + query += " AND e.cluster_id IN (?)" + queryParams = append(queryParams, pg.In(clusterIds)) + } + + if len(envIds) > 0 { + query += " AND e.id IN (?)" + queryParams = append(queryParams, pg.In(envIds)) + } + + if searchQuery != "" { + query += " AND aw.name ILIKE ?" + queryParams = append(queryParams, "%"+searchQuery+"%") + } + + // Apply scan enablement filter using constants + if scanEnablement == string(workflowConstants.ScanEnabled) { + query += " AND cp.scan_enabled = true" + } else if scanEnablement == string(workflowConstants.ScanNotEnabled) { + query += " AND cp.scan_enabled = false" + } + + // Close the CTE and select distinct workflow IDs + query += ` + ) + SELECT DISTINCT workflow_id + FROM workflow_data + ORDER BY workflow_id + ` + + var workflowIds []int + _, err := impl.dbConnection.Query(&workflowIds, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error fetching all app workflow IDs by filters", "err", err, "query", query, "params", queryParams) + return nil, err + } + + return workflowIds, nil +} + +// FindAppWorkflowsByIds fetches app workflows by specific workflow IDs (for RBAC checks in bulk operations) +func (impl *CiPipelineRepositoryImpl) FindAppWorkflowsByIds(workflowIds []int) ([]*WorkflowWithAppEnvDetails, error) { + if len(workflowIds) == 0 { + return []*WorkflowWithAppEnvDetails{}, nil + } + + query := ` + WITH workflow_data AS ( + SELECT + aw.id as workflow_id, + aw.name as workflow_name, + aw.app_id, + a.app_name, + cp.id as ci_pipeline_id, + cp.scan_enabled, + cp.ci_pipeline_type, + e.environment_name + FROM app_workflow aw + INNER JOIN app a ON a.id = aw.app_id AND a.active = true AND a.app_type != ? + INNER JOIN app_workflow_mapping awm ON awm.app_workflow_id = aw.id AND awm.active = true AND awm.type = ? + INNER JOIN ci_pipeline cp ON cp.id = awm.component_id AND cp.active = true AND cp.deleted = false + LEFT JOIN pipeline p ON p.ci_pipeline_id = cp.id AND p.deleted = false + LEFT JOIN environment e ON e.id = p.environment_id + WHERE aw.active = true + AND aw.id = ANY(?) 
+ ) + SELECT + workflow_id, + workflow_name, + app_id, + app_name, + MAX(scan_enabled::int)::boolean as scan_enabled, + STRING_AGG(DISTINCT environment_name, ', ' ORDER BY environment_name) FILTER (WHERE environment_name IS NOT NULL) as environments, + STRING_AGG(DISTINCT ci_pipeline_id::text, ',' ORDER BY ci_pipeline_id::text) as ci_pipeline_ids, + MAX(ci_pipeline_type) as ci_pipeline_type + FROM workflow_data + GROUP BY workflow_id, workflow_name, app_id, app_name + ORDER BY workflow_name ASC + ` + + var results []*WorkflowWithAppEnvDetails + _, err := impl.dbConnection.Query(&results, query, helper.Job, appWorkflow.CIPIPELINE, pg.Array(workflowIds)) + if err != nil { + impl.logger.Errorw("error fetching app workflows by IDs", "err", err, "workflowIds", workflowIds) + return nil, err + } + + return results, nil +} + +// BulkUpdateScanEnabled updates scan_enabled for CI pipelines in app workflows +// workflowIds are app_workflow.id values, we fetch ci_pipeline_ids from app_workflow_mapping and update ci_pipeline +func (impl *CiPipelineRepositoryImpl) BulkUpdateScanEnabled(workflowIds []int, scanEnabled bool, userId int32) error { + if len(workflowIds) == 0 { + return nil + } + + // Get distinct ci_pipeline_ids from app_workflow_mapping table for the given app workflow IDs + query := ` + SELECT DISTINCT awm.component_id + FROM app_workflow_mapping awm + WHERE awm.app_workflow_id IN (?) + AND awm.type = ? 
+ AND awm.active = true + ` + + var ciPipelineIds []int + _, err := impl.dbConnection.Query(&ciPipelineIds, query, pg.In(workflowIds), appWorkflow.CIPIPELINE) + + if err != nil { + impl.logger.Errorw("error fetching ci_pipeline_ids from app_workflow_mapping", "workflowIds", workflowIds, "err", err) + return err + } + + if len(ciPipelineIds) == 0 { + impl.logger.Warnw("no ci_pipeline_ids found for given app_workflow_ids", "workflowIds", workflowIds) + return fmt.Errorf("no ci_pipeline_ids found for given app_workflow_ids") + } + + // Update scan_enabled for the fetched ci_pipeline_ids + _, err = impl.dbConnection.Model(&CiPipeline{}). + Set("scan_enabled = ?", scanEnabled). + Set("updated_on = ?", time.Now()). + Set("updated_by = ?", userId). + Where("id IN (?)", pg.In(ciPipelineIds)). + Where("active = ?", true). + Where("deleted = ?", false). + Update() + + if err != nil { + impl.logger.Errorw("error bulk updating scan_enabled", "ciPipelineIds", ciPipelineIds, "scanEnabled", scanEnabled, "err", err) + return err + } + + impl.logger.Infow("successfully updated scan_enabled for ci_pipelines", "workflowIds", workflowIds, "ciPipelineIds", ciPipelineIds, "scanEnabled", scanEnabled) + return nil +} + +// External CI count methods implementation +func (impl *CiPipelineRepositoryImpl) GetActiveExternalCiPipelineCount() (int, error) { + count, err := impl.dbConnection.Model((*ExternalCiPipeline)(nil)). + Where("active = ?", true). + Count() + + if err != nil { + impl.logger.Errorw("error getting active external CI pipeline count", "err", err) + return 0, err + } + return count, nil +} + +func (impl *CiPipelineRepositoryImpl) GetActiveExternalCiPipelineCountInTimeRange(from, to *time.Time) (int, error) { + query := impl.dbConnection.Model((*ExternalCiPipeline)(nil)). 
+ Where("active = ?", true) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + count, err := query.Count() + if err != nil { + impl.logger.Errorw("error getting active external CI pipeline count in time range", "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +// GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd returns count of active CI pipelines that have IMAGE SCAN plugin configured +// in POST-CI or PRE-CD stages. Image scanning plugin is identified by plugin name 'IMAGE SCAN' (constant IMAGE_SCANNING_PLUGIN from pipelineStage.go) +func (impl *CiPipelineRepositoryImpl) GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd() (int, error) { + var count int + + // Query to count distinct CI pipelines that have IMAGE SCAN plugin configured in POST-CI or PRE-CD stages + // We need to check both: + // 1. POST-CI stages (pipeline_stage.ci_pipeline_id is set) + // 2. 
PRE-CD stages (pipeline_stage.cd_pipeline_id is set, need to join with pipeline table to get ci_pipeline_id) + query := ` + SELECT COUNT(DISTINCT ci_pipeline_id) FROM ( + -- CI pipelines with IMAGE SCAN plugin in POST-CI stage + SELECT cp.id as ci_pipeline_id + FROM ci_pipeline cp + INNER JOIN pipeline_stage ps ON ps.ci_pipeline_id = cp.id + INNER JOIN pipeline_stage_step pss ON pss.pipeline_stage_id = ps.id + INNER JOIN plugin_metadata pm ON pm.id = pss.ref_plugin_id + WHERE cp.active = true + AND cp.deleted = false + AND ps.deleted = false + AND pss.deleted = false + AND pm.deleted = false + AND pm.name = 'IMAGE SCAN' + + UNION + + -- CI pipelines with IMAGE SCAN plugin in PRE-CD stage + SELECT p.ci_pipeline_id + FROM pipeline p + INNER JOIN pipeline_stage ps ON ps.cd_pipeline_id = p.id + INNER JOIN pipeline_stage_step pss ON pss.pipeline_stage_id = ps.id + INNER JOIN plugin_metadata pm ON pm.id = pss.ref_plugin_id + INNER JOIN ci_pipeline cp ON cp.id = p.ci_pipeline_id + WHERE p.deleted = false + AND cp.active = true + AND cp.deleted = false + AND ps.deleted = false + AND pss.deleted = false + AND pm.deleted = false + AND pm.name = 'IMAGE SCAN' + AND p.ci_pipeline_id IS NOT NULL + ) AS combined_pipelines + ` + + _, err := impl.dbConnection.Query(&count, query) + if err != nil { + impl.logger.Errorw("error getting CI pipeline count with image scanning plugin", "err", err) + return 0, err + } + + return count, nil +} diff --git a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go index ef43684d21..2e9c527010 100644 --- a/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go +++ b/internal/sql/repository/pipelineConfig/CiWorkflowRepository.go @@ -17,11 +17,13 @@ package pipelineConfig import ( + "fmt" "time" "github.com/devtron-labs/devtron/internal/sql/constants" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow" 
"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/workflow/cdWorkflow" + "github.com/devtron-labs/devtron/pkg/overview/bean" "github.com/go-pg/pg" "go.uber.org/zap" ) @@ -53,6 +55,49 @@ type CiWorkflowRepository interface { MigrateIsArtifactUploaded(wfId int, isArtifactUploaded bool) MigrateCiArtifactLocation(wfId int, artifactLocation string) + + // Overview methods + GetTriggeredCIPipelines(from, to *time.Time, sortOrder bean.SortOrder, limit, offset int) ([]PipelineUsageData, int, error) + GetCiBuildCountInTimeRange(from, to *time.Time) (int, error) + GetSuccessfulCIBuildsForBuildTime(from, to *time.Time) ([]WorkflowBuildTime, error) + GetCIBuildsForStatusTrend(from, to *time.Time) ([]WorkflowStatusData, error) +} + +// Data structures for overview queries +type PipelineUsageData struct { + AppID int `json:"appId"` // Required for both CI and CD pipelines + EnvID int `json:"envId,omitempty"` // Only for deployment pipelines + PipelineID int `json:"pipelineId"` + PipelineName string `json:"pipelineName"` + AppName string `json:"appName"` + EnvName string `json:"envName,omitempty"` // Only for deployment pipelines + TriggerCount int `json:"triggerCount"` +} + +type ActivityData struct { + ID int `json:"id"` + Type string `json:"type"` + AppName string `json:"appName"` + EnvName string `json:"envName,omitempty"` + CiPipelineName string `json:"ciPipelineName,omitempty"` + Status string `json:"status"` + TriggeredAt time.Time `json:"triggeredAt"` + TriggeredBy int `json:"triggeredBy"` +} + +type WorkflowDetails struct { + Name string `json:"name"` + CreatedOn time.Time `json:"createdOn"` +} + +type WorkflowBuildTime struct { + StartedOn time.Time `db:"started_on"` + FinishedOn time.Time `db:"finished_on"` +} + +type WorkflowStatusData struct { + StartedOn time.Time `db:"started_on"` + Status string `db:"status"` } type CiWorkflowRepositoryImpl struct { @@ -423,3 +468,123 @@ func (impl *CiWorkflowRepositoryImpl) 
MigrateCiArtifactLocation(wfId int, artifa impl.logger.Errorw("error occurred while updating ci_artifact_location", "wfId", wfId, "err", err) } } + +// Overview methods implementation +func (impl *CiWorkflowRepositoryImpl) GetTriggeredCIPipelines(from, to *time.Time, sortOrder bean.SortOrder, limit, offset int) ([]PipelineUsageData, int, error) { + var results []PipelineUsageData + var totalCount int + + // First get the total count + countQuery := ` + SELECT COUNT(DISTINCT cp.id) + FROM ci_pipeline cp + INNER JOIN app a ON cp.app_id = a.id + LEFT JOIN ci_workflow cw ON cp.id = cw.ci_pipeline_id + AND cw.started_on >= ? AND cw.started_on <= ? + WHERE cp.deleted = false AND a.app_type = 0 AND a.active = true + ` + + _, err := impl.dbConnection.Query(&totalCount, countQuery, from, to) + if err != nil { + impl.logger.Errorw("error getting total count of CI pipelines", "from", from, "to", to, "err", err) + return nil, 0, err + } + + // Build the main query with sorting and pagination + orderClause := "ORDER BY trigger_count DESC, cp.id DESC" + if sortOrder == bean.ASC { + orderClause = "ORDER BY trigger_count ASC, cp.id ASC" + } + + query := fmt.Sprintf(` + SELECT + a.id as app_id, + cp.id as pipeline_id, + cp.name as pipeline_name, + a.app_name, + COALESCE(COUNT(cw.id), 0) as trigger_count + FROM ci_pipeline cp + INNER JOIN app a ON cp.app_id = a.id + LEFT JOIN ci_workflow cw ON cp.id = cw.ci_pipeline_id + AND cw.started_on >= ? AND cw.started_on <= ? + WHERE cp.deleted = false AND a.app_type = 0 AND a.active = true + GROUP BY a.id, cp.id, cp.name, a.app_name + %s + LIMIT ? OFFSET ? 
+ `, orderClause) + + _, err = impl.dbConnection.Query(&results, query, from, to, limit, offset) + if err != nil { + impl.logger.Errorw("error getting triggered CI pipelines", "from", from, "to", to, "sortOrder", sortOrder, "limit", limit, "offset", offset, "err", err) + return nil, 0, err + } + + return results, totalCount, nil +} + +func (impl *CiWorkflowRepositoryImpl) GetCiBuildCountInTimeRange(from, to *time.Time) (int, error) { + var count int + query := ` + SELECT COUNT(*) + FROM ci_workflow cw + INNER JOIN ci_pipeline cp ON cw.ci_pipeline_id = cp.id + INNER JOIN app a ON cp.app_id = a.id + WHERE cw.started_on >= ? AND cw.started_on <= ? + AND cp.ci_pipeline_type = 'CI_BUILD' + ` + + _, err := impl.dbConnection.Query(&count, query, from, to) + if err != nil { + impl.logger.Errorw("error getting CI_BUILD pipeline count in time range", "from", from, "to", to, "err", err) + return 0, err + } + + return count, nil +} + +func (impl *CiWorkflowRepositoryImpl) GetSuccessfulCIBuildsForBuildTime(from, to *time.Time) ([]WorkflowBuildTime, error) { + var workflows []WorkflowBuildTime + + query := ` + SELECT cw.started_on, cw.finished_on + FROM ci_workflow cw + INNER JOIN ci_pipeline cp ON cw.ci_pipeline_id = cp.id + INNER JOIN app a ON cp.app_id = a.id + WHERE cw.started_on >= ? AND cw.started_on <= ? 
+ AND cw.finished_on IS NOT NULL + AND cw.status = 'Succeeded' + AND cp.ci_pipeline_type = 'CI_BUILD' + ORDER BY cw.started_on + ` + + _, err := impl.dbConnection.Query(&workflows, query, from, to) + if err != nil { + impl.logger.Errorw("error fetching successful CI builds for build time", "from", from, "to", to, "err", err) + return nil, err + } + + return workflows, nil +} + +// GetCIBuildsForStatusTrend returns all CI builds in the date range including builds of deleted pipelines +func (impl *CiWorkflowRepositoryImpl) GetCIBuildsForStatusTrend(from, to *time.Time) ([]WorkflowStatusData, error) { + var workflows []WorkflowStatusData + + query := ` + SELECT cw.started_on, cw.status + FROM ci_workflow cw + INNER JOIN ci_pipeline cp ON cw.ci_pipeline_id = cp.id + INNER JOIN app a ON cp.app_id = a.id + WHERE cw.started_on >= ? AND cw.started_on <= ? + AND cp.ci_pipeline_type = 'CI_BUILD' + ORDER BY cw.started_on + ` + + _, err := impl.dbConnection.Query(&workflows, query, from, to) + if err != nil { + impl.logger.Errorw("error fetching CI builds for status trend", "from", from, "to", to, "err", err) + return nil, err + } + + return workflows, nil +} diff --git a/internal/sql/repository/pipelineConfig/PipelineRepository.go b/internal/sql/repository/pipelineConfig/PipelineRepository.go index 0bd629a0e0..a89c23b007 100644 --- a/internal/sql/repository/pipelineConfig/PipelineRepository.go +++ b/internal/sql/repository/pipelineConfig/PipelineRepository.go @@ -81,6 +81,12 @@ type Pipeline struct { sql.AuditLog } +// PipelineWithAppData represents production pipeline data with app information +type PipelineWithAppData struct { + AppId int `sql:"app_id"` + EnvironmentId int `sql:"environment_id"` +} + type PipelineRepository interface { Save(pipeline []*Pipeline, tx *pg.Tx) error Update(pipeline *Pipeline, tx *pg.Tx) error @@ -147,6 +153,15 @@ type PipelineRepository interface { GetAllArgoAppInfoByDeploymentAppNames(deploymentAppNames []string) 
([]*PipelineDeploymentConfigObj, error) FindEnvIdsByIdsInIncludingDeleted(ids []int) ([]int, error) GetPipelineCountByDeploymentType(deploymentType string) (int, error) + + // Overview methods + FindActiveByCiPipelineIdsIn(ciPipelineIds []int) ([]*Pipeline, error) + GetPipelineCountByEnvironmentType(isProd bool) (int, error) + FindActiveByEnvironmentType(isProd bool) ([]*Pipeline, error) + // Count methods for performance optimization + GetActivePipelineCountByEnvironmentTypeInTimeRange(isProd bool, from, to *time.Time) (int, error) + // FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange returns production pipelines with app data that have deployment history within the specified time range + FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange(from, to *time.Time) ([]*PipelineWithAppData, error) } type CiArtifactDTO struct { @@ -986,3 +1001,100 @@ func (impl *PipelineRepositoryImpl) GetPipelineCountByDeploymentType(deploymentT } return count, nil } + +func (impl *PipelineRepositoryImpl) FindActiveByCiPipelineIdsIn(ciPipelineIds []int) ([]*Pipeline, error) { + var pipelines []*Pipeline + err := impl.dbConnection.Model(&pipelines). + Where("ci_pipeline_id in (?)", pg.In(ciPipelineIds)). + Where("deleted = ?", false). + Select() + return pipelines, err +} + +func (impl *PipelineRepositoryImpl) GetPipelineCountByEnvironmentType(isProd bool) (int, error) { + var count int + query := `SELECT COUNT(*) FROM pipeline p + JOIN environment e ON p.environment_id = e.id + WHERE p.deleted = false AND e.active = true AND e.default = ?` + + _, err := impl.dbConnection.Query(&count, query, isProd) + if err != nil { + impl.logger.Errorw("error getting pipeline count by environment type", "isProd", isProd, "err", err) + return 0, err + } + return count, nil +} + +func (impl *PipelineRepositoryImpl) FindActiveByEnvironmentType(isProd bool) ([]*Pipeline, error) { + var pipelines []*Pipeline + err := impl.dbConnection.Model(&pipelines). 
+ Join("JOIN environment e ON pipeline.environment_id = e.id"). + Where("pipeline.deleted = ?", false). + Where("e.active = ?", true). + Where("e.default = ?", isProd). + Select() + + if err != nil { + impl.logger.Errorw("error finding active pipelines by environment type", "isProd", isProd, "err", err) + return nil, err + } + return pipelines, nil +} + +// Count methods implementation for performance optimization +func (impl *PipelineRepositoryImpl) GetActivePipelineCountByEnvironmentTypeInTimeRange(isProd bool, from, to *time.Time) (int, error) { + query := `SELECT COUNT(*) FROM pipeline p + JOIN environment e ON p.environment_id = e.id + WHERE p.deleted = false AND e.active = true AND e.default = ?` + + args := []interface{}{isProd} + + if from != nil { + query += " AND p.created_on >= ?" + args = append(args, from) + } + if to != nil { + query += " AND p.created_on <= ?" + args = append(args, to) + } + + var count int + _, err := impl.dbConnection.Query(&count, query, args...) + if err != nil { + impl.logger.Errorw("error getting active pipeline count by environment type in time range", "isProd", isProd, "from", from, "to", to, "err", err) + return 0, err + } + return count, nil +} + +// FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange returns production pipelines with app data that have deployment history within the specified time range +// This optimized method combines multiple queries into one by joining pipeline, environment, app, and cd_workflow tables with time filtering +func (impl *PipelineRepositoryImpl) FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange(from, to *time.Time) ([]*PipelineWithAppData, error) { + var results []*PipelineWithAppData + + query := ` + SELECT DISTINCT + p.app_id, + p.environment_id + FROM pipeline p + INNER JOIN environment e ON p.environment_id = e.id + INNER JOIN app a ON p.app_id = a.id + INNER JOIN cd_workflow cw ON p.id = cw.pipeline_id + INNER JOIN cd_workflow_runner cwr ON cw.id = cwr.cd_workflow_id 
+ WHERE p.deleted = false + AND e.active = true + AND e.default = true + AND a.active = true + AND cwr.workflow_type = 'DEPLOY' + AND cwr.started_on >= ? + AND cwr.started_on <= ? + ` + + _, err := impl.dbConnection.Query(&results, query, from, to) + if err != nil { + impl.logger.Errorw("error finding production pipelines with app data and deployment history in time range", "from", from, "to", to, "err", err) + return nil, err + } + + return results, nil +} diff --git a/internal/sql/repository/pipelineConfig/bean/constants/WorkflowConstants.go b/internal/sql/repository/pipelineConfig/bean/constants/WorkflowConstants.go new file mode 100644 index 0000000000..a10c795b57 --- /dev/null +++ b/internal/sql/repository/pipelineConfig/bean/constants/WorkflowConstants.go @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package constants + +// ScanEnablementType represents scan enablement filter options +type ScanEnablementType string + +const ( + ScanEnabled ScanEnablementType = "scanEnabled" // Workflows with scanning enabled + ScanNotEnabled ScanEnablementType = "scanNotEnabled" // Workflows with scanning disabled +) + +// WorkflowSortBy represents sort field for workflow listing +type WorkflowSortBy string + +const ( + WorkflowSortByWorkflowName WorkflowSortBy = "workflowName" // Sort by workflow name + WorkflowSortByAppName WorkflowSortBy = "appName" // Sort by application name + WorkflowSortByScanEnabled WorkflowSortBy = "scanEnabled" // Sort by scan enabled status +) + +// SortOrder represents sort order +type SortOrder string + +const ( + SortOrderAsc SortOrder = "ASC" + SortOrderDesc SortOrder = "DESC" +) diff --git a/manifests/install/devtron-installer.yaml b/manifests/install/devtron-installer.yaml index e8a0f9edab..837d98b963 100644 --- a/manifests/install/devtron-installer.yaml +++ b/manifests/install/devtron-installer.yaml @@ -4,4 +4,4 @@ metadata: name: installer-devtron namespace: devtroncd spec: - url: 
https://raw.githubusercontent.com/devtron-labs/devtron/v1.8.2/manifests/installation-script + url: https://raw.githubusercontent.com/devtron-labs/devtron/v2.0.0/manifests/installation-script diff --git a/manifests/installation-script b/manifests/installation-script index 8a2214d355..daaafa5557 100644 --- a/manifests/installation-script +++ b/manifests/installation-script @@ -1,4 +1,4 @@ -LTAG="v1.8.2"; +LTAG="v2.0.0"; REPO_RAW_URL="https://raw.githubusercontent.com/devtron-labs/devtron/"; shebang = `#!/bin/bash `; diff --git a/manifests/release.txt b/manifests/release.txt index 0de219bacc..7305fcbe2f 100644 --- a/manifests/release.txt +++ b/manifests/release.txt @@ -1 +1 @@ -beta -1 v1.5.0 +stable -1 v2.0.0 diff --git a/manifests/version.txt b/manifests/version.txt index 2e7bd91085..46b105a30d 100644 --- a/manifests/version.txt +++ b/manifests/version.txt @@ -1 +1 @@ -v1.5.0 +v2.0.0 diff --git a/pkg/appStore/bean/bean.go b/pkg/appStore/bean/bean.go index 84dfdd3bfd..86d6c712da 100644 --- a/pkg/appStore/bean/bean.go +++ b/pkg/appStore/bean/bean.go @@ -305,8 +305,6 @@ const ( ) var CHART_PROXY_TEMPLATE = "reference-chart-proxy" -var REQUIREMENTS_YAML_FILE = "requirements.yaml" -var VALUES_YAML_FILE = "values.yaml" type InstalledAppsResponse struct { AppStoreApplicationName string `json:"appStoreApplicationName"` diff --git a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go index caae5043cb..aed670d8a8 100644 --- a/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go +++ b/pkg/appStore/installedApp/service/FullMode/deployment/InstalledAppGitOpsService.go @@ -28,18 +28,14 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/bean" commonBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/common/bean" "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git" - gitBean 
"github.com/devtron-labs/devtron/pkg/deployment/gitOps/git/bean" validationBean "github.com/devtron-labs/devtron/pkg/deployment/gitOps/validation/bean" chartRefBean "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" globalUtil "github.com/devtron-labs/devtron/util" - "github.com/devtron-labs/devtron/util/sliceUtil" "github.com/google/go-github/github" "github.com/microsoft/azure-devops-go-api/azuredevops" "github.com/xanzy/go-gitlab" "helm.sh/helm/v3/pkg/chart" "net/http" - "os" - "path/filepath" "sigs.k8s.io/yaml" "strconv" "strings" @@ -137,20 +133,17 @@ func (impl *FullModeDeploymentServiceImpl) UpdateAppGitOpsOperations(manifest *b gitOpsResponse := &bean.AppStoreGitOpsResponse{} ctx := context.Background() + cloneChartToGitRequest := adapter.ParseChartGitPushRequest(installAppVersionRequest, "") + err := impl.gitOperationService.MigrateProxyChartDependenciesIfRequired(ctx, updateDependencies, cloneChartToGitRequest, manifest.ChartMetaDataConfig.FileContent) + if err != nil { + impl.Logger.Errorw("error in checking if proxy chart dependencies should be migrated", "err", err) + return nil, err + } if updateDependencies { // update dependency if chart or chart version is changed _, _, requirementsCommitErr = impl.gitOperationService.CommitValues(ctx, manifest.ChartMetaDataConfig) gitHash, _, valuesCommitErr = impl.gitOperationService.CommitValues(ctx, manifest.ValuesConfig) } else { - cloneChartToGitRequest := adapter.ParseChartGitPushRequest(installAppVersionRequest, "") - migrateDependencies, err := impl.shouldMigrateProxyChartDependencies(cloneChartToGitRequest, manifest.ChartMetaDataConfig.FileContent) - if err != nil { - impl.Logger.Errorw("error in checking if proxy chart dependencies should be migrated", "err", err) - return nil, err - } - if migrateDependencies { - _, _, requirementsCommitErr = impl.gitOperationService.CommitValues(ctx, manifest.ChartMetaDataConfig) - } // only values are changed in update, so 
commit values config gitHash, _, valuesCommitErr = impl.gitOperationService.CommitValues(ctx, manifest.ValuesConfig) } @@ -372,7 +365,7 @@ func (impl *FullModeDeploymentServiceImpl) getValuesAndChartMetaDataForGitConfig impl.Logger.Errorw("error in marshalling values content", "err", err) return nil, nil, err } - valuesConfig, err := impl.getGitCommitConfig(installAppVersionRequest, string(valuesContent), appStoreBean.VALUES_YAML_FILE) + valuesConfig, err := impl.getGitCommitConfig(installAppVersionRequest, string(valuesContent), chartRefBean.VALUES_YAML_FILE) if err != nil { impl.Logger.Errorw("error in creating values config for git", "err", err) return nil, nil, err @@ -426,83 +419,3 @@ func (impl *FullModeDeploymentServiceImpl) CreateArgoRepoSecretIfNeeded(appStore } return nil } - -func (impl *FullModeDeploymentServiceImpl) shouldMigrateProxyChartDependencies(pushChartToGitRequest *gitBean.PushChartToGitRequestDTO, expectedChartYamlContent string) (bool, error) { - clonedDir, err := impl.gitOperationService.CloneChartForHelmApp(pushChartToGitRequest.AppName, pushChartToGitRequest.RepoURL, pushChartToGitRequest.TargetRevision) - if err != nil { - impl.Logger.Errorw("error in cloning chart for helm app", "appName", pushChartToGitRequest.AppName, "repoUrl", pushChartToGitRequest.RepoURL, "err", err) - return false, err - } - defer impl.chartTemplateService.CleanDir(clonedDir) - gitOpsChartLocation := fmt.Sprintf("%s-%s", pushChartToGitRequest.AppName, pushChartToGitRequest.EnvName) - dir := filepath.Join(clonedDir, gitOpsChartLocation) - chartYamlPath := filepath.Join(dir, chartRefBean.CHART_YAML_FILE) - if _, err := os.Stat(chartYamlPath); os.IsNotExist(err) { - impl.Logger.Debugw("chart.yaml not found in cloned repo from git-ops, no migrations required", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) - return false, nil - } else if err != nil { - impl.Logger.Errorw("error in checking chart.yaml file", "appName", 
pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) - return false, err - } - expectedChartMetaData := &chart.Metadata{} - expectedChartJsonContent, err := yaml.YAMLToJSON([]byte(expectedChartYamlContent)) - if err != nil { - impl.Logger.Errorw("error in converting requirements.yaml to json", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) - return false, err - } - err = json.Unmarshal(expectedChartJsonContent, &expectedChartMetaData) - if err != nil { - impl.Logger.Errorw("error in unmarshalling requirements.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) - return false, err - } - if len(expectedChartMetaData.Dependencies) == 0 { - impl.Logger.Debugw("no dependencies found in requirements.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) - return false, nil - } - impl.Logger.Debugw("dependencies found in requirements.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "dependencies", expectedChartMetaData.Dependencies) - // check if chart.yaml file has dependencies - chartYamlContent, err := os.ReadFile(chartYamlPath) - if err != nil { - impl.Logger.Errorw("error in reading chart.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) - return false, err - } - chartMetadata := &chart.Metadata{} - chartJsonContent, err := yaml.YAMLToJSON(chartYamlContent) - if err != nil { - impl.Logger.Errorw("error in converting chart.yaml to json", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) - return false, err - } - err = json.Unmarshal(chartJsonContent, chartMetadata) - if err != nil { - impl.Logger.Errorw("error in unmarshalling chart.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) - 
return false, err - } - if len(chartMetadata.Dependencies) == 0 { - impl.Logger.Debugw("no dependencies found in chart.yaml file, need to migrate proxy chart dependencies", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) - return true, nil - } - impl.Logger.Debugw("dependencies found in chart.yaml file, validating against requirements.yaml", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "chartDependencies", chartMetadata.Dependencies) - // validate if chart.yaml dependencies are present in requirements.yaml - latestDependencies := sliceUtil.NewMapFromFuncExec(chartMetadata.Dependencies, func(dependency *chart.Dependency) string { - return getUniqueKeyFromDependency(dependency) - }) - previousDependencies := sliceUtil.NewMapFromFuncExec(expectedChartMetaData.Dependencies, func(dependency *chart.Dependency) string { - return getUniqueKeyFromDependency(dependency) - }) - for key := range latestDependencies { - if _, ok := previousDependencies[key]; !ok { - impl.Logger.Debugw("dependency found in chart.yaml but not in requirements.yaml, need to migrate proxy chart dependencies", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "dependency", key) - return true, nil - } - } - impl.Logger.Debugw("all dependencies found in chart.yaml and requirements.yaml, no migration required", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) - return false, nil -} - -func getUniqueKeyFromDependency(dependency *chart.Dependency) string { - // return unique key for dependency - return fmt.Sprintf("%s-%s-%s", - strings.ToLower(strings.TrimSpace(dependency.Name)), - strings.ToLower(strings.TrimSpace(dependency.Version)), - strings.ToLower(strings.TrimSpace(dependency.Repository))) -} diff --git a/pkg/asyncProvider/WorkerPoolWrapper.go b/pkg/asyncProvider/WorkerPoolWrapper.go new file mode 100644 index 0000000000..a564606355 --- /dev/null +++ 
b/pkg/asyncProvider/WorkerPoolWrapper.go @@ -0,0 +1,11 @@ +package asyncProvider + +import ( + "github.com/devtron-labs/common-lib/constants" + "github.com/devtron-labs/common-lib/workerPool" + "go.uber.org/zap" +) + +func NewBatchWorker[T any](batchSize int, logger *zap.SugaredLogger) *workerPool.WorkerPool[T] { + return workerPool.NewWorkerPool[T](batchSize, constants.Orchestrator, logger) +} diff --git a/pkg/attributes/bean/bean.go b/pkg/attributes/bean/bean.go index b5803fb625..5bce1823fe 100644 --- a/pkg/attributes/bean/bean.go +++ b/pkg/attributes/bean/bean.go @@ -24,6 +24,11 @@ const ( UserPreferencesResourcesKey = "resources" ) +// InternalOnlyKeys are the internal attribute keys - cannot be read or written via API +var InternalOnlyKeys = map[string]bool{ + API_SECRET_KEY: true, +} + type AttributesDto struct { Id int `json:"id"` Key string `json:"key,omitempty"` diff --git a/pkg/auth/user/UserService.go b/pkg/auth/user/UserService.go index aff943faa0..5be9e8f017 100644 --- a/pkg/auth/user/UserService.go +++ b/pkg/auth/user/UserService.go @@ -19,17 +19,18 @@ package user import ( "context" "fmt" + "net/http" + "strconv" + "strings" + "sync" + "time" + bean4 "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin/bean" "github.com/devtron-labs/devtron/pkg/auth/user/adapter" userHelper "github.com/devtron-labs/devtron/pkg/auth/user/helper" adapter2 "github.com/devtron-labs/devtron/pkg/auth/user/repository/adapter" "github.com/devtron-labs/devtron/pkg/auth/user/repository/helper" util3 "github.com/devtron-labs/devtron/pkg/auth/user/util" - "net/http" - "strconv" - "strings" - "sync" - "time" "github.com/devtron-labs/authenticator/jwt" "github.com/devtron-labs/authenticator/middleware" @@ -1237,7 +1238,7 @@ func (impl *UserServiceImpl) GetLoggedInUser(r *http.Request) (int32, error) { userId, userType, err := impl.GetUserByToken(r.Context(), token) // if user is of api-token type, then update lastUsedBy and lastUsedAt if err == nil && userType == 
userBean.USER_TYPE_API_TOKEN { - go impl.saveUserAudit(r, userId) + go impl.updateUserAudit(r, userId) } return userId, err } @@ -1667,6 +1668,15 @@ func (impl *UserServiceImpl) saveUserAudit(r *http.Request, userId int32) { impl.userAuditService.Save(userAudit) } +func (impl *UserServiceImpl) updateUserAudit(r *http.Request, userId int32) { + clientIp := util2.GetClientIP(r) + userAudit := &UserAudit{ + UserId: userId, + ClientIp: clientIp, + } + impl.userAuditService.Update(userAudit) +} + func (impl *UserServiceImpl) GetRoleFiltersByUserRoleGroups(userRoleGroups []userBean.UserRoleGroup) ([]userBean.RoleFilter, error) { groupNames := make([]string, 0) for _, userRoleGroup := range userRoleGroups { diff --git a/pkg/cluster/ClusterService.go b/pkg/cluster/ClusterService.go index 6a1b44878b..0bd534bc1a 100644 --- a/pkg/cluster/ClusterService.go +++ b/pkg/cluster/ClusterService.go @@ -94,6 +94,7 @@ type ClusterService interface { ConvertClusterBeanObjectToCluster(bean *bean.ClusterBean) *v1alpha1.Cluster GetClusterConfigByClusterId(clusterId int) (*k8s.ClusterConfig, error) + FindActiveClustersExcludingVirtual() ([]bean.ClusterBean, error) } type ClusterServiceImpl struct { @@ -1099,3 +1100,16 @@ func (impl *ClusterServiceImpl) GetClusterConfigByClusterId(clusterId int) (*k8s clusterConfig := rq.GetClusterConfig() return clusterConfig, nil } + +func (impl *ClusterServiceImpl) FindActiveClustersExcludingVirtual() ([]bean.ClusterBean, error) { + models, err := impl.clusterRepository.FindAllActiveExceptVirtual() + if err != nil { + return nil, err + } + var beans []bean.ClusterBean + for _, model := range models { + bean := adapter.GetClusterBean(model) + beans = append(beans, bean) + } + return beans, nil +} diff --git a/pkg/cluster/environment/EnvironmentService.go b/pkg/cluster/environment/EnvironmentService.go index 8ad9077537..c7c0e5a8db 100644 --- a/pkg/cluster/environment/EnvironmentService.go +++ b/pkg/cluster/environment/EnvironmentService.go @@ -70,6 +70,7 @@ 
type EnvironmentService interface { GetCombinedEnvironmentListForDropDownByClusterIds(token string, clusterIds []int, auth func(token string, object string) bool) ([]*bean2.ClusterEnvDto, error) HandleErrorInClusterConnections(clusters []*bean4.ClusterBean, respMap *sync.Map, clusterExistInDb bool) GetDetailsById(envId int) (*repository.Environment, error) + FindNamesByIds(envIds []int) (map[int]string, error) } type EnvironmentServiceImpl struct { @@ -752,3 +753,7 @@ func (impl EnvironmentServiceImpl) GetDetailsById(envId int) (*repository.Enviro } return envDetails, nil } + +func (impl EnvironmentServiceImpl) FindNamesByIds(envIds []int) (map[int]string, error) { + return impl.environmentRepository.FindNamesByIds(envIds) +} diff --git a/pkg/cluster/environment/repository/EnvironmentRepository.go b/pkg/cluster/environment/repository/EnvironmentRepository.go index b573cf6f2f..1a479a9dc4 100644 --- a/pkg/cluster/environment/repository/EnvironmentRepository.go +++ b/pkg/cluster/environment/repository/EnvironmentRepository.go @@ -18,9 +18,13 @@ package repository import ( "fmt" + "time" + "github.com/devtron-labs/devtron/internal/sql/repository/appStatus" "github.com/devtron-labs/devtron/internal/sql/repository/helper" "github.com/devtron-labs/devtron/pkg/cluster/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "github.com/go-pg/pg/orm" @@ -61,6 +65,8 @@ type EnvironmentRepository interface { Create(mappings *Environment) error FindAll() ([]Environment, error) FindAllActive() ([]*Environment, error) + FindAllActiveInTimeRange(from, to *time.Time) ([]*Environment, error) + GetAggregatedEnvironmentTrendWithParams(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.TimeDataPoint, error) FindAllActiveEnvironmentCount() (int, error) MarkEnvironmentDeleted(mappings *Environment, tx *pg.Tx) error GetConnection() 
(dbConnection *pg.DB) @@ -80,6 +86,7 @@ type EnvironmentRepository interface { FindByClusterIdAndNamespace(namespaceClusterPair []*ClusterNamespacePair) ([]*Environment, error) FindByClusterIds(clusterIds []int) ([]*Environment, error) FindIdsByNames(envNames []string) ([]int, error) + FindNamesByIds(envIds []int) (map[int]string, error) FindByNames(envNames []string) ([]*Environment, error) FindByEnvName(envName string) ([]*Environment, error) @@ -361,6 +368,29 @@ func (repo EnvironmentRepositoryImpl) FindIdsByNames(envNames []string) ([]int, return ids, err } +// FindNamesByIds returns a map of environment id to environment name for the given ids +func (repo *EnvironmentRepositoryImpl) FindNamesByIds(envIds []int) (map[int]string, error) { + if len(envIds) == 0 { + return make(map[int]string), nil + } + type EnvIdName struct { + Id int `sql:"id"` + Name string `sql:"environment_name"` + } + var envIdNames []EnvIdName + query := "SELECT id, environment_name FROM environment WHERE id IN (?) AND active = ?;" + _, err := repo.dbConnection.Query(&envIdNames, query, pg.In(envIds), true) + if err != nil { + return nil, err + } + + envMap := make(map[int]string, len(envIdNames)) + for _, env := range envIdNames { + envMap[env.Id] = env.Name + } + return envMap, nil +} + func (repo EnvironmentRepositoryImpl) FindByNames(envNames []string) ([]*Environment, error) { var environment []*Environment err := repo.dbConnection. @@ -436,3 +466,70 @@ func (repositoryImpl EnvironmentRepositoryImpl) FindEnvLinkedWithCiPipelines(ext //" INNER JOIN " + //" (SELECT apf2.app_workflow_id FROM app_workflow_mapping apf2 WHERE component_id IN (?) AND type='CI_PIPELINE') sqt " + //" ON apf.app_workflow_id = sqt.app_workflow_id;" + +func (repo EnvironmentRepositoryImpl) FindAllActiveInTimeRange(from, to *time.Time) ([]*Environment, error) { + var mappings []*Environment + query := repo. + dbConnection.Model(&mappings). 
+ Where("environment.active = ?", true) + + if from != nil { + query = query.Where("environment.created_on >= ?", from) + } + if to != nil { + query = query.Where("environment.created_on <= ?", to) + } + + err := query.Select() + return mappings, err +} + +func (repo EnvironmentRepositoryImpl) GetAggregatedEnvironmentTrendWithParams(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.TimeDataPoint, error) { + var results []struct { + Date string `json:"date"` + Count int `json:"count"` + } + + var query string + if aggregationType == constants.AggregateByHour { + // Aggregate by hour for "Today" + query = ` + SELECT + TO_CHAR(DATE_TRUNC('hour', created_on), 'YYYY-MM-DD HH24:00') as date, + COUNT(*) as count + FROM environment + WHERE active = true + AND created_on >= ? AND created_on <= ? + GROUP BY DATE_TRUNC('hour', created_on) + ORDER BY DATE_TRUNC('hour', created_on) + ` + } else { + // Aggregate by day for other periods + query = ` + SELECT + TO_CHAR(DATE_TRUNC('day', created_on), 'YYYY-MM-DD') as date, + COUNT(*) as count + FROM environment + WHERE active = true + AND created_on >= ? AND created_on <= ? 
+ GROUP BY DATE_TRUNC('day', created_on) + ORDER BY DATE_TRUNC('day', created_on) + ` + } + + _, err := repo.dbConnection.Query(&results, query, from, to) + if err != nil { + return nil, err + } + + // Convert to TimeDataPoint + trendData := make([]bean.TimeDataPoint, 0, len(results)) + for _, result := range results { + trendData = append(trendData, bean.TimeDataPoint{ + Date: result.Date, + Count: result.Count, + }) + } + + return trendData, nil +} diff --git a/pkg/deployment/gitOps/git/GitOperationService.go b/pkg/deployment/gitOps/git/GitOperationService.go index f2c82459e5..75f070cd0a 100644 --- a/pkg/deployment/gitOps/git/GitOperationService.go +++ b/pkg/deployment/gitOps/git/GitOperationService.go @@ -18,6 +18,7 @@ package git import ( "context" + "encoding/json" "fmt" "github.com/devtron-labs/common-lib/utils/retryFunc" bean2 "github.com/devtron-labs/devtron/api/bean" @@ -29,14 +30,17 @@ import ( "github.com/devtron-labs/devtron/pkg/deployment/gitOps/git/bean" chartRefBean "github.com/devtron-labs/devtron/pkg/deployment/manifest/deploymentTemplate/chartRef/bean" globalUtil "github.com/devtron-labs/devtron/util" + "github.com/devtron-labs/devtron/util/sliceUtil" dirCopy "github.com/otiai10/copy" "go.opentelemetry.io/otel" "go.uber.org/zap" + "helm.sh/helm/v3/pkg/chart" "net/url" "os" "path" "path/filepath" "regexp" + "sigs.k8s.io/yaml" "strings" "time" ) @@ -52,6 +56,7 @@ type GitOperationService interface { PushChartToGitRepo(ctx context.Context, gitOpsRepoName, chartLocation, tempReferenceTemplateDir, repoUrl, targetRevision string, userId int32) (err error) CloneChartForHelmApp(helmAppName, gitRepoUrl, targetRevision string) (string, error) PushChartToGitOpsRepoForHelmApp(ctx context.Context, pushChartToGitRequest *bean.PushChartToGitRequestDTO, valuesConfig *ChartConfig) (*commonBean.ChartGitAttribute, string, error) + MigrateProxyChartDependenciesIfRequired(ctx context.Context, isChartUpdated bool, pushChartToGitRequest *bean.PushChartToGitRequestDTO, 
expectedChartYamlContent string) error CreateRepository(ctx context.Context, dto *apiBean.GitOpsConfigDto, userId int32) (string, bool, bool, error) @@ -378,6 +383,188 @@ func (impl *GitOperationServiceImpl) PushChartToGitOpsRepoForHelmApp(ctx context }, commit, err } +func (impl *GitOperationServiceImpl) MigrateProxyChartDependenciesIfRequired(ctx context.Context, isChartUpdated bool, pushChartToGitRequest *bean.PushChartToGitRequestDTO, expectedChartYamlContent string) error { + clonedDir, err := impl.CloneChartForHelmApp(pushChartToGitRequest.AppName, pushChartToGitRequest.RepoURL, pushChartToGitRequest.TargetRevision) + if err != nil { + impl.logger.Errorw("error in cloning chart for helm app", "appName", pushChartToGitRequest.AppName, "repoUrl", pushChartToGitRequest.RepoURL, "err", err) + return err + } + defer impl.chartTemplateService.CleanDir(clonedDir) + gitOpsChartLocation := fmt.Sprintf("%s-%s", pushChartToGitRequest.AppName, pushChartToGitRequest.EnvName) + workingDir := filepath.Join(clonedDir, gitOpsChartLocation) + deleteRequirementsYaml, requirementsYamlPath, err := impl.shouldDeleteProxyChartRequirementsYaml(workingDir, pushChartToGitRequest) + if err != nil { + impl.logger.Errorw("error in checking if requirements.yaml should be deleted", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return err + } + err = impl.deleteProxyChartRequirementsYaml(deleteRequirementsYaml, requirementsYamlPath) + if err != nil { + impl.logger.Errorw("error in deleting requirements.yaml", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return err + } + shouldMigrate, chartYamlPath, err := impl.shouldMigrateProxyChartDependencies(isChartUpdated, workingDir, pushChartToGitRequest, expectedChartYamlContent) + if err != nil { + impl.logger.Errorw("error in checking if proxy chart dependencies should be migrated", "appName", pushChartToGitRequest.AppName, "envName", 
pushChartToGitRequest.EnvName, "err", err) + return err + } + err = impl.updateChartYamlWithDependencies(shouldMigrate, chartYamlPath, expectedChartYamlContent) + if err != nil { + impl.logger.Errorw("error in migrating proxy chart dependencies", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return err + } + if shouldMigrate || deleteRequirementsYaml { + userEmailId, userName := impl.gitOpsConfigReadService.GetUserEmailIdAndNameForGitOpsCommit(pushChartToGitRequest.UserId) + commit, err := impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, pushChartToGitRequest.TargetRevision, "mirage proxy chart dependencies", userName, userEmailId) + if err != nil { + impl.logger.Warn("re-trying, taking pull and then push again") + err = impl.GitPull(clonedDir, pushChartToGitRequest.RepoURL, pushChartToGitRequest.TargetRevision) + if err != nil { + impl.logger.Errorw("error in git pull", "appName", gitOpsChartLocation, "err", err) + return err + } + if deleteRequirementsYaml { + err = impl.deleteProxyChartRequirementsYaml(deleteRequirementsYaml, requirementsYamlPath) + if err != nil { + impl.logger.Errorw("error in deleting requirements.yaml", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return err + } + } + if shouldMigrate { + err = impl.updateChartYamlWithDependencies(shouldMigrate, chartYamlPath, expectedChartYamlContent) + if err != nil { + impl.logger.Errorw("error in migrating proxy chart dependencies", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return err + } + } + commit, err = impl.gitFactory.GitOpsHelper.CommitAndPushAllChanges(ctx, clonedDir, pushChartToGitRequest.TargetRevision, "mirage proxy chart dependencies", userName, userEmailId) + if err != nil { + impl.logger.Errorw("error in pushing git", "err", err) + return err + } + } + impl.logger.Debugw("proxy chart dependencies committed", 
"url", pushChartToGitRequest.RepoURL, "commit", commit) + } + return nil +} + +func (impl *GitOperationServiceImpl) shouldDeleteProxyChartRequirementsYaml(clonedDir string, pushChartToGitRequest *bean.PushChartToGitRequestDTO) (delete bool, requirementsYamlPath string, err error) { + requirementsYamlPath = filepath.Join(clonedDir, chartRefBean.REQUIREMENTS_YAML_FILE) + if _, err = os.Stat(requirementsYamlPath); os.IsNotExist(err) { + impl.logger.Debugw("requirements.yaml not found in cloned repo from git-ops, no need to delete requirements.yaml", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) + return delete, requirementsYamlPath, nil + } else if err != nil { + impl.logger.Errorw("error in checking requirements.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return delete, requirementsYamlPath, err + } + delete = true + return delete, requirementsYamlPath, nil +} + +func (impl *GitOperationServiceImpl) deleteProxyChartRequirementsYaml(deleteRequirementsYaml bool, requirementsYamlPath string) error { + if !deleteRequirementsYaml { + return nil + } + impl.logger.Warnw("requirements.yaml found in cloned repo from git-ops, need to delete requirements.yaml", "requirementsYamlPath", requirementsYamlPath) + err := os.Remove(requirementsYamlPath) + if err != nil && !os.IsNotExist(err) { + impl.logger.Errorw("error in deleting requirements.yaml file", "requirementsYamlPath", requirementsYamlPath, "err", err) + return err + } + return nil +} + +func (impl *GitOperationServiceImpl) shouldMigrateProxyChartDependencies(isChartUpdated bool, clonedDir string, pushChartToGitRequest *bean.PushChartToGitRequestDTO, expectedChartYamlContent string) (shouldMigrate bool, chartYamlPath string, err error) { + chartYamlPath = filepath.Join(clonedDir, chartRefBean.CHART_YAML_FILE) + if isChartUpdated { + // as the chart is updated, + // the dependencies as the chart.yaml file will be updated 
as per the new chart version + // no need of migrating the dependencies + return shouldMigrate, chartYamlPath, err + } + if _, err = os.Stat(chartYamlPath); os.IsNotExist(err) { + impl.logger.Debugw("chart.yaml not found in cloned repo from git-ops, no migrations required", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) + return shouldMigrate, chartYamlPath, err + } else if err != nil { + impl.logger.Errorw("error in checking chart.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return shouldMigrate, chartYamlPath, err + } + expectedChartMetaData := &chart.Metadata{} + expectedChartJsonContent, err := yaml.YAMLToJSON([]byte(expectedChartYamlContent)) + if err != nil { + impl.logger.Errorw("error in converting requirements.yaml to json", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return shouldMigrate, chartYamlPath, err + } + err = json.Unmarshal(expectedChartJsonContent, &expectedChartMetaData) + if err != nil { + impl.logger.Errorw("error in unmarshalling requirements.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return shouldMigrate, chartYamlPath, err + } + if len(expectedChartMetaData.Dependencies) == 0 { + impl.logger.Debugw("no dependencies found in requirements.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) + return shouldMigrate, chartYamlPath, err + } + impl.logger.Debugw("dependencies found in requirements.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "dependencies", expectedChartMetaData.Dependencies) + // check if chart.yaml file has dependencies + chartYamlContent, err := os.ReadFile(chartYamlPath) + if err != nil { + impl.logger.Errorw("error in reading chart.yaml file", "appName", pushChartToGitRequest.AppName, "envName", 
pushChartToGitRequest.EnvName, "err", err) + return shouldMigrate, chartYamlPath, err + } + chartMetadata := &chart.Metadata{} + chartJsonContent, err := yaml.YAMLToJSON(chartYamlContent) + if err != nil { + impl.logger.Errorw("error in converting chart.yaml to json", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return shouldMigrate, chartYamlPath, err + } + err = json.Unmarshal(chartJsonContent, chartMetadata) + if err != nil { + impl.logger.Errorw("error in unmarshalling chart.yaml file", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "err", err) + return shouldMigrate, chartYamlPath, err + } + if len(chartMetadata.Dependencies) == 0 { + impl.logger.Warnw("no dependencies found in chart.yaml file, need to migrate proxy chart dependencies", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName) + shouldMigrate = true + return shouldMigrate, chartYamlPath, err + } + impl.logger.Debugw("dependencies found in chart.yaml file, validating against requirements.yaml", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "chartDependencies", chartMetadata.Dependencies) + // validate if chart.yaml dependencies are present in requirements.yaml + latestDependencies := sliceUtil.NewMapFromFuncExec(chartMetadata.Dependencies, func(dependency *chart.Dependency) string { + return getUniqueKeyFromDependency(dependency) + }) + previousDependencies := sliceUtil.NewMapFromFuncExec(expectedChartMetaData.Dependencies, func(dependency *chart.Dependency) string { + return getUniqueKeyFromDependency(dependency) + }) + for key := range latestDependencies { + if _, ok := previousDependencies[key]; !ok { + impl.logger.Warnw("dependency found in chart.yaml but not in requirements.yaml, need to migrate proxy chart dependencies", "appName", pushChartToGitRequest.AppName, "envName", pushChartToGitRequest.EnvName, "dependency", key) + shouldMigrate 
= true + return shouldMigrate, chartYamlPath, err + } + } + return shouldMigrate, chartYamlPath, nil +} + +func (impl *GitOperationServiceImpl) updateChartYamlWithDependencies(shouldMigrate bool, chartYamlPath string, expectedChartYamlContent string) (err error) { + if !shouldMigrate { + return nil + } + impl.logger.Warnw("migrating proxy chart dependencies", "chartYamlPath", chartYamlPath) + chartYamlPath, err = util2.CreateFileAtFilePathAndWrite(chartYamlPath, expectedChartYamlContent) + if err != nil { + impl.logger.Errorw("error in creating yaml file", "err", err) + return err + } + return nil +} + +func getUniqueKeyFromDependency(dependency *chart.Dependency) string { + // return unique key for dependency + return fmt.Sprintf("%s-%s-%s", + strings.ToLower(strings.TrimSpace(dependency.Name)), + strings.ToLower(strings.TrimSpace(dependency.Version)), + strings.ToLower(strings.TrimSpace(dependency.Repository))) +} + func (impl *GitOperationServiceImpl) GetClonedDir(ctx context.Context, chartDir, repoUrl, targetRevision string) (string, error) { _, span := otel.Tracer("orchestrator").Start(ctx, "GitOperationServiceImpl.GetClonedDir") defer span.End() @@ -399,6 +586,7 @@ func (impl *GitOperationServiceImpl) cloneInDir(repoUrl, chartDir, targetRevisio } return clonedDir, nil } + func (impl *GitOperationServiceImpl) ReloadGitOpsProvider() error { return impl.gitFactory.Reload(impl.gitOpsConfigReadService) } diff --git a/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go b/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go index cc91e4ef62..ee9b075b9f 100644 --- a/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go +++ b/pkg/deployment/manifest/deploymentTemplate/chartRef/bean/bean.go @@ -143,4 +143,8 @@ type PipelineStrategy struct { Default bool `json:"default"` } -var CHART_YAML_FILE = "Chart.yaml" +const ( + CHART_YAML_FILE = "Chart.yaml" + REQUIREMENTS_YAML_FILE = "requirements.yaml" + VALUES_YAML_FILE = "values.yaml" +) 
diff --git a/pkg/deployment/trigger/devtronApps/userDeploymentRequest/repository/UserDeploymentRequestRepository.go b/pkg/deployment/trigger/devtronApps/userDeploymentRequest/repository/UserDeploymentRequestRepository.go index 261b431346..758b038fdc 100644 --- a/pkg/deployment/trigger/devtronApps/userDeploymentRequest/repository/UserDeploymentRequestRepository.go +++ b/pkg/deployment/trigger/devtronApps/userDeploymentRequest/repository/UserDeploymentRequestRepository.go @@ -48,8 +48,9 @@ type UserDeploymentRequest struct { type UserDeploymentRequestWithAdditionalFields struct { UserDeploymentRequest - CdWorkflowRunnerId int `sql:"cd_workflow_runner_id"` - PipelineOverrideId int `sql:"pipeline_override_id"` + tableName struct{} `sql:"user_deployment_request" pg:",discard_unknown_columns"` + CdWorkflowRunnerId int `sql:"cd_workflow_runner_id"` + PipelineOverrideId int `sql:"pipeline_override_id"` } type UserDeploymentRequestRepository interface { diff --git a/pkg/overview/AppManagementService.go b/pkg/overview/AppManagementService.go new file mode 100644 index 0000000000..6bc2c7bece --- /dev/null +++ b/pkg/overview/AppManagementService.go @@ -0,0 +1,906 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package overview + +import ( + "context" + "fmt" + "math" + "strconv" + "time" + + "github.com/devtron-labs/devtron/internal/sql/repository/app" + deploymentConfigRepo "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + workflowStageRepository "github.com/devtron-labs/devtron/pkg/pipeline/workflowStatus/repository" + teamRepository "github.com/devtron-labs/devtron/pkg/team/repository" + "go.uber.org/zap" +) + +type AppManagementService interface { + GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error) + GetWorkflowOverview(ctx context.Context) (*bean.WorkflowOverviewResponse, error) + GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error) + GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) (*bean.BuildDeploymentActivityDetailedResponse, error) +} + +type AppManagementServiceImpl struct { + logger *zap.SugaredLogger + appRepository app.AppRepository + pipelineRepository pipelineConfig.PipelineRepository + ciPipelineRepository pipelineConfig.CiPipelineRepository + ciWorkflowRepository pipelineConfig.CiWorkflowRepository + cdWorkflowRepository pipelineConfig.CdWorkflowRepository + environmentRepository repository.EnvironmentRepository + teamRepository teamRepository.TeamRepository + workflowStageRepository workflowStageRepository.WorkflowStageRepository + deploymentConfigRepository deploymentConfigRepo.Repository +} + +func NewAppManagementServiceImpl( + logger *zap.SugaredLogger, + appRepository app.AppRepository, + pipelineRepository pipelineConfig.PipelineRepository, + ciPipelineRepository 
pipelineConfig.CiPipelineRepository, + ciWorkflowRepository pipelineConfig.CiWorkflowRepository, + cdWorkflowRepository pipelineConfig.CdWorkflowRepository, + environmentRepository repository.EnvironmentRepository, + teamRepository teamRepository.TeamRepository, + workflowStageRepository workflowStageRepository.WorkflowStageRepository, + deploymentConfigRepository deploymentConfigRepo.Repository, +) *AppManagementServiceImpl { + return &AppManagementServiceImpl{ + logger: logger, + appRepository: appRepository, + pipelineRepository: pipelineRepository, + ciPipelineRepository: ciPipelineRepository, + ciWorkflowRepository: ciWorkflowRepository, + cdWorkflowRepository: cdWorkflowRepository, + environmentRepository: environmentRepository, + teamRepository: teamRepository, + workflowStageRepository: workflowStageRepository, + deploymentConfigRepository: deploymentConfigRepository, + } +} + +func (impl *AppManagementServiceImpl) GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error) { + + allProjects, err := impl.teamRepository.FindAllActive() + if err != nil { + impl.logger.Errorw("error getting all projects", "err", err) + return nil, err + } + + allDevtronApps, err := impl.appRepository.FindAll() + if err != nil { + impl.logger.Errorw("error getting all devtron apps", "err", err) + return nil, err + } + + allHelmApps, err := impl.appRepository.FindAllChartStoreApps() + if err != nil { + impl.logger.Errorw("error getting all helm apps", "err", err) + return nil, err + } + + allEnvironments, err := impl.environmentRepository.FindAllActive() + if err != nil { + impl.logger.Errorw("error getting all environments", "err", err) + return nil, err + } + + response := &bean.AppsOverviewResponse{ + Projects: &bean.AtAGlanceMetric{ + Total: len(allProjects), + }, + YourApplications: &bean.AtAGlanceMetric{ + Total: len(allDevtronApps), + }, + HelmApplications: &bean.AtAGlanceMetric{ + Total: len(allHelmApps), + }, + Environments: &bean.AtAGlanceMetric{ + Total: 
len(allEnvironments), + }, + } + + return response, nil +} + +func (impl *AppManagementServiceImpl) GetWorkflowOverview(ctx context.Context) (*bean.WorkflowOverviewResponse, error) { + allTimeMetrics, err := impl.fetchAllWorkflowMetrics(ctx) + if err != nil { + return nil, err + } + + response := impl.buildWorkflowOverviewResponse(allTimeMetrics) + return response, nil +} + +func (impl *AppManagementServiceImpl) GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error) { + impl.logger.Infow("getting build deployment activity overview", "request", request) + + // Get current period counts - now tracking only CI_BUILD pipeline builds (including failed ones) + currentTotalBuilds, err := impl.ciWorkflowRepository.GetCiBuildCountInTimeRange(request.From, request.To) + if err != nil { + impl.logger.Errorw("error getting current total builds count", "err", err) + return nil, err + } + + // Get average build time (only for CI_BUILD pipelines) + avgBuildTime, err := impl.calculateAverageBuildTime(request.From, request.To) + if err != nil { + impl.logger.Errorw("error calculating average build time", "err", err) + // Don't fail the request, just set to 0 + avgBuildTime = 0 + } + + // Get current total deployments count - now tracking ALL triggered deployments (including failed ones) + currentTotalDeployments, err := impl.cdWorkflowRepository.GetDeploymentCountInTimeRange(request.From, request.To) + if err != nil { + impl.logger.Errorw("error getting current total deployments count", "err", err) + return nil, err + } + + response := &bean.BuildDeploymentActivityResponse{ + TotalBuildTriggers: currentTotalBuilds, + AverageBuildTime: avgBuildTime, + TotalDeploymentTriggers: currentTotalDeployments, + } + + return response, nil +} + +func (impl *AppManagementServiceImpl) GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) 
(*bean.BuildDeploymentActivityDetailedResponse, error) { + impl.logger.Infow("getting build deployment activity detailed", "request", request) + + response := &bean.BuildDeploymentActivityDetailedResponse{ + ActivityKind: request.ActivityKind, + AggregationType: request.AggregationType, + } + + // Based on activityKind, fetch only the requested data + switch request.ActivityKind { + case bean.ActivityKindBuildTrigger: + buildTriggersTrend, err := impl.getAggregatedBuildStatusTrend(request.From, request.To, request.AggregationType) + if err != nil { + impl.logger.Errorw("error getting aggregated build status trend", "err", err) + return nil, err + } + response.BuildTriggersTrend = buildTriggersTrend + + case bean.ActivityKindDeploymentTrigger: + deploymentTriggersTrend, err := impl.getAggregatedDeploymentStatusTrend(request.From, request.To, request.AggregationType) + if err != nil { + impl.logger.Errorw("error getting aggregated deployment status trend", "err", err) + return nil, err + } + response.DeploymentTriggersTrend = deploymentTriggersTrend + + case bean.ActivityKindAvgBuildTime: + avgBuildTimeTrend, err := impl.getAggregatedBuildTimeTrend(request.From, request.To, request.AggregationType) + if err != nil { + impl.logger.Errorw("error getting aggregated build time trend", "err", err) + return nil, err + } + response.AvgBuildTimeTrend = avgBuildTimeTrend + + default: + return nil, fmt.Errorf("invalid activityKind: %s", request.ActivityKind) + } + + return response, nil +} + +func (impl *AppManagementServiceImpl) getProjectMetrics(ctx context.Context, from, to *time.Time) (*bean.ProjectMetrics, error) { + teams, err := impl.teamRepository.FindAllActiveInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting projects", "err", err) + return nil, err + } + + details := make([]bean.EntityMetadata, 0, len(teams)) + for _, team := range teams { + details = append(details, bean.EntityMetadata{ + Name: team.Name, + CreatedOn: team.CreatedOn, + }) 
+ } + + return &bean.ProjectMetrics{ + Total: len(teams), + Details: details, + }, nil +} + +func (impl *AppManagementServiceImpl) getAppMetrics(ctx context.Context, from, to *time.Time) (*bean.AppMetrics, error) { + // Get normal apps (CI/CD apps with appType = 0) with details in time range + devtronApps, err := impl.appRepository.FindAllActiveDevtronAppsInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting all devtron apps", "err", err) + return nil, err + } + + normalAppsDetails := make([]bean.EntityMetadata, 0, len(devtronApps)) + for _, app := range devtronApps { + normalAppsDetails = append(normalAppsDetails, bean.EntityMetadata{ + Name: app.AppName, + CreatedOn: app.CreatedOn, + }) + } + + // Get chart store apps (external apps with appType = 1) with details in time range + chartStoreApps, err := impl.appRepository.FindAllActiveChartStoreAppsInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting all chart store apps", "err", err) + return nil, err + } + + chartStoreAppsDetails := make([]bean.EntityMetadata, 0, len(chartStoreApps)) + for _, app := range chartStoreApps { + chartStoreAppsDetails = append(chartStoreAppsDetails, bean.EntityMetadata{ + Name: app.AppName, + CreatedOn: app.CreatedOn, + }) + } + + totalApps := len(devtronApps) + len(chartStoreApps) + + return &bean.AppMetrics{ + Total: totalApps, + YourApps: &bean.AppTypeMetrics{ + Total: len(devtronApps), + Details: normalAppsDetails, + }, + ThirdPartyApps: &bean.AppTypeMetrics{ + Total: len(chartStoreApps), + Details: chartStoreAppsDetails, + }, + }, nil +} + +func (impl *AppManagementServiceImpl) getEnvironmentMetrics(ctx context.Context, from, to *time.Time) (*bean.EnvironmentMetrics, error) { + environments, err := impl.environmentRepository.FindAllActiveInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error in getting environments", "err", err) + return nil, err + } + + details := make([]bean.EntityMetadata, 0, len(environments)) + 
for _, env := range environments { + details = append(details, bean.EntityMetadata{ + Name: env.Name, + CreatedOn: env.CreatedOn, + }) + } + + return &bean.EnvironmentMetrics{ + Total: len(environments), + Details: details, + }, nil +} + +func (impl *AppManagementServiceImpl) getBuildPipelineMetrics(ctx context.Context, from, to *time.Time) (*bean.BuildPipelineMetrics, error) { + // Get counts directly instead of fetching full structs + normalCiCount, err := impl.ciPipelineRepository.GetActiveCiPipelineCountInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error getting normal CI pipelines count", "err", err) + return nil, err + } + + externalCiCount, err := impl.ciPipelineRepository.GetActiveExternalCiPipelineCountInTimeRange(from, to) + if err != nil { + impl.logger.Errorw("error getting external CI pipelines count", "err", err) + return nil, err + } + + // For details, we still need to fetch some data, but only if details are actually needed + // For now, we'll provide empty details arrays since the main use case is just counts + var normalPipelines []bean.EntityMetadata + var externalPipelines []bean.EntityMetadata + + total := normalCiCount + externalCiCount + + return &bean.BuildPipelineMetrics{ + Total: total, + NormalCiPipelines: &bean.CiPipelineTypeMetrics{ + Total: normalCiCount, + Details: normalPipelines, // Empty for performance - can be populated if needed + }, + ExternalCiPipelines: &bean.CiPipelineTypeMetrics{ + Total: externalCiCount, + Details: externalPipelines, // Empty for performance - can be populated if needed + }, + }, nil +} + +func (impl *AppManagementServiceImpl) getCdPipelineMetrics(ctx context.Context, from, to *time.Time) (*bean.CdPipelineMetrics, error) { + // Get counts directly instead of fetching full structs + prodCount, err := impl.pipelineRepository.GetActivePipelineCountByEnvironmentTypeInTimeRange(true, from, to) + if err != nil { + impl.logger.Errorw("error getting production pipelines count", "err", err) + 
return nil, err + } + + nonProdCount, err := impl.pipelineRepository.GetActivePipelineCountByEnvironmentTypeInTimeRange(false, from, to) + if err != nil { + impl.logger.Errorw("error getting non-production pipelines count", "err", err) + return nil, err + } + + // For details, we provide empty arrays since the main use case is just counts + var prodDetails []bean.EntityMetadata + var nonProdDetails []bean.EntityMetadata + + total := prodCount + nonProdCount + + return &bean.CdPipelineMetrics{ + Total: total, + Production: &bean.PipelineEnvironmentMetrics{ + Total: prodCount, + Details: prodDetails, // Empty for performance - can be populated if needed + }, + NonProduction: &bean.PipelineEnvironmentMetrics{ + Total: nonProdCount, + Details: nonProdDetails, // Empty for performance - can be populated if needed + }, + }, nil +} + +func (impl *AppManagementServiceImpl) getEnvironmentTrendMetrics(ctx context.Context, from, to *time.Time, aggregationType constants.AggregationType) (*bean.EnvironmentTrendMetrics, error) { + // Get aggregated environment trend data + trendData, err := impl.environmentRepository.GetAggregatedEnvironmentTrendWithParams(from, to, aggregationType) + if err != nil { + impl.logger.Errorw("error getting environment trend data", "err", err) + return nil, err + } + + // Calculate total + total := 0 + for _, data := range trendData { + total += data.Count + } + + return &bean.EnvironmentTrendMetrics{ + Total: total, + Trend: trendData, + }, nil +} + +func (impl *AppManagementServiceImpl) getAggregatedBuildStatusTrend(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.BuildStatusDataPoint, error) { + workflows, err := impl.ciWorkflowRepository.GetCIBuildsForStatusTrend(from, to) + if err != nil { + impl.logger.Errorw("error fetching CI builds for status trend", "err", err) + return nil, err + } + + statusMap := make(map[string]map[string]int) // timeKey -> status -> count + + targetLocation := from.Location() + + for _, workflow 
:= range workflows { + // Convert UTC workflow.StartedOn to the target timezone for proper time bucketing + localStartedOn := workflow.StartedOn.In(targetLocation) + + var timeKey string + if aggregationType == constants.AggregateByHour { + timeKey = localStartedOn.Truncate(time.Hour).Format("2006-01-02T15:04:05Z") + } else if aggregationType == constants.AggregateByMonth { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else { + timeKey = localStartedOn.Truncate(24 * time.Hour).Format("2006-01-02T15:04:05Z") + } + + if statusMap[timeKey] == nil { + statusMap[timeKey] = make(map[string]int) + } + + // Categorize status + switch workflow.Status { + case "Succeeded": + statusMap[timeKey]["successful"]++ + case "Failed", "Error", "Cancelled", "CANCELLED": + statusMap[timeKey]["failed"]++ + } + statusMap[timeKey]["total"]++ + } + var trendData []bean.BuildStatusDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.BuildStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + // Generate monthly series + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.BuildStatusDataPoint{ + 
Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Generate daily series + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.BuildStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData, nil +} + +// Helper method to get aggregated deployment status trend with success/failed breakdown +func (impl *AppManagementServiceImpl) getAggregatedDeploymentStatusTrend(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.DeploymentStatusDataPoint, error) { + // Fetch all deployment workflows in the date range from repository + deployments, err := impl.cdWorkflowRepository.GetDeploymentWorkflowsForStatusTrend(from, to) + if err != nil { + impl.logger.Errorw("error fetching deployment workflows for status trend", "err", err) + return nil, err + } + + // Group deployments by time period and count statuses + statusMap := make(map[string]map[string]int) // timeKey -> status -> count + + // Get the timezone from the from/to parameters for proper time bucketing + targetLocation := from.Location() + + for _, deployment := range deployments { + // Convert UTC deployment.StartedOn to the target timezone for proper time bucketing + localStartedOn := deployment.StartedOn.In(targetLocation) + + var timeKey string + if aggregationType == constants.AggregateByHour { + timeKey = localStartedOn.Truncate(time.Hour).Format("2006-01-02T15:04:05Z") + } else if aggregationType == 
constants.AggregateByMonth { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else { + timeKey = localStartedOn.Truncate(24 * time.Hour).Format("2006-01-02T15:04:05Z") + } + + if statusMap[timeKey] == nil { + statusMap[timeKey] = make(map[string]int) + } + + // Categorize status + switch deployment.Status { + case "Succeeded": + statusMap[timeKey]["successful"]++ + case "Failed", "Error", "Cancelled", "CANCELLED": + statusMap[timeKey]["failed"]++ + } + statusMap[timeKey]["total"]++ + } + // Generate complete time series and populate with counts + var trendData []bean.DeploymentStatusDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.DeploymentStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + // Generate monthly series + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.DeploymentStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Generate daily series + current := time.Date(from.Year(), from.Month(), from.Day(), 
0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + counts := statusMap[timeKey] + + trendData = append(trendData, bean.DeploymentStatusDataPoint{ + Timestamp: current, + Total: counts["total"], + Successful: counts["successful"], + Failed: counts["failed"], + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData, nil +} + +func (impl *AppManagementServiceImpl) getAggregatedBuildTimeTrend(from, to *time.Time, aggregationType constants.AggregationType) ([]bean.BuildTimeDataPoint, error) { + workflows, err := impl.getSuccessfulBuildsFromStages(from, to) + if err != nil { + impl.logger.Errorw("error fetching successful workflows from stages", "err", err) + // Fallback to original method if new method fails + impl.logger.Infow("falling back to original method for build time trend calculation") + workflows, err = impl.ciWorkflowRepository.GetSuccessfulCIBuildsForBuildTime(from, to) + if err != nil { + impl.logger.Errorw("error fetching successful workflows (fallback)", "err", err) + return nil, err + } + } + + // Calculate build times and group by time period + buildTimeMap := make(map[string][]float64) + + // Get the timezone from the from/to parameters for proper time bucketing + targetLocation := from.Location() + + for _, workflow := range workflows { + // Calculate build time in minutes + duration := workflow.FinishedOn.Sub(workflow.StartedOn) + buildTimeMinutes := duration.Minutes() + + // Convert UTC workflow.StartedOn to the target timezone for proper time bucketing + localStartedOn := workflow.StartedOn.In(targetLocation) + + var timeKey string + if aggregationType == constants.AggregateByHour { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), localStartedOn.Hour(), 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else 
if aggregationType == constants.AggregateByMonth { + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else { + // For daily aggregation, get midnight of the local date + timeKey = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } + + buildTimeMap[timeKey] = append(buildTimeMap[timeKey], buildTimeMinutes) + } + // Generate complete time series and calculate averages + var trendData []bean.BuildTimeDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + avgBuildTime := 0.0 + + if buildTimes, exists := buildTimeMap[timeKey]; exists && len(buildTimes) > 0 { + sum := 0.0 + for _, bt := range buildTimes { + sum += bt + } + avgBuildTime = sum / float64(len(buildTimes)) + } + + trendData = append(trendData, bean.BuildTimeDataPoint{ + Timestamp: current, + AverageBuildTime: math.Round(avgBuildTime*100) / 100, // Round to 2 decimal places + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + // Generate monthly series + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + avgBuildTime := 0.0 + + if buildTimes, exists := buildTimeMap[timeKey]; exists && len(buildTimes) > 0 { + sum := 0.0 + for _, bt := range buildTimes { + sum += bt + } + avgBuildTime = sum / float64(len(buildTimes)) + } + + trendData = append(trendData, bean.BuildTimeDataPoint{ + Timestamp: 
current, + AverageBuildTime: math.Round(avgBuildTime*100) / 100, // Round to 2 decimal places + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Generate daily series + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + avgBuildTime := 0.0 + + if buildTimes, exists := buildTimeMap[timeKey]; exists && len(buildTimes) > 0 { + sum := 0.0 + for _, bt := range buildTimes { + sum += bt + } + avgBuildTime = sum / float64(len(buildTimes)) + } + + trendData = append(trendData, bean.BuildTimeDataPoint{ + Timestamp: current, + AverageBuildTime: math.Round(avgBuildTime*100) / 100, // Round to 2 decimal places + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData, nil +} + +// WorkflowMetrics holds aggregated workflow metrics for a time period +type WorkflowMetrics struct { + BuildPipelinesCount int // CI pipelines count + ProductionPipelinesCount int // Production deployment pipelines count + NonProdPipelinesCount int // Non-production deployment pipelines count + ExternalCICount int + ScanningEnabledPercentage float64 // Percentage of build pipelines where scanning is enabled + GitOpsComplianceCount int // Count of GitOps enabled pipelines + GitOpsCoveragePercentage float64 // Percentage of GitOps coverage +} + +func (impl *AppManagementServiceImpl) fetchAllWorkflowMetrics(ctx context.Context) (*WorkflowMetrics, error) { + buildPipelinesCount, err := impl.ciPipelineRepository.GetActiveCiPipelineCount() + if err != nil { + impl.logger.Errorw("error getting build pipelines count", "err", err) + return nil, err + } + + productionPipelinesCount, err := impl.pipelineRepository.GetPipelineCountByEnvironmentType(true) + if err != nil { + impl.logger.Errorw("error getting production pipelines count", 
"err", err) + return nil, err + } + + nonProdPipelinesCount, err := impl.pipelineRepository.GetPipelineCountByEnvironmentType(false) + if err != nil { + impl.logger.Errorw("error getting non-production pipelines count", "err", err) + return nil, err + } + + // Get scanning enabled count directly + scanningEnabledCount, err := impl.ciPipelineRepository.GetScanEnabledCiPipelineCount() + if err != nil { + impl.logger.Errorw("error getting scanning enabled count", "err", err) + return nil, err + } + + // Calculate scanning enabled percentage + var scanningEnabledPercentage float64 + if buildPipelinesCount > 0 { + scanningEnabledPercentage = (float64(scanningEnabledCount) / float64(buildPipelinesCount)) * 100 + } + + // Get external CI count directly + externalCICount, err := impl.ciPipelineRepository.GetActiveExternalCiPipelineCount() + if err != nil { + impl.logger.Errorw("error getting external CI count", "err", err) + return nil, err + } + + // Get GitOps compliance count directly + gitOpsComplianceCount, err := impl.deploymentConfigRepository.GetGitOpsEnabledPipelineCount() + if err != nil { + impl.logger.Errorw("error getting GitOps compliance count", "err", err) + return nil, err + } + + // Calculate GitOps coverage percentage + totalActivePipelines := productionPipelinesCount + nonProdPipelinesCount + var gitOpsCoveragePercentage float64 + if totalActivePipelines > 0 { + gitOpsCoveragePercentage = (float64(gitOpsComplianceCount) / float64(totalActivePipelines)) * 100 + } + + metrics := &WorkflowMetrics{ + BuildPipelinesCount: buildPipelinesCount, + ProductionPipelinesCount: productionPipelinesCount, + NonProdPipelinesCount: nonProdPipelinesCount, + ExternalCICount: externalCICount, + ScanningEnabledPercentage: scanningEnabledPercentage, + GitOpsComplianceCount: gitOpsComplianceCount, + GitOpsCoveragePercentage: gitOpsCoveragePercentage, + } + + return metrics, nil +} + +func (impl *AppManagementServiceImpl) buildWorkflowOverviewResponse(allTime *WorkflowMetrics) 
*bean.WorkflowOverviewResponse { + allTimeAllDeployments := allTime.ProductionPipelinesCount + allTime.NonProdPipelinesCount + + scanningMetric := &bean.AtAGlanceMetric{ + Percentage: allTime.ScanningEnabledPercentage, + } + + gitOpsMetric := &bean.AtAGlanceMetric{ + Total: allTime.GitOpsComplianceCount, // All-time count + Percentage: allTime.GitOpsCoveragePercentage, + } + + productionMetric := &bean.AtAGlanceMetric{ + Total: allTime.ProductionPipelinesCount, // All-time count + } + + return &bean.WorkflowOverviewResponse{ + BuildPipelines: &bean.AtAGlanceMetric{ + Total: allTime.BuildPipelinesCount, + }, + ExternalImageSource: &bean.AtAGlanceMetric{ + Total: allTime.ExternalCICount, + }, + AllDeploymentPipelines: &bean.AtAGlanceMetric{ + Total: allTimeAllDeployments, + }, + ScanningEnabledInWorkflows: scanningMetric, + GitOpsComplianceProdPipelines: gitOpsMetric, + ProductionPipelines: productionMetric, + } +} + +// calculateAverageBuildTime calculates the average build time for successful CI_BUILD pipelines +// in the given time range using accurate timing data from workflow_execution_stage table. +// This provides more precise build times by using the actual start_time and end_time from +// the Execution stage where workflow_type=CI, stage_name=Execution, status=SUCCEEDED, status_for=workflow. 
+func (impl *AppManagementServiceImpl) calculateAverageBuildTime(from, to *time.Time) (float64, error) { + // Fetch successful builds from workflow_execution_stage table for accurate timing + successfulBuilds, err := impl.getSuccessfulBuildsFromStages(from, to) + if err != nil { + impl.logger.Errorw("error getting successful builds for build time calculation from stages", "from", from, "to", to, "err", err) + // Fallback to original method if new method fails + impl.logger.Infow("falling back to original method for build time calculation") + successfulBuilds, err = impl.ciWorkflowRepository.GetSuccessfulCIBuildsForBuildTime(from, to) + if err != nil { + impl.logger.Errorw("error getting successful builds for build time calculation (fallback)", "from", from, "to", to, "err", err) + return 0, err + } + } + + // Return 0 if no successful builds found + if len(successfulBuilds) == 0 { + impl.logger.Infow("no successful builds found for average build time calculation", "from", from, "to", to) + return 0, nil + } + + // Calculate average build time in code for better precision and error handling + totalBuildTimeMinutes := float64(0) + validBuilds := 0 + + for _, build := range successfulBuilds { + // Ensure both timestamps are valid + if !build.StartedOn.IsZero() && !build.FinishedOn.IsZero() && build.FinishedOn.After(build.StartedOn) { + // Calculate duration in minutes with millisecond precision + duration := build.FinishedOn.Sub(build.StartedOn) + buildTimeMinutes := duration.Minutes() + + // Only include positive build times (sanity check) + if buildTimeMinutes > 0 { + totalBuildTimeMinutes += buildTimeMinutes + validBuilds++ + } + } + } + + // Calculate average if we have valid builds + avgBuildTime := float64(0) + if validBuilds > 0 { + avgBuildTime = totalBuildTimeMinutes / float64(validBuilds) + } + + return avgBuildTime, nil +} + +// getSuccessfulBuildsFromStages fetches successful CI builds from workflow_execution_stage table +// and converts them to 
WorkflowBuildTime format for compatibility with existing logic +func (impl *AppManagementServiceImpl) getSuccessfulBuildsFromStages(from, to *time.Time) ([]pipelineConfig.WorkflowBuildTime, error) { + stages, err := impl.workflowStageRepository.GetSuccessfulCIExecutionStages(from, to) + if err != nil { + return nil, err + } + + var workflows []pipelineConfig.WorkflowBuildTime + for _, stage := range stages { + startTime, err := impl.parseTimeString(stage.StartTime) + if err != nil { + impl.logger.Warnw("failed to parse start_time, skipping stage", "workflowId", stage.WorkflowId, "startTime", stage.StartTime, "err", err) + continue + } + + endTime, err := impl.parseTimeString(stage.EndTime) + if err != nil { + impl.logger.Warnw("failed to parse end_time, skipping stage", "workflowId", stage.WorkflowId, "endTime", stage.EndTime, "err", err) + continue + } + + if !endTime.After(startTime) { + impl.logger.Warnw("end_time is not after start_time, skipping stage", "workflowId", stage.WorkflowId, "startTime", startTime, "endTime", endTime) + continue + } + + workflows = append(workflows, pipelineConfig.WorkflowBuildTime{ + StartedOn: startTime, + FinishedOn: endTime, + }) + } + + return workflows, nil +} + +// parseTimeString parses time string that can be either ISO format or Unix timestamp +func (impl *AppManagementServiceImpl) parseTimeString(timeStr string) (time.Time, error) { + if timeStr == "" { + return time.Time{}, fmt.Errorf("empty time string") + } + + // Try parsing as ISO format first (e.g., "2024-01-15T10:30:45Z") + if t, err := time.Parse(time.RFC3339, timeStr); err == nil { + return t, nil + } + + // Try parsing as Unix timestamp in milliseconds + if timestamp, err := strconv.ParseInt(timeStr, 10, 64); err == nil { + return time.Unix(timestamp/1000, (timestamp%1000)*1000000), nil + } + + return time.Time{}, fmt.Errorf("unable to parse time string: %s", timeStr) +} diff --git a/pkg/overview/ClusterOverviewAdapter.go b/pkg/overview/ClusterOverviewAdapter.go 
new file mode 100644 index 0000000000..9c6856ebe2 --- /dev/null +++ b/pkg/overview/ClusterOverviewAdapter.go @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package overview + +import ( + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + "github.com/devtron-labs/devtron/pkg/overview/util" +) + +// NewClusterOverviewResponse creates and initializes a new ClusterOverviewResponse with default values +func NewClusterOverviewResponse(totalClusters int) *bean.ClusterOverviewResponse { + return &bean.ClusterOverviewResponse{ + TotalClusters: totalClusters, + TotalCpuCapacity: NewResourceCapacity("0", "cores"), + TotalMemoryCapacity: NewResourceCapacity("0", "Gi"), + ClusterStatusBreakdown: NewClusterStatusBreakdown(), + NodeSchedulingBreakdown: NewNodeSchedulingBreakdown(), + NodeErrorBreakdown: NewNodeErrorBreakdown(), + ClusterDistribution: NewClusterDistribution(), + ClusterCapacityDistribution: []bean.ClusterCapacityDistribution{}, + NodeDistribution: NewNodeDistribution(), + } +} + +// NewEmptyClusterOverviewResponse creates an empty ClusterOverviewResponse with zero values +func NewEmptyClusterOverviewResponse() *bean.ClusterOverviewResponse { + return NewClusterOverviewResponse(0) +} + +// NewResourceCapacity creates a new ResourceCapacity with the given value and unit +func NewResourceCapacity(value, unit string) *bean.ResourceCapacity { + return &bean.ResourceCapacity{ + Value: value, + Unit: unit, + } +} + +// NewClusterStatusBreakdown creates a new ClusterStatusBreakdown with zero values +func NewClusterStatusBreakdown() *bean.ClusterStatusBreakdown { + return &bean.ClusterStatusBreakdown{ + Healthy: 0, + Unhealthy: 0, + ConnectionFailed: 0, + } +} + +// NewNodeSchedulingBreakdown creates a new NodeSchedulingBreakdown with initialized slices +func NewNodeSchedulingBreakdown() *bean.NodeSchedulingBreakdown { + return &bean.NodeSchedulingBreakdown{ + Schedulable: 0, + Unschedulable: 
0, + Total: 0, + SchedulableNodes: []bean.NodeSchedulingDetail{}, + UnschedulableNodes: []bean.NodeSchedulingDetail{}, + } +} + +// NewNodeErrorBreakdown creates a new NodeErrorBreakdown with initialized error counts map +func NewNodeErrorBreakdown() *bean.NodeErrorBreakdown { + errorCounts := make(map[string]int) + // Initialize all error types with zero counts + errorCounts[constants.NodeErrorNetworkUnavailable] = 0 + errorCounts[constants.NodeErrorMemoryPressure] = 0 + errorCounts[constants.NodeErrorDiskPressure] = 0 + errorCounts[constants.NodeErrorPIDPressure] = 0 + errorCounts[constants.NodeErrorKubeletNotReady] = 0 + errorCounts[constants.NodeErrorOthers] = 0 + + return &bean.NodeErrorBreakdown{ + ErrorCounts: errorCounts, + Total: 0, + NodeErrors: []bean.NodeErrorDetail{}, + } +} + +// NewClusterDistribution creates a new ClusterDistribution with empty slices +func NewClusterDistribution() *bean.ClusterDistribution { + return &bean.ClusterDistribution{ + ByProvider: []bean.ProviderDistribution{}, + ByVersion: []bean.VersionDistribution{}, + } +} + +// NewNodeDistribution creates a new NodeDistribution with empty slices +func NewNodeDistribution() *bean.NodeDistribution { + return &bean.NodeDistribution{ + ByClusters: []bean.ClusterNodeCount{}, + ByAutoscaler: []bean.AutoscalerNodeCount{}, + } +} + +// NewClusterCapacityDistribution creates a new ClusterCapacityDistribution entry +func NewClusterCapacityDistribution(clusterID int, clusterName string, serverVersion string, cpuCapacity float64, cpuUtil, cpuRequest, cpuLimit float64, memCapacity float64, memUtil, memRequest, memLimit float64) bean.ClusterCapacityDistribution { + return bean.ClusterCapacityDistribution{ + ClusterID: clusterID, + ClusterName: clusterName, + ServerVersion: serverVersion, + CPU: NewClusterResourceMetric(cpuCapacity, cpuUtil, cpuRequest, cpuLimit), + Memory: NewClusterResourceMetric(memCapacity, memUtil, memRequest, memLimit), + } +} + +// NewClusterResourceMetric creates a new 
ClusterResourceMetric with capacity and percentages rounded to 2 decimal places +func NewClusterResourceMetric(capacity float64, utilPercent, requestPercent, limitPercent float64) *bean.ClusterResourceMetric { + return &bean.ClusterResourceMetric{ + Capacity: util.RoundToTwoDecimals(capacity), + UtilizationPercent: util.RoundToTwoDecimals(utilPercent), + RequestsPercent: util.RoundToTwoDecimals(requestPercent), + LimitsPercent: util.RoundToTwoDecimals(limitPercent), + } +} + +// NewClusterNodeCount creates a new ClusterNodeCount entry +func NewClusterNodeCount(clusterID int, clusterName string, nodeCount int) bean.ClusterNodeCount { + return bean.ClusterNodeCount{ + ClusterID: clusterID, + ClusterName: clusterName, + NodeCount: nodeCount, + } +} + +// NewNodeErrorDetail creates a new NodeErrorDetail entry +func NewNodeErrorDetail(nodeName, clusterName string, clusterID int, errors []string, nodeStatus string) bean.NodeErrorDetail { + return bean.NodeErrorDetail{ + NodeName: nodeName, + ClusterName: clusterName, + ClusterID: clusterID, + Errors: errors, + NodeStatus: nodeStatus, + } +} + +// NewNodeSchedulingDetail creates a new NodeSchedulingDetail entry +func NewNodeSchedulingDetail(nodeName, clusterName string, clusterID int, schedulable bool) bean.NodeSchedulingDetail { + return bean.NodeSchedulingDetail{ + NodeName: nodeName, + ClusterName: clusterName, + ClusterID: clusterID, + Schedulable: schedulable, + } +} + +// NewProviderDistribution creates a new ProviderDistribution entry +func NewProviderDistribution(provider string, count int) bean.ProviderDistribution { + return bean.ProviderDistribution{ + Provider: provider, + Count: count, + } +} + +// NewVersionDistribution creates a new VersionDistribution entry +func NewVersionDistribution(version string, count int) bean.VersionDistribution { + return bean.VersionDistribution{ + Version: version, + Count: count, + } +} + +// NewClusterOverviewNodeDetailedResponse creates a new 
ClusterOverviewNodeDetailedResponse +func NewClusterOverviewNodeDetailedResponse(totalCount int, nodeList []bean.ClusterOverviewNodeDetailedItem) *bean.ClusterOverviewNodeDetailedResponse { + return &bean.ClusterOverviewNodeDetailedResponse{ + TotalCount: totalCount, + NodeList: nodeList, + } +} + +// NewEmptyClusterOverviewNodeDetailedResponse creates an empty response for when cache is not found +func NewEmptyClusterOverviewNodeDetailedResponse() *bean.ClusterOverviewNodeDetailedResponse { + return &bean.ClusterOverviewNodeDetailedResponse{ + TotalCount: 0, + NodeList: []bean.ClusterOverviewNodeDetailedItem{}, + } +} + +// NewClusterUpgradeOverviewResponse creates a new ClusterUpgradeOverviewResponse +func NewClusterUpgradeOverviewResponse(canUpgrade bool, latestVersion string, clusterList []bean.ClusterUpgradeDetails) *bean.ClusterUpgradeOverviewResponse { + return &bean.ClusterUpgradeOverviewResponse{ + CanCurrentUserUpgrade: canUpgrade, + LatestVersion: latestVersion, + ClusterList: clusterList, + } +} diff --git a/pkg/overview/ClusterOverviewService.go b/pkg/overview/ClusterOverviewService.go new file mode 100644 index 0000000000..f0b9667dd7 --- /dev/null +++ b/pkg/overview/ClusterOverviewService.go @@ -0,0 +1,1189 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package overview + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/blang/semver/v4" + "github.com/devtron-labs/devtron/pkg/asyncProvider" + "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" + clusterService "github.com/devtron-labs/devtron/pkg/cluster" + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" + "github.com/devtron-labs/devtron/pkg/k8s" + capacityService "github.com/devtron-labs/devtron/pkg/k8s/capacity" + capacityBean "github.com/devtron-labs/devtron/pkg/k8s/capacity/bean" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/cache" + "github.com/devtron-labs/devtron/pkg/overview/config" + "github.com/devtron-labs/devtron/pkg/overview/constants" + overviewUtil "github.com/devtron-labs/devtron/pkg/overview/util" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +// ClusterOverviewService provides cluster management overview functionality +type ClusterOverviewService interface { + GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error) + GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) + RefreshClusterOverviewCache(ctx context.Context) error +} + +// ClusterOverviewServiceImpl implements ClusterOverviewService +type ClusterOverviewServiceImpl struct { + logger *zap.SugaredLogger + clusterService clusterService.ClusterService + k8sCapacityService capacityService.K8sCapacityService + clusterCacheService cache.ClusterCacheService + k8sCommonService k8s.K8sCommonService + enforcer casbin.Enforcer + config *config.ClusterOverviewConfig +} + +func NewClusterOverviewServiceImpl( + logger *zap.SugaredLogger, + clusterService clusterService.ClusterService, + k8sCapacityService capacityService.K8sCapacityService, + clusterCacheService cache.ClusterCacheService, + k8sCommonService k8s.K8sCommonService, + enforcer 
casbin.Enforcer, + cfg *config.ClusterOverviewConfig, +) *ClusterOverviewServiceImpl { + service := &ClusterOverviewServiceImpl{ + logger: logger, + clusterService: clusterService, + k8sCapacityService: k8sCapacityService, + clusterCacheService: clusterCacheService, + k8sCommonService: k8sCommonService, + enforcer: enforcer, + config: cfg, + } + + // Start background refresh worker if enabled + if cfg.CacheEnabled && cfg.BackgroundRefreshEnabled { + ctx := context.Background() + service.StartBackgroundRefresh(ctx) + logger.Info("Background cache refresh worker started") + } else { + logger.Info("Background cache refresh worker disabled") + } + + return service +} + +// StartBackgroundRefresh starts the background cache refresh worker +func (impl *ClusterOverviewServiceImpl) StartBackgroundRefresh(ctx context.Context) { + if !impl.config.CacheEnabled || !impl.config.BackgroundRefreshEnabled { + impl.logger.Info("Background refresh disabled") + return + } + + impl.logger.Infow("Starting background cache refresh worker", + "refreshInterval", impl.config.GetRefreshInterval(), + "maxParallelClusters", impl.config.MaxParallelClusters) + + // Initial cache population + go func() { + impl.logger.Info("Performing initial cache population") + if err := impl.refreshCache(ctx); err != nil { + impl.logger.Errorw("initial cache population failed", "err", err) + } + }() + + // Start periodic refresh + ticker := time.NewTicker(impl.config.GetRefreshInterval()) + go func() { + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + impl.logger.Info("background refresh worker stopped") + return + case <-ticker.C: + impl.logger.Info("background refresh triggered") + if err := impl.refreshCache(ctx); err != nil { + impl.logger.Errorw("background cache refresh failed", "err", err) + } + } + } + }() +} + +// RefreshClusterOverviewCache manually triggers a cache refresh +func (impl *ClusterOverviewServiceImpl) RefreshClusterOverviewCache(ctx context.Context) error { + return 
impl.refreshCache(ctx) +} + +// refreshCache fetches fresh data and updates cache +func (impl *ClusterOverviewServiceImpl) refreshCache(ctx context.Context) error { + // Prevent concurrent refreshes + if impl.clusterCacheService.IsRefreshing() { + impl.logger.Debug("Cache refresh already in progress, skipping") + return nil + } + + impl.clusterCacheService.SetRefreshing(true) + defer impl.clusterCacheService.SetRefreshing(false) + + startTime := time.Now() + impl.logger.Debug("Starting cache refresh") + + // Fetch clusters + clusters, err := impl.clusterService.FindActiveClustersExcludingVirtual() + if err != nil { + impl.logger.Errorw("error fetching clusters for cache refresh", "err", err) + return err + } + + // Fetch cluster data in parallel + response, err := impl.fetchClusterDataParallel(ctx, clusters) + if err != nil { + impl.logger.Errorw("error fetching cluster data for cache refresh", "err", err) + return err + } + + // Update cache + if err := impl.clusterCacheService.SetClusterOverview(response); err != nil { + impl.logger.Errorw("error updating cache", "err", err) + return err + } + + duration := time.Since(startTime) + impl.logger.Infow("Cache refresh completed", "duration", duration, "clusterCount", len(clusters), "totalClusters", response.TotalClusters) + + return nil +} + +// fetchClusterDataParallel fetches cluster data using worker pool for parallel execution +func (impl *ClusterOverviewServiceImpl) fetchClusterDataParallel(ctx context.Context, clusters []clusterBean.ClusterBean) (*bean.ClusterOverviewResponse, error) { + if len(clusters) == 0 { + return NewEmptyClusterOverviewResponse(), nil + } + + // Separate clusters into valid and error clusters + validClusters := make([]clusterBean.ClusterBean, 0, len(clusters)) + errorClusters := make([]clusterBean.ClusterBean, 0) + + for _, cluster := range clusters { + if len(cluster.ErrorInConnecting) > 0 { + impl.logger.Debugw("Skipping cluster with connection error", "clusterId", cluster.Id, 
"clusterName", cluster.ClusterName, "error", cluster.ErrorInConnecting) + errorClusters = append(errorClusters, cluster) + continue + } + validClusters = append(validClusters, cluster) + } + + if len(errorClusters) > 0 { + impl.logger.Infow("Skipped clusters with connection errors", "skippedCount", len(errorClusters), "validCount", len(validClusters), "totalCount", len(clusters)) + } + + // Create placeholder capacity details for clusters with errors + errorClusterDetails := make([]*capacityBean.ClusterCapacityDetail, 0, len(errorClusters)) + for _, cluster := range errorClusters { + errorClusterDetails = append(errorClusterDetails, &capacityBean.ClusterCapacityDetail{ + Id: cluster.Id, + Name: cluster.ClusterName, + ErrorInConnection: cluster.ErrorInConnecting, + Status: capacityBean.ClusterStatusConnectionFailed, + IsVirtualCluster: cluster.IsVirtualCluster, + IsProd: cluster.IsProd, + }) + } + + // Create combined cluster bean list (all clusters) + allClusterPointers := make([]*clusterBean.ClusterBean, len(clusters)) + for i := range clusters { + allClusterPointers[i] = &clusters[i] + } + // If all clusters have connection errors, return response with error clusters only + if len(validClusters) == 0 { + impl.logger.Warn("All clusters have connection errors, returning response with error clusters only") + return impl.aggregateClusterCapacityDetails(ctx, errorClusterDetails, allClusterPointers), nil + } + + // Create worker pool with configured parallelism + wp := asyncProvider.NewBatchWorker[*capacityBean.ClusterCapacityDetail]( + impl.config.MaxParallelClusters, + impl.logger, + ) + wp.InitializeResponse() + + // Convert to pointer slice (only valid clusters) + clusterPointers := make([]*clusterBean.ClusterBean, len(validClusters)) + for i := range validClusters { + clusterPointers[i] = &validClusters[i] + } + + // Submit cluster fetch tasks to worker pool + for _, cluster := range clusterPointers { + clusterCopy := cluster // Capture for closure + 
wp.Submit(func() (*capacityBean.ClusterCapacityDetail, error) { + impl.logger.Debugw("Fetching cluster capacity", "clusterId", clusterCopy.Id, "clusterName", clusterCopy.ClusterName) + + // Fetch cluster capacity detail + detail, err := impl.k8sCapacityService.GetClusterCapacityDetail(ctx, clusterCopy, false) + if err != nil { + impl.logger.Warnw("error fetching cluster capacity, skipping", "clusterId", clusterCopy.Id, "clusterName", clusterCopy.ClusterName, "err", err) + // Populate error for this cluster + detail = &capacityBean.ClusterCapacityDetail{ + ErrorInConnection: err.Error(), + Status: capacityBean.ClusterStatusConnectionFailed, + } + // Continue to next cluster, returning error will stop the worker pool from further processing + } + + // Set cluster metadata + detail.Id = clusterCopy.Id + detail.Name = clusterCopy.ClusterName + detail.IsVirtualCluster = clusterCopy.IsVirtualCluster + detail.IsProd = clusterCopy.IsProd + + return detail, nil + }) + } + + // Wait for all tasks to complete + if err := wp.StopWait(); err != nil { + impl.logger.Errorw("error waiting for worker pool tasks", "err", err) + // Continue anyway to return partial results + } + + // Get results from worker pool + results := wp.GetResponse() + + // Combine successful results with error cluster placeholders + allClusterDetails := make([]*capacityBean.ClusterCapacityDetail, 0, len(results)+len(errorClusterDetails)) + allClusterDetails = append(allClusterDetails, results...) + allClusterDetails = append(allClusterDetails, errorClusterDetails...) 
+ + // Log summary + successCount := len(results) + failedCount := len(validClusters) - successCount + if failedCount > 0 || len(errorClusters) > 0 { + impl.logger.Infow("Cluster fetch summary", "successCount", successCount, "failedCount", failedCount, "skippedCount", len(errorClusters), "totalClusters", len(clusters)) + } + + // Aggregate all results (including error clusters) into response + return impl.aggregateClusterCapacityDetails(ctx, allClusterDetails, allClusterPointers), nil +} + +// aggregateClusterCapacityDetails aggregates cluster capacity details into overview response +func (impl *ClusterOverviewServiceImpl) aggregateClusterCapacityDetails(ctx context.Context, details []*capacityBean.ClusterCapacityDetail, clusterBeans []*clusterBean.ClusterBean) *bean.ClusterOverviewResponse { + return impl.buildClusterOverviewResponse(ctx, details, clusterBeans) +} + +// GetClusterOverview retrieves comprehensive cluster management overview +// Returns from cache if enabled and available, otherwise fetches directly +func (impl *ClusterOverviewServiceImpl) GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error) { + // If cache is disabled, fetch directly + if !impl.config.CacheEnabled { + impl.logger.Debug("Cache disabled, fetching cluster overview directly") + return impl.fetchClusterOverviewDirect(ctx) + } + + // Try to get from cache + if cachedData, found := impl.clusterCacheService.GetClusterOverview(); found { + return impl.handleCacheHit(cachedData) + } + + // Cache miss - fallback to direct fetch + return impl.handleCacheMiss(ctx) +} + +// handleCacheHit processes a cache hit and returns the cached data +func (impl *ClusterOverviewServiceImpl) handleCacheHit(cachedData *bean.ClusterOverviewResponse) (*bean.ClusterOverviewResponse, error) { + cacheAge := impl.clusterCacheService.GetCacheAge() + + // Warn if cache is stale but return it anyway + if cacheAge > impl.config.GetMaxStaleDataDuration() { + impl.logger.Warnw("cache is stale but 
returning anyway", + "cacheAge", cacheAge, + "maxStaleAge", impl.config.GetMaxStaleDataDuration()) + } + + impl.logger.Infow("returning cluster overview from cache", "cacheAge", cacheAge) + return cachedData, nil +} + +// handleCacheMiss handles cache miss by attempting to refresh cache or fetching directly +func (impl *ClusterOverviewServiceImpl) handleCacheMiss(ctx context.Context) (*bean.ClusterOverviewResponse, error) { + impl.logger.Warn("cache miss - background refresh may not be running, attempting fallback") + + // Try to refresh cache synchronously + if err := impl.refreshCache(ctx); err != nil { + impl.logger.Errorw("error refreshing cache synchronously, falling back to direct fetch", "err", err) + // Fallback to direct fetch without caching + return impl.fetchClusterOverviewDirect(ctx) + } + + // Try to get from cache after refresh + if cachedData, found := impl.clusterCacheService.GetClusterOverview(); found { + impl.logger.Info("successfully populated cache, returning data") + return cachedData, nil + } + + // Cache refresh succeeded but data not in cache (shouldn't happen) + impl.logger.Warn("cache refresh succeeded but data not found in cache, falling back to direct fetch") + return impl.fetchClusterOverviewDirect(ctx) +} + +// fetchClusterOverviewDirect fetches cluster overview without using cache +func (impl *ClusterOverviewServiceImpl) fetchClusterOverviewDirect(ctx context.Context) (*bean.ClusterOverviewResponse, error) { + impl.logger.Debug("Fetching cluster overview directly (bypassing cache)") + + // Fetch active clusters + clusters, err := impl.clusterService.FindActiveClustersExcludingVirtual() + if err != nil { + impl.logger.Errorw("error fetching clusters", "err", err) + return nil, fmt.Errorf("failed to fetch clusters: %w", err) + } + + // Fetch cluster data in parallel + response, err := impl.fetchClusterDataParallel(ctx, clusters) + if err != nil { + impl.logger.Errorw("error fetching cluster data", "err", err) + return nil, 
fmt.Errorf("failed to fetch cluster data: %w", err) + } + + impl.logger.Infow("successfully fetched cluster overview directly", "totalClusters", response.TotalClusters) + return response, nil +} + +func (impl *ClusterOverviewServiceImpl) buildClusterOverviewResponse(ctx context.Context, clusterCapacityDetails []*capacityBean.ClusterCapacityDetail, clusterBeans []*clusterBean.ClusterBean) *bean.ClusterOverviewResponse { + // Initialize response using adapter + response := NewClusterOverviewResponse(len(clusterCapacityDetails)) + + // Tracking variables for aggregation + var totalCpuCapacityCores, totalMemoryCapacityGi float64 + providerCounts := make(map[string]int) + versionCounts := make(map[string]int) + autoscalerCounts := make(map[string]int) + autoscalerNodeDetailsMap := make(map[string][]bean.AutoscalerNodeDetail) + + // Create a map of cluster ID to cluster bean for quick lookup + clusterBeanMap := make(map[int]*clusterBean.ClusterBean) + for _, cb := range clusterBeans { + clusterBeanMap[cb.Id] = cb + } + + // Process each cluster to extract and aggregate data + for _, cluster := range clusterCapacityDetails { + impl.processClusterStatus(cluster, response) + + if len(cluster.ErrorInConnection) == 0 { + metrics := impl.processClusterCapacity(cluster, &totalCpuCapacityCores, &totalMemoryCapacityGi) + impl.addClusterCapacityDistribution(cluster, response, metrics) + + // Get the corresponding cluster bean for autoscaler detection + if clusterBeanForAutoscaler, exists := clusterBeanMap[cluster.Id]; exists { + impl.processNodeDistributionAndAutoscaler(ctx, cluster, clusterBeanForAutoscaler, response, autoscalerCounts, autoscalerNodeDetailsMap) + } else { + impl.logger.Warnw("cluster bean not found for autoscaler detection", + "clusterId", cluster.Id, + "clusterName", cluster.Name) + } + impl.aggregateClusterMetadata(cluster, providerCounts, versionCounts) + } + impl.processNodeDetails(cluster, response) + impl.aggregateNodeErrorCounts(cluster, response) + 
impl.addRawClusterCapacityDetails(cluster, response) + } + + impl.finalizeResponse(response, totalCpuCapacityCores, totalMemoryCapacityGi, providerCounts, versionCounts, autoscalerNodeDetailsMap) + + return response +} + +// processClusterStatus updates cluster status breakdown based on cluster health +func (impl *ClusterOverviewServiceImpl) processClusterStatus(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + if cluster.Status == capacityBean.ClusterStatusHealthy { + response.ClusterStatusBreakdown.Healthy++ + } else if cluster.Status == capacityBean.ClusterStatusConnectionFailed { + response.ClusterStatusBreakdown.ConnectionFailed++ + } else { + response.ClusterStatusBreakdown.Unhealthy++ + } +} + +// clusterCapacityMetrics holds parsed capacity metrics for a cluster +type clusterCapacityMetrics struct { + cpuCapacity float64 + cpuUtil float64 + cpuRequest float64 + cpuLimit float64 + memoryCapacity float64 + memoryUtil float64 + memoryRequest float64 + memoryLimit float64 +} + +// processClusterCapacity extracts and aggregates CPU and memory metrics from cluster +func (impl *ClusterOverviewServiceImpl) processClusterCapacity(cluster *capacityBean.ClusterCapacityDetail, totalCpu, totalMemory *float64) clusterCapacityMetrics { + metrics := clusterCapacityMetrics{} + + // Process CPU metrics + if cluster.Cpu != nil { + cpuCapacityFloat, err := strconv.ParseFloat(cluster.Cpu.Capacity, 64) + if err != nil { + impl.logger.Errorw("error in parsing cpu capacity", "err", err, "capacity", cluster.Cpu.Capacity) + cpuCapacityFloat = 0 + } + metrics.cpuCapacity = cpuCapacityFloat + *totalCpu += cpuCapacityFloat + + metrics.cpuUtil, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Cpu.UsagePercentage, "%"), 64) + metrics.cpuRequest, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Cpu.RequestPercentage, "%"), 64) + metrics.cpuLimit, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Cpu.LimitPercentage, "%"), 64) + } + + // Process 
Memory metrics + if cluster.Memory != nil { + memoryCapacityStr := strings.TrimSuffix(cluster.Memory.Capacity, "Gi") + memoryCapacityFloat, err := strconv.ParseFloat(memoryCapacityStr, 64) + if err != nil { + impl.logger.Errorw("error in parsing memory capacity", "err", err, "capacity", cluster.Memory.Capacity) + memoryCapacityFloat = 0 + } + metrics.memoryCapacity = memoryCapacityFloat + *totalMemory += memoryCapacityFloat + + metrics.memoryUtil, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Memory.UsagePercentage, "%"), 64) + metrics.memoryRequest, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Memory.RequestPercentage, "%"), 64) + metrics.memoryLimit, _ = strconv.ParseFloat(strings.TrimSuffix(cluster.Memory.LimitPercentage, "%"), 64) + } + + return metrics +} + +// addClusterCapacityDistribution adds cluster capacity distribution entry to response +func (impl *ClusterOverviewServiceImpl) addClusterCapacityDistribution(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse, metrics clusterCapacityMetrics) { + response.ClusterCapacityDistribution = append(response.ClusterCapacityDistribution, + NewClusterCapacityDistribution( + cluster.Id, + cluster.Name, + cluster.ServerVersion, + metrics.cpuCapacity, + metrics.cpuUtil, + metrics.cpuRequest, + metrics.cpuLimit, + metrics.memoryCapacity, + metrics.memoryUtil, + metrics.memoryRequest, + metrics.memoryLimit, + )) +} + +// addClusterCapacityDistribution adds cluster capacity distribution entry to response +func (impl *ClusterOverviewServiceImpl) addRawClusterCapacityDetails(capacity *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + response.RawClusterCapacityDetails = append(response.RawClusterCapacityDetails, capacity) +} + +// processNodeDistributionAndAutoscaler adds cluster node count to distribution and aggregates autoscaler counts across all clusters +func (impl *ClusterOverviewServiceImpl) processNodeDistributionAndAutoscaler(ctx 
context.Context, cluster *capacityBean.ClusterCapacityDetail, clusterBean *clusterBean.ClusterBean, response *bean.ClusterOverviewResponse, autoscalerCounts map[string]int, autoscalerNodeDetailsMap map[string][]bean.AutoscalerNodeDetail) { + // Add cluster node count to distribution + response.NodeDistribution.ByClusters = append(response.NodeDistribution.ByClusters, + NewClusterNodeCount(cluster.Id, cluster.Name, cluster.NodeCount)) + + // Fetch node details with labels to determine autoscaler types + nodeCapacityDetails, err := impl.k8sCapacityService.GetNodeCapacityDetailsListByCluster(ctx, clusterBean) + if err != nil { + impl.logger.Errorw("error fetching node capacity details for autoscaler detection, skipping autoscaler aggregation", + "clusterId", cluster.Id, + "clusterName", cluster.Name, + "err", err) + return + } + + // Process each node to determine autoscaler type and aggregate globally + for _, nodeDetail := range nodeCapacityDetails { + autoscalerType := overviewUtil.DetermineAutoscalerTypeFromLabelArray(nodeDetail.Labels) + + // Add to global autoscaler counts + autoscalerCounts[autoscalerType]++ + + // Collect node details for this autoscaler type globally across all clusters + autoscalerNodeDetailsMap[autoscalerType] = append(autoscalerNodeDetailsMap[autoscalerType], bean.AutoscalerNodeDetail{ + NodeName: nodeDetail.Name, + ClusterName: cluster.Name, + ClusterID: cluster.Id, + ManagedBy: autoscalerType, + }) + } +} + +// processNodeDetails processes node details to populate scheduling and error information +func (impl *ClusterOverviewServiceImpl) processNodeDetails(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + if cluster.NodeDetails == nil { + return + } + + // Build node errors map for quick lookup + nodeErrorsMap := impl.buildNodeErrorsMap(cluster.NodeErrors) + + // Process each node + for _, nodeDetail := range cluster.NodeDetails { + if errorTypes, hasErrors := nodeErrorsMap[nodeDetail.NodeName]; 
hasErrors { + impl.addNodeWithErrors(nodeDetail, cluster, errorTypes, response) + } else { + impl.addSchedulableNode(nodeDetail, cluster, response) + } + } +} + +// buildNodeErrorsMap creates a map of node names to their error types +func (impl *ClusterOverviewServiceImpl) buildNodeErrorsMap(nodeErrors map[corev1.NodeConditionType][]string) map[string][]string { + nodeErrorsMap := make(map[string][]string) + for conditionType, nodeNames := range nodeErrors { + for _, nodeName := range nodeNames { + errorType := impl.getHumanReadableErrorType(conditionType) + nodeErrorsMap[nodeName] = append(nodeErrorsMap[nodeName], errorType) + } + } + return nodeErrorsMap +} + +// addNodeWithErrors adds a node with errors to error breakdown and unschedulable nodes +func (impl *ClusterOverviewServiceImpl) addNodeWithErrors(nodeDetail capacityBean.NodeDetails, cluster *capacityBean.ClusterCapacityDetail, errorTypes []string, response *bean.ClusterOverviewResponse) { + nodeStatus := "Not Ready" + if len(errorTypes) == 0 { + nodeStatus = "Ready" + } + + // Store errors as array directly, no need to convert to comma-separated string + response.NodeErrorBreakdown.NodeErrors = append(response.NodeErrorBreakdown.NodeErrors, + NewNodeErrorDetail(nodeDetail.NodeName, cluster.Name, cluster.Id, errorTypes, nodeStatus)) + + response.NodeSchedulingBreakdown.UnschedulableNodes = append(response.NodeSchedulingBreakdown.UnschedulableNodes, + NewNodeSchedulingDetail(nodeDetail.NodeName, cluster.Name, cluster.Id, false)) + response.NodeSchedulingBreakdown.Unschedulable++ +} + +// addSchedulableNode adds a schedulable node to the scheduling breakdown +func (impl *ClusterOverviewServiceImpl) addSchedulableNode(nodeDetail capacityBean.NodeDetails, cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + response.NodeSchedulingBreakdown.SchedulableNodes = append(response.NodeSchedulingBreakdown.SchedulableNodes, + NewNodeSchedulingDetail(nodeDetail.NodeName, cluster.Name, 
cluster.Id, true)) + response.NodeSchedulingBreakdown.Schedulable++ +} + +// aggregateNodeErrorCounts aggregates node error counts by error type +func (impl *ClusterOverviewServiceImpl) aggregateNodeErrorCounts(cluster *capacityBean.ClusterCapacityDetail, response *bean.ClusterOverviewResponse) { + for conditionType, nodeNames := range cluster.NodeErrors { + errorCount := len(nodeNames) + + switch conditionType { + case constants.NodeConditionNetworkUnavailable: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorNetworkUnavailable] += errorCount + case constants.NodeConditionMemoryPressure: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorMemoryPressure] += errorCount + case constants.NodeConditionDiskPressure: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorDiskPressure] += errorCount + case constants.NodeConditionPIDPressure: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorPIDPressure] += errorCount + case constants.NodeConditionReady: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorKubeletNotReady] += errorCount + default: + response.NodeErrorBreakdown.ErrorCounts[constants.NodeErrorOthers] += errorCount + } + } +} + +// aggregateClusterMetadata aggregates cluster metadata (provider and version) +func (impl *ClusterOverviewServiceImpl) aggregateClusterMetadata(cluster *capacityBean.ClusterCapacityDetail, providerCounts, versionCounts map[string]int) { + provider := impl.determineProviderFromCluster(cluster) + providerCounts[provider]++ + + version := impl.extractMajorMinorVersion(cluster.ServerVersion) + versionCounts[version]++ +} + +// finalizeResponse sets total values and builds distribution arrays +func (impl *ClusterOverviewServiceImpl) finalizeResponse(response *bean.ClusterOverviewResponse, totalCpu, totalMemory float64, providerCounts, versionCounts map[string]int, autoscalerNodeDetailsMap map[string][]bean.AutoscalerNodeDetail) { + // Set total capacity values with 2 decimal precision + 
response.TotalCpuCapacity.Value = fmt.Sprintf("%.2f", overviewUtil.RoundToTwoDecimals(totalCpu)) + response.TotalMemoryCapacity.Value = fmt.Sprintf("%.2f", overviewUtil.RoundToTwoDecimals(totalMemory)) + + // Build provider distribution + for provider, count := range providerCounts { + response.ClusterDistribution.ByProvider = append(response.ClusterDistribution.ByProvider, + NewProviderDistribution(provider, count)) + } + + // Build version distribution + for version, count := range versionCounts { + response.ClusterDistribution.ByVersion = append(response.ClusterDistribution.ByVersion, + NewVersionDistribution(version, count)) + } + + // Build autoscaler distribution - aggregated across all clusters + for autoscalerType, nodeDetails := range autoscalerNodeDetailsMap { + response.NodeDistribution.ByAutoscaler = append(response.NodeDistribution.ByAutoscaler, bean.AutoscalerNodeCount{ + AutoscalerType: autoscalerType, + NodeCount: len(nodeDetails), + NodeDetails: nodeDetails, + }) + } + + // Set total counts for breakdowns + response.NodeSchedulingBreakdown.Total = response.NodeSchedulingBreakdown.Schedulable + response.NodeSchedulingBreakdown.Unschedulable + response.NodeErrorBreakdown.Total = len(response.NodeErrorBreakdown.NodeErrors) +} + +// getHumanReadableErrorType converts Kubernetes node condition types to human-readable error types +func (impl *ClusterOverviewServiceImpl) getHumanReadableErrorType(conditionType corev1.NodeConditionType) string { + switch conditionType { + case corev1.NodeNetworkUnavailable: + return constants.NodeErrorNetworkUnavailable + case corev1.NodeMemoryPressure: + return constants.NodeErrorMemoryPressure + case corev1.NodeDiskPressure: + return constants.NodeErrorDiskPressure + case corev1.NodePIDPressure: + return constants.NodeErrorPIDPressure + case corev1.NodeReady: + return constants.NodeErrorKubeletNotReady + default: + return constants.NodeErrorOthers + } +} + +func (impl *ClusterOverviewServiceImpl) 
determineProviderFromCluster(cluster *capacityBean.ClusterCapacityDetail) string { + if cluster.NodeDetails != nil && len(cluster.NodeDetails) > 0 { + for _, nodeDetail := range cluster.NodeDetails { + provider := impl.determineProviderFromNodeName(nodeDetail.NodeName) + if provider != constants.ProviderUnknown { + return provider + } + } + } + + return constants.ProviderUnknown +} + +// determineProviderFromNodeName determines cloud provider from node name patterns +func (impl *ClusterOverviewServiceImpl) determineProviderFromNodeName(nodeName string) string { + nodeNameLower := strings.ToLower(nodeName) + + // Google Cloud Platform (GKE) patterns + // Examples: gke-shared-cluster-ci-nodes-818049c0-6knz, gke-cluster-default-pool-12345678-abcd + if strings.HasPrefix(nodeNameLower, constants.NodePrefixGKE) { + return constants.ProviderGCP + } + + // Azure (AKS) patterns + // Examples: aks-newpool-37469834-vmss000000, aks-nodepool1-12345678-vmss000001 + if strings.HasPrefix(nodeNameLower, constants.NodePrefixAKS) { + return constants.ProviderAzure + } + + // AWS (EKS) patterns + // Examples: ip-192-168-1-100.us-west-2.compute.internal, ip-10-0-1-50.ec2.internal + if strings.Contains(nodeNameLower, constants.NodePatternAWSComputeInternal) || strings.Contains(nodeNameLower, constants.NodePatternAWSEC2Internal) { + return constants.ProviderAWS + } + // EKS managed node groups: eks-nodegroup-12345678-abcd + if strings.HasPrefix(nodeNameLower, constants.NodePrefixEKS) { + return constants.ProviderAWS + } + + // Additional AWS patterns: nodes with AWS region patterns + for _, pattern := range constants.AWSRegionPatterns { + if strings.Contains(nodeNameLower, pattern) { + return constants.ProviderAWS + } + } + + // Oracle Cloud (OKE) patterns + // Examples: oke-cywiqripuyg-nsgagklgnst-st2qczvnmba-0, oke-c1a2b3c4d5e-n6f7g8h9i0j-s1k2l3m4n5o-1 + if strings.HasPrefix(nodeNameLower, constants.NodePrefixOKE) { + return constants.ProviderOracle + } + + // DigitalOcean (DOKS) 
patterns + // Examples: pool--, nodes often contain "digitalocean" in metadata + if strings.Contains(nodeNameLower, constants.NodePatternDigitalOcean) { + return constants.ProviderDigitalOcean + } + + // IBM Cloud (IKS) patterns + // Examples: kube--, 10.x.x.x.kube- + if strings.Contains(nodeNameLower, constants.NodePatternIBMKube) { + return constants.ProviderIBM + } + + // Alibaba Cloud (ACK) patterns + // Examples: aliyun.com-59176-test, cn-hangzhou.i-bp12h6biv9bg24lmdc2o + // Nodes often contain "aliyun" in their names or "cn-" prefix for Chinese regions + if strings.Contains(nodeNameLower, constants.NodePatternAliyun) { + return constants.ProviderAlibaba + } + // Alibaba Cloud region patterns (cn-hangzhou, cn-beijing, etc.) + if strings.HasPrefix(nodeNameLower, constants.NodePatternAlibabaRegion) { + return constants.ProviderAlibaba + } + + // Additional Azure patterns: nodes with Azure region indicators + if strings.Contains(nodeNameLower, constants.NodePatternAzureVMSS) || strings.Contains(nodeNameLower, constants.NodePatternAzureScaleSets) { + return constants.ProviderAzure + } + + // Additional GCP patterns + if strings.Contains(nodeNameLower, constants.NodePatternGCP) || strings.Contains(nodeNameLower, constants.NodePatternGoogle) { + return constants.ProviderGCP + } + + return constants.ProviderUnknown +} + +// extractMajorMinorVersion extracts major.minor version from Kubernetes version string using semver +// Examples: "v1.28.3" -> "1.28", "1.29.0-gke.1234" -> "1.29", "v1.30" -> "1.30" +func (impl *ClusterOverviewServiceImpl) extractMajorMinorVersion(version string) string { + if version == "" { + return constants.VersionUnknown + } + + cleanVersion := version + if strings.HasPrefix(version, "v") { + cleanVersion = version[1:] + } + + // Parse using semver library (same as ClusterUpgradeService) + semverVersion, err := semver.Parse(cleanVersion) + if err != nil { + impl.logger.Warnw("failed to parse version using semver, falling back to string 
parsing", "version", version, "err", err) + // Fallback to simple string parsing if semver fails + parts := strings.Split(cleanVersion, ".") + if len(parts) >= 2 { + return fmt.Sprintf("%s.%s", parts[0], parts[1]) + } + return constants.VersionUnknown + } + + // Use the same approach as ClusterUpgradeService: TrimToMajorAndMinorVersion + return fmt.Sprintf("%d.%d", semverVersion.Major, semverVersion.Minor) +} + +// GetClusterOverviewDetailedNodeInfo retrieves paginated and filtered node details from cache based on node view group type +func (impl *ClusterOverviewServiceImpl) GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + clusterOverview, found := impl.clusterCacheService.GetClusterOverview() + if !found { + impl.logger.Warnw("cluster overview cache not found, returning empty response") + return NewEmptyClusterOverviewNodeDetailedResponse(), nil + } + + switch request.GroupBy { + case bean.NodeViewGroupTypeNodeErrors: + return impl.getNodeErrorsDetail(clusterOverview, request) + case bean.NodeViewGroupTypeNodeScheduling: + return impl.getNodeSchedulingDetail(clusterOverview, request) + case bean.NodeViewGroupTypeAutoscaler: + return impl.getAutoscalerDetail(clusterOverview, request) + default: + return nil, fmt.Errorf("invalid node view group type: %s", request.GroupBy) + } +} + +// getNodeErrorsDetail retrieves paginated and filtered node error details +func (impl *ClusterOverviewServiceImpl) getNodeErrorsDetail(clusterOverview *bean.ClusterOverviewResponse, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + // Get all node errors from cache + allNodes := clusterOverview.NodeErrorBreakdown.NodeErrors + + // Apply error type filter if specified + if request.ErrorType != "" { + allNodes = impl.filterNodeErrorsByType(allNodes, request.ErrorType) + } + + // Apply search filter + filteredNodes := 
impl.filterNodeErrors(allNodes, request.SearchKey) + + // Apply sorting + sortedNodes := impl.sortNodeErrors(filteredNodes, request.SortBy, request.SortOrder) + + // Apply pagination + totalCount := len(sortedNodes) + paginatedNodes := impl.paginateNodeErrors(sortedNodes, request.Offset, request.Limit) + + // Convert to unified response format + unifiedNodes := make([]bean.ClusterOverviewNodeDetailedItem, len(paginatedNodes)) + for i, node := range paginatedNodes { + unifiedNodes[i] = bean.ClusterOverviewNodeDetailedItem{ + NodeName: node.NodeName, + ClusterName: node.ClusterName, + ClusterID: node.ClusterID, + NodeErrors: node.Errors, + NodeStatus: node.NodeStatus, + } + } + + return NewClusterOverviewNodeDetailedResponse(totalCount, unifiedNodes), nil +} + +// getNodeSchedulingDetail retrieves paginated and filtered node scheduling details +func (impl *ClusterOverviewServiceImpl) getNodeSchedulingDetail(clusterOverview *bean.ClusterOverviewResponse, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + // Filter by schedulable type if specified + var allNodes []bean.NodeSchedulingDetail + if request.SchedulableType != "" { + switch request.SchedulableType { + case constants.SchedulableTypeSchedulable: + allNodes = clusterOverview.NodeSchedulingBreakdown.SchedulableNodes + case constants.SchedulableTypeUnschedulable: + allNodes = clusterOverview.NodeSchedulingBreakdown.UnschedulableNodes + default: + // Invalid schedulableType, return all nodes + allNodes = append(clusterOverview.NodeSchedulingBreakdown.SchedulableNodes, clusterOverview.NodeSchedulingBreakdown.UnschedulableNodes...) + } + } else { + // Combine schedulable and unschedulable nodes if no filter specified + allNodes = append(clusterOverview.NodeSchedulingBreakdown.SchedulableNodes, clusterOverview.NodeSchedulingBreakdown.UnschedulableNodes...) 
+ } + + // Apply search filter + filteredNodes := impl.filterNodeScheduling(allNodes, request.SearchKey) + + // Apply sorting + sortedNodes := impl.sortNodeScheduling(filteredNodes, request.SortBy, request.SortOrder) + + // Apply pagination + totalCount := len(sortedNodes) + paginatedNodes := impl.paginateNodeScheduling(sortedNodes, request.Offset, request.Limit) + + unifiedNodes := make([]bean.ClusterOverviewNodeDetailedItem, len(paginatedNodes)) + for i, node := range paginatedNodes { + unifiedNodes[i] = bean.ClusterOverviewNodeDetailedItem{ + NodeName: node.NodeName, + ClusterName: node.ClusterName, + ClusterID: node.ClusterID, + Schedulable: node.Schedulable, + } + } + + return NewClusterOverviewNodeDetailedResponse(totalCount, unifiedNodes), nil +} + +// getAutoscalerDetail retrieves paginated and filtered autoscaler node details +func (impl *ClusterOverviewServiceImpl) getAutoscalerDetail(clusterOverview *bean.ClusterOverviewResponse, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) { + // Filter by autoscaler type if specified + var allNodes []bean.AutoscalerNodeDetail + if request.AutoscalerType != "" { + // Get nodes only for the specified autoscaler type + for _, autoscalerGroup := range clusterOverview.NodeDistribution.ByAutoscaler { + if autoscalerGroup.AutoscalerType == request.AutoscalerType { + allNodes = append(allNodes, autoscalerGroup.NodeDetails...) + break + } + } + } else { + // Combine all autoscaler nodes from all autoscaler types + for _, autoscalerGroup := range clusterOverview.NodeDistribution.ByAutoscaler { + allNodes = append(allNodes, autoscalerGroup.NodeDetails...) 
		}
	}

	// Apply search filter
	filteredNodes := impl.filterAutoscalerNodes(allNodes, request.SearchKey)

	// Apply sorting
	sortedNodes := impl.sortAutoscalerNodes(filteredNodes, request.SortBy, request.SortOrder)

	// Apply pagination
	totalCount := len(sortedNodes)
	paginatedNodes := impl.paginateAutoscalerNodes(sortedNodes, request.Offset, request.Limit)

	unifiedNodes := make([]bean.ClusterOverviewNodeDetailedItem, len(paginatedNodes))
	for i, node := range paginatedNodes {
		unifiedNodes[i] = bean.ClusterOverviewNodeDetailedItem{
			NodeName:       node.NodeName,
			ClusterName:    node.ClusterName,
			ClusterID:      node.ClusterID,
			AutoscalerType: node.ManagedBy,
		}
	}

	return NewClusterOverviewNodeDetailedResponse(totalCount, unifiedNodes), nil
}

// Helper methods for node error filtering, sorting, and pagination

// filterNodeErrorsByType filters nodes by specific error type.
// The match is a case-insensitive exact comparison against each entry of the
// node's Errors slice; an empty errorType returns the input unchanged.
func (impl *ClusterOverviewServiceImpl) filterNodeErrorsByType(nodes []bean.NodeErrorDetail, errorType string) []bean.NodeErrorDetail {
	if errorType == "" {
		return nodes
	}

	var filtered []bean.NodeErrorDetail
	errorTypeLower := strings.ToLower(errorType)

	for _, node := range nodes {
		// Check if the node's error array contains the specified error type
		for _, err := range node.Errors {
			if strings.ToLower(err) == errorTypeLower {
				filtered = append(filtered, node)
				break // Found the error, no need to check other errors for this node
			}
		}
	}

	return filtered
}

// filterNodeErrors keeps nodes whose name, cluster name, status, or any error
// string contains searchKey (case-insensitive substring match). An empty
// searchKey returns the input slice unchanged (same backing array).
func (impl *ClusterOverviewServiceImpl) filterNodeErrors(nodes []bean.NodeErrorDetail, searchKey string) []bean.NodeErrorDetail {
	if searchKey == "" {
		return nodes
	}

	searchKey = strings.ToLower(searchKey)
	var filtered []bean.NodeErrorDetail

	for _, node := range nodes {
		// Check if search key matches node name, cluster name, or node status
		if strings.Contains(strings.ToLower(node.NodeName), searchKey) ||
			strings.Contains(strings.ToLower(node.ClusterName), searchKey) ||
			strings.Contains(strings.ToLower(node.NodeStatus), searchKey) {
			filtered = append(filtered, node)
			continue
		}

		// Check if search key matches any error in the errors array
		for _, err := range node.Errors {
			if strings.Contains(strings.ToLower(err), searchKey) {
				filtered = append(filtered, node)
				break // Found match, no need to check other errors
			}
		}
	}

	return filtered
}

// sortNodeErrors returns a sorted copy of nodes ordered by sortBy/sortOrder.
// Unknown sortBy values fall back to sorting by node name; the input slice is
// never mutated. NOTE(review): sort.Slice is not stable, so the relative order
// of equal keys can vary between calls — use sort.SliceStable if deterministic
// pagination across ties matters.
func (impl *ClusterOverviewServiceImpl) sortNodeErrors(nodes []bean.NodeErrorDetail, sortBy, sortOrder string) []bean.NodeErrorDetail {
	if sortBy == "" {
		sortBy = constants.SortFieldNodeName // default sort
	}
	if sortOrder == "" {
		sortOrder = constants.SortOrderAsc // default order
	}

	// Create a copy to avoid modifying the original
	sorted := make([]bean.NodeErrorDetail, len(nodes))
	copy(sorted, nodes)

	sort.Slice(sorted, func(i, j int) bool {
		var compareResult int

		switch sortBy {
		case constants.SortFieldNodeName:
			compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName)
		case constants.SortFieldClusterName:
			compareResult = strings.Compare(sorted[i].ClusterName, sorted[j].ClusterName)
		case constants.SortFieldNodeErrors:
			// Sort by joining the error array for comparison
			errorsI := strings.Join(sorted[i].Errors, ", ")
			errorsJ := strings.Join(sorted[j].Errors, ", ")
			compareResult = strings.Compare(errorsI, errorsJ)
		case constants.SortFieldNodeStatus:
			compareResult = strings.Compare(sorted[i].NodeStatus, sorted[j].NodeStatus)
		default:
			compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName)
		}

		if sortOrder == constants.SortOrderDesc {
			return compareResult > 0
		}
		return compareResult < 0
	})

	return sorted
}

// paginateNodeErrors returns the [offset, offset+limit) window of nodes.
// A non-positive limit defaults to 10, a negative offset is clamped to 0, and
// an out-of-range offset yields an empty (non-nil) slice. The returned slice
// shares the input's backing array.
func (impl *ClusterOverviewServiceImpl) paginateNodeErrors(nodes []bean.NodeErrorDetail, offset, limit int) []bean.NodeErrorDetail {
	if limit <= 0 {
		limit = 10 // default limit
	}

	start := offset
	if start < 0 {
		start = 0
	}
	if start >= len(nodes) {
		return []bean.NodeErrorDetail{}
	}

	end := start + limit
	if end > len(nodes) {
		end = len(nodes)
	}

	return nodes[start:end]
}

// Helper methods for node scheduling filtering, sorting, and pagination

// filterNodeScheduling keeps nodes whose name or cluster name contains
// searchKey, or whose schedulability label ("schedulable"/"unschedulable")
// contains it. Note that "schedulable" is a substring of "unschedulable", so
// searching for "schedulable" matches both groups.
func (impl *ClusterOverviewServiceImpl) filterNodeScheduling(nodes []bean.NodeSchedulingDetail, searchKey string) []bean.NodeSchedulingDetail {
	if searchKey == "" {
		return nodes
	}

	searchKey = strings.ToLower(searchKey)
	var filtered []bean.NodeSchedulingDetail

	for _, node := range nodes {
		schedulableStr := "schedulable"
		if !node.Schedulable {
			schedulableStr = "unschedulable"
		}

		if strings.Contains(strings.ToLower(node.NodeName), searchKey) ||
			strings.Contains(strings.ToLower(node.ClusterName), searchKey) ||
			strings.Contains(schedulableStr, searchKey) {
			filtered = append(filtered, node)
		}
	}

	return filtered
}

// sortNodeScheduling returns a sorted copy of nodes ordered by sortBy/sortOrder
// (default: ascending by node name). For the Schedulable field, false sorts
// before true in ascending order. The input slice is never mutated.
// NOTE(review): sort.Slice is unstable — tie order may vary between calls.
func (impl *ClusterOverviewServiceImpl) sortNodeScheduling(nodes []bean.NodeSchedulingDetail, sortBy, sortOrder string) []bean.NodeSchedulingDetail {
	if sortBy == "" {
		sortBy = constants.SortFieldNodeName // default sort
	}
	if sortOrder == "" {
		sortOrder = constants.SortOrderAsc // default order
	}

	// Create a copy to avoid modifying the original
	sorted := make([]bean.NodeSchedulingDetail, len(nodes))
	copy(sorted, nodes)

	sort.Slice(sorted, func(i, j int) bool {
		var compareResult int

		switch sortBy {
		case constants.SortFieldNodeName:
			compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName)
		case constants.SortFieldClusterName:
			compareResult = strings.Compare(sorted[i].ClusterName, sorted[j].ClusterName)
		case constants.SortFieldSchedulable:
			// For boolean comparison: false < true
			if sorted[i].Schedulable == sorted[j].Schedulable {
				compareResult = 0
			} else if sorted[i].Schedulable {
				compareResult = 1
			} else {
				compareResult = -1
			}
		default:
			compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName)
		}

		if sortOrder ==
constants.SortOrderDesc {
			return compareResult > 0
		}
		return compareResult < 0
	})

	return sorted
}

// paginateNodeScheduling returns the [offset, offset+limit) window of nodes.
// A non-positive limit defaults to 10, a negative offset is clamped to 0, and
// an out-of-range offset yields an empty (non-nil) slice.
func (impl *ClusterOverviewServiceImpl) paginateNodeScheduling(nodes []bean.NodeSchedulingDetail, offset, limit int) []bean.NodeSchedulingDetail {
	if limit <= 0 {
		limit = 10 // default limit
	}

	start := offset
	if start < 0 {
		start = 0
	}
	if start >= len(nodes) {
		return []bean.NodeSchedulingDetail{}
	}

	end := start + limit
	if end > len(nodes) {
		end = len(nodes)
	}

	return nodes[start:end]
}

// Helper methods for autoscaler node filtering, sorting, and pagination

// filterAutoscalerNodes keeps nodes whose name, cluster name, or ManagedBy
// (autoscaler type) contains searchKey, case-insensitively. An empty searchKey
// returns the input unchanged.
func (impl *ClusterOverviewServiceImpl) filterAutoscalerNodes(nodes []bean.AutoscalerNodeDetail, searchKey string) []bean.AutoscalerNodeDetail {
	if searchKey == "" {
		return nodes
	}

	searchKey = strings.ToLower(searchKey)
	var filtered []bean.AutoscalerNodeDetail

	for _, node := range nodes {
		if strings.Contains(strings.ToLower(node.NodeName), searchKey) ||
			strings.Contains(strings.ToLower(node.ClusterName), searchKey) ||
			strings.Contains(strings.ToLower(node.ManagedBy), searchKey) {
			filtered = append(filtered, node)
		}
	}

	return filtered
}

// sortAutoscalerNodes returns a sorted copy of nodes ordered by sortBy/sortOrder
// (default: ascending by node name). The input slice is never mutated.
// NOTE(review): sort.Slice is unstable — tie order may vary between calls.
func (impl *ClusterOverviewServiceImpl) sortAutoscalerNodes(nodes []bean.AutoscalerNodeDetail, sortBy, sortOrder string) []bean.AutoscalerNodeDetail {
	if sortBy == "" {
		sortBy = constants.SortFieldNodeName // default sort
	}
	if sortOrder == "" {
		sortOrder = constants.SortOrderAsc // default order
	}

	// Create a copy to avoid modifying the original
	sorted := make([]bean.AutoscalerNodeDetail, len(nodes))
	copy(sorted, nodes)

	sort.Slice(sorted, func(i, j int) bool {
		var compareResult int

		switch sortBy {
		case constants.SortFieldNodeName:
			compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName)
		case constants.SortFieldClusterName:
			compareResult = strings.Compare(sorted[i].ClusterName, sorted[j].ClusterName)
		case constants.SortFieldAutoscalerType:
			compareResult = strings.Compare(sorted[i].ManagedBy, sorted[j].ManagedBy)
		default:
			compareResult = strings.Compare(sorted[i].NodeName, sorted[j].NodeName)
		}

		if sortOrder == constants.SortOrderDesc {
			return compareResult > 0
		}
		return compareResult < 0
	})

	return sorted
}

// paginateAutoscalerNodes returns the [offset, offset+limit) window of nodes.
// A non-positive limit defaults to 10, a negative offset is clamped to 0, and
// an out-of-range offset yields an empty (non-nil) slice.
func (impl *ClusterOverviewServiceImpl) paginateAutoscalerNodes(nodes []bean.AutoscalerNodeDetail, offset, limit int) []bean.AutoscalerNodeDetail {
	if limit <= 0 {
		limit = 10 // default limit
	}

	start := offset
	if start < 0 {
		start = 0
	}
	if start >= len(nodes) {
		return []bean.AutoscalerNodeDetail{}
	}

	end := start + limit
	if end > len(nodes) {
		end = len(nodes)
	}

	return nodes[start:end]
}
diff --git a/pkg/overview/DoraMetricsService.go b/pkg/overview/DoraMetricsService.go
new file mode 100644
index 0000000000..cfae44b40a
--- /dev/null
+++ b/pkg/overview/DoraMetricsService.go
@@ -0,0 +1,294 @@
/*
 * Copyright (c) 2024. Devtron Inc.
 */

package overview

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/devtron-labs/devtron/client/lens"
	"github.com/devtron-labs/devtron/internal/sql/repository/app"
	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig"
	"github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
	"github.com/devtron-labs/devtron/pkg/overview/bean"
	"github.com/devtron-labs/devtron/pkg/overview/util"
	"go.uber.org/zap"
)

// DoraMetricsService computes DORA metrics (deployment frequency, lead time,
// change failure rate, time to recovery) for production pipelines via Lens.
type DoraMetricsService interface {
	GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error)
}

// DoraMetricsServiceImpl is the default DoraMetricsService backed by the Lens
// client and pipeline/app/environment repositories.
type DoraMetricsServiceImpl struct {
	logger                *zap.SugaredLogger
	lensClient            lens.LensClient
	appRepository         app.AppRepository
	pipelineRepository    pipelineConfig.PipelineRepository
	environmentRepository repository.EnvironmentRepository
	cdWorkflowRepository  pipelineConfig.CdWorkflowRepository
}

// NewDoraMetricsServiceImpl wires a DoraMetricsServiceImpl with its dependencies.
func NewDoraMetricsServiceImpl(
	logger
*zap.SugaredLogger,
	lensClient lens.LensClient,
	appRepository app.AppRepository,
	pipelineRepository pipelineConfig.PipelineRepository,
	environmentRepository repository.EnvironmentRepository,
	cdWorkflowRepository pipelineConfig.CdWorkflowRepository,
) *DoraMetricsServiceImpl {
	return &DoraMetricsServiceImpl{
		logger:                logger,
		lensClient:            lensClient,
		appRepository:         appRepository,
		pipelineRepository:    pipelineRepository,
		environmentRepository: environmentRepository,
		cdWorkflowRepository:  cdWorkflowRepository,
	}
}

// GetDoraMetrics returns aggregated DORA metrics for every production pipeline
// that has deployment history in the requested time range. Returns an empty
// response (not an error) when no such pipelines exist.
func (impl *DoraMetricsServiceImpl) GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error) {
	impl.logger.Infow("getting DORA metrics", "request", request)

	// Get all apps with production pipelines using optimized query for current period
	appEnvPairs, err := impl.getAppEnvironmentPairsOptimized(ctx, request.TimeRangeRequest.From, request.TimeRangeRequest.To)
	if err != nil {
		impl.logger.Errorw("error getting app-environment pairs", "err", err)
		return nil, err
	}

	if len(appEnvPairs) == 0 {
		impl.logger.Warnw("no production pipelines found with deployment history")
		return bean.NewDoraMetricsResponse(), nil
	}

	// Calculate all DORA metrics using single Lens API call per app-env pair
	allMetrics, err := impl.calculateAllDoraMetricsFromLens(ctx, request, appEnvPairs)
	if err != nil {
		impl.logger.Errorw("error calculating DORA metrics from lens", "err", err)
		return nil, err
	}

	response := &bean.DoraMetricsResponse{
		ProdDeploymentPipelineCount: len(appEnvPairs),
		DeploymentFrequency:         allMetrics.DeploymentFrequency,
		MeanLeadTime:                allMetrics.MeanLeadTime,
		ChangeFailureRate:           allMetrics.ChangeFailureRate,
		MeanTimeToRecovery:          allMetrics.MeanTimeToRecovery,
	}

	return response, nil
}

// getAppEnvironmentPairsOptimized is an optimized version that uses a single query
// to fetch production pipelines with deployment history within the specified time range.
// This method only fetches the minimal data needed (AppId and EnvId) for better performance.
func (impl *DoraMetricsServiceImpl) getAppEnvironmentPairsOptimized(ctx context.Context, from, to *time.Time) ([]lens.AppEnvPair, error) {
	prodPipelines, err := impl.pipelineRepository.FindProdPipelinesWithAppDataAndDeploymentHistoryInTimeRange(from, to)
	if err != nil {
		impl.logger.Errorw("error getting production pipelines with deployment history in time range", "from", from, "to", to, "err", err)
		return nil, err
	}

	if len(prodPipelines) == 0 {
		impl.logger.Warnw("no production pipelines found with deployment history in time range", "from", from, "to", to)
		return []lens.AppEnvPair{}, nil
	}

	// Convert to our simplified structure, only keeping the IDs
	var appEnvPairs []lens.AppEnvPair
	for _, pipeline := range prodPipelines {
		appEnvPairs = append(appEnvPairs, lens.AppEnvPair{
			AppId: pipeline.AppId,
			EnvId: pipeline.EnvironmentId,
		})
	}

	return appEnvPairs, nil
}

// calculateAllDoraMetricsFromLens calculates all DORA metrics using single Lens API call per app-env pair.
// Failures while gathering the previous period are deliberately non-fatal: the
// current-period metrics are returned without period-over-period comparison.
func (impl *DoraMetricsServiceImpl) calculateAllDoraMetricsFromLens(ctx context.Context, request *bean.DoraMetricsRequest, appEnvPairs []lens.AppEnvPair) (*bean.AllDoraMetrics, error) {
	currentMetricsData, err := impl.fetchAllMetricsFromLens(ctx, appEnvPairs, request.TimeRangeRequest.From, request.TimeRangeRequest.To)
	if err != nil {
		impl.logger.Errorw("error fetching current period metrics from lens", "err", err)
		return nil, err
	}

	// Get app-env pairs for previous period
	previousAppEnvPairs, err := impl.getAppEnvironmentPairsOptimized(ctx, request.PrevFrom, request.PrevTo)
	if err != nil {
		impl.logger.Errorw("error getting app-environment pairs for previous period", "err", err)
		// Continue without comparison if we can't get previous period data
		return impl.createAllDoraMetricsWithoutComparison(currentMetricsData), nil
	}

	var previousMetricsData map[string]*bean.LensMetrics
	if len(previousAppEnvPairs) > 0 {
		previousMetricsData, err = impl.fetchAllMetricsFromLens(ctx, previousAppEnvPairs, request.PrevFrom, request.PrevTo)
		if err != nil {
			impl.logger.Errorw("error fetching previous period metrics from lens", "err", err)
			// Continue without comparison if we can't get previous period data
			return impl.createAllDoraMetricsWithoutComparison(currentMetricsData), nil
		}
	}

	// Calculate all metrics with comparison
	// NOTE(review): when previousAppEnvPairs is empty, previousMetricsData is nil
	// here, so comparisons are computed against zero-valued previous averages —
	// confirm this is the intended semantics.
	return impl.createAllDoraMetricsWithComparison(currentMetricsData, previousMetricsData), nil
}

// fetchAllMetricsFromLens fetches all DORA metrics from Lens using single bulk API call.
// The returned map is keyed by "<appId>-<envId>".
func (impl *DoraMetricsServiceImpl) fetchAllMetricsFromLens(ctx context.Context, bulkAppEnvPairs []lens.AppEnvPair, from, to *time.Time) (map[string]*bean.LensMetrics, error) {
	metricsData := make(map[string]*bean.LensMetrics)

	if len(bulkAppEnvPairs) == 0 {
		return metricsData, nil
	}

	bulkRequest := &lens.BulkMetricRequest{
		AppEnvPairs: bulkAppEnvPairs,
		From:        from,
		To:          to,
	}

	// Make single bulk call to get all metrics for all app-env pairs
	lensResp, resCode, err := impl.lensClient.GetBulkAppMetrics(bulkRequest)
	if err != nil {
		impl.logger.Errorw("error calling lens bulk API for all metrics", "err", err)
		return nil, err
	}

	if !resCode.IsSuccess() {
		impl.logger.Errorw("lens bulk API returned error", "statusCode", *resCode)
		return nil, fmt.Errorf("lens bulk API returned error with status code: %d", *resCode)
	}

	// Parse the new bulk response - now it's directly an array of DoraMetrics
	var doraMetricsArray []*lens.DoraMetrics
	if err := json.Unmarshal(lensResp.Result, &doraMetricsArray); err != nil {
		impl.logger.Errorw("error unmarshaling lens bulk response", "err", err)
		return nil, err
	}

	// Process results and map them to app-env keys
	for _, doraMetric := range doraMetricsArray {
		if doraMetric == nil {
			impl.logger.Warnw("nil dora metric in response")
			continue
		}

		// Convert lens.DoraMetrics to LensMetrics (our internal struct)
		// Map the new field names to the old structure for backward compatibility
		lensMetrics := &bean.LensMetrics{
			AverageCycleTime:    doraMetric.DeploymentFrequency,    // DeploymentFrequency maps to AverageCycleTime
			AverageLeadTime:     doraMetric.MeanLeadTimeForChanges, // MeanLeadTimeForChanges maps to AverageLeadTime
			ChangeFailureRate:   doraMetric.ChangeFailureRate,      // ChangeFailureRate maps directly
			AverageRecoveryTime: doraMetric.MeanTimeToRecovery,     // MeanTimeToRecovery maps to AverageRecoveryTime
		}

		// Store metrics with app-env ID key
		key := fmt.Sprintf("%d-%d", doraMetric.AppId, doraMetric.EnvId)
		metricsData[key] = lensMetrics
	}

	return metricsData, nil
}

// createAllDoraMetricsWithoutComparison creates all DORA metrics without comparison data.
// Each metric is the plain average across app-env pairs, with a zero comparison
// value; performance levels are computed per metric from the current data.
func (impl *DoraMetricsServiceImpl) createAllDoraMetricsWithoutComparison(currentMetricsData map[string]*bean.LensMetrics) *bean.AllDoraMetrics {
	// Extract all metric values from current data
	var deploymentFreqValues, leadTimeValues, changeFailureValues, recoveryTimeValues []float64

	for _, metrics := range currentMetricsData {
		deploymentFreqValues = append(deploymentFreqValues, metrics.AverageCycleTime)
		leadTimeValues = append(leadTimeValues, metrics.AverageLeadTime)
		changeFailureValues = append(changeFailureValues, metrics.ChangeFailureRate)
		recoveryTimeValues = append(recoveryTimeValues, metrics.AverageRecoveryTime)
	}

	// Calculate averages
	deploymentFreqAvg := util.CalculateAverageFromValues(deploymentFreqValues)
	leadTimeAvg := util.CalculateAverageFromValues(leadTimeValues)
	changeFailureAvg := util.CalculateAverageFromValues(changeFailureValues)
	recoveryTimeAvg := util.CalculateAverageFromValues(recoveryTimeValues)

	// Calculate performance levels for each metric separately
	deploymentFreqPerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryDeploymentFrequency)
	leadTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanLeadTime)
	changeFailurePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryChangeFailureRate)
	recoveryTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanTimeToRecovery)

	deploymentFrequency := util.CreateDoraMetricObject(deploymentFreqAvg, bean.MetricValueUnitNumber, 0, bean.ComparisonUnitPercentage, deploymentFreqPerformanceLevels)
	meanLeadTime := util.CreateDoraMetricObject(leadTimeAvg, bean.MetricValueUnitMinutes, 0, bean.ComparisonUnitMinutes, leadTimePerformanceLevels)
	changeFailureRate := util.CreateDoraMetricObject(changeFailureAvg, bean.MetricValueUnitPercentage, 0, bean.ComparisonUnitPercentage, changeFailurePerformanceLevels)
	meanTimeToRecovery := util.CreateDoraMetricObject(recoveryTimeAvg, bean.MetricValueUnitMinutes, 0, bean.ComparisonUnitMinutes, recoveryTimePerformanceLevels)

	allDoraMetrics := bean.NewAllDoraMetrics().
		WithDeploymentFrequency(deploymentFrequency).
		WithMeanLeadTime(meanLeadTime).
		WithChangeFailureRate(changeFailureRate).
		WithMeanTimeToRecovery(meanTimeToRecovery)

	return allDoraMetrics

}

// createAllDoraMetricsWithComparison creates all DORA metrics with comparison data.
// Averages are computed for both periods and each metric carries a comparison
// value (current vs previous) plus performance levels from the current period.
// A nil/empty previousMetricsData yields zero-valued previous averages.
func (impl *DoraMetricsServiceImpl) createAllDoraMetricsWithComparison(currentMetricsData, previousMetricsData map[string]*bean.LensMetrics) *bean.AllDoraMetrics {
	// Extract current period values
	var currentDeploymentFreq, currentLeadTime, currentChangeFailure, currentRecoveryTime []float64
	for _, metrics := range currentMetricsData {
		currentDeploymentFreq = append(currentDeploymentFreq, metrics.AverageCycleTime)
		currentLeadTime = append(currentLeadTime, metrics.AverageLeadTime)
		currentChangeFailure = append(currentChangeFailure, metrics.ChangeFailureRate)
		currentRecoveryTime = append(currentRecoveryTime, metrics.AverageRecoveryTime)
	}

	// Extract previous period values
	var previousDeploymentFreq, previousLeadTime, previousChangeFailure, previousRecoveryTime []float64
	for _, metrics := range previousMetricsData {
		previousDeploymentFreq = append(previousDeploymentFreq, metrics.AverageCycleTime)
		previousLeadTime = append(previousLeadTime, metrics.AverageLeadTime)
		previousChangeFailure = append(previousChangeFailure, metrics.ChangeFailureRate)
		previousRecoveryTime = append(previousRecoveryTime, metrics.AverageRecoveryTime)
	}

	// Calculate averages
	currentDeploymentFreqAvg := util.CalculateAverageFromValues(currentDeploymentFreq)
	currentLeadTimeAvg := util.CalculateAverageFromValues(currentLeadTime)
	currentChangeFailureAvg := util.CalculateAverageFromValues(currentChangeFailure)
	currentRecoveryTimeAvg := util.CalculateAverageFromValues(currentRecoveryTime)

	previousDeploymentFreqAvg := util.CalculateAverageFromValues(previousDeploymentFreq)
	previousLeadTimeAvg := util.CalculateAverageFromValues(previousLeadTime)
	previousChangeFailureAvg := util.CalculateAverageFromValues(previousChangeFailure)
	previousRecoveryTimeAvg := util.CalculateAverageFromValues(previousRecoveryTime)

	// Calculate comparisons
	deploymentFreqCompValue := util.CalculateComparison(currentDeploymentFreqAvg, previousDeploymentFreqAvg, bean.MetricCategoryDeploymentFrequency)
	leadTimeCompValue := util.CalculateComparison(currentLeadTimeAvg, previousLeadTimeAvg, bean.MetricCategoryMeanLeadTime)
	changeFailureCompValue := util.CalculateComparison(currentChangeFailureAvg, previousChangeFailureAvg, bean.MetricCategoryChangeFailureRate)
	recoveryTimeCompValue := util.CalculateComparison(currentRecoveryTimeAvg, previousRecoveryTimeAvg, bean.MetricCategoryMeanTimeToRecovery)

	// Calculate performance levels for each metric separately using current period data
	deploymentFreqPerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryDeploymentFrequency)
	leadTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanLeadTime)
	changeFailurePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryChangeFailureRate)
	recoveryTimePerformanceLevels := util.CalculatePerformanceLevelsForMetric(currentMetricsData, bean.MetricCategoryMeanTimeToRecovery)

	deploymentFrequency := util.CreateDoraMetricObject(currentDeploymentFreqAvg, bean.MetricValueUnitNumber, deploymentFreqCompValue, bean.ComparisonUnitPercentage, deploymentFreqPerformanceLevels)
	meanLeadTime := util.CreateDoraMetricObject(currentLeadTimeAvg, bean.MetricValueUnitMinutes, leadTimeCompValue, bean.ComparisonUnitMinutes, leadTimePerformanceLevels)
	changeFailureRate := util.CreateDoraMetricObject(currentChangeFailureAvg, bean.MetricValueUnitPercentage, changeFailureCompValue, bean.ComparisonUnitPercentage, changeFailurePerformanceLevels)
	meanTimeToRecovery := util.CreateDoraMetricObject(currentRecoveryTimeAvg, bean.MetricValueUnitMinutes, recoveryTimeCompValue, bean.ComparisonUnitMinutes,
recoveryTimePerformanceLevels)

	allDoraMetrics := bean.NewAllDoraMetrics().
		WithDeploymentFrequency(deploymentFrequency).
		WithMeanLeadTime(meanLeadTime).
		WithChangeFailureRate(changeFailureRate).
		WithMeanTimeToRecovery(meanTimeToRecovery)

	return allDoraMetrics
}
diff --git a/pkg/overview/InsightsService.go b/pkg/overview/InsightsService.go
new file mode 100644
index 0000000000..1fbf3422e0
--- /dev/null
+++ b/pkg/overview/InsightsService.go
@@ -0,0 +1,127 @@
/*
 * Copyright (c) 2024. Devtron Inc.
 */

package overview

import (
	"context"
	"fmt"

	"github.com/devtron-labs/devtron/internal/sql/repository/app"
	"github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig"
	"github.com/devtron-labs/devtron/pkg/cluster/environment/repository"
	"github.com/devtron-labs/devtron/pkg/overview/bean"
	"go.uber.org/zap"
)

// InsightsService exposes pipeline-usage insights: which build or deployment
// pipelines were triggered in a time range, with sort and pagination support.
type InsightsService interface {
	GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error)
}

// InsightsServiceImpl is the default InsightsService backed by CI/CD workflow
// and pipeline repositories.
type InsightsServiceImpl struct {
	logger                *zap.SugaredLogger
	appRepository         app.AppRepository
	pipelineRepository    pipelineConfig.PipelineRepository
	ciPipelineRepository  pipelineConfig.CiPipelineRepository
	ciWorkflowRepository  pipelineConfig.CiWorkflowRepository
	cdWorkflowRepository  pipelineConfig.CdWorkflowRepository
	environmentRepository repository.EnvironmentRepository
}

// NewInsightsServiceImpl wires an InsightsServiceImpl with its dependencies.
func NewInsightsServiceImpl(
	logger *zap.SugaredLogger,
	appRepository app.AppRepository,
	pipelineRepository pipelineConfig.PipelineRepository,
	ciPipelineRepository pipelineConfig.CiPipelineRepository,
	ciWorkflowRepository pipelineConfig.CiWorkflowRepository,
	cdWorkflowRepository pipelineConfig.CdWorkflowRepository,
	environmentRepository repository.EnvironmentRepository,
) *InsightsServiceImpl {
	return &InsightsServiceImpl{
		logger:                logger,
		appRepository:         appRepository,
		pipelineRepository:    pipelineRepository,
		ciPipelineRepository:  ciPipelineRepository,
		ciWorkflowRepository:  ciWorkflowRepository,
		cdWorkflowRepository:  cdWorkflowRepository,
		environmentRepository: environmentRepository,
	}
}

// GetInsights dispatches on request.PipelineType (build vs deployment) and
// returns the triggered-pipeline usage list plus the total (pre-pagination)
// count. An unrecognized pipeline type is an error.
func (impl *InsightsServiceImpl) GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error) {
	var pipelines []bean.PipelineUsageItem
	var totalCount int
	var err error

	switch request.PipelineType {
	case bean.BuildPipelines:
		pipelines, totalCount, err = impl.getTriggeredBuildPipelines(ctx, request)
		if err != nil {
			impl.logger.Errorw("error getting triggered build pipelines", "err", err)
			return nil, err
		}
	case bean.DeploymentPipelines:
		pipelines, totalCount, err = impl.getTriggeredDeploymentPipelines(ctx, request)
		if err != nil {
			impl.logger.Errorw("error getting triggered deployment pipelines", "err", err)
			return nil, err
		}
	default:
		impl.logger.Errorw("invalid pipeline type", "pipelineType", request.PipelineType)
		return nil, fmt.Errorf("invalid pipeline type: %s", request.PipelineType)
	}

	response := &bean.InsightsResponse{
		Pipelines:  pipelines,
		TotalCount: totalCount,
	}

	return response, nil
}

// getTriggeredBuildPipelines fetches CI pipelines triggered in the requested
// time window (already sorted/paginated by the repository) and maps them to
// the API's PipelineUsageItem shape.
func (impl *InsightsServiceImpl) getTriggeredBuildPipelines(ctx context.Context, request *bean.InsightsRequest) ([]bean.PipelineUsageItem, int, error) {
	pipelineData, totalCount, err := impl.ciWorkflowRepository.GetTriggeredCIPipelines(request.TimeRangeRequest.From, request.TimeRangeRequest.To, request.SortOrder, request.Limit, request.Offset)
	if err != nil {
		impl.logger.Errorw("error getting triggered CI pipelines", "err", err)
		return nil, 0, err
	}

	var pipelineUsage []bean.PipelineUsageItem
	for _, data := range pipelineData {
		pipelineUsage = append(pipelineUsage, bean.PipelineUsageItem{
			AppID:        data.AppID,
			PipelineID:   data.PipelineID,
			PipelineName: data.PipelineName,
			AppName:      data.AppName,
			TriggerCount: data.TriggerCount,
		})
	}

	return pipelineUsage, totalCount, nil
}

// getTriggeredDeploymentPipelines is the CD counterpart of
// getTriggeredBuildPipelines; it additionally carries environment identifiers.
func (impl *InsightsServiceImpl)
 */

package overview

import (
	"context"

	"github.com/devtron-labs/common-lib/utils"
	"github.com/devtron-labs/devtron/pkg/overview/bean"
	"github.com/devtron-labs/devtron/pkg/overview/cache"
	"github.com/devtron-labs/devtron/pkg/overview/constants"
)

// OverviewService is the facade for all dashboard/overview APIs; every method
// delegates to a focused sub-service (app management, DORA, insights, cluster,
// security).
type OverviewService interface {
	// New Apps Overview
	GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error)

	// New Workflow Overview
	GetWorkflowOverview(ctx context.Context) (*bean.WorkflowOverviewResponse, error)

	// Build and Deployment Activity
	GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error)
	GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) (*bean.BuildDeploymentActivityDetailedResponse, error)

	// DORA Metrics
	GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error)

	// Insights
	GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error)

	// Cluster Management Overview
	GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error)
	DeleteClusterOverviewCache(ctx context.Context) error
	RefreshClusterOverviewCache(ctx context.Context) error

	// Cluster Overview Detailed Drill-down API (unified endpoint for all node view group types)
	GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error)

	// Security Overview APIs
	GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error)
	GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error)
	GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error)
	GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error)
	GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error)
}

// OverviewServiceImpl aggregates the sub-services behind OverviewService.
type OverviewServiceImpl struct {
	appManagementService    AppManagementService
	doraMetricsService      DoraMetricsService
	insightsService         InsightsService
	clusterOverviewService  ClusterOverviewService
	clusterCacheService     cache.ClusterCacheService
	securityOverviewService SecurityOverviewService
}

// NewOverviewServiceImpl wires an OverviewServiceImpl with its sub-services.
func NewOverviewServiceImpl(
	appManagementService AppManagementService,
	doraMetricsService DoraMetricsService,
	insightsService InsightsService,
	clusterOverviewService ClusterOverviewService,
	clusterCacheService cache.ClusterCacheService,
	securityOverviewService SecurityOverviewService,
) *OverviewServiceImpl {
	return &OverviewServiceImpl{
		appManagementService:    appManagementService,
		doraMetricsService:      doraMetricsService,
		insightsService:         insightsService,
		clusterOverviewService:  clusterOverviewService,
		clusterCacheService:     clusterCacheService,
		securityOverviewService: securityOverviewService,
	}
}

// GetAppsOverview delegates to AppManagementService.
func (impl *OverviewServiceImpl) GetAppsOverview(ctx context.Context) (*bean.AppsOverviewResponse, error) {
	return impl.appManagementService.GetAppsOverview(ctx)
}

// GetWorkflowOverview delegates to AppManagementService.
func (impl *OverviewServiceImpl) GetWorkflowOverview(ctx context.Context) (*bean.WorkflowOverviewResponse, error) {
	return impl.appManagementService.GetWorkflowOverview(ctx)
}

// GetBuildDeploymentActivity delegates to AppManagementService.
func (impl *OverviewServiceImpl) GetBuildDeploymentActivity(ctx context.Context, request *bean.BuildDeploymentActivityRequest) (*bean.BuildDeploymentActivityResponse, error) {
	return impl.appManagementService.GetBuildDeploymentActivity(ctx, request)
}

// GetBuildDeploymentActivityDetailed delegates to AppManagementService.
func (impl *OverviewServiceImpl) GetBuildDeploymentActivityDetailed(ctx context.Context, request *bean.BuildDeploymentActivityDetailedRequest) (*bean.BuildDeploymentActivityDetailedResponse, error) {
	return impl.appManagementService.GetBuildDeploymentActivityDetailed(ctx, request)
}

// GetDoraMetrics delegates to DoraMetricsService.
func (impl *OverviewServiceImpl) GetDoraMetrics(ctx context.Context, request *bean.DoraMetricsRequest) (*bean.DoraMetricsResponse, error) {
	return impl.doraMetricsService.GetDoraMetrics(ctx, request)
}

// GetInsights delegates to InsightsService.
func (impl *OverviewServiceImpl) GetInsights(ctx context.Context, request *bean.InsightsRequest) (*bean.InsightsResponse, error) {
	return impl.insightsService.GetInsights(ctx, request)
}

// GetClusterOverview delegates to ClusterOverviewService.
func (impl *OverviewServiceImpl) GetClusterOverview(ctx context.Context) (*bean.ClusterOverviewResponse, error) {
	return impl.clusterOverviewService.GetClusterOverview(ctx)
}

// DeleteClusterOverviewCache invalidates the cached cluster overview; it never
// returns a non-nil error.
func (impl *OverviewServiceImpl) DeleteClusterOverviewCache(ctx context.Context) error {
	impl.clusterCacheService.InvalidateClusterOverview()
	return nil
}

// RefreshClusterOverviewCache delegates to ClusterOverviewService.
func (impl *OverviewServiceImpl) RefreshClusterOverviewCache(ctx context.Context) error {
	return impl.clusterOverviewService.RefreshClusterOverviewCache(ctx)
}

// GetClusterOverviewDetailedNodeInfo delegates to ClusterOverviewService.
func (impl *OverviewServiceImpl) GetClusterOverviewDetailedNodeInfo(ctx context.Context, request *bean.ClusterOverviewDetailRequest) (*bean.ClusterOverviewNodeDetailedResponse, error) {
	return impl.clusterOverviewService.GetClusterOverviewDetailedNodeInfo(ctx, request)
}

// ============================================================================
// Security Overview APIs
// ============================================================================

// GetSecurityOverview delegates to SecurityOverviewService.
func (impl *OverviewServiceImpl) GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error) {
	return impl.securityOverviewService.GetSecurityOverview(ctx, request)
}

// GetSeverityInsights delegates to SecurityOverviewService.
func (impl *OverviewServiceImpl) GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error) {
	return impl.securityOverviewService.GetSeverityInsights(ctx, request)
}

// GetDeploymentSecurityStatus delegates to SecurityOverviewService.
func (impl *OverviewServiceImpl) GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error) {
	return impl.securityOverviewService.GetDeploymentSecurityStatus(ctx, request)
}

// GetVulnerabilityTrend delegates to SecurityOverviewService.
func (impl *OverviewServiceImpl) GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error) {
	return impl.securityOverviewService.GetVulnerabilityTrend(ctx, currentTimeRange, envType, aggregationType)
}

// GetBlockedDeploymentsTrend delegates to SecurityOverviewService.
func (impl *OverviewServiceImpl) GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error) {
	return impl.securityOverviewService.GetBlockedDeploymentsTrend(ctx, currentTimeRange, aggregationType)
}
diff --git a/pkg/overview/SecurityOverviewService.go b/pkg/overview/SecurityOverviewService.go
new file mode 100644
index 0000000000..7ef74eb463
--- /dev/null
+++ b/pkg/overview/SecurityOverviewService.go
@@ -0,0 +1,540 @@
/*
 * Copyright (c) 2024. Devtron Inc.
+ */ + +package overview + +import ( + "context" + "fmt" + "time" + + "github.com/devtron-labs/common-lib/utils" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/overview/adaptor" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + "github.com/devtron-labs/devtron/pkg/overview/util" + imageScanRepo "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository" + scanBean "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean" + "go.uber.org/zap" +) + +type SecurityOverviewService interface { + // 1. Security Overview API - "At a Glance" metrics (organization-wide) + GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error) + + // 2. Severity Insights API - With prod/non-prod filtering + GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error) + + // 3. Deployment Security Status API + GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error) + + // 5. Vulnerability Trend API - Time-series with prod/non-prod filtering + GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error) + + // 6. 
Blocked Deployments Trend API - Organization-wide + GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error) +} + +type SecurityOverviewServiceImpl struct { + logger *zap.SugaredLogger + imageScanResultRepository imageScanRepo.ImageScanResultRepository + imageScanDeployInfoRepository imageScanRepo.ImageScanDeployInfoRepository + cveStoreRepository imageScanRepo.CveStoreRepository + ciPipelineRepository pipelineConfig.CiPipelineRepository + cdWorkflowRepository pipelineConfig.CdWorkflowRepository +} + +func NewSecurityOverviewServiceImpl( + logger *zap.SugaredLogger, + imageScanResultRepository imageScanRepo.ImageScanResultRepository, + imageScanDeployInfoRepository imageScanRepo.ImageScanDeployInfoRepository, + cveStoreRepository imageScanRepo.CveStoreRepository, + ciPipelineRepository pipelineConfig.CiPipelineRepository, + cdWorkflowRepository pipelineConfig.CdWorkflowRepository, +) *SecurityOverviewServiceImpl { + return &SecurityOverviewServiceImpl{ + logger: logger, + imageScanResultRepository: imageScanResultRepository, + imageScanDeployInfoRepository: imageScanDeployInfoRepository, + cveStoreRepository: cveStoreRepository, + ciPipelineRepository: ciPipelineRepository, + cdWorkflowRepository: cdWorkflowRepository, + } +} + +func (service *SecurityOverviewServiceImpl) GetSecurityOverview(ctx context.Context, request *bean.SecurityOverviewRequest) (*bean.SecurityOverviewResponse, error) { + service.logger.Infow("GetSecurityOverview called", "request", request) + + // Fetch all vulnerabilities with fixed_version in a single query + vulnerabilities, err := service.imageScanResultRepository.GetVulnerabilityRawData("", nil, request.EnvIds, request.ClusterIds, request.AppIds, nil) + if err != nil { + service.logger.Errorw("error fetching vulnerabilities", "err", err) + return nil, fmt.Errorf("failed to fetch vulnerabilities: %w", err) + } + + // 
Calculate counts in application code + totalCount := len(vulnerabilities) + fixableCount := 0 + zeroDayCount := 0 + + uniqueCVEs := make(map[string]bool) + uniqueFixableCVEs := make(map[string]bool) + uniqueZeroDayCVEs := make(map[string]bool) + + for _, vuln := range vulnerabilities { + // Track unique CVEs + uniqueCVEs[vuln.CveStoreName] = true + + // Check if fixable (has fixed_version) + if vuln.FixedVersion != "" { + fixableCount++ + uniqueFixableCVEs[vuln.CveStoreName] = true + } else { + // Zero-day (no fixed_version) + zeroDayCount++ + uniqueZeroDayCVEs[vuln.CveStoreName] = true + } + } + + response := &bean.SecurityOverviewResponse{ + TotalVulnerabilities: &bean.VulnerabilityCount{ + Count: totalCount, + UniqueCount: len(uniqueCVEs), + }, + FixableVulnerabilities: &bean.VulnerabilityCount{ + Count: fixableCount, + UniqueCount: len(uniqueFixableCVEs), + }, + ZeroDayVulnerabilities: &bean.VulnerabilityCount{ + Count: zeroDayCount, + UniqueCount: len(uniqueZeroDayCVEs), + }, + } + + return response, nil +} + +func (service *SecurityOverviewServiceImpl) GetSeverityInsights(ctx context.Context, request *bean.SeverityInsightsRequest) (*bean.SeverityInsightsResponse, error) { + service.logger.Infow("GetSeverityInsights called", "request", request) + + // Determine environment type filter + // nil = all environments, true = prod only, false = non-prod only + var isProd *bool + if request.EnvType == bean.EnvTypeProd { + prodValue := true + isProd = &prodValue + } else if request.EnvType == bean.EnvTypeNonProd { + nonProdValue := false + isProd = &nonProdValue + } + // If EnvType is "all", isProd remains nil + + // Fetch all vulnerability data with severity and execution time in a single query + vulnerabilities, err := service.imageScanResultRepository.GetSeverityInsightDataByFilters(request.EnvIds, request.ClusterIds, request.AppIds, isProd) + if err != nil { + service.logger.Errorw("error fetching severity insight data", "err", err) + return nil, 
fmt.Errorf("failed to fetch severity insight data: %w", err) + } + + // Initialize counters using adapter + severityCount := adaptor.NewSeverityCount() + ageDistribution := adaptor.NewAgeDistribution() + + // Current time for age calculation + now := time.Now() + + // Process vulnerabilities in a single pass + for _, vuln := range vulnerabilities { + severity := scanBean.Severity(vuln.Severity) + + // Count by severity + switch severity { + case scanBean.Critical: + severityCount.Critical++ + case scanBean.High: + severityCount.High++ + case scanBean.Medium: + severityCount.Medium++ + case scanBean.Low: + severityCount.Low++ + default: + severityCount.Unknown++ + } + + // Calculate age in days + age := now.Sub(vuln.ExecutionTime).Hours() / 24 + + // Count by age bucket AND severity + var ageBucket *bean.AgeBucketSeverity + if age < 30 { + ageBucket = ageDistribution.LessThan30Days + } else if age < 60 { + ageBucket = ageDistribution.Between30To60Days + } else if age < 90 { + ageBucket = ageDistribution.Between60To90Days + } else { + ageBucket = ageDistribution.MoreThan90Days + } + + // Increment severity count within the age bucket + switch severity { + case scanBean.Critical: + ageBucket.Critical++ + case scanBean.High: + ageBucket.High++ + case scanBean.Medium: + ageBucket.Medium++ + case scanBean.Low: + ageBucket.Low++ + default: + ageBucket.Unknown++ + } + } + + response := &bean.SeverityInsightsResponse{ + SeverityDistribution: severityCount, + AgeDistribution: ageDistribution, + } + + return response, nil +} + +func (service *SecurityOverviewServiceImpl) GetDeploymentSecurityStatus(ctx context.Context, request *bean.DeploymentSecurityStatusRequest) (*bean.DeploymentSecurityStatusResponse, error) { + service.logger.Infow("GetDeploymentSecurityStatus called", "request", request) + + // Get total active deployments count + totalDeployments, err := service.imageScanDeployInfoRepository.GetActiveDeploymentCountByFilters(request.EnvIds, request.ClusterIds, 
request.AppIds) + if err != nil { + service.logger.Errorw("error getting total active deployments count", "err", err) + return nil, fmt.Errorf("failed to get total active deployments count: %w", err) + } + + // Get deployments with vulnerabilities count + deploymentsWithVulnerabilities, err := service.imageScanDeployInfoRepository.GetActiveDeploymentCountWithVulnerabilitiesByFilters(request.EnvIds, request.ClusterIds, request.AppIds) + if err != nil { + service.logger.Errorw("error getting deployments with vulnerabilities count", "err", err) + return nil, fmt.Errorf("failed to get deployments with vulnerabilities count: %w", err) + } + + // Get scanned and unscanned deployment counts in a single optimized query + scannedCounts, err := service.imageScanDeployInfoRepository.GetActiveDeploymentScannedUnscannedCountByFilters(request.EnvIds, request.ClusterIds, request.AppIds) + if err != nil { + service.logger.Errorw("error getting scanned/unscanned deployment counts", "err", err) + return nil, fmt.Errorf("failed to get scanned/unscanned deployment counts: %w", err) + } + + // Get total CI pipelines count (workflows) + totalCiPipelines, err := service.ciPipelineRepository.GetActiveCiPipelineCount() + if err != nil { + service.logger.Errorw("error getting total CI pipelines count", "err", err) + return nil, fmt.Errorf("failed to get total CI pipelines count: %w", err) + } + + // Get scan-enabled CI pipelines count (scan_enabled=true in ci_pipeline table) + scanEnabledCiPipelines, err := service.ciPipelineRepository.GetScanEnabledCiPipelineCount() + if err != nil { + service.logger.Errorw("error getting scan-enabled CI pipelines count", "err", err) + return nil, fmt.Errorf("failed to get scan-enabled CI pipelines count: %w", err) + } + + // Get CI pipelines with IMAGE SCAN plugin configured in POST-CI or PRE-CD stages + pluginConfiguredPipelines, err := service.ciPipelineRepository.GetCiPipelineCountWithImageScanPluginInPostCiOrPreCd() + if err != nil { + 
service.logger.Errorw("error getting CI pipelines with IMAGE SCAN plugin in POST-CI or PRE-CD count", "err", err) + return nil, fmt.Errorf("failed to get CI pipelines with IMAGE SCAN plugin in POST-CI or PRE-CD count: %w", err) + } + + totalScanningEnabledPipelines := scanEnabledCiPipelines + pluginConfiguredPipelines + + // Build response with calculated percentages + // For unscanned images: percentage = unscanned / (unscanned + scanned) + totalScannableDeployments := scannedCounts.UnscannedCount + scannedCounts.ScannedCount + response := &bean.DeploymentSecurityStatusResponse{ + ActiveDeploymentsWithVulnerabilities: &bean.DeploymentMetric{ + Count: deploymentsWithVulnerabilities, + Percentage: calculatePercentage(deploymentsWithVulnerabilities, totalDeployments), + }, + ActiveDeploymentsWithUnscannedImages: &bean.DeploymentMetric{ + Count: scannedCounts.UnscannedCount, + Percentage: calculatePercentage(scannedCounts.UnscannedCount, totalScannableDeployments), + }, + WorkflowsWithScanningEnabled: &bean.WorkflowMetric{ + Count: totalScanningEnabledPipelines, + Percentage: calculatePercentage(totalScanningEnabledPipelines, totalCiPipelines), + }, + } + + return response, nil +} + +func (service *SecurityOverviewServiceImpl) GetVulnerabilityTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, envType bean.EnvType, aggregationType constants.AggregationType) (*bean.VulnerabilityTrendResponse, error) { + service.logger.Infow("GetVulnerabilityTrend called", "from", currentTimeRange.From, "to", currentTimeRange.To, "envType", envType, "aggregationType", aggregationType) + + // Determine environment type filter + // nil = all environments, true = prod only, false = non-prod only + var isProd *bool + if envType == bean.EnvTypeProd { + prodValue := true + isProd = &prodValue + } else if envType == bean.EnvTypeNonProd { + nonProdValue := false + isProd = &nonProdValue + } + // If envType is "all", isProd remains nil + + // Fetch vulnerability trend data from 
repository + vulnerabilities, err := service.imageScanResultRepository.GetVulnerabilityTrendDataByFilters( + currentTimeRange.From, + currentTimeRange.To, + isProd, + ) + if err != nil { + service.logger.Errorw("error getting vulnerability trend data", "err", err) + return nil, fmt.Errorf("failed to get vulnerability trend data: %w", err) + } + + // Aggregate vulnerabilities by time bucket and severity + trendData := service.aggregateVulnerabilitiesByTime(vulnerabilities, currentTimeRange.From, currentTimeRange.To, aggregationType) + + response := &bean.VulnerabilityTrendResponse{ + Trend: trendData, + } + + return response, nil +} + +// aggregateVulnerabilitiesByTime aggregates vulnerabilities by time buckets and severity +func (service *SecurityOverviewServiceImpl) aggregateVulnerabilitiesByTime( + vulnerabilities []*imageScanRepo.VulnerabilityTrendData, + from, to *time.Time, + aggregationType constants.AggregationType, +) []*bean.VulnerabilityTrendDataPoint { + // Map to track unique CVEs per time bucket and severity: timeKey -> severity -> set of CVE names + severityMap := make(map[string]map[int]map[string]bool) + + targetLocation := from.Location() + + // Process each vulnerability and bucket by time + for _, vuln := range vulnerabilities { + // Convert UTC execution time to target timezone for proper time bucketing + localExecutionTime := vuln.ExecutionTime.In(targetLocation) + + var timeKey string + if aggregationType == constants.AggregateByHour { + timeKey = localExecutionTime.Truncate(time.Hour).Format("2006-01-02T15:04:05Z") + } else if aggregationType == constants.AggregateByMonth { + timeKey = time.Date(localExecutionTime.Year(), localExecutionTime.Month(), 1, 0, 0, 0, 0, targetLocation).Format("2006-01-02T15:04:05Z") + } else { + timeKey = localExecutionTime.Truncate(24 * time.Hour).Format("2006-01-02T15:04:05Z") + } + + // Initialize maps if needed + if severityMap[timeKey] == nil { + severityMap[timeKey] = make(map[int]map[string]bool) + } + if 
severityMap[timeKey][vuln.Severity] == nil { + severityMap[timeKey][vuln.Severity] = make(map[string]bool) + } + + // Track unique CVE names per time bucket and severity + severityMap[timeKey][vuln.Severity][vuln.CveStoreName] = true + } + + // Generate time-series data with zero values for missing time buckets + var trendData []*bean.VulnerabilityTrendDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + dataPoint := service.createVulnerabilityDataPoint(current, severityMap[timeKey]) + trendData = append(trendData, dataPoint) + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + dataPoint := service.createVulnerabilityDataPoint(current, severityMap[timeKey]) + trendData = append(trendData, dataPoint) + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Daily aggregation + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Format("2006-01-02T15:04:05Z") + dataPoint := service.createVulnerabilityDataPoint(current, severityMap[timeKey]) + trendData = append(trendData, dataPoint) + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData +} + +// createVulnerabilityDataPoint creates a data point with counts for each severity level +func (service 
*SecurityOverviewServiceImpl) createVulnerabilityDataPoint( + timestamp time.Time, + severityCounts map[int]map[string]bool, +) *bean.VulnerabilityTrendDataPoint { + dataPoint := &bean.VulnerabilityTrendDataPoint{ + Timestamp: timestamp, + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + Total: 0, + } + + if severityCounts == nil { + return dataPoint + } + + // Count unique CVEs for each severity level + for severity, cveSet := range severityCounts { + count := len(cveSet) + + switch scanBean.Severity(severity) { + case scanBean.Critical: + dataPoint.Critical = count + case scanBean.High: + dataPoint.High = count + case scanBean.Medium: + dataPoint.Medium = count + case scanBean.Low: + dataPoint.Low = count + default: + dataPoint.Unknown = count + } + + dataPoint.Total += count + } + + return dataPoint +} + +func (service *SecurityOverviewServiceImpl) GetBlockedDeploymentsTrend(ctx context.Context, currentTimeRange *utils.TimeRangeRequest, aggregationType constants.AggregationType) (*bean.BlockedDeploymentsTrendResponse, error) { + service.logger.Infow("GetBlockedDeploymentsTrend called", "from", currentTimeRange.From, "to", currentTimeRange.To, "aggregationType", aggregationType) + + // Fetch blocked deployment data from repository + blockedDeployments, err := service.cdWorkflowRepository.GetBlockedDeploymentsForTrend(currentTimeRange.From, currentTimeRange.To) + if err != nil { + service.logger.Errorw("error getting blocked deployments for trend", "err", err) + return nil, fmt.Errorf("failed to get blocked deployments: %w", err) + } + + // Aggregate blocked deployments by time bucket + trendData := service.aggregateBlockedDeploymentsByTime(blockedDeployments, currentTimeRange.From, currentTimeRange.To, aggregationType) + + response := &bean.BlockedDeploymentsTrendResponse{ + Trend: trendData, + } + + return response, nil +} + +// aggregateBlockedDeploymentsByTime aggregates blocked deployments by time buckets +func (service 
*SecurityOverviewServiceImpl) aggregateBlockedDeploymentsByTime( + blockedDeployments []pipelineConfig.BlockedDeploymentData, + from, to *time.Time, + aggregationType constants.AggregationType, +) []*bean.BlockedDeploymentDataPoint { + // Map to track counts per time bucket: Unix timestamp -> count + countMap := make(map[int64]int) + + targetLocation := from.Location() + + // Process each blocked deployment and bucket by time + for _, deployment := range blockedDeployments { + // Convert UTC started_on time to target timezone for proper time bucketing + localStartedOn := deployment.StartedOn.In(targetLocation) + + var bucketTime time.Time + if aggregationType == constants.AggregateByHour { + // Truncate to hour boundary in local timezone + bucketTime = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), + localStartedOn.Hour(), 0, 0, 0, targetLocation) + } else if aggregationType == constants.AggregateByMonth { + // Truncate to month boundary (1st day of month at midnight) + bucketTime = time.Date(localStartedOn.Year(), localStartedOn.Month(), 1, 0, 0, 0, 0, targetLocation) + } else { + // Daily aggregation - truncate to day boundary (midnight) in local timezone + bucketTime = time.Date(localStartedOn.Year(), localStartedOn.Month(), localStartedOn.Day(), + 0, 0, 0, 0, targetLocation) + } + + // Use Unix timestamp as key to avoid timezone formatting issues + timeKey := bucketTime.Unix() + countMap[timeKey]++ + } + + // Generate time-series data with zero values for missing time buckets + var trendData []*bean.BlockedDeploymentDataPoint + + if aggregationType == constants.AggregateByHour { + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), to.Hour(), 0, 0, 0, from.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Unix() + count := countMap[timeKey] + + trendData = append(trendData, &bean.BlockedDeploymentDataPoint{ 
+ Timestamp: current, + Count: count, + }) + + current = current.Add(time.Hour) + } + } else if aggregationType == constants.AggregateByMonth { + current := time.Date(from.Year(), from.Month(), 1, 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), 1, 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Unix() + count := countMap[timeKey] + + trendData = append(trendData, &bean.BlockedDeploymentDataPoint{ + Timestamp: current, + Count: count, + }) + + current = current.AddDate(0, 1, 0) // Add one month + } + } else { + // Daily aggregation + current := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, from.Location()) + end := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, to.Location()) + + for current.Before(end) || current.Equal(end) { + timeKey := current.Unix() + count := countMap[timeKey] + + trendData = append(trendData, &bean.BlockedDeploymentDataPoint{ + Timestamp: current, + Count: count, + }) + + current = current.AddDate(0, 0, 1) // Add one day + } + } + + return trendData +} + +func calculatePercentage(count, total int) float64 { + if total == 0 { + return 0.0 + } + return util.RoundToTwoDecimals(float64(count) / float64(total) * 100.0) +} diff --git a/pkg/overview/adaptor/SecurityOverviewAdapter.go b/pkg/overview/adaptor/SecurityOverviewAdapter.go new file mode 100644 index 0000000000..b983e592f9 --- /dev/null +++ b/pkg/overview/adaptor/SecurityOverviewAdapter.go @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package adaptor + +import "github.com/devtron-labs/devtron/pkg/overview/bean" + +// SecurityOverviewAdapter provides factory methods for initializing security overview bean structs + +// NewSeverityCount returns a new initialized SeverityCount with all fields set to zero +func NewSeverityCount() *bean.SeverityCount { + return &bean.SeverityCount{ + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + } +} + +// NewAgeBucketSeverity returns a new initialized AgeBucketSeverity with all fields set to zero +func NewAgeBucketSeverity() *bean.AgeBucketSeverity { + return &bean.AgeBucketSeverity{ + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + } +} + +// NewAgeDistribution returns a new initialized AgeDistribution with all nested structs initialized +func NewAgeDistribution() *bean.AgeDistribution { + return &bean.AgeDistribution{ + LessThan30Days: NewAgeBucketSeverity(), + Between30To60Days: NewAgeBucketSeverity(), + Between60To90Days: NewAgeBucketSeverity(), + MoreThan90Days: NewAgeBucketSeverity(), + } +} + +// NewVulnerabilityCount returns a new initialized VulnerabilityCount with all fields set to zero +func NewVulnerabilityCount() *bean.VulnerabilityCount { + return &bean.VulnerabilityCount{ + Count: 0, + UniqueCount: 0, + } +} + +// NewSecurityOverviewResponse returns a new initialized SecurityOverviewResponse with all nested structs initialized +func NewSecurityOverviewResponse() *bean.SecurityOverviewResponse { + return &bean.SecurityOverviewResponse{ + TotalVulnerabilities: NewVulnerabilityCount(), + FixableVulnerabilities: NewVulnerabilityCount(), + ZeroDayVulnerabilities: NewVulnerabilityCount(), + } +} + +// NewSeverityInsightsResponse returns a new initialized SeverityInsightsResponse with all nested structs initialized +func NewSeverityInsightsResponse() *bean.SeverityInsightsResponse { + return &bean.SeverityInsightsResponse{ + SeverityDistribution: NewSeverityCount(), + AgeDistribution: NewAgeDistribution(), + } +} + +// 
NewDeploymentMetric returns a new initialized DeploymentMetric with all fields set to zero +func NewDeploymentMetric() *bean.DeploymentMetric { + return &bean.DeploymentMetric{ + Count: 0, + Percentage: 0.0, + } +} + +// NewWorkflowMetric returns a new initialized WorkflowMetric with all fields set to zero +func NewWorkflowMetric() *bean.WorkflowMetric { + return &bean.WorkflowMetric{ + Count: 0, + Percentage: 0.0, + } +} + +// NewDeploymentSecurityStatusResponse returns a new initialized DeploymentSecurityStatusResponse with all nested structs initialized +func NewDeploymentSecurityStatusResponse() *bean.DeploymentSecurityStatusResponse { + return &bean.DeploymentSecurityStatusResponse{ + ActiveDeploymentsWithVulnerabilities: NewDeploymentMetric(), + ActiveDeploymentsWithUnscannedImages: NewDeploymentMetric(), + WorkflowsWithScanningEnabled: NewWorkflowMetric(), + } +} + +// NewVulnerabilitiesResponse returns a new initialized VulnerabilitiesResponse with empty slice and pagination info +func NewVulnerabilitiesResponse(offset, size int) *bean.VulnerabilitiesResponse { + return &bean.VulnerabilitiesResponse{ + Vulnerabilities: []*bean.Vulnerability{}, + Total: 0, + Offset: offset, + Size: size, + } +} + +// NewVulnerabilityTrendResponse returns a new initialized VulnerabilityTrendResponse with empty trend slice +func NewVulnerabilityTrendResponse() *bean.VulnerabilityTrendResponse { + return &bean.VulnerabilityTrendResponse{ + Trend: []*bean.VulnerabilityTrendDataPoint{}, + } +} + +// NewBlockedDeploymentsTrendResponse returns a new initialized BlockedDeploymentsTrendResponse with empty trend slice +func NewBlockedDeploymentsTrendResponse() *bean.BlockedDeploymentsTrendResponse { + return &bean.BlockedDeploymentsTrendResponse{ + Trend: []*bean.BlockedDeploymentDataPoint{}, + } +} diff --git a/pkg/overview/bean/DoraMetricBean.go b/pkg/overview/bean/DoraMetricBean.go new file mode 100644 index 0000000000..9ad30fdb03 --- /dev/null +++ 
b/pkg/overview/bean/DoraMetricBean.go @@ -0,0 +1,172 @@ +package bean + +import ( + "time" + + "github.com/devtron-labs/common-lib/utils" +) + +type AllDoraMetrics struct { + DeploymentFrequency *DoraMetric + MeanLeadTime *DoraMetric + ChangeFailureRate *DoraMetric + MeanTimeToRecovery *DoraMetric +} + +func NewAllDoraMetrics() *AllDoraMetrics { + return &AllDoraMetrics{ + DeploymentFrequency: &DoraMetric{}, + MeanLeadTime: &DoraMetric{}, + ChangeFailureRate: &DoraMetric{}, + MeanTimeToRecovery: &DoraMetric{}, + } +} + +func (r *AllDoraMetrics) WithDeploymentFrequency(deploymentFrequency *DoraMetric) *AllDoraMetrics { + r.DeploymentFrequency = deploymentFrequency + return r +} + +func (r *AllDoraMetrics) WithMeanLeadTime(meanLeadTime *DoraMetric) *AllDoraMetrics { + r.MeanLeadTime = meanLeadTime + return r +} + +func (r *AllDoraMetrics) WithChangeFailureRate(changeFailureRate *DoraMetric) *AllDoraMetrics { + r.ChangeFailureRate = changeFailureRate + return r +} + +func (r *AllDoraMetrics) WithMeanTimeToRecovery(meanTimeToRecovery *DoraMetric) *AllDoraMetrics { + r.MeanTimeToRecovery = meanTimeToRecovery + return r +} + +// LensMetrics represents the response structure from Lens API +type LensMetrics struct { + AverageCycleTime float64 `json:"average_cycle_time"` + AverageLeadTime float64 `json:"average_lead_time"` + ChangeFailureRate float64 `json:"change_failure_rate"` + AverageRecoveryTime float64 `json:"average_recovery_time"` + AverageDeploymentSize float32 `json:"average_deployment_size"` + AverageLineAdded float32 `json:"average_line_added"` + AverageLineDeleted float32 `json:"average_line_deleted"` + LastFailedTime string `json:"last_failed_time"` + RecoveryTimeLastFailed float64 `json:"recovery_time_last_failed"` +} + +type DoraMetric struct { + OverallAverage *MetricValue `json:"overallAverage"` + ComparisonValue int `json:"comparisonValue"` // Percentage or minutes change + ComparisonUnit ComparisonUnit `json:"comparisonUnit"` // PERCENTAGE or MINUTES + 
PerformanceLevelCount *PerformanceLevelCount `json:"performanceLevelCount"` // Count of pipelines in each performance category +} + +func NewDoraMetric() *DoraMetric { + return &DoraMetric{} +} +func (r *DoraMetric) WithOverallAverage(overallAverage *MetricValue) *DoraMetric { + r.OverallAverage = overallAverage + return r +} + +func (r *DoraMetric) WithComparisonValue(comparisonValue int) *DoraMetric { + r.ComparisonValue = comparisonValue + return r +} +func (r *DoraMetric) WithComparisonUnit(comparisonUnit ComparisonUnit) *DoraMetric { + r.ComparisonUnit = comparisonUnit + return r +} +func (r *DoraMetric) WithPerformanceLevelCount(performanceLevelCount *PerformanceLevelCount) *DoraMetric { + r.PerformanceLevelCount = performanceLevelCount + return r +} + +type MetricValue struct { + Value float64 `json:"value"` + Unit string `json:"unit"` // NUMBER, PERCENTAGE, MINUTES +} + +type PerformanceLevelCount struct { + Elite int `json:"elite"` + High int `json:"high"` + Medium int `json:"medium"` + Low int `json:"low"` +} + +// DORA Metrics Beans +type DoraMetricsRequest struct { + TimeRangeRequest *utils.TimeRangeRequest `json:"timeRangeRequest"` + PrevFrom *time.Time `json:"prevFrom,omitempty"` // Previous period start time + PrevTo *time.Time `json:"prevTo,omitempty"` // Previous period end time +} + +type DoraMetricsResponse struct { + ProdDeploymentPipelineCount int `json:"prodDeploymentPipelineCount"` + DeploymentFrequency *DoraMetric `json:"deploymentFrequency"` + MeanLeadTime *DoraMetric `json:"meanLeadTime"` + ChangeFailureRate *DoraMetric `json:"changeFailureRate"` + MeanTimeToRecovery *DoraMetric `json:"meanTimeToRecovery"` +} + +func NewDoraMetricsResponse() *DoraMetricsResponse { + return &DoraMetricsResponse{} +} + +type ComparisonUnit string + +const ( + ComparisonUnitMinutes ComparisonUnit = "MINUTES" + ComparisonUnitPercentage ComparisonUnit = "PERCENTAGE" +) + +type MetricValueUnit string + +const ( + MetricValueUnitNumber MetricValueUnit = "NUMBER" 
+ MetricValueUnitPercentage MetricValueUnit = "PERCENTAGE" + MetricValueUnitMinutes MetricValueUnit = "MINUTES" +) + +func (r MetricValueUnit) ToString() string { + return string(r) +} + +type PerformanceCategory string + +const ( + PerformanceElite PerformanceCategory = "Elite" + PerformanceHigh PerformanceCategory = "High" + PerformanceMedium PerformanceCategory = "Medium" + PerformanceLow PerformanceCategory = "Low" +) + +type MetricCategory string + +const ( + MetricCategoryMeanTimeToRecovery MetricCategory = "meanTimeToRecovery" + MetricCategoryChangeFailureRate MetricCategory = "changeFailureRate" + MetricCategoryMeanLeadTime MetricCategory = "meanLeadTime" + MetricCategoryDeploymentFrequency MetricCategory = "deploymentFrequency" +) + +// IsValidMetricCategory checks if the given string is a valid metric category +func IsValidMetricCategory(category string) bool { + switch MetricCategory(category) { + case MetricCategoryDeploymentFrequency, MetricCategoryMeanLeadTime, MetricCategoryChangeFailureRate, MetricCategoryMeanTimeToRecovery: + return true + default: + return false + } +} + +// IsValidPerformanceCategory checks if the given string is a valid performance category +func IsValidPerformanceCategory(category string) bool { + switch PerformanceCategory(category) { + case PerformanceElite, PerformanceHigh, PerformanceMedium, PerformanceLow: + return true + default: + return false + } +} diff --git a/pkg/overview/bean/OverviewBean.go b/pkg/overview/bean/OverviewBean.go new file mode 100644 index 0000000000..fc21c127b3 --- /dev/null +++ b/pkg/overview/bean/OverviewBean.go @@ -0,0 +1,446 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
/*
 * Copyright (c) 2024. Devtron Inc.
 */

// Package bean holds the request/response types for the overview dashboards
// (apps, workflows, build/deployment activity, insights, cluster management).
package bean

import (
	capacityBean "github.com/devtron-labs/devtron/pkg/k8s/capacity/bean"
	"time"

	"github.com/devtron-labs/common-lib/utils"
	"github.com/devtron-labs/devtron/pkg/overview/constants"
)

// BuildDeploymentActivityRequest bounds the activity summary to [From, To].
type BuildDeploymentActivityRequest struct {
	From *time.Time `json:"from"`
	To   *time.Time `json:"to"`
}

// ActivityKind identifies which activity series a detailed request refers to.
type ActivityKind string

const (
	ActivityKindBuildTrigger      ActivityKind = "buildTrigger"
	ActivityKindDeploymentTrigger ActivityKind = "deploymentTrigger"
	ActivityKindAvgBuildTime      ActivityKind = "avgBuildTime"
)

// BuildDeploymentActivityDetailedRequest asks for one activity series,
// bucketed by the given aggregation, over [From, To].
type BuildDeploymentActivityDetailedRequest struct {
	ActivityKind    ActivityKind              `json:"activityKind" validate:"required,oneof=buildTrigger deploymentTrigger avgBuildTime"`
	AggregationType constants.AggregationType `json:"aggregationType,omitempty"`
	From            *time.Time                `json:"from"`
	To              *time.Time                `json:"to"`
}

// AppMetrics totals applications, split by Devtron-managed vs third-party.
type AppMetrics struct {
	Total          int             `json:"total"`
	YourApps       *AppTypeMetrics `json:"yourApps"`
	ThirdPartyApps *AppTypeMetrics `json:"thirdPartyApps"`
}

// PipelineMetrics totals pipelines, split by production vs non-production.
type PipelineMetrics struct {
	Total         int `json:"total"`
	Production    int `json:"production"`
	NonProduction int `json:"nonProduction"`
}

// Common structure for entity metadata
type EntityMetadata struct {
	Name      string    `json:"name"`
	CreatedOn time.Time `json:"createdOn"`
}

// Time-based aggregated data point
type TimeDataPoint struct {
	Date  string `json:"date"`  // YYYY-MM-DD format for days, YYYY-MM-DD HH:00 format for hours
	Count int    `json:"count"` // Aggregated count for this time period
}

// Enhanced metrics structures with detailed metadata

// ProjectMetrics totals projects with per-project metadata.
type ProjectMetrics struct {
	Total   int              `json:"total"`
	Details []EntityMetadata `json:"details"`
}

// AppTypeMetrics totals one app category with per-app metadata.
type AppTypeMetrics struct {
	Total   int              `json:"total"`
	Details []EntityMetadata `json:"details"`
}

// EnvironmentMetrics totals environments with per-environment metadata.
type EnvironmentMetrics struct {
	Total   int              `json:"total"`
	Details []EntityMetadata `json:"details"`
}

// BuildPipelineMetrics totals CI pipelines, split by normal vs external.
type BuildPipelineMetrics struct {
	Total               int                    `json:"total"`
	NormalCiPipelines   *CiPipelineTypeMetrics `json:"normalCiPipelines"`
	ExternalCiPipelines *CiPipelineTypeMetrics `json:"externalCiPipelines"`
}

// CiPipelineTypeMetrics totals one CI-pipeline category with metadata.
type CiPipelineTypeMetrics struct {
	Total   int              `json:"total"`
	Details []EntityMetadata `json:"details"`
}

// CdPipelineMetrics totals CD pipelines, split by environment class.
type CdPipelineMetrics struct {
	Total         int                         `json:"total"`
	Production    *PipelineEnvironmentMetrics `json:"production"`
	NonProduction *PipelineEnvironmentMetrics `json:"nonProduction"`
}

// PipelineEnvironmentMetrics totals pipelines of one environment class.
type PipelineEnvironmentMetrics struct {
	Total   int              `json:"total"`
	Details []EntityMetadata `json:"details"`
}

// DeploymentMetrics totals deployments with per-deployment metadata.
type DeploymentMetrics struct {
	Total   int              `json:"total"`
	Details []EntityMetadata `json:"details"`
}

// Trend-based metrics structures for aggregated time-series data

// ProjectTrendMetrics totals projects with a creation-time trend series.
type ProjectTrendMetrics struct {
	Total int             `json:"total"`
	Trend []TimeDataPoint `json:"trend"`
}

// AppTrendMetrics totals applications with per-category trend series.
type AppTrendMetrics struct {
	Total          int                  `json:"total"`
	YourApps       *AppTypeTrendMetrics `json:"yourApps"`
	ThirdPartyApps *AppTypeTrendMetrics `json:"thirdPartyApps"`
}

// AppTypeTrendMetrics totals one app category with a trend series.
type AppTypeTrendMetrics struct {
	Total int             `json:"total"`
	Trend []TimeDataPoint `json:"trend"`
}

// EnvironmentTrendMetrics totals environments with a trend series.
type EnvironmentTrendMetrics struct {
	Total int             `json:"total"`
	Trend []TimeDataPoint `json:"trend"`
}

// BuildPipelineTrendMetrics totals CI pipelines with per-category trends.
type BuildPipelineTrendMetrics struct {
	Total               int                         `json:"total"`
	NormalCiPipelines   *CiPipelineTypeTrendMetrics `json:"normalCiPipelines"`
	ExternalCiPipelines *CiPipelineTypeTrendMetrics `json:"externalCiPipelines"`
}

// CiPipelineTypeTrendMetrics totals one CI-pipeline category with a trend series.
type CiPipelineTypeTrendMetrics struct {
	Total int             `json:"total"`
	Trend []TimeDataPoint `json:"trend"`
}

// CdPipelineTrendMetrics totals CD pipelines with per-environment-class trends.
type CdPipelineTrendMetrics struct {
	Total         int                              `json:"total"`
	Production    *PipelineEnvironmentTrendMetrics `json:"production"`
	NonProduction *PipelineEnvironmentTrendMetrics `json:"nonProduction"`
}

// PipelineEnvironmentTrendMetrics totals one environment class with a trend series.
type PipelineEnvironmentTrendMetrics struct {
	Total int             `json:"total"`
	Trend []TimeDataPoint `json:"trend"`
}

// TrendComparison is a delta against a reference period.
type TrendComparison struct {
	Value int    `json:"value"` // The difference value (can be positive or negative)
	Label string `json:"label"` // e.g., "this month", "this week", "this quarter"
}

// AppsOverviewResponse is the at-a-glance summary for the apps dashboard.
type AppsOverviewResponse struct {
	Projects         *AtAGlanceMetric `json:"projects"`
	YourApplications *AtAGlanceMetric `json:"yourApplications"`
	HelmApplications *AtAGlanceMetric `json:"helmApplications"`
	Environments     *AtAGlanceMetric `json:"environments"`
}

// WorkflowOverviewResponse is the at-a-glance summary for the workflows dashboard.
type WorkflowOverviewResponse struct {
	BuildPipelines                *AtAGlanceMetric `json:"buildPipelines"`
	ExternalImageSource           *AtAGlanceMetric `json:"externalImageSource"`
	AllDeploymentPipelines        *AtAGlanceMetric `json:"allDeploymentPipelines"`
	ScanningEnabledInWorkflows    *AtAGlanceMetric `json:"scanningEnabledInWorkflows"`
	GitOpsComplianceProdPipelines *AtAGlanceMetric `json:"gitOpsComplianceProdPipelines"`
	ProductionPipelines           *AtAGlanceMetric `json:"productionPipelines"`
}

// AtAGlanceMetric is a count optionally accompanied by a percentage.
type AtAGlanceMetric struct {
	Total int `json:"total"`
	// NOTE(review): omitempty on a float64 drops a legitimate 0% from the
	// JSON output — confirm clients treat "absent" and "0" the same.
	Percentage float64 `json:"percentage,omitempty"` // Optional: percentage value for metrics that represent percentages
}

// BuildDeploymentActivityResponse is the summary counters for the activity widget.
type BuildDeploymentActivityResponse struct {
	TotalBuildTriggers      int     `json:"totalBuildTriggers"`
	AverageBuildTime        float64 `json:"averageBuildTime"` // in minutes
	TotalDeploymentTriggers int     `json:"totalDeploymentTriggers"`
}

// BuildDeploymentActivityDetailedResponse carries exactly one of the three
// trend series, selected by ActivityKind.
type BuildDeploymentActivityDetailedResponse struct {
	ActivityKind            ActivityKind                `json:"activityKind"`    // Type of activity data returned
	AggregationType         constants.AggregationType   `json:"aggregationType"` // HOURLY, DAILY, or MONTHLY
	BuildTriggersTrend      []BuildStatusDataPoint      `json:"buildTriggersTrend,omitempty"`
	DeploymentTriggersTrend []DeploymentStatusDataPoint `json:"deploymentTriggersTrend,omitempty"`
	AvgBuildTimeTrend       []BuildTimeDataPoint        `json:"avgBuildTimeTrend,omitempty"`
}

// BuildStatusDataPoint is one aggregation bucket of build-trigger outcomes.
type BuildStatusDataPoint struct {
	Timestamp  time.Time `json:"timestamp"`  // Timestamp representing start of aggregation period
	Total      int       `json:"total"`      // Total build triggers
	Successful int       `json:"successful"` // Successful builds
	Failed     int       `json:"failed"`     // Failed builds
}

// DeploymentStatusDataPoint is one aggregation bucket of deployment-trigger outcomes.
type DeploymentStatusDataPoint struct {
	Timestamp  time.Time `json:"timestamp"`  // Timestamp representing start of aggregation period
	Total      int       `json:"total"`      // Total deployment triggers
	Successful int       `json:"successful"` // Successful deployments
	Failed     int       `json:"failed"`     // Failed deployments
}

// BuildTimeDataPoint is one aggregation bucket of average build duration.
type BuildTimeDataPoint struct {
	Timestamp        time.Time `json:"timestamp"`        // Timestamp representing start of aggregation period
	AverageBuildTime float64   `json:"averageBuildTime"` // in minutes for that time period
}

// Insights Beans

// PipelineType selects which pipeline family an insights query targets.
type PipelineType string

const (
	BuildPipelines      PipelineType = "buildPipelines"
	DeploymentPipelines PipelineType = "deploymentPipelines"
)

// SortOrder is the sort direction for insights listings.
type SortOrder string

const (
	ASC  SortOrder = "ASC"
	DESC SortOrder = "DESC"
)

// InsightsRequest is a paginated, sorted pipeline-usage query.
type InsightsRequest struct {
	TimeRangeRequest *utils.TimeRangeRequest `json:"timeRangeRequest"`
	PipelineType     PipelineType            `json:"pipelineType"`
	SortOrder        SortOrder               `json:"sortOrder"`
	Limit            int                     `json:"limit"`
	Offset           int                     `json:"offset"`
}

// InsightsResponse is one page of pipeline usage plus the unpaginated total.
type InsightsResponse struct {
	Pipelines  []PipelineUsageItem `json:"pipelines"`
	TotalCount int                 `json:"totalCount"`
}

// PipelineUsageItem is one pipeline's trigger-count row in the insights list.
type PipelineUsageItem struct {
	AppID        int    `json:"appId"`           // Required for both CI and CD pipelines
	EnvID        int    `json:"envId,omitempty"` // Only for deployment pipelines
	PipelineID   int    `json:"pipelineId"`
	PipelineName string `json:"pipelineName"`
	AppName      string `json:"appName"`
	EnvName      string `json:"envName,omitempty"` // Only for deployment pipelines
	TriggerCount int    `json:"triggerCount"`
}

// ApprovalPolicyOverviewResponse summarizes approval-policy adoption on prod pipelines.
type ApprovalPolicyOverviewResponse struct {
	TotalProdPipelineCount              int `json:"totalProdPipelineCount"`
	PipelineCountWithConfigApproval     int `json:"pipelineCountWithConfigApproval"`
	PipelineCountWithDeploymentApproval int `json:"pipelineCountWithDeploymentApproval"`
}

// Cluster Management Overview Beans

// ClusterOverviewRequest represents the request for cluster management overview
type ClusterOverviewRequest struct {
	// No specific filters needed for now - returns all cluster data
}

// ClusterOverviewResponse represents the comprehensive cluster management overview
type ClusterOverviewResponse struct {
	TotalClusters               int                                   `json:"totalClusters"`
	TotalCpuCapacity            *ResourceCapacity                     `json:"totalCpuCapacity"`
	TotalMemoryCapacity         *ResourceCapacity                     `json:"totalMemoryCapacity"`
	ClusterStatusBreakdown      *ClusterStatusBreakdown               `json:"clusterStatusBreakdown"`
	NodeSchedulingBreakdown     *NodeSchedulingBreakdown              `json:"nodeSchedulingBreakdown"`
	NodeErrorBreakdown          *NodeErrorBreakdown                   `json:"nodeErrorBreakdown"`
	ClusterDistribution         *ClusterDistribution                  `json:"clusterDistribution"`
	ClusterCapacityDistribution []ClusterCapacityDistribution         `json:"clusterCapacityDistribution"`
	RawClusterCapacityDetails   []*capacityBean.ClusterCapacityDetail `json:"rawClusterCapacityDetails"`
	NodeDistribution            *NodeDistribution                     `json:"nodeDistribution"`
}

// ResourceCapacity represents capacity with value and unit
type ResourceCapacity struct {
	Value string `json:"value"`
	Unit  string `json:"unit"`
}

// ClusterStatusBreakdown represents cluster health status breakdown
type ClusterStatusBreakdown struct {
	Healthy          int `json:"healthy"`
	Unhealthy        int `json:"unhealthy"`
	ConnectionFailed int `json:"connectionFailed"`
}

// NodeErrorBreakdown represents breakdown of node errors with detailed node information
type NodeErrorBreakdown struct {
	ErrorCounts map[string]int    `json:"errorCounts"` // Map of error types to their counts
	Total       int               `json:"total"`       // Total number of node errors
	NodeErrors  []NodeErrorDetail `json:"nodeErrors"`  // Detailed list of nodes with errors
}

// NodeErrorDetail represents detailed error information for a single node
type NodeErrorDetail struct {
	NodeName    string   `json:"nodeName"`    // Name of the node with errors
	ClusterName string   `json:"clusterName"` // Name of the cluster the node belongs to
	ClusterID   int      `json:"clusterId"`   // ID of the cluster
	Errors      []string `json:"errors"`      // List of error types
	NodeStatus  string   `json:"nodeStatus"`  // Current status of the node (Ready/Not Ready)
}

// NodeSchedulingBreakdown represents breakdown of node scheduling status with detailed node information
type NodeSchedulingBreakdown struct {
	Schedulable        int                    `json:"schedulable"`        // Count of schedulable nodes
	Unschedulable      int                    `json:"unschedulable"`      // Count of unschedulable nodes
	Total              int                    `json:"total"`              // Total number of nodes
	SchedulableNodes   []NodeSchedulingDetail `json:"schedulableNodes"`   // Detailed list of schedulable nodes
	UnschedulableNodes []NodeSchedulingDetail `json:"unschedulableNodes"` // Detailed list of unschedulable nodes
}

// NodeSchedulingDetail represents detailed information about a node's scheduling status
type NodeSchedulingDetail struct {
	NodeName    string `json:"nodeName"`    // Name of the node
	ClusterName string `json:"clusterName"` // Name of the cluster the node belongs to
	ClusterID   int    `json:"clusterId"`   // ID of the cluster
	Schedulable bool   `json:"schedulable"` // Whether the node is schedulable
}

// ClusterDistribution represents cluster distribution by provider and cluster version
type ClusterDistribution struct {
	ByProvider []ProviderDistribution `json:"byProvider"`
	ByVersion  []VersionDistribution  `json:"byVersion"`
}

// ProviderDistribution represents cluster count by cloud provider
type ProviderDistribution struct {
	Provider string `json:"provider"` // AWS, GCP, Azure, On-Premise, etc.
	Count    int    `json:"count"`
}

// VersionDistribution represents cluster count by Kubernetes version (major.minor only)
type VersionDistribution struct {
	Version string `json:"version"` // e.g., "1.28", "1.29", "1.30" (major.minor only, patch ignored)
	Count   int    `json:"count"`
}

// ClusterCapacityDistribution represents capacity distribution for individual clusters
type ClusterCapacityDistribution struct {
	ClusterID     int                    `json:"clusterId"`
	ClusterName   string                 `json:"clusterName"`
	ServerVersion string                 `json:"serverVersion"` // Kubernetes server version (e.g., "v1.28.3")
	CPU           *ClusterResourceMetric `json:"cpu"`
	Memory        *ClusterResourceMetric `json:"memory"`
}

// ClusterResourceMetric represents resource metrics for a cluster
type ClusterResourceMetric struct {
	Capacity           float64 `json:"capacity"`           // Capacity in cores for CPU, Gi for memory (with decimal precision)
	UtilizationPercent float64 `json:"utilizationPercent"` // Utilization percentage
	RequestsPercent    float64 `json:"requestsPercent"`    // Requests percentage
	LimitsPercent      float64 `json:"limitsPercent"`      // Limits percentage
}

// NodeDistribution represents node distribution by clusters and autoscaler
type NodeDistribution struct {
	ByClusters   []ClusterNodeCount    `json:"byClusters"`   // Node count grouped by cluster
	ByAutoscaler []AutoscalerNodeCount `json:"byAutoscaler"` // Node count grouped by autoscaler type
}

// Removed old structs - ClusterSummary, ResourceSummary, NodeCountSummary not needed in new API spec

// ClusterNodeCount represents node count for a specific cluster
type ClusterNodeCount struct {
	ClusterID   int    `json:"clusterId"`   // ID of the cluster
	ClusterName string `json:"clusterName"` // Name of the cluster
	NodeCount   int    `json:"nodeCount"`   // Total number of nodes in this cluster
}

// AutoscalerNodeCount represents node count for a specific autoscaler type with detailed node information
type AutoscalerNodeCount struct {
	AutoscalerType string                 `json:"autoscalerType"` // Type of autoscaler (EKS, Karpenter, Cast AI, GKE, CAS)
	NodeCount      int                    `json:"nodeCount"`      // Total number of nodes managed by this autoscaler
	NodeDetails    []AutoscalerNodeDetail `json:"nodeDetails"`    // Detailed list of nodes managed by this autoscaler
}

// AutoscalerNodeDetail represents detailed information for a single node managed by autoscaler
type AutoscalerNodeDetail struct {
	NodeName    string `json:"nodeName"`    // Name of the node
	ClusterName string `json:"clusterName"` // Name of the cluster the node belongs to
	ClusterID   int    `json:"clusterId"`   // ID of the cluster
	ManagedBy   string `json:"managedBy"`   // Display name of the autoscaler managing this node
}

// Cluster Upgrade Overview Beans

// ClusterUpgradeOverviewResponse represents the response for cluster upgrade overview
type ClusterUpgradeOverviewResponse struct {
	CanCurrentUserUpgrade bool                    `json:"canCurrentUserUpgrade"`
	LatestVersion         string                  `json:"latestVersion"`
	ClusterList           []ClusterUpgradeDetails `json:"clusterList"`
}

// ClusterUpgradeDetails represents upgrade details for a single cluster
type ClusterUpgradeDetails struct {
	ClusterId      int      `json:"clusterId"`
	ClusterName    string   `json:"clusterName"`
	CurrentVersion string   `json:"currentVersion"`
	UpgradePath    []string `json:"upgradePath"`
}

// NodeViewGroupType represents the type of node view grouping
type NodeViewGroupType string

const (
	NodeViewGroupTypeNodeErrors     NodeViewGroupType = "nodeErrors"
	NodeViewGroupTypeNodeScheduling NodeViewGroupType = "nodeScheduling"
	NodeViewGroupTypeAutoscaler     NodeViewGroupType = "autoscalerManaged"
)

// ClusterOverviewDetailRequest represents request parameters for detailed drill-down API
type ClusterOverviewDetailRequest struct {
	GroupBy   NodeViewGroupType `schema:"groupBy" validate:"required,oneof=nodeErrors nodeScheduling autoscalerManaged"`
	Offset    int               `schema:"offset"`
	Limit     int               `schema:"limit"`
	SortBy    string            `schema:"sortBy"`
	SortOrder string            `schema:"sortOrder"` // asc or desc
	SearchKey string            `schema:"searchKey"`

	// Filter parameters (optional, used based on GroupBy)
	AutoscalerType  string `schema:"autoscalerType"`  // Filter by autoscaler type (only for autoscalerManaged groupBy)
	ErrorType       string `schema:"errorType"`       // Filter by error type (only for nodeErrors groupBy)
	SchedulableType string `schema:"schedulableType"` // Filter by schedulable type: "schedulable" or "unschedulable" (only for nodeScheduling groupBy)
}

// ClusterOverviewNodeDetailedResponse represents the unified response for all node view group types
// Fields are conditionally included based on the groupBy parameter
type ClusterOverviewNodeDetailedResponse struct {
	TotalCount int                               `json:"totalCount"`
	NodeList   []ClusterOverviewNodeDetailedItem `json:"nodeList"`
}

// ClusterOverviewNodeDetailedItem represents a single node item in the detailed response
// Different fields are populated based on the NodeViewGroupType
type ClusterOverviewNodeDetailedItem struct {
	// Common fields (always present)
	NodeName    string `json:"nodeName"`
	ClusterName string `json:"clusterName"`
	ClusterID   int    `json:"clusterId,omitempty"`

	// NodeErrors specific fields
	NodeErrors []string `json:"nodeErrors,omitempty"` // List of error types (only for nodeErrors type)
	NodeStatus string   `json:"nodeStatus,omitempty"` // Node status: Ready/Not Ready (only for nodeErrors type)

	// NodeScheduling specific fields
	// NOTE(review): omitempty on a bool omits false, so an unschedulable
	// node serializes without this key — confirm clients handle that.
	Schedulable bool `json:"schedulable,omitempty"` // Whether node is schedulable (only for nodeScheduling type)

	// Autoscaler specific fields
	AutoscalerType string `json:"autoscalerType,omitempty"` // Type of autoscaler managing the node (only for autoscaler type)
}
// ---- Common types ----------------------------------------------------------

// EnvType scopes a security query to production, non-production, or all
// environments.
type EnvType string

const (
	EnvTypeProd    EnvType = "prod"
	EnvTypeNonProd EnvType = "non-prod"
	EnvTypeAll     EnvType = "all"
)

// VulnerabilityCount pairs the raw occurrence count with the deduplicated
// CVE count.
type VulnerabilityCount struct {
	Count       int `json:"count"`       // Total instances (with duplicates)
	UniqueCount int `json:"uniqueCount"` // Unique CVEs
}

// SeverityCount breaks a vulnerability total down by severity level.
type SeverityCount struct {
	Critical int `json:"critical"`
	High     int `json:"high"`
	Medium   int `json:"medium"`
	Low      int `json:"low"`
	Unknown  int `json:"unknown"`
}

// AgeBucketSeverity is the per-severity breakdown inside one age bucket.
// (Structurally identical to SeverityCount; kept distinct for API clarity.)
type AgeBucketSeverity struct {
	Critical int `json:"critical"`
	High     int `json:"high"`
	Medium   int `json:"medium"`
	Low      int `json:"low"`
	Unknown  int `json:"unknown"`
}

// AgeDistribution buckets vulnerabilities by how long they have been open.
type AgeDistribution struct {
	LessThan30Days    *AgeBucketSeverity `json:"lessThan30Days"`
	Between30To60Days *AgeBucketSeverity `json:"between30To60Days"`
	Between60To90Days *AgeBucketSeverity `json:"between60To90Days"`
	MoreThan90Days    *AgeBucketSeverity `json:"moreThan90Days"`
}

// ---- 1. Security overview (organization-wide at-a-glance) ------------------

// SecurityOverviewRequest filters the overview by environment/cluster/app ids.
type SecurityOverviewRequest struct {
	EnvIds     []int `json:"envIds" schema:"envIds"`
	ClusterIds []int `json:"clusterIds" schema:"clusterIds"`
	AppIds     []int `json:"appIds" schema:"appIds"`
}

// SecurityOverviewResponse carries the headline vulnerability counters.
type SecurityOverviewResponse struct {
	TotalVulnerabilities   *VulnerabilityCount `json:"totalVulnerabilities"`
	FixableVulnerabilities *VulnerabilityCount `json:"fixableVulnerabilities"`
	ZeroDayVulnerabilities *VulnerabilityCount `json:"zeroDayVulnerabilities"`
}

// ---- 2. Severity insights (prod/non-prod filterable) -----------------------

// SeverityInsightsRequest filters severity insights; EnvType is mandatory.
type SeverityInsightsRequest struct {
	EnvIds     []int   `json:"envIds" schema:"envIds"`
	ClusterIds []int   `json:"clusterIds" schema:"clusterIds"`
	AppIds     []int   `json:"appIds" schema:"appIds"`
	EnvType    EnvType `json:"envType" schema:"envType" validate:"required,oneof=prod non-prod all"`
}

// SeverityInsightsResponse returns severity and age breakdowns.
type SeverityInsightsResponse struct {
	SeverityDistribution *SeverityCount   `json:"severityDistribution"`
	AgeDistribution      *AgeDistribution `json:"ageDistribution"`
}

// ---- 3. Deployment security status -----------------------------------------

// DeploymentSecurityStatusRequest filters the deployment security summary.
type DeploymentSecurityStatusRequest struct {
	EnvIds     []int `json:"envIds" schema:"envIds"`
	ClusterIds []int `json:"clusterIds" schema:"clusterIds"`
	AppIds     []int `json:"appIds" schema:"appIds"`
}

// DeploymentMetric is a deployment count with its share of the total.
type DeploymentMetric struct {
	Count      int     `json:"count"`
	Percentage float64 `json:"percentage"`
}

// WorkflowMetric is a workflow count with its share of the total.
type WorkflowMetric struct {
	Count      int     `json:"count"`
	Percentage float64 `json:"percentage"`
}

// DeploymentSecurityStatusResponse summarizes scanning posture of active deployments.
type DeploymentSecurityStatusResponse struct {
	ActiveDeploymentsWithVulnerabilities *DeploymentMetric `json:"activeDeploymentsWithVulnerabilities"`
	ActiveDeploymentsWithUnscannedImages *DeploymentMetric `json:"activeDeploymentsWithUnscannedImages"`
	WorkflowsWithScanningEnabled         *WorkflowMetric   `json:"workflowsWithScanningEnabled"`
}

// ---- 4. Vulnerability details (paginated list) ------------------------------

// VulnerabilitiesRequest is a filtered, paginated CVE listing request.
type VulnerabilitiesRequest struct {
	EnvIds     []int  `json:"envIds" schema:"envIds"`
	ClusterIds []int  `json:"clusterIds" schema:"clusterIds"`
	AppIds     []int  `json:"appIds" schema:"appIds"`
	Severity   string `json:"severity" schema:"severity"` // Optional: critical, high, medium, low, unknown
	Offset     int    `json:"offset" schema:"offset"`
	Size       int    `json:"size" schema:"size" validate:"required,min=1,max=100"`
}

// Vulnerability is one CVE row in the paginated listing.
type Vulnerability struct {
	CveName          string    `json:"cveName"`
	Severity         string    `json:"severity"`
	Package          string    `json:"package"`
	CurrentVersion   string    `json:"currentVersion"`
	FixedVersion     string    `json:"fixedVersion"`
	AppCount         int       `json:"appCount"`         // Number of apps affected
	EnvironmentCount int       `json:"environmentCount"` // Number of environments affected
	FirstDetected    time.Time `json:"firstDetected"`
}

// VulnerabilitiesResponse is one page of CVEs plus paging metadata.
type VulnerabilitiesResponse struct {
	Vulnerabilities []*Vulnerability `json:"vulnerabilities"`
	Total           int              `json:"total"`
	Offset          int              `json:"offset"`
	Size            int              `json:"size"`
}

// ---- 5. Vulnerability trend (time series, prod/non-prod filterable) --------

// VulnerabilityTrendRequest selects a named window (or explicit From/To) and
// an environment scope for the trend series.
type VulnerabilityTrendRequest struct {
	TimeWindow string     `json:"timeWindow" schema:"timeWindow" validate:"required,oneof=today thisWeek thisMonth thisQuarter"`
	EnvType    EnvType    `json:"envType" schema:"envType" validate:"required,oneof=prod non-prod all"`
	From       *time.Time `json:"from" schema:"from"`
	To         *time.Time `json:"to" schema:"to"`
}

// VulnerabilityTrendDataPoint is one time bucket of severity counts.
type VulnerabilityTrendDataPoint struct {
	Timestamp time.Time `json:"timestamp"`
	Critical  int       `json:"critical"`
	High      int       `json:"high"`
	Medium    int       `json:"medium"`
	Low       int       `json:"low"`
	Unknown   int       `json:"unknown"`
	Total     int       `json:"total"`
}

// VulnerabilityTrendResponse is the ordered trend series.
type VulnerabilityTrendResponse struct {
	Trend []*VulnerabilityTrendDataPoint `json:"trend"`
}

// ---- 6. Blocked deployments trend (organization-wide) -----------------------

// BlockedDeploymentsTrendRequest selects a named window (or explicit From/To).
type BlockedDeploymentsTrendRequest struct {
	TimeWindow string     `json:"timeWindow" schema:"timeWindow" validate:"required,oneof=today thisWeek thisMonth thisQuarter"`
	From       *time.Time `json:"from" schema:"from"`
	To         *time.Time `json:"to" schema:"to"`
}

// BlockedDeploymentDataPoint is one time bucket of blocked-deployment counts.
type BlockedDeploymentDataPoint struct {
	Timestamp time.Time `json:"timestamp"`
	Count     int       `json:"count"`
}

// BlockedDeploymentsTrendResponse is the ordered trend series.
type BlockedDeploymentsTrendResponse struct {
	Trend []*BlockedDeploymentDataPoint `json:"trend"`
}
+ */ + +package cache + +import ( + "fmt" + "sync" + "time" + + "github.com/devtron-labs/devtron/pkg/overview/bean" + "go.uber.org/zap" +) + +// ClusterCacheService provides caching functionality for cluster overview data +type ClusterCacheService interface { + GetClusterOverview() (*bean.ClusterOverviewResponse, bool) + SetClusterOverview(data *bean.ClusterOverviewResponse) error + InvalidateClusterOverview() + InvalidateAll() + IsRefreshing() bool + SetRefreshing(refreshing bool) + GetCacheAge() time.Duration +} + +// ClusterCacheServiceImpl implements ClusterCacheService using in-memory cache +type ClusterCacheServiceImpl struct { + logger *zap.SugaredLogger + overviewCache *cacheEntry + cacheMutex sync.RWMutex +} + +// cacheEntry represents a cached item with timestamp +type cacheEntry struct { + data interface{} + lastUpdated time.Time + isRefreshing bool +} + +// NewClusterCacheServiceImpl creates a new instance of ClusterCacheServiceImpl +func NewClusterCacheServiceImpl(logger *zap.SugaredLogger) *ClusterCacheServiceImpl { + return &ClusterCacheServiceImpl{ + logger: logger, + } +} + +// GetClusterOverview retrieves cluster overview data from cache +func (impl *ClusterCacheServiceImpl) GetClusterOverview() (*bean.ClusterOverviewResponse, bool) { + impl.cacheMutex.RLock() + defer impl.cacheMutex.RUnlock() + + if impl.overviewCache == nil { + return nil, false + } + + if data, ok := impl.overviewCache.data.(*bean.ClusterOverviewResponse); ok { + age := time.Since(impl.overviewCache.lastUpdated) + impl.logger.Infow("cluster overview cache hit", "cacheAge", age) + return data, true + } + + impl.logger.Errorw("cluster overview cache data type mismatch") + return nil, false +} + +// SetClusterOverview stores cluster overview data in cache with timestamp +func (impl *ClusterCacheServiceImpl) SetClusterOverview(data *bean.ClusterOverviewResponse) error { + if data == nil { + return fmt.Errorf("cannot cache nil cluster overview data") + } + + impl.cacheMutex.Lock() + 
defer impl.cacheMutex.Unlock() + + impl.overviewCache = &cacheEntry{ + data: data, + lastUpdated: time.Now(), + } + + impl.logger.Debugw("cluster overview data cached", "timestamp", impl.overviewCache.lastUpdated) + return nil +} + +// InvalidateClusterOverview removes cluster overview data from cache +func (impl *ClusterCacheServiceImpl) InvalidateClusterOverview() { + impl.cacheMutex.Lock() + defer impl.cacheMutex.Unlock() + + impl.overviewCache = nil + impl.logger.Debugw("cluster overview cache invalidated") +} + +// InvalidateAll removes all cached data +func (impl *ClusterCacheServiceImpl) InvalidateAll() { + impl.cacheMutex.Lock() + defer impl.cacheMutex.Unlock() + + impl.overviewCache = nil + impl.logger.Debugw("all cluster cache invalidated") +} + +// IsRefreshing checks if cache is currently being refreshed +func (impl *ClusterCacheServiceImpl) IsRefreshing() bool { + impl.cacheMutex.RLock() + defer impl.cacheMutex.RUnlock() + + if impl.overviewCache == nil { + return false + } + return impl.overviewCache.isRefreshing +} + +// SetRefreshing marks cache as being refreshed +func (impl *ClusterCacheServiceImpl) SetRefreshing(refreshing bool) { + impl.cacheMutex.Lock() + defer impl.cacheMutex.Unlock() + + if impl.overviewCache != nil { + impl.overviewCache.isRefreshing = refreshing + } else if refreshing { + // Initialize cache entry if setting refreshing to true + impl.overviewCache = &cacheEntry{ + isRefreshing: true, + lastUpdated: time.Now(), + } + } +} + +// GetCacheAge returns how old the cached data is +func (impl *ClusterCacheServiceImpl) GetCacheAge() time.Duration { + impl.cacheMutex.RLock() + defer impl.cacheMutex.RUnlock() + + if impl.overviewCache == nil { + return 0 + } + return time.Since(impl.overviewCache.lastUpdated) +} diff --git a/pkg/overview/config/ClusterOverviewConfig.go b/pkg/overview/config/ClusterOverviewConfig.go new file mode 100644 index 0000000000..349f8da247 --- /dev/null +++ b/pkg/overview/config/ClusterOverviewConfig.go @@ 
-0,0 +1,50 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package config + +import ( + "fmt" + "time" + + "github.com/caarlos0/env" +) + +// ClusterOverviewConfig represents configuration for cluster overview functionality +type ClusterOverviewConfig struct { + // CacheEnabled enables or disables caching for cluster overview data + CacheEnabled bool `env:"CLUSTER_OVERVIEW_CACHE_ENABLED" envDefault:"true" description:"Enable caching for cluster overview data"` + + // BackgroundRefreshEnabled enables proactive background cache refresh + BackgroundRefreshEnabled bool `env:"CLUSTER_OVERVIEW_BACKGROUND_REFRESH_ENABLED" envDefault:"true" description:"Enable background refresh of cluster overview cache"` + + // RefreshIntervalSeconds defines how often to refresh cache in background + RefreshIntervalSeconds int `env:"CLUSTER_OVERVIEW_REFRESH_INTERVAL_SECONDS" envDefault:"15" description:"Background cache refresh interval in seconds"` + + // MaxParallelClusters limits concurrent cluster API calls during refresh + MaxParallelClusters int `env:"CLUSTER_OVERVIEW_MAX_PARALLEL_CLUSTERS" envDefault:"15" description:"Maximum number of clusters to fetch in parallel during refresh"` + + // MaxStaleDataSeconds maximum age of cache before considering it too stale + MaxStaleDataSeconds int `env:"CLUSTER_OVERVIEW_MAX_STALE_DATA_SECONDS" envDefault:"30" description:"Maximum age of cached data in seconds before warning"` +} + +// GetRefreshInterval returns the refresh interval as a time.Duration +func (c *ClusterOverviewConfig) GetRefreshInterval() time.Duration { + return time.Duration(c.RefreshIntervalSeconds) * time.Second +} + +// GetMaxStaleDataDuration returns the max stale data duration as a time.Duration +func (c *ClusterOverviewConfig) GetMaxStaleDataDuration() time.Duration { + return time.Duration(c.MaxStaleDataSeconds) * time.Second +} + +func GetClusterOverviewConfig() (*ClusterOverviewConfig, error) { + cfg := &ClusterOverviewConfig{} + err := env.Parse(cfg) + if err != 
nil { + return nil, fmt.Errorf("failed to parse infra overview config: %w", err) + } + + return cfg, nil +} diff --git a/pkg/overview/constants/ClusterConstants.go b/pkg/overview/constants/ClusterConstants.go new file mode 100644 index 0000000000..ebcbff7842 --- /dev/null +++ b/pkg/overview/constants/ClusterConstants.go @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package constants + +// Cloud Provider Constants +const ( + ProviderAWS = "AWS" + ProviderGCP = "GCP" + ProviderAzure = "Azure" + ProviderOracle = "Oracle" + ProviderDigitalOcean = "DigitalOcean" + ProviderIBM = "IBM" + ProviderAlibaba = "Alibaba" + ProviderUnknown = "Unknown" +) + +// Node Condition Type Constants +// These map to Kubernetes node condition types +const ( + NodeConditionNetworkUnavailable = "NetworkUnavailable" + NodeConditionMemoryPressure = "MemoryPressure" + NodeConditionDiskPressure = "DiskPressure" + NodeConditionPIDPressure = "PIDPressure" + NodeConditionReady = "Ready" + NodeConditionOthers = "Others" +) + +// Node Error Breakdown Keys +// These are used as keys in the NodeErrorBreakdown map +const ( + NodeErrorNetworkUnavailable = "NetworkUnavailable" + NodeErrorMemoryPressure = "MemoryPressure" + NodeErrorDiskPressure = "DiskPressure" + NodeErrorPIDPressure = "PIDPressure" + NodeErrorKubeletNotReady = "KubeletNotReady" + NodeErrorOthers = "Others" +) + +// Version Constants +const ( + VersionUnknown = 
// Autoscaler Label Constants
// These labels are used to identify which autoscaler manages a node
const (
	// EKS Auto Mode label: eks.amazonaws.com/compute-type=auto
	LabelEKSComputeType = "eks.amazonaws.com/compute-type"
	LabelEKSComputeAuto = "auto"

	// Karpenter label: karpenter.sh/initialized=true
	LabelKarpenterInitialized = "karpenter.sh/initialized"
	LabelKarpenterTrue        = "true"

	// Cast AI label: provisioner.cast.ai/managed-by=cast.ai
	LabelCastAIManagedBy = "provisioner.cast.ai/managed-by"
	LabelCastAIValue     = "cast.ai"

	// GKE label: cloud.google.com/gke-provisioning
	LabelGKEProvisioning = "cloud.google.com/gke-provisioning"
	// NOTE(review): the constant is named "AutoPilot" but its value is "spot".
	// In GKE, gke-provisioning=spot marks Spot VMs rather than Autopilot-managed
	// nodes — confirm whether "spot" is the intended match value before relying
	// on this for autoscaler detection.
	LabelGKEAutoPilot = "spot"
)
Schedulable Type Constants for filtering +const ( + SchedulableTypeSchedulable = "schedulable" + SchedulableTypeUnschedulable = "unschedulable" +) diff --git a/pkg/overview/constants/TimeConstants.go b/pkg/overview/constants/TimeConstants.go new file mode 100644 index 0000000000..b1be68396f --- /dev/null +++ b/pkg/overview/constants/TimeConstants.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2024. Devtron Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package constants + +import ( + "time" +) + +// TimePeriod represents the predefined time periods +type TimePeriod string + +// TimeWindow represents the predefined time windows (same as TimePeriod but for API consistency) +type TimeWindow string + +const ( + Today TimePeriod = "today" + ThisWeek TimePeriod = "week" + ThisMonth TimePeriod = "month" + ThisQuarter TimePeriod = "quarter" + LastWeek TimePeriod = "lastWeek" + LastMonth TimePeriod = "lastMonth" +) + +// AggregationType represents how data should be aggregated +type AggregationType string + +const ( + AggregateByHour AggregationType = "HOUR" + AggregateByDay AggregationType = "DAY" + AggregateByMonth AggregationType = "MONTH" +) + +// TimeRange represents a time range with from and to timestamps +type TimeRange struct { + From time.Time + To time.Time + AggregationType AggregationType +} + +// IsValidTimePeriod checks if the given string is a valid time period +func IsValidTimePeriod(period string) bool { + switch TimePeriod(period) { + 
// IsValidTimeWindow reports whether the given string is one of the supported
// time window identifiers ("today", "week", "month", "quarter", "lastWeek",
// "lastMonth"). The comparison is case-sensitive.
func IsValidTimeWindow(window string) bool {
	// Linear scan over the small fixed set of recognised window names.
	for _, valid := range [...]string{"today", "week", "month", "quarter", "lastWeek", "lastMonth"} {
		if window == valid {
			return true
		}
	}
	return false
}
// DetermineAutoscalerTypeFromLabels determines the autoscaler type based on node labels (map format).
// This is used by the informer layer which works with native Kubernetes node labels.
// Priority order: EKS Auto Mode > Karpenter > Cast AI > GKE > Not Detected.
// Note: Cluster Autoscaler (CAS) cannot be reliably detected from node labels alone as it works
// with existing node groups and doesn't add its own labels. Nodes managed by CAS will show as "Not Detected".
func DetermineAutoscalerTypeFromLabels(labels map[string]string) string {
	// Check for EKS Auto Mode: eks.amazonaws.com/compute-type=auto
	if computeType, exists := labels[constants.LabelEKSComputeType]; exists && computeType == constants.LabelEKSComputeAuto {
		return constants.AutoscalerEKS
	}

	// Check for Karpenter: karpenter.sh/initialized=true
	if initialized, exists := labels[constants.LabelKarpenterInitialized]; exists && initialized == constants.LabelKarpenterTrue {
		return constants.AutoscalerKarpenter
	}

	// Check for Cast AI: provisioner.cast.ai/managed-by=cast.ai
	if managedBy, exists := labels[constants.LabelCastAIManagedBy]; exists && managedBy == constants.LabelCastAIValue {
		return constants.AutoscalerCastAI
	}

	// Check for GKE. NOTE(review): the original comment claimed this matches
	// cloud.google.com/gke-provisioning=standard, but LabelGKEAutoPilot is
	// currently "spot", so the code actually matches gke-provisioning=spot —
	// confirm which value is intended.
	if provisioning, exists := labels[constants.LabelGKEProvisioning]; exists && provisioning == constants.LabelGKEAutoPilot {
		return constants.AutoscalerGKE
	}

	// If none of the known autoscaler labels are found, return Not Detected.
	// This includes nodes managed by Cluster Autoscaler (CAS) as CAS doesn't add unique labels.
	return constants.AutoscalerNotDetected
}

// DetermineAutoscalerTypeFromLabelArray determines the autoscaler type based on node labels (array format).
// This is used by the service layer which works with capacity service label objects.
// It converts the label array to a map and delegates to DetermineAutoscalerTypeFromLabels.
func DetermineAutoscalerTypeFromLabelArray(labels []*capacityBean.LabelAnnotationTaintObject) string {
	// Convert label array to map for easier lookup; nil entries are skipped defensively.
	labelMap := make(map[string]string)
	for _, label := range labels {
		if label != nil {
			labelMap[label.Key] = label.Value
		}
	}

	// Delegate to the main detection function
	return DetermineAutoscalerTypeFromLabels(labelMap)
}
100644 index 0000000000..40203f238d --- /dev/null +++ b/pkg/overview/util/ClusterCapacityConverter.go @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2024. Devtron Inc. + */ + +package util + +import ( + "fmt" + + clusterBean "github.com/devtron-labs/devtron/pkg/cluster/bean" + capacityBean "github.com/devtron-labs/devtron/pkg/k8s/capacity/bean" + overviewBean "github.com/devtron-labs/devtron/pkg/overview/bean" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +// ConvertClusterOverviewToCapacityDetails converts ClusterOverviewResponse to ClusterCapacityDetail list +// This is used to serve resource browser API from cluster overview cache +func ConvertClusterOverviewToCapacityDetails( + logger *zap.SugaredLogger, + overviewResponse *overviewBean.ClusterOverviewResponse, + allClusters []clusterBean.ClusterBean, +) []*capacityBean.ClusterCapacityDetail { + if overviewResponse == nil { + logger.Warn("overview response is nil, cannot convert to capacity details") + return nil + } + + capacityDetails := make([]*capacityBean.ClusterCapacityDetail, 0, len(overviewResponse.ClusterCapacityDistribution)) + + // Create a map for quick lookup of cluster beans + clusterMap := make(map[int]clusterBean.ClusterBean) + for _, cluster := range allClusters { + clusterMap[cluster.Id] = cluster + } + + // Create a map for quick lookup of capacity distribution + capacityDistMap := make(map[int]overviewBean.ClusterCapacityDistribution) + for _, capacityDist := range overviewResponse.ClusterCapacityDistribution { + capacityDistMap[capacityDist.ClusterID] = capacityDist + } + + // Create a map for node errors by cluster + nodeErrorsByCluster := make(map[int]map[corev1.NodeConditionType][]string) + for _, nodeError := range overviewResponse.NodeErrorBreakdown.NodeErrors { + if _, exists := nodeErrorsByCluster[nodeError.ClusterID]; !exists { + nodeErrorsByCluster[nodeError.ClusterID] = make(map[corev1.NodeConditionType][]string) + } + // Convert error strings to NodeConditionType + for _, 
errorStr := range nodeError.Errors { + conditionType := corev1.NodeConditionType(errorStr) + nodeErrorsByCluster[nodeError.ClusterID][conditionType] = append( + nodeErrorsByCluster[nodeError.ClusterID][conditionType], + nodeError.NodeName, + ) + } + } + + // Create a map for node count by cluster + nodeCountByCluster := make(map[int]int) + for _, nodeCount := range overviewResponse.NodeDistribution.ByClusters { + nodeCountByCluster[nodeCount.ClusterID] = nodeCount.NodeCount + } + + // Build capacity details for each cluster + for _, cluster := range allClusters { + capacityDist, hasCapacity := capacityDistMap[cluster.Id] + + var detail *capacityBean.ClusterCapacityDetail + if hasCapacity { + // Cluster has capacity data (connected cluster) + detail = buildCapacityDetailFromOverview( + cluster, + capacityDist, + nodeErrorsByCluster[cluster.Id], + nodeCountByCluster[cluster.Id], + ) + } else { + // Connection failed cluster + detail = &capacityBean.ClusterCapacityDetail{ + Id: cluster.Id, + Name: cluster.ClusterName, + ErrorInConnection: cluster.ErrorInConnecting, + Status: capacityBean.ClusterStatusConnectionFailed, + IsVirtualCluster: cluster.IsVirtualCluster, + IsProd: cluster.IsProd, + } + } + + capacityDetails = append(capacityDetails, detail) + } + + logger.Debugw("converted cluster overview to capacity details", + "totalClusters", len(capacityDetails), + "connectedClusters", len(overviewResponse.ClusterCapacityDistribution)) + + return capacityDetails +} + +// buildCapacityDetailFromOverview builds a single ClusterCapacityDetail from overview data +func buildCapacityDetailFromOverview( + cluster clusterBean.ClusterBean, + capacityDist overviewBean.ClusterCapacityDistribution, + nodeErrors map[corev1.NodeConditionType][]string, + nodeCount int, +) *capacityBean.ClusterCapacityDetail { + // Determine cluster status based on node errors + status := capacityBean.ClusterStatusHealthy + if len(nodeErrors) > 0 { + status = capacityBean.ClusterStatusUnHealthy + } + + 
// Build CPU and Memory resource objects + cpuResource := &capacityBean.ResourceDetailObject{ + Capacity: fmt.Sprintf("%.2f", capacityDist.CPU.Capacity), + } + + // Add utilization, requests, and limits percentages if available + if capacityDist.CPU.UtilizationPercent > 0 { + cpuResource.UsagePercentage = fmt.Sprintf("%.2f", capacityDist.CPU.UtilizationPercent) + } + if capacityDist.CPU.RequestsPercent > 0 { + cpuResource.RequestPercentage = fmt.Sprintf("%.2f", capacityDist.CPU.RequestsPercent) + } + if capacityDist.CPU.LimitsPercent > 0 { + cpuResource.LimitPercentage = fmt.Sprintf("%.2f", capacityDist.CPU.LimitsPercent) + } + + memoryResource := &capacityBean.ResourceDetailObject{ + Capacity: fmt.Sprintf("%.2fGi", capacityDist.Memory.Capacity), + } + + // Add utilization, requests, and limits percentages if available + if capacityDist.Memory.UtilizationPercent > 0 { + memoryResource.UsagePercentage = fmt.Sprintf("%.2f", capacityDist.Memory.UtilizationPercent) + } + if capacityDist.Memory.RequestsPercent > 0 { + memoryResource.RequestPercentage = fmt.Sprintf("%.2f", capacityDist.Memory.RequestsPercent) + } + if capacityDist.Memory.LimitsPercent > 0 { + memoryResource.LimitPercentage = fmt.Sprintf("%.2f", capacityDist.Memory.LimitsPercent) + } + + return &capacityBean.ClusterCapacityDetail{ + Id: cluster.Id, + Name: cluster.ClusterName, + NodeCount: nodeCount, + NodeDetails: []capacityBean.NodeDetails{}, // Not available in overview cache + NodeErrors: nodeErrors, + NodeK8sVersions: []string{}, // Not available in overview cache + ServerVersion: "", // Not available in overview cache + Cpu: cpuResource, + Memory: memoryResource, + Status: status, + IsVirtualCluster: cluster.IsVirtualCluster, + IsProd: cluster.IsProd, + } +} diff --git a/pkg/overview/util/DoraMetricUtils.go b/pkg/overview/util/DoraMetricUtils.go new file mode 100644 index 0000000000..b7e8d26813 --- /dev/null +++ b/pkg/overview/util/DoraMetricUtils.go @@ -0,0 +1,201 @@ +package util + +import ( + 
// CalculateComparison calculates the comparison value between current and previous periods.
// Returns percentage for DeploymentFrequency and ChangeFailureRate, minutes for MeanLeadTime
// and MeanTimeToRecovery (units are presumably minutes per the comment below — TODO confirm
// against the producers of these values).
// All results are truncated toward zero by the int conversion (e.g. 9.9% -> 9, -9.9 -> -9).
func CalculateComparison(current, previous float64, metricCategory bean.MetricCategory) int {
	switch metricCategory {
	case bean.MetricCategoryDeploymentFrequency, bean.MetricCategoryChangeFailureRate:
		if previous == 0 {
			if current > 0 {
				return 100 // Return 100% increase when previous was 0
			}
			// Both periods are zero: no change to report.
			return 0
		}
		// Calculate percentage change for frequency and failure rate metrics
		percentageChange := ((current - previous) / previous) * 100
		return int(percentageChange)
	case bean.MetricCategoryMeanLeadTime, bean.MetricCategoryMeanTimeToRecovery:
		if previous == 0 {
			if current > 0 {
				// No baseline: report the current value itself as the delta.
				return int(current)
			}
			return 0
		}
		// Calculate minutes difference for time-based metrics
		return int(current - previous)
	default:
		// Unknown metric categories yield no comparison.
		return 0
	}
}
app-env pair based on the specific metric + for _, lensMetrics := range metricsData { + var metricValue float64 + + // Get the appropriate metric value based on category + switch metricCategory { + case bean.MetricCategoryDeploymentFrequency: + metricValue = lensMetrics.AverageCycleTime + case bean.MetricCategoryMeanLeadTime: + metricValue = lensMetrics.AverageLeadTime + case bean.MetricCategoryChangeFailureRate: + metricValue = lensMetrics.ChangeFailureRate + case bean.MetricCategoryMeanTimeToRecovery: + metricValue = lensMetrics.AverageRecoveryTime + default: + // Default to low performance for unknown metric categories + performanceLevels.Low++ + continue + } + + // Categorize based on the specific metric thresholds + if IsInMetricCategory(metricValue, metricCategory, bean.PerformanceElite) { + performanceLevels.Elite++ + } else if IsInMetricCategory(metricValue, metricCategory, bean.PerformanceHigh) { + performanceLevels.High++ + } else if IsInMetricCategory(metricValue, metricCategory, bean.PerformanceMedium) { + performanceLevels.Medium++ + } else { + performanceLevels.Low++ + } + } + + return performanceLevels +} + +// IsInMetricCategory routes to the appropriate category checking function based on metric type +func IsInMetricCategory(value float64, metricCategory bean.MetricCategory, performanceCategory bean.PerformanceCategory) bool { + switch metricCategory { + case bean.MetricCategoryDeploymentFrequency: + return IsInDeploymentFrequencyCategory(value, performanceCategory) + case bean.MetricCategoryMeanLeadTime: + return IsInLeadTimeCategory(value, performanceCategory) + case bean.MetricCategoryChangeFailureRate: + return IsInChangeFailureRateCategory(value, performanceCategory) + case bean.MetricCategoryMeanTimeToRecovery: + return IsInRecoveryTimeCategory(value, performanceCategory) + default: + return false + } +} + +// IsInDeploymentFrequencyCategory checks deployment frequency thresholds +func IsInDeploymentFrequencyCategory(value float64, category 
bean.PerformanceCategory) bool { + switch category { + case bean.PerformanceElite: + return value >= 1.0 // On demand (multiple deploys per day) + case bean.PerformanceHigh: + return value >= 0.14 && value < 1.0 // Between once per day and once per week (1/7 ≈ 0.14) + case bean.PerformanceMedium: + return value >= 0.033 && value < 0.14 // Between once per week and once per month (1/30 ≈ 0.033) + case bean.PerformanceLow: + return value < 0.033 // Between once per month and once every six months + } + return false +} + +// IsInLeadTimeCategory checks change lead time thresholds (lower is better) +func IsInLeadTimeCategory(value float64, category bean.PerformanceCategory) bool { + switch category { + case bean.PerformanceElite: + return value < 24 // Less than one day + case bean.PerformanceHigh: + return value >= 24 && value <= 168 // Between one day and one week + case bean.PerformanceMedium: + return value > 168 && value <= 720 // Between one week and one month + case bean.PerformanceLow: + return value > 720 && value <= 4320 // Between one month and six months + } + return false +} + +// IsInRecoveryTimeCategory checks failed deployment recovery time thresholds (lower is better) +func IsInRecoveryTimeCategory(value float64, category bean.PerformanceCategory) bool { + switch category { + case bean.PerformanceElite: + return value < 1 // Less than one hour + case bean.PerformanceHigh: + return value >= 1 && value < 24 // Less than one day (1-24 hours) + case bean.PerformanceMedium: + return value >= 24 && value < 168 // Less than one day to one week (assuming this is the intended range) + case bean.PerformanceLow: + return value >= 168 && value <= 720 // Between one week and one month + } + return false +} + +// IsInChangeFailureRateCategory checks change failure rate thresholds (lower is better) +func IsInChangeFailureRateCategory(value float64, category bean.PerformanceCategory) bool { + switch category { + case bean.PerformanceElite: + return value <= 5 // 5% or 
// CalculateAverageFromValues returns the arithmetic mean of the given values,
// or 0.0 when the slice is empty (including nil).
func CalculateAverageFromValues(values []float64) float64 {
	count := len(values)
	if count == 0 {
		return 0.0
	}

	sum := 0.0
	for _, v := range values {
		sum += v
	}
	return sum / float64(count)
}
// ParseTimeString parses a timestamp string, accepting several common
// ISO 8601 / RFC 3339 style layouts as well as date-only and
// space-separated forms. It returns the parsed time, or an error when
// no supported layout matches.
func ParseTimeString(timeStr string) (time.Time, error) {
	// Candidate layouts, tried in order; the first successful parse wins.
	layouts := []string{
		time.RFC3339,     // "2006-01-02T15:04:05Z07:00"
		time.RFC3339Nano, // "2006-01-02T15:04:05.999999999Z07:00"
		"2006-01-02T15:04:05Z",
		"2006-01-02T15:04:05.000Z",
		"2006-01-02T15:04:05-07:00",
		"2006-01-02T15:04:05.000-07:00",
		"2006-01-02 15:04:05",
		"2006-01-02",
	}

	for _, layout := range layouts {
		parsed, err := time.Parse(layout, timeStr)
		if err == nil {
			return parsed, nil
		}
	}

	return time.Time{}, fmt.Errorf("invalid time format: %s", timeStr)
}
parsedTime, err := ParseTimeString(from); err == nil { + fromTime = &parsedTime + } else { + return nil, fmt.Errorf("invalid 'from' time format: %s", from) + } + if parsedTime, err := ParseTimeString(to); err == nil { + toTime = &parsedTime + } else { + return nil, fmt.Errorf("invalid 'from' time format: %s", to) + } + timeRangeReq = utils.NewTimeRangeRequest(fromTime, toTime) + } + timeRange, err := timeRangeReq.ParseAndValidateTimeRange() + if err != nil { + return nil, err + } + + return timeRange, nil +} + +// calculatePreviousTimeRangeFromDuration calculates the previous time range based on current time range duration +// currentFrom becomes the previous To, and prevFrom is calculated by subtracting the duration from currentFrom +func calculatePreviousTimeRangeFromDuration(currentFrom, currentTo *time.Time) (*utils.TimeRangeRequest, error) { + if currentFrom == nil || currentTo == nil { + return nil, fmt.Errorf("currentFrom and currentTo cannot be nil") + } + + // Calculate the duration between current from and to + duration := currentTo.Sub(*currentFrom) + + // Previous To becomes current From + prevTo := *currentFrom + + // Previous From is calculated by subtracting the duration from current From + prevFrom := currentFrom.Add(-duration) + + // Create time range request for the calculated previous period + timeRangeReq := utils.NewTimeRangeRequest(&prevFrom, &prevTo) + + // Parse and validate the time range + timeRange, err := timeRangeReq.ParseAndValidateTimeRange() + if err != nil { + return nil, fmt.Errorf("failed to parse calculated previous time period: %w", err) + } + + return timeRange, nil +} + +// GetPreviousTimePeriodBasedOnTimeWindow calculates the previous from and to using the timeWindow key +// It maps current time windows to their previous equivalents and calls ParseAndValidateTimeRange +// For unknown timeWindows, it falls back to duration-based calculation using currentFrom and currentTo +func GetPreviousTimePeriodBasedOnTimeWindow(timeWindow 
string, currentFrom, currentTo *time.Time) (*utils.TimeRangeRequest, error) { + var previousTimeWindow utils.TimeWindows + + switch utils.TimeWindows(timeWindow) { + case utils.Today: + // If user provided today, use yesterday + previousTimeWindow = utils.Yesterday + case utils.Week: + // If user provided week, use lastWeek + previousTimeWindow = utils.LastWeek + case utils.Month: + // If user provided month, use lastMonth + previousTimeWindow = utils.LastMonth + case utils.Quarter: + // If user provided quarter, use lastQuarter + previousTimeWindow = utils.LastQuarter + default: + // Fallback to duration-based calculation for unknown timeWindows + return calculatePreviousTimeRangeFromDuration(currentFrom, currentTo) + } + + // Create time window request for the previous period + timeRangeReq := utils.NewTimeWindowRequest(previousTimeWindow) + + // Parse and validate the time range + timeRange, err := timeRangeReq.ParseAndValidateTimeRange() + if err != nil { + return nil, fmt.Errorf("failed to parse previous time period for timeWindow %s: %w", timeWindow, err) + } + + return timeRange, nil +} + +// GetCurrentAndPreviousTimeRangeBasedOnTimeWindow calculates and returns the current and previous time ranges based on a time window. +// It supports parsing and validating a time-based filter with optional "from" and "to" parameters and time window input. +// Returns two time range requests for the current and previous periods, or an error if parsing or validation fails. 
+func GetCurrentAndPreviousTimeRangeBasedOnTimeWindow(timeWindow, from, to string) (*utils.TimeRangeRequest, *utils.TimeRangeRequest, error) { + // Get current time range + currentFromTo, err := GetCurrentTimePeriodBasedOnTimeWindow(timeWindow, from, to) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse current time period: %w", err) + } + + // Get previous time range using current time range for fallback calculation + prevFromTo, err := GetPreviousTimePeriodBasedOnTimeWindow(timeWindow, currentFromTo.From, currentFromTo.To) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse previous time period: %w", err) + } + + return currentFromTo, prevFromTo, nil +} + +// GetTimePeriodFromTimeRange determines the time period based on the duration between from and to +func GetTimePeriodFromTimeRange(from, to *time.Time) constants.TimePeriod { + if from == nil || to == nil { + return constants.ThisWeek // default + } + + duration := to.Sub(*from) + + // If the duration is approximately 1 day (within 2 hours tolerance) + if duration <= 26*time.Hour && duration >= 22*time.Hour { + return constants.Today + } + + // If the duration is approximately 1 week (within 1 day tolerance) + if duration <= 8*24*time.Hour && duration >= 6*24*time.Hour { + return constants.ThisWeek + } + + // If the duration is approximately 1 month (within 3 days tolerance) + if duration <= 33*24*time.Hour && duration >= 28*24*time.Hour { + return constants.ThisMonth + } + + // If the duration is approximately 3 months (within 1 week tolerance) + if duration <= 97*24*time.Hour && duration >= 83*24*time.Hour { + return constants.ThisQuarter + } + + // Default to week for other durations + return constants.ThisWeek +} diff --git a/pkg/overview/util/TrendCalculator.go b/pkg/overview/util/TrendCalculator.go new file mode 100644 index 0000000000..ee54819773 --- /dev/null +++ b/pkg/overview/util/TrendCalculator.go @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package util + +import ( + "time" + + "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + "github.com/devtron-labs/devtron/pkg/cluster/environment/repository" + "github.com/devtron-labs/devtron/pkg/overview/bean" + "github.com/devtron-labs/devtron/pkg/overview/constants" + teamRepository "github.com/devtron-labs/devtron/pkg/team/repository" +) + +// TrendCalculator provides utility functions for calculating trend comparisons +type TrendCalculator struct{} + +// NewTrendCalculator creates a new instance of TrendCalculator +func NewTrendCalculator() *TrendCalculator { + return &TrendCalculator{} +} + +// CalculateTrendComparison calculates the trend comparison between current and previous period +func (tc *TrendCalculator) CalculateTrendComparison(currentValue, previousValue int, from, to *time.Time) *bean.TrendComparison { + timePeriod := GetTimePeriodFromTimeRange(from, to) + + if previousValue == 0 && currentValue == 0 { + return &bean.TrendComparison{ + Value: 0, + Label: tc.getTrendLabel(timePeriod), + } + } + + difference := currentValue - previousValue + return &bean.TrendComparison{ + Value: difference, + Label: tc.getTrendLabel(timePeriod), + } +} + +// CalculatePercentageTrendComparison calculates the trend comparison for percentage values +func (tc *TrendCalculator) CalculatePercentageTrendComparison(currentPercentage, previousPercentage float64, from, to *time.Time) *bean.TrendComparison { + timePeriod := GetTimePeriodFromTimeRange(from, to) + + difference := int(currentPercentage - previousPercentage) + return &bean.TrendComparison{ + Value: difference, + Label: tc.getTrendLabel(timePeriod), + } +} + +// GetPreviousPeriodTimeRange calculates the time range for the previous period based on the current period +// It simply subtracts the duration from the current period to get the previous period +func (tc *TrendCalculator) GetPreviousPeriodTimeRange(from, to 
*time.Time) (*time.Time, *time.Time) { + if from == nil || to == nil { + return nil, nil + } + + // Calculate the duration of the current period + duration := to.Sub(*from) + + // Previous period ends where current period starts, and starts duration before that + prevTo := *from + prevFrom := from.Add(-duration) + + return &prevFrom, &prevTo +} + +// getTrendLabel returns the appropriate label for the trend comparison +func (tc *TrendCalculator) getTrendLabel(timePeriod constants.TimePeriod) string { + switch timePeriod { + case constants.Today: + return "today" + case constants.ThisWeek: + return "this week" + case constants.ThisMonth: + return "this month" + case constants.ThisQuarter: + return "this quarter" + case constants.LastWeek: + return "last week" + case constants.LastMonth: + return "last month" + default: + return "this period" + } +} + +// CalculateTrendForTimeDataPoints calculates trend comparison for time-based data points +func (tc *TrendCalculator) CalculateTrendForTimeDataPoints(currentData, previousData []bean.TimeDataPoint, from, to *time.Time) *bean.TrendComparison { + currentTotal := 0 + for _, point := range currentData { + currentTotal += point.Count + } + + previousTotal := 0 + for _, point := range previousData { + previousTotal += point.Count + } + + return tc.CalculateTrendComparison(currentTotal, previousTotal, from, to) +} + +// CalculatePercentageFromCounts calculates percentage from counts +func (tc *TrendCalculator) CalculatePercentageFromCounts(numerator, denominator int) float64 { + if denominator == 0 { + return 0.0 + } + return (float64(numerator) / float64(denominator)) * 100.0 +} + +// CalculateAverageFromCounts calculates average from total and count +func (tc *TrendCalculator) CalculateAverageFromCounts(total, count int) float64 { + if count == 0 { + return 0.0 + } + return float64(total) / float64(count) +} + +// FilterTeamsByTimeRange filters teams by time range based on created_on field +func FilterTeamsByTimeRange(teams 
[]teamRepository.Team, from, to *time.Time) []teamRepository.Team { + if from == nil && to == nil { + return teams + } + + filtered := make([]teamRepository.Team, 0) + for _, team := range teams { + if IsWithinTimeRange(team.CreatedOn, from, to) { + filtered = append(filtered, team) + } + } + return filtered +} + +// FilterAppsByTimeRange filters apps by time range based on created_on field +func FilterAppsByTimeRange(apps []*app.App, from, to *time.Time) []*app.App { + if from == nil && to == nil { + return apps + } + + filtered := make([]*app.App, 0) + for _, app := range apps { + if IsWithinTimeRange(app.CreatedOn, from, to) { + filtered = append(filtered, app) + } + } + return filtered +} + +// FilterEnvironmentsByTimeRange filters environments by time range based on created_on field +func FilterEnvironmentsByTimeRange(environments []*repository.Environment, from, to *time.Time) []*repository.Environment { + if from == nil && to == nil { + return environments + } + + filtered := make([]*repository.Environment, 0) + for _, env := range environments { + if IsWithinTimeRange(env.CreatedOn, from, to) { + filtered = append(filtered, env) + } + } + return filtered +} + +// FilterCiPipelinesByTimeRange filters CI pipelines by time range based on created_on field +func FilterCiPipelinesByTimeRange(pipelines []*pipelineConfig.CiPipeline, from, to *time.Time) []*pipelineConfig.CiPipeline { + if from == nil && to == nil { + return pipelines + } + + filtered := make([]*pipelineConfig.CiPipeline, 0) + for _, pipeline := range pipelines { + if IsWithinTimeRange(pipeline.CreatedOn, from, to) { + filtered = append(filtered, pipeline) + } + } + return filtered +} + +// FilterCdPipelinesByTimeRange filters CD pipelines by time range based on created_on field +func FilterCdPipelinesByTimeRange(pipelines []*pipelineConfig.Pipeline, from, to *time.Time) []*pipelineConfig.Pipeline { + if from == nil && to == nil { + return pipelines + } + + filtered := 
make([]*pipelineConfig.Pipeline, 0) + for _, pipeline := range pipelines { + if IsWithinTimeRange(pipeline.CreatedOn, from, to) { + filtered = append(filtered, pipeline) + } + } + return filtered +} + +// IsWithinTimeRange checks if a timestamp is within the given time range +func IsWithinTimeRange(timestamp time.Time, from, to *time.Time) bool { + if from != nil && timestamp.Before(*from) { + return false + } + if to != nil && timestamp.After(*to) { + return false + } + return true +} + +// CalculateAppTrendFromPeriodComparison calculates trend by comparing current and previous period app counts +func CalculateAppTrendFromPeriodComparison(currentApps, previousApps []*app.App) int { + currentCount := 0 + for _, app := range currentApps { + if app.Active { + currentCount++ + } + } + + previousCount := 0 + for _, app := range previousApps { + if app.Active { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateTeamTrendFromPeriodComparison calculates trend by comparing current and previous period team counts +func CalculateTeamTrendFromPeriodComparison(currentTeams, previousTeams []teamRepository.Team) int { + currentCount := 0 + for _, team := range currentTeams { + if team.Active { + currentCount++ + } + } + + previousCount := 0 + for _, team := range previousTeams { + if team.Active { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateEnvironmentTrendFromPeriodComparison calculates trend by comparing current and previous period environment counts +func CalculateEnvironmentTrendFromPeriodComparison(currentEnvs, previousEnvs []*repository.Environment) int { + currentCount := 0 + for _, env := range currentEnvs { + if env.Active { + currentCount++ + } + } + + previousCount := 0 + for _, env := range previousEnvs { + if env.Active { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateCiPipelineTrendFromPeriodComparison calculates trend by comparing current and previous 
period CI pipeline counts +func CalculateCiPipelineTrendFromPeriodComparison(currentPipelines, previousPipelines []*pipelineConfig.CiPipeline) int { + currentCount := 0 + for _, pipeline := range currentPipelines { + if !pipeline.Deleted { + currentCount++ + } + } + + previousCount := 0 + for _, pipeline := range previousPipelines { + if !pipeline.Deleted { + previousCount++ + } + } + + return currentCount - previousCount +} + +// CalculateCdPipelineTrendFromPeriodComparison calculates trend by comparing current and previous period CD pipeline counts +func CalculateCdPipelineTrendFromPeriodComparison(currentPipelines, previousPipelines []*pipelineConfig.Pipeline) int { + currentCount := 0 + for _, pipeline := range currentPipelines { + if !pipeline.Deleted { + currentCount++ + } + } + + previousCount := 0 + for _, pipeline := range previousPipelines { + if !pipeline.Deleted { + previousCount++ + } + } + + return currentCount - previousCount +} diff --git a/pkg/overview/wire_overview.go b/pkg/overview/wire_overview.go new file mode 100644 index 0000000000..55590a9e82 --- /dev/null +++ b/pkg/overview/wire_overview.go @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package overview + +import ( + "github.com/devtron-labs/devtron/pkg/overview/cache" + "github.com/devtron-labs/devtron/pkg/overview/config" + "github.com/google/wire" +) + +// OverviewWireSet provides wire set for overview module +var OverviewWireSet = wire.NewSet( + config.GetClusterOverviewConfig, + + // Service layer + NewAppManagementServiceImpl, + wire.Bind(new(AppManagementService), new(*AppManagementServiceImpl)), + + NewDoraMetricsServiceImpl, + wire.Bind(new(DoraMetricsService), new(*DoraMetricsServiceImpl)), + + NewInsightsServiceImpl, + wire.Bind(new(InsightsService), new(*InsightsServiceImpl)), + + // Cluster cache service + cache.NewClusterCacheServiceImpl, + wire.Bind(new(cache.ClusterCacheService), new(*cache.ClusterCacheServiceImpl)), + + // Cluster overview service (uses background refresh worker) + NewClusterOverviewServiceImpl, + wire.Bind(new(ClusterOverviewService), new(*ClusterOverviewServiceImpl)), + + // Security overview service (uses existing image scanning repositories) + NewSecurityOverviewServiceImpl, + wire.Bind(new(SecurityOverviewService), new(*SecurityOverviewServiceImpl)), + + // Main overview service + NewOverviewServiceImpl, + wire.Bind(new(OverviewService), new(*OverviewServiceImpl)), +) diff --git a/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go b/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go index d384f30c3b..c2d661b32b 100644 --- a/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go +++ b/pkg/pipeline/workflowStatus/repository/WorkflowStageRepository.go @@ -1,6 +1,8 @@ package repository import ( + "time" + "github.com/devtron-labs/devtron/pkg/pipeline/workflowStatus/bean" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" @@ -13,6 +15,7 @@ type WorkflowStageRepository interface { GetWorkflowStagesByWorkflowIdAndType(workflowId int, workflowType string) ([]*WorkflowExecutionStage, error) GetWorkflowStagesByWorkflowIdAndWtype(wfId int, wfType string) 
([]*WorkflowExecutionStage, error) GetWorkflowStagesByWorkflowIdsAndWtype(wfIds []int, wfType string) ([]*WorkflowExecutionStage, error) + GetSuccessfulCIExecutionStages(from, to *time.Time) ([]*WorkflowExecutionStage, error) } type WorkflowStageRepositoryImpl struct { @@ -92,3 +95,22 @@ func (impl *WorkflowStageRepositoryImpl) GetWorkflowStagesByWorkflowIdsAndWtype( } return workflowStages, err } + +func (impl *WorkflowStageRepositoryImpl) GetSuccessfulCIExecutionStages(from, to *time.Time) ([]*WorkflowExecutionStage, error) { + var workflowStages []*WorkflowExecutionStage + err := impl.dbConnection.Model(&workflowStages). + Where("workflow_type = ?", "CI"). + Where("stage_name = ?", "Execution"). + Where("status = ?", "SUCCEEDED"). + Where("status_for = ?", "workflow"). + Where("start_time IS NOT NULL"). + Where("end_time IS NOT NULL"). + Where("created_on >= ? AND created_on <= ?", from, to). + Order("id ASC"). + Select() + if err != nil { + impl.logger.Errorw("error in fetching successful CI execution stages", "err", err) + return workflowStages, err + } + return workflowStages, nil +} diff --git a/pkg/policyGovernance/security/imageScanning/ImageScanService.go b/pkg/policyGovernance/security/imageScanning/ImageScanService.go index f0268c20a8..484001ea07 100644 --- a/pkg/policyGovernance/security/imageScanning/ImageScanService.go +++ b/pkg/policyGovernance/security/imageScanning/ImageScanService.go @@ -18,6 +18,10 @@ package imageScanning import ( "context" + "fmt" + "strings" + "time" + bean4 "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/cluster/environment" @@ -29,8 +33,9 @@ import ( securityBean "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean" repository2 "github.com/devtron-labs/devtron/pkg/policyGovernance/security/scanTool/repository" "github.com/devtron-labs/devtron/pkg/workflow/cd/read" + 
"github.com/devtron-labs/devtron/util/sliceUtil" "go.opentelemetry.io/otel" - "time" + "golang.org/x/exp/slices" "github.com/devtron-labs/devtron/internal/sql/repository" repository1 "github.com/devtron-labs/devtron/internal/sql/repository/app" @@ -49,9 +54,15 @@ type ImageScanService interface { VulnerabilityExposure(request *repository3.VulnerabilityRequest) (*repository3.VulnerabilityExposureListingResponse, error) GetArtifactVulnerabilityStatus(ctx context.Context, request *bean2.VulnerabilityCheckRequest) (bool, error) IsImageScanExecutionCompleted(image, imageDigest string) (bool, error) + FetchVulnerabilitySummary(ctx context.Context, request *bean3.VulnerabilitySummaryRequest, ids []int) (*bean3.VulnerabilitySummary, error) + FetchVulnerabilityListing(ctx context.Context, request *bean3.VulnerabilityListingRequest, ids []int) (*bean3.VulnerabilityListingResponse, error) + // resource scanning functions below GetScanResults(resourceScanQueryParams *bean3.ResourceScanQueryParams) (parser.ResourceScanResponseDto, error) FilterDeployInfoByScannedArtifactsDeployedInEnv(deployInfoList []*repository3.ImageScanDeployInfo) ([]*repository3.ImageScanDeployInfo, error) + + // Optimized method for vulnerability summary - combines filtering with scanned artifact check in single query + FetchScannedDeployInfoWithFilters(ctx context.Context, envIds, clusterIds []int) ([]*repository3.ImageScanDeployInfo, error) } type ImageScanServiceImpl struct { @@ -109,7 +120,44 @@ func (impl ImageScanServiceImpl) FetchAllDeployInfo(request *bean3.ImageScanRequ return deployedList, nil } -func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { +func (impl *ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { + + // Handle different scan status cases + if request.ScanStatus == 
securityBean.ScanStatusNotScanned { + // Show only not-scanned items + return impl.fetchNonScannedAppEnvListing(request, deployInfoIds) + } else if request.ScanStatus == securityBean.ScanStatusScanned { + // Show only scanned items + return impl.fetchScannedAppEnvListing(request, deployInfoIds) + } + + // ScanStatusAll (default: empty string) - show both scanned and not-scanned + // Fetch both scanned and not-scanned items and merge them + scannedResponse, err := impl.fetchScannedAppEnvListing(request, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching scanned items", "err", err) + return nil, err + } + + notScannedResponse, err := impl.fetchNonScannedAppEnvListing(request, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching not-scanned items", "err", err) + return nil, err + } + + // Merge the responses + mergedResponse := &bean3.ImageScanHistoryListingResponse{ + Offset: request.Offset, + Size: request.Size, + ImageScanHistoryResponse: append(scannedResponse.ImageScanHistoryResponse, notScannedResponse.ImageScanHistoryResponse...), + Total: scannedResponse.Total + notScannedResponse.Total, + } + + return mergedResponse, nil +} + +// fetchScannedAppEnvListing fetches scanned app-env combinations +func (impl *ImageScanServiceImpl) fetchScannedAppEnvListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { groupByList, err := impl.imageScanDeployInfoRepository.ScanListingWithFilter(&request.ImageScanFilter, request.Size, request.Offset, deployInfoIds) if err != nil { impl.Logger.Errorw("error while fetching scan execution result", "err", err) @@ -138,7 +186,6 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS groupByListMap[item.Id] = item executionHistoryIds = append(executionHistoryIds, item.ImageScanExecutionHistoryId...) 
} - // fetching all execution history in bulk for updating last check time in case when no vul are found(no results will be saved) mapOfExecutionHistoryIdVsLastExecTime, err := impl.fetchImageExecutionHistoryMapByIds(executionHistoryIds) if err != nil { @@ -147,6 +194,7 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS } var finalResponseList []*bean3.ImageScanHistoryResponse + for _, item := range groupByList { imageScanHistoryResponse := &bean3.ImageScanHistoryResponse{} var lastChecked time.Time @@ -155,6 +203,7 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS highCount := 0 moderateCount := 0 lowCount, unkownCount := 0, 0 + fixableCount := 0 imageScanDeployInfo := groupByListMap[item.Id] if imageScanDeployInfo != nil { scanResultList, err := impl.scanResultRepository.FetchByScanExecutionIds(imageScanDeployInfo.ImageScanExecutionHistoryId) @@ -170,6 +219,11 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS for _, item := range scanResultList { lastChecked = item.ImageScanExecutionHistory.ExecutionTime criticalCount, highCount, moderateCount, lowCount, unkownCount = impl.updateCount(item.CveStore.GetSeverity(), criticalCount, highCount, moderateCount, lowCount, unkownCount) + + // Count fixable vulnerabilities (those with a fixed version) + if item.FixedVersion != "" { + fixableCount++ + } } // updating in case when no vul are found (no results) if lastChecked.IsZero() && len(imageScanDeployInfo.ImageScanExecutionHistoryId) > 0 && mapOfExecutionHistoryIdVsLastExecTime != nil { @@ -192,6 +246,15 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS imageScanHistoryResponse.LastChecked = &lastChecked } imageScanHistoryResponse.SeverityCount = severityCount + imageScanHistoryResponse.FixableVulnerabilities = fixableCount + + // Set scan status based on whether it's scanned or not + if imageScanDeployInfo != nil && 
len(imageScanDeployInfo.ImageScanExecutionHistoryId) > 0 && imageScanDeployInfo.ImageScanExecutionHistoryId[0] != -1 { + imageScanHistoryResponse.ScanStatus = "scanned" + } else { + imageScanHistoryResponse.ScanStatus = "not-scanned" + } + if imageScanDeployInfo != nil { imageScanHistoryResponse.EnvId = imageScanDeployInfo.EnvId } @@ -245,6 +308,91 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageS return finalResponse, err } +// fetchNonScannedAppEnvListing fetches non-scanned app-env combinations +// These are active deployments that don't have scan data in image_scan_deploy_info +func (impl *ImageScanServiceImpl) fetchNonScannedAppEnvListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { + // Get non-scanned app-env combinations + nonScannedList, err := impl.imageScanDeployInfoRepository.GetNonScannedAppEnvCombinations(&request.ImageScanFilter, request.Size, request.Offset, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching non-scanned app-env combinations", "err", err) + return nil, err + } + + // Get total count + totalCount, err := impl.imageScanDeployInfoRepository.GetNonScannedAppEnvCombinationsCount(&request.ImageScanFilter, deployInfoIds) + if err != nil { + impl.Logger.Errorw("error while fetching non-scanned app-env combinations count", "err", err) + return nil, err + } + + // Build response list + var finalResponseList []*bean3.ImageScanHistoryResponse + + // Get app and environment details + appIds := make([]int, 0) + envIds := make([]int, 0) + for _, item := range nonScannedList { + appIds = append(appIds, item.ScanObjectMetaId) + envIds = append(envIds, item.EnvId) + } + + // Extract unique IDs to avoid duplicate queries + appIds = sliceUtil.GetUniqueElements(appIds) + envIds = sliceUtil.GetUniqueElements(envIds) + + // Fetch app details + appMap := make(map[int]string) + if len(appIds) > 0 { + apps, err := 
impl.appRepository.FindAppAndProjectByIdsIn(appIds) + if err != nil && err != pg.ErrNoRows { + impl.Logger.Errorw("error while fetching apps", "err", err) + return nil, err + } + for _, app := range apps { + appMap[app.Id] = app.AppName + } + } + + // Fetch environment details using lightweight method + envMap := make(map[int]string) + if len(envIds) > 0 { + envMap, err = impl.envService.FindNamesByIds(envIds) + if err != nil && err != pg.ErrNoRows { + impl.Logger.Errorw("error while fetching environments", "err", err) + return nil, err + } + } + + // Build response items + for _, item := range nonScannedList { + response := &bean3.ImageScanHistoryResponse{ + ImageScanDeployInfoId: -1, // No scan deploy info exists + AppId: item.ScanObjectMetaId, + EnvId: item.EnvId, + Name: appMap[item.ScanObjectMetaId], + Type: item.ObjectType, + Environment: envMap[item.EnvId], + ScanStatus: string(securityBean.ScanStatusNotScanned), + SeverityCount: &bean3.SeverityCount{ + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + }, + FixableVulnerabilities: 0, + } + finalResponseList = append(finalResponseList, response) + } + + return &bean3.ImageScanHistoryListingResponse{ + Offset: request.Offset, + Size: request.Size, + ImageScanHistoryResponse: finalResponseList, + Total: totalCount, + }, nil +} + func (impl ImageScanServiceImpl) fetchImageExecutionHistoryMapByIds(historyIds []int) (map[int]time.Time, error) { mapOfExecutionHistoryIdVsExecutionTime := make(map[int]time.Time) if len(historyIds) > 0 { @@ -782,3 +930,434 @@ func (impl ImageScanServiceImpl) fetchLatestArtifactMetadataDeployedOnAllEnvsAcr } return appEnvToCiArtifactMap, ciArtifactIdToScannedMap, nil } + +// FetchScannedDeployInfoWithFilters returns deploy info for scanned artifacts that are currently deployed +// This is an optimized method that combines the logic of FetchAllDeployInfo + FilterDeployInfoByScannedArtifactsDeployedInEnv +// into a single database query, significantly improving performance 
for vulnerability summary API +func (impl *ImageScanServiceImpl) FetchScannedDeployInfoWithFilters(ctx context.Context, envIds, clusterIds []int) ([]*repository3.ImageScanDeployInfo, error) { + _, span := otel.Tracer("imageScanService").Start(ctx, "FetchScannedDeployInfoWithFilters") + defer span.End() + + deployInfoList, err := impl.imageScanDeployInfoRepository.FindScannedDeployInfoWithFilters(envIds, clusterIds) + if err != nil { + impl.Logger.Errorw("error in FetchScannedDeployInfoWithFilters", "err", err, "envIds", envIds, "clusterIds", clusterIds) + return nil, err + } + return deployInfoList, nil +} + +// FetchVulnerabilitySummary fetches the vulnerability summary for the given filters +// Same filters as VulnerabilityListing: Environment, Cluster, Application, Severity, Fix Availability, Vulnerability Age +// ids parameter contains RBAC-filtered deploy info IDs that the user has access to +func (impl *ImageScanServiceImpl) FetchVulnerabilitySummary(ctx context.Context, request *bean3.VulnerabilitySummaryRequest, ids []int) (*bean3.VulnerabilitySummary, error) { + ctx, span := otel.Tracer("imageScanService").Start(ctx, "FetchVulnerabilitySummary") + defer span.End() + + // Fetch raw vulnerability data with database-level filters (same as VulnerabilityListing) + // This applies: CVEName (empty for summary), Severity, EnvironmentIds, ClusterIds, AppIds, and RBAC-filtered deploy info IDs + rawData, err := impl.scanResultRepository.GetVulnerabilityRawData("", request.Severity, request.EnvironmentIds, request.ClusterIds, request.AppIds, ids) + if err != nil { + impl.Logger.Errorw("error while fetching vulnerability raw data", "err", err) + return nil, err + } + + if len(rawData) == 0 { + // Return empty summary + return &bean3.VulnerabilitySummary{ + TotalVulnerabilities: 0, + SeverityCount: &bean3.SeverityCount{ + Critical: 0, + High: 0, + Medium: 0, + Low: 0, + Unknown: 0, + }, + FixableVulnerabilities: 0, + NotFixableVulnerabilities: 0, + }, nil + } + + 
vulnerabilities := make([]*bean3.VulnerabilityDetail, 0) + for _, data := range rawData { + // Convert severity int to string + severityStr := impl.convertSeverityEnumToString(data.Severity) + vulnerabilities = append(vulnerabilities, &bean3.VulnerabilityDetail{ + CVEName: data.CveStoreName, + Severity: severityStr, + AppName: data.AppName, + AppId: data.AppId, + EnvName: data.EnvName, + EnvId: data.EnvId, + DiscoveredAt: data.ExecutionTime, + Package: data.Package, + CurrentVersion: data.CurrentVersion, + FixedVersion: data.FixedVersion, + }) + + } + + // Apply code-level filters (FixAvailable and VulnAge) + // Same logic as VulnerabilityListing + vulnerabilities = impl.applyVulnerabilitySummaryFilters(vulnerabilities, request) + + // Calculate summary from filtered vulnerabilities + totalVulnerabilities := len(vulnerabilities) + totalFixableVulnerabilities := 0 + totalNotFixableVulnerabilities := 0 + summaryCriticalCount := 0 + summaryHighCount := 0 + summaryModerateCount := 0 + summaryLowCount := 0 + summaryUnknownCount := 0 + + for _, vuln := range vulnerabilities { + // Count by severity + switch strings.ToLower(vuln.Severity) { + case "critical": + summaryCriticalCount++ + case "high": + summaryHighCount++ + case "medium", "moderate": + summaryModerateCount++ + case "low": + summaryLowCount++ + default: + summaryUnknownCount++ + } + + // Count fixable vs not fixable + if vuln.FixedVersion != "" { + totalFixableVulnerabilities++ + } else { + totalNotFixableVulnerabilities++ + } + } + + // Build and return vulnerability summary + vulnerabilitySummary := &bean3.VulnerabilitySummary{ + TotalVulnerabilities: totalVulnerabilities, + SeverityCount: &bean3.SeverityCount{ + Critical: summaryCriticalCount, + High: summaryHighCount, + Medium: summaryModerateCount, + Low: summaryLowCount, + Unknown: summaryUnknownCount, + }, + FixableVulnerabilities: totalFixableVulnerabilities, + NotFixableVulnerabilities: totalNotFixableVulnerabilities, + } + + return 
vulnerabilitySummary, nil +} + +// FetchVulnerabilityListing fetches the vulnerability listing with pagination and filters +// Optimized version: Uses code-level aggregation instead of database GROUP BY +func (impl *ImageScanServiceImpl) FetchVulnerabilityListing(ctx context.Context, request *bean3.VulnerabilityListingRequest, ids []int) (*bean3.VulnerabilityListingResponse, error) { + ctx, span := otel.Tracer("imageScanService").Start(ctx, "FetchVulnerabilityListing") + defer span.End() + + rawData, err := impl.scanResultRepository.GetVulnerabilityRawData(request.CVEName, request.Severity, request.EnvironmentIds, request.ClusterIds, request.AppIds, ids) + if err != nil { + impl.Logger.Errorw("error while fetching vulnerability raw data", "err", err) + return nil, err + } + + // Code-level aggregation using maps for O(n) performance + // Key: "cveName|appId|envId|package|version|fixedVersion" + // This ensures unique combinations of CVE+App+Env+Package+Version + vulnerabilityMap := make(map[string]*bean3.VulnerabilityDetail) + + for _, data := range rawData { + // Create unique key for this CVE+App+Env+Package+Version combination + key := fmt.Sprintf("%s|%d|%d|%s|%s|%s", data.CveStoreName, data.AppId, data.EnvId, data.Package, data.CurrentVersion, data.FixedVersion) + + // Check if this combination already exists + if existing, exists := vulnerabilityMap[key]; exists { + // Keep the earliest discovery time + if data.ExecutionTime.Before(existing.DiscoveredAt) { + existing.DiscoveredAt = data.ExecutionTime + } + } else { + // New combination - add to map + vulnerabilityMap[key] = &bean3.VulnerabilityDetail{ + CVEName: data.CveStoreName, + Severity: impl.convertSeverityEnumToString(data.Severity), + AppName: data.AppName, + AppId: data.AppId, + EnvName: data.EnvName, + EnvId: data.EnvId, + DiscoveredAt: data.ExecutionTime, + Package: data.Package, + CurrentVersion: data.CurrentVersion, + FixedVersion: data.FixedVersion, + } + } + } + + // Convert map to slice + 
vulnerabilities := make([]*bean3.VulnerabilityDetail, 0, len(vulnerabilityMap)) + for _, vuln := range vulnerabilityMap { + vulnerabilities = append(vulnerabilities, vuln) + } + + // Apply code-level filters + vulnerabilities = impl.applyVulnerabilityFilters(vulnerabilities, request) + + // Apply sorting based on request + impl.sortVulnerabilities(vulnerabilities, request.SortBy, request.SortOrder) + + // Apply pagination in code + totalCount := len(vulnerabilities) + start := request.Offset + end := request.Offset + request.Size + + // Handle edge cases + if start > totalCount { + start = totalCount + } + if end > totalCount { + end = totalCount + } + if start < 0 { + start = 0 + } + + // Slice for pagination + paginatedVulnerabilities := vulnerabilities[start:end] + + return &bean3.VulnerabilityListingResponse{ + Offset: request.Offset, + Size: request.Size, + Total: totalCount, + Vulnerabilities: paginatedVulnerabilities, + }, nil +} + +// applyVulnerabilityFilters applies code-level filters (fix availability and vulnerability age) +func (impl *ImageScanServiceImpl) applyVulnerabilityFilters(vulnerabilities []*bean3.VulnerabilityDetail, request *bean3.VulnerabilityListingRequest) []*bean3.VulnerabilityDetail { + filtered := make([]*bean3.VulnerabilityDetail, 0, len(vulnerabilities)) + now := time.Now() + + for _, vuln := range vulnerabilities { + // Apply fix availability filter (multi-select) + if len(request.FixAvailability) > 0 { + hasFixedVersion := vuln.FixedVersion != "" + matchesFilter := false + + for _, fixAvailType := range request.FixAvailability { + if fixAvailType == bean3.FixAvailable && hasFixedVersion { + matchesFilter = true + break + } + if fixAvailType == bean3.FixNotAvailable && !hasFixedVersion { + matchesFilter = true + break + } + } + + if !matchesFilter { + continue + } + } + + // Apply vulnerability age filter (multi-select) + if len(request.AgeOfDiscovery) > 0 { + daysSinceDiscovery := int(now.Sub(vuln.DiscoveredAt).Hours() / 24) + 
matchesAgeFilter := false + + for _, ageType := range request.AgeOfDiscovery { + switch ageType { + case bean3.VulnAgeLessThan30Days: + if daysSinceDiscovery < 30 { + matchesAgeFilter = true + } + case bean3.VulnAge30To60Days: + if daysSinceDiscovery >= 30 && daysSinceDiscovery < 60 { + matchesAgeFilter = true + } + case bean3.VulnAge60To90Days: + if daysSinceDiscovery >= 60 && daysSinceDiscovery < 90 { + matchesAgeFilter = true + } + case bean3.VulnAgeMoreThan90Days: + if daysSinceDiscovery >= 90 { + matchesAgeFilter = true + } + } + + if matchesAgeFilter { + break + } + } + + if !matchesAgeFilter { + continue + } + } + + filtered = append(filtered, vuln) + } + + return filtered +} + +// applyVulnerabilitySummaryFilters applies code-level filters for summary (fix availability and vulnerability age) +// Same logic as applyVulnerabilityFilters but for VulnerabilitySummaryRequest +func (impl *ImageScanServiceImpl) applyVulnerabilitySummaryFilters(vulnerabilities []*bean3.VulnerabilityDetail, request *bean3.VulnerabilitySummaryRequest) []*bean3.VulnerabilityDetail { + filtered := make([]*bean3.VulnerabilityDetail, 0, len(vulnerabilities)) + now := time.Now() + + for _, vuln := range vulnerabilities { + // Apply fix availability filter (multi-select) + if len(request.FixAvailability) > 0 { + hasFixedVersion := vuln.FixedVersion != "" + matchesFilter := false + + for _, fixAvailType := range request.FixAvailability { + if fixAvailType == bean3.FixAvailable && hasFixedVersion { + matchesFilter = true + break + } + if fixAvailType == bean3.FixNotAvailable && !hasFixedVersion { + matchesFilter = true + break + } + } + + if !matchesFilter { + continue + } + } + + // Apply vulnerability age filter (multi-select) + if len(request.AgeOfDiscovery) > 0 { + daysSinceDiscovery := int(now.Sub(vuln.DiscoveredAt).Hours() / 24) + matchesAgeFilter := false + + for _, ageType := range request.AgeOfDiscovery { + switch ageType { + case bean3.VulnAgeLessThan30Days: + if daysSinceDiscovery 
< 30 { + matchesAgeFilter = true + } + case bean3.VulnAge30To60Days: + if daysSinceDiscovery >= 30 && daysSinceDiscovery < 60 { + matchesAgeFilter = true + } + case bean3.VulnAge60To90Days: + if daysSinceDiscovery >= 60 && daysSinceDiscovery < 90 { + matchesAgeFilter = true + } + case bean3.VulnAgeMoreThan90Days: + if daysSinceDiscovery >= 90 { + matchesAgeFilter = true + } + } + + if matchesAgeFilter { + break + } + } + + if !matchesAgeFilter { + continue + } + } + + filtered = append(filtered, vuln) + } + + return filtered +} + +// sortVulnerabilities sorts vulnerabilities based on sortBy and sortOrder +func (impl *ImageScanServiceImpl) sortVulnerabilities(vulnerabilities []*bean3.VulnerabilityDetail, sortBy bean3.VulnerabilitySortBy, sortOrder bean3.SortOrder) { + // Default sort: discoveredAt DESC, cveName ASC + if sortBy == "" { + sortBy = bean3.VulnSortByDiscoveredAt + } + if sortOrder == "" { + sortOrder = bean3.SortOrderDesc + } + + slices.SortFunc(vulnerabilities, func(a, b *bean3.VulnerabilityDetail) int { + var cmp int + + switch sortBy { + case bean3.VulnSortByCveName: + if a.CVEName < b.CVEName { + cmp = -1 + } else if a.CVEName > b.CVEName { + cmp = 1 + } + case bean3.VulnSortByCurrentVersion: + if a.CurrentVersion < b.CurrentVersion { + cmp = -1 + } else if a.CurrentVersion > b.CurrentVersion { + cmp = 1 + } + case bean3.VulnSortByFixedVersion: + if a.FixedVersion < b.FixedVersion { + cmp = -1 + } else if a.FixedVersion > b.FixedVersion { + cmp = 1 + } + case bean3.VulnSortByDiscoveredAt: + if a.DiscoveredAt.Before(b.DiscoveredAt) { + cmp = -1 + } else if a.DiscoveredAt.After(b.DiscoveredAt) { + cmp = 1 + } + case bean3.VulnSortBySeverity: + // Severity comparison: Critical > High > Medium > Low > Unknown + severityOrder := map[string]int{ + securityBean.CRITICAL: 4, + securityBean.HIGH: 3, + securityBean.MEDIUM: 2, + securityBean.LOW: 1, + securityBean.UNKNOWN: 0, + } + aSev := severityOrder[a.Severity] + bSev := severityOrder[b.Severity] + if aSev 
< bSev { + cmp = -1 + } else if aSev > bSev { + cmp = 1 + } + default: + // Default to discoveredAt + if a.DiscoveredAt.Before(b.DiscoveredAt) { + cmp = -1 + } else if a.DiscoveredAt.After(b.DiscoveredAt) { + cmp = 1 + } + } + + // Apply sort order + if sortOrder == bean3.SortOrderDesc { + cmp = -cmp + } + + return cmp + }) +} + +// convertSeverityEnumToString converts severity enum to string +func (impl *ImageScanServiceImpl) convertSeverityEnumToString(severity int) string { + switch severity { + case int(securityBean.Low): + return securityBean.LOW + case int(securityBean.Medium): + return securityBean.MEDIUM + case int(securityBean.High): + return securityBean.HIGH + case int(securityBean.Critical): + return securityBean.CRITICAL + case int(securityBean.Safe): + return securityBean.SAFE + case int(securityBean.Unknown): + return securityBean.UNKNOWN + default: + return securityBean.UNKNOWN + } +} diff --git a/pkg/policyGovernance/security/imageScanning/bean/bean.go b/pkg/policyGovernance/security/imageScanning/bean/bean.go index b250f8aff5..702386a817 100644 --- a/pkg/policyGovernance/security/imageScanning/bean/bean.go +++ b/pkg/policyGovernance/security/imageScanning/bean/bean.go @@ -1,10 +1,12 @@ package bean import ( + "time" + + workflowConstants "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig/bean/constants" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/helper/parser" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository" "github.com/devtron-labs/devtron/pkg/policyGovernance/security/imageScanning/repository/bean" - "time" ) const ( @@ -75,15 +77,111 @@ type ImageScanHistoryListingResponse struct { } type ImageScanHistoryResponse struct { - ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` - AppId int `json:"appId"` - EnvId int `json:"envId"` - Name string `json:"name"` - Type string `json:"type"` - Environment string `json:"environment"` - LastChecked 
*time.Time `json:"lastChecked"` - Image string `json:"image,omitempty"` - SeverityCount *SeverityCount `json:"severityCount,omitempty"` + ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` + AppId int `json:"appId"` + EnvId int `json:"envId"` + Name string `json:"name"` + Type string `json:"type"` + Environment string `json:"environment"` + LastChecked *time.Time `json:"lastChecked"` + Image string `json:"image,omitempty"` + SeverityCount *SeverityCount `json:"severityCount,omitempty"` + FixableVulnerabilities int `json:"fixableVulnerabilities"` + ScanStatus string `json:"scanStatus,omitempty"` // "scanned" or "not-scanned" +} + +// VulnerabilitySummary represents the summary of all vulnerabilities across all scanned apps/envs +type VulnerabilitySummary struct { + TotalVulnerabilities int `json:"totalVulnerabilities"` + SeverityCount *SeverityCount `json:"severityCount"` + FixableVulnerabilities int `json:"fixableVulnerabilities"` + NotFixableVulnerabilities int `json:"notFixableVulnerabilities"` +} + +// VulnerabilitySummaryRequest represents the request for vulnerability summary with filters +// Same filters as VulnerabilityListingRequest (except pagination and sorting) +type VulnerabilitySummaryRequest struct { + EnvironmentIds []int `json:"envIds"` // Filter by environment IDs + ClusterIds []int `json:"clusterIds"` // Filter by cluster IDs + AppIds []int `json:"appIds"` // Filter by application IDs + Severity []int `json:"severity"` // Filter by severity + FixAvailability []FixAvailabilityType `json:"fixAvailability"` // Filter by fix availability (multi-select: fixAvailable, fixNotAvailable) + AgeOfDiscovery []VulnerabilityAgeType `json:"ageOfDiscovery"` // Filter by vulnerability age (multi-select) +} + +// VulnerabilityListingRequest represents the request for vulnerability listing with filters +type VulnerabilityListingRequest struct { + CVEName string `json:"cveName"` // Search by CVE name + Severity []int `json:"severity"` // Filter by severity + 
EnvironmentIds []int `json:"envIds"` // Filter by environment IDs + ClusterIds []int `json:"clusterIds"` // Filter by cluster IDs + AppIds []int `json:"appIds"` // Filter by application IDs + FixAvailability []FixAvailabilityType `json:"fixAvailability"` // Filter by fix availability (multi-select: fixAvailable, fixNotAvailable) + AgeOfDiscovery []VulnerabilityAgeType `json:"ageOfDiscovery"` // Filter by vulnerability age (multi-select) + SortBy VulnerabilitySortBy `json:"sortBy"` // Sort by field + SortOrder SortOrder `json:"sortOrder"` // Sort order (ASC/DESC) + Offset int `json:"offset"` // Pagination offset + Size int `json:"size"` // Pagination size +} + +// FixAvailabilityType represents fix availability filter options +type FixAvailabilityType string + +const ( + FixAvailable FixAvailabilityType = "fixAvailable" // CVEs with fixes available + FixNotAvailable FixAvailabilityType = "fixNotAvailable" // CVEs without fixes +) + +// VulnerabilityAgeType represents vulnerability age filter +type VulnerabilityAgeType string + +const ( + VulnAgeLessThan30Days VulnerabilityAgeType = "lt_30d" // Less than 30 days old + VulnAge30To60Days VulnerabilityAgeType = "30_60d" // 30 to 60 days old + VulnAge60To90Days VulnerabilityAgeType = "60_90d" // 60 to 90 days old + VulnAgeMoreThan90Days VulnerabilityAgeType = "gt_90d" // More than 90 days old +) + +// VulnerabilitySortBy represents sort field for vulnerability listing +type VulnerabilitySortBy string + +const ( + VulnSortByCveName VulnerabilitySortBy = "cveName" + VulnSortByCurrentVersion VulnerabilitySortBy = "currentVersion" + VulnSortByFixedVersion VulnerabilitySortBy = "fixedVersion" + VulnSortByDiscoveredAt VulnerabilitySortBy = "discoveredAt" + VulnSortBySeverity VulnerabilitySortBy = "severity" +) + +// SortOrder represents sort order +// Type alias to repository constants to avoid circular imports +type SortOrder = workflowConstants.SortOrder + +const ( + SortOrderAsc = workflowConstants.SortOrderAsc + 
SortOrderDesc = workflowConstants.SortOrderDesc +) + +// VulnerabilityListingResponse represents the response for vulnerability listing +type VulnerabilityListingResponse struct { + Offset int `json:"offset"` + Size int `json:"size"` + Total int `json:"total"` + Vulnerabilities []*VulnerabilityDetail `json:"list"` +} + +// VulnerabilityDetail represents detailed information about a single CVE +type VulnerabilityDetail struct { + CVEName string `json:"cveName"` + Severity string `json:"severity"` + AppName string `json:"appName"` + AppId int `json:"appId"` + EnvName string `json:"envName"` + EnvId int `json:"envId"` + DiscoveredAt time.Time `json:"discoveredAt"` // First time this CVE was discovered + Package string `json:"package"` // Vulnerable package name + CurrentVersion string `json:"currentVersion"` // Current vulnerable version + FixedVersion string `json:"fixedVersion"` // Fixed version (empty if not fixable) } type ImageScanExecutionDetail struct { diff --git a/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go b/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go index 58f6dce98f..3c954b63ad 100644 --- a/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go +++ b/pkg/policyGovernance/security/imageScanning/repository/ImageScanDeployInfoRepository.go @@ -65,6 +65,11 @@ type ImageScanListingResponse struct { TotalCount int `json:"totalCount"` } +type DeploymentScannedCount struct { + ScannedCount int + UnscannedCount int +} + type ImageScanDeployInfoRepository interface { Save(model *ImageScanDeployInfo) error FindAll() ([]*ImageScanDeployInfo, error) @@ -75,6 +80,16 @@ type ImageScanDeployInfoRepository interface { FetchByAppIdAndEnvId(appId int, envId int, objectType []string) (*ImageScanDeployInfo, error) FindByTypeMetaAndTypeId(scanObjectMetaId int, objectType string) (*ImageScanDeployInfo, error) ScanListingWithFilter(request 
*repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanListingResponse, error) + + // Security Overview methods + GetActiveDeploymentCountByFilters(envIds, clusterIds, appIds []int) (int, error) + GetActiveDeploymentCountWithVulnerabilitiesByFilters(envIds, clusterIds, appIds []int) (int, error) + GetActiveDeploymentScannedUnscannedCountByFilters(envIds, clusterIds, appIds []int) (*DeploymentScannedCount, error) + GetNonScannedAppEnvCombinations(request *repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanDeployInfo, error) + GetNonScannedAppEnvCombinationsCount(request *repoBean.ImageScanFilter, deployInfoIds []int) (int, error) + + // Optimized method for vulnerability summary - combines filtering with scanned artifact check in single query + FindScannedDeployInfoWithFilters(envIds, clusterIds []int) ([]*ImageScanDeployInfo, error) } type ImageScanDeployInfoRepositoryImpl struct { @@ -289,3 +304,411 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListingQueryBuilder(request *r } return query, queryParams } + +// ============================================================================ +// Security Overview Methods +// ============================================================================ + +// GetActiveDeploymentCountByFilters returns the count of unique active deployments (app+env combinations) +// filtered by envIds, clusterIds, and appIds +// Uses cd_workflow_runner as the source of truth for ALL deployments (scanned and unscanned) +func (impl ImageScanDeployInfoRepositoryImpl) GetActiveDeploymentCountByFilters(envIds, clusterIds, appIds []int) (int, error) { + // Query to find latest deployment per app+environment combination from cd_workflow_runner + // This is the source of truth for ALL active deployments, not just scanned ones + // Partitions by (app_id, environment_id) to get the most recent deployment for each app+env + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + 
p.environment_id, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Complete the CTE and count unique app+env combinations + query += ` + ) + SELECT COUNT(DISTINCT (app_id, environment_id)) + FROM LatestDeployments + WHERE rn = 1 + ` + + var count int + _, err := impl.dbConnection.Query(&count, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in getting active deployment count", "err", err) + return 0, err + } + + return count, nil +} + +// GetActiveDeploymentCountWithVulnerabilitiesByFilters returns the count of unique active deployments +// that have vulnerabilities in their LATEST deployed artifact +func (impl ImageScanDeployInfoRepositoryImpl) GetActiveDeploymentCountWithVulnerabilitiesByFilters(envIds, clusterIds, appIds []int) (int, error) { + // Query to find latest deployment per app+environment combination and check if it has vulnerabilities + // Partitions by (app_id, environment_id) to get the most recent deployment for each app+env + // This handles cases where pipelines are deleted and recreated for the same app+env + // Shows vulnerability data from all deployments (successful or failed) since vulnerability is about the image, not deployment status + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + cia.image, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Complete the CTE and count deployments with vulnerabilities + // Join with image_scan_deploy_info to verify scanned 
deployments + // Then join with image_scan_execution_history using both id and image for verification + query += ` + ) + SELECT COUNT(DISTINCT (ld.app_id, ld.environment_id)) + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = iseh.id + WHERE ld.rn = 1 + AND isdi.image_scan_execution_history_id[1] != -1 + ` + + var count int + _, err := impl.dbConnection.Query(&count, query, queryParams...) + if err != nil { + impl.logger.Errorw("error in getting deployment count with vulnerabilities", "err", err) + return 0, err + } + + return count, nil +} + +// GetActiveDeploymentScannedUnscannedCountByFilters returns the count of scanned and unscanned deployments +// in a single query for optimal performance. 
It finds the latest deployed artifact per app+env combination +// and counts how many have scanned=true vs scanned=false +func (impl ImageScanDeployInfoRepositoryImpl) GetActiveDeploymentScannedUnscannedCountByFilters(envIds, clusterIds, appIds []int) (*DeploymentScannedCount, error) { + // Query to find latest deployment per app+environment combination and count scanned vs unscanned + // Uses ROW_NUMBER() to get the latest deployment per app+env + // Partitions by (app_id, environment_id) to get the most recent deployment for each app+env + // This handles cases where pipelines are deleted and recreated for the same app+env + // Shows scan data from all deployments (successful or failed) since scan status is about the image, not deployment status + // Then uses conditional aggregation to count scanned and unscanned in one query + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + cia.scanned, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + INNER JOIN environment env ON env.id = p.environment_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + query += ` + ) + SELECT + COUNT(*) FILTER (WHERE scanned = true) as scanned_count, + COUNT(*) FILTER (WHERE scanned = false) as unscanned_count + FROM LatestDeployments + WHERE rn = 1 + ` + + 
type queryResult struct { + ScannedCount int `pg:"scanned_count"` + UnscannedCount int `pg:"unscanned_count"` + } + + var result queryResult + _, err := impl.dbConnection.Query(&result, query, queryParams...) + if err != nil { + impl.logger.Errorw("error in getting deployment scanned/unscanned counts", "err", err) + return nil, err + } + + return &DeploymentScannedCount{ + ScannedCount: result.ScannedCount, + UnscannedCount: result.UnscannedCount, + }, nil +} + +// GetNonScannedAppEnvCombinations returns app-env combinations that are NOT scanned +// It finds all active deployments and excludes those that exist in image_scan_deploy_info +func (impl ImageScanDeployInfoRepositoryImpl) GetNonScannedAppEnvCombinations(request *repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanDeployInfo, error) { + query, queryParams := impl.buildNonScannedAppEnvQuery(request, size, offset, deployInfoIds, false) + + var results []*ImageScanDeployInfo + _, err := impl.dbConnection.Query(&results, query, queryParams...) + if err != nil { + impl.logger.Errorw("error in getting non-scanned app-env combinations", "err", err) + return nil, err + } + + return results, nil +} + +// GetNonScannedAppEnvCombinationsCount returns count of app-env combinations that are NOT scanned +func (impl ImageScanDeployInfoRepositoryImpl) GetNonScannedAppEnvCombinationsCount(request *repoBean.ImageScanFilter, deployInfoIds []int) (int, error) { + query, queryParams := impl.buildNonScannedAppEnvQuery(request, 0, 0, deployInfoIds, true) + + var count int + _, err := impl.dbConnection.Query(&count, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in getting non-scanned app-env combinations count", "err", err) + return 0, err + } + + return count, nil +} + +// buildNonScannedAppEnvQuery builds query to find non-scanned app-env combinations +// It gets all active deployments from cd_workflow_runner and excludes those in image_scan_deploy_info +func (impl ImageScanDeployInfoRepositoryImpl) buildNonScannedAppEnvQuery(request *repoBean.ImageScanFilter, size int, offset int, deployInfoIds []int, isCountQuery bool) (string, []interface{}) { + var queryParams []interface{} + + // Build the CTE to get latest deployments + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + // Add filters to CTE + if len(request.EnvironmentIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(request.EnvironmentIds)) + } + + if len(request.ClusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(request.ClusterIds)) + } + + query += ` + ) + ` + + // Main query - select non-scanned app-env combinations + if isCountQuery { + query += ` + SELECT COUNT(*) + FROM LatestDeployments ld + INNER JOIN app a ON a.id = ld.app_id + INNER JOIN environment env ON env.id = ld.environment_id + LEFT JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + WHERE ld.rn = 1 + AND a.active = true + AND env.active = true + AND (isdi.id IS NULL OR isdi.image_scan_execution_history_id[1] = -1) + ` + } else { + query 
+= ` + SELECT + ld.app_id as scan_object_meta_id, + ld.environment_id as env_id, + ld.cluster_id, + 'app' as object_type, + -1 as id + FROM LatestDeployments ld + INNER JOIN app a ON a.id = ld.app_id + INNER JOIN environment env ON env.id = ld.environment_id + LEFT JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + WHERE ld.rn = 1 + AND a.active = true + AND env.active = true + AND (isdi.id IS NULL OR isdi.image_scan_execution_history_id[1] = -1) + ` + } + + // Add app name filter if provided + if len(request.AppName) > 0 { + query += " AND a.app_name ILIKE ?" + queryParams = append(queryParams, util.GetLIKEClauseQueryParam(request.AppName)) + } + + // Add deployInfoIds filter if provided (for RBAC) + if len(deployInfoIds) > 0 && !isCountQuery { + // For non-scanned items, we can't filter by deployInfoIds since they don't exist in image_scan_deploy_info + // This filter is only applicable for scanned items + } + + // Add pagination for non-count queries + if !isCountQuery && size > 0 { + query += " ORDER BY ld.app_id, ld.environment_id LIMIT ? OFFSET ?" + queryParams = append(queryParams, size, offset) + } + + return query, queryParams +} + +// FindScannedDeployInfoWithFilters returns deploy info for scanned artifacts that are currently deployed +// This combines the logic of FindAll + FilterDeployInfoByScannedArtifactsDeployedInEnv into a single optimized query +// It only returns deploy info where: +// 1. The artifact is currently deployed (latest deployment per app+env) +// 2. The artifact has been scanned (exists in image_scan_deploy_info with valid scan history) +// 3. The deployed image matches the scanned image +func (impl ImageScanDeployInfoRepositoryImpl) FindScannedDeployInfoWithFilters(envIds, clusterIds []int) ([]*ImageScanDeployInfo, error) { + var results []*ImageScanDeployInfo + + // This query: + // 1. 
Gets the latest deployment per app+env from cd_workflow_runner + // 2. Joins with image_scan_deploy_info to get only scanned deployments + // 3. Verifies the deployed image matches the scanned image + // 4. Applies environment and cluster filters + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + cia.image, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + // Complete the CTE and join with image_scan_deploy_info + query += ` + ) + SELECT + isdi.id, + isdi.image_scan_execution_history_id, + isdi.scan_object_meta_id, + isdi.object_type, + isdi.env_id, + isdi.cluster_id + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + AND isdi.image_scan_execution_history_id[1] != -1 + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + WHERE ld.rn = 1 + ` + + _, err := impl.dbConnection.Query(&results, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in FindScannedDeployInfoWithFilters", "err", err, "envIds", envIds, "clusterIds", clusterIds) + return nil, err + } + + return results, nil +} diff --git a/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go b/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go index 06458de34c..b56d7777ae 100644 --- a/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go +++ b/pkg/policyGovernance/security/imageScanning/repository/ImageScanResultRepository.go @@ -17,6 +17,8 @@ package repository import ( + "time" + "github.com/go-pg/pg" "go.uber.org/zap" ) @@ -37,6 +39,51 @@ type ImageScanExecutionResult struct { ImageScanExecutionHistory ImageScanExecutionHistory } +type VulnerabilityData struct { + CveStoreName string + FixedVersion string +} + +type SeverityInsightData struct { + CveStoreName string + Severity int // Severity enum value from cve_store + ExecutionTime time.Time // From image_scan_execution_history +} + +type VulnerabilityTrendData struct { + CveStoreName string + Severity int // Severity enum value from cve_store + ExecutionTime time.Time // From image_scan_execution_history +} + +type VulnerabilityListingData struct { + CveStoreName string `sql:"cve_store_name"` + Severity int `sql:"severity"` + AppId int `sql:"app_id"` + AppName string `sql:"app_name"` + EnvId int `sql:"env_id"` + EnvName string `sql:"env_name"` + DiscoveredAt time.Time `sql:"discovered_at"` + Package string `sql:"package"` + CurrentVersion string `sql:"current_version"` + FixedVersion string `sql:"fixed_version"` + TotalCount int `sql:"total_count"` +} + +// VulnerabilityRawData represents raw CVE data before aggregation (for code-level optimization) +type VulnerabilityRawData struct { + CveStoreName string `sql:"cve_store_name"` + Severity int `sql:"severity"` + AppId int `sql:"app_id"` + AppName string `sql:"app_name"` + EnvId int `sql:"env_id"` + 
EnvName string `sql:"env_name"` + ExecutionTime time.Time `sql:"execution_time"` + Package string `sql:"package"` + CurrentVersion string `sql:"current_version"` + FixedVersion string `sql:"fixed_version"` +} + type ImageScanResultRepository interface { Save(model *ImageScanExecutionResult) error FindAll() ([]*ImageScanExecutionResult, error) @@ -48,6 +95,13 @@ type ImageScanResultRepository interface { FindByImageDigest(imageDigest string) ([]*ImageScanExecutionResult, error) FindByImageDigests(digest []string) ([]*ImageScanExecutionResult, error) FindByImage(image string) ([]*ImageScanExecutionResult, error) + + // Security Overview methods + GetSeverityInsightDataByFilters(envIds, clusterIds, appIds []int, isProd *bool) ([]*SeverityInsightData, error) + GetVulnerabilityTrendDataByFilters(from, to *time.Time, isProd *bool) ([]*VulnerabilityTrendData, error) + + // Vulnerability Listing + GetVulnerabilityRawData(cveName string, severities, envIds, clusterIds, appIds, deployInfoIds []int) ([]*VulnerabilityRawData, error) } type ImageScanResultRepositoryImpl struct { @@ -133,3 +187,279 @@ func (impl ImageScanResultRepositoryImpl) FindByImage(image string) ([]*ImageSca Where("image_scan_execution_history.image = ?", image).Order("image_scan_execution_history.execution_time desc").Select() return model, err } + +// ============================================================================ +// Security Overview Methods +// ============================================================================ + +// GetSeverityInsightDataByFilters returns vulnerability data with severity and execution time +// for calculating severity distribution and age distribution in a single query +// Only returns vulnerabilities from the LATEST deployed artifact for each app+env combination +// isProd: nil = all environments, true = prod only, false = non-prod only +func (impl ImageScanResultRepositoryImpl) GetSeverityInsightDataByFilters(envIds, clusterIds, appIds []int, isProd *bool) 
([]*SeverityInsightData, error) { + var results []*SeverityInsightData + + // Query to get vulnerabilities from latest deployed images per app+env + // Step 1: Get latest deployment per app+env from cd_workflow_runner (source of truth for all deployments) + // Step 2: Join with image_scan_deploy_info to verify if this app+env has scanned image deployed + // image_scan_deploy_info contains env_id mapping and scan_execution_history_id for scanned images + // For object_type='app', the array image_scan_execution_history_id has length 1 (current deployed image's scan) + // Step 3: Get execution_time from image_scan_execution_history for age distribution + // Step 4: Fetch vulnerabilities with severity from image_scan_execution_result + // Images without scan data (not in image_scan_deploy_info) will not appear in results (zero vulnerabilities) + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + cia.image, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add prod/non-prod filter only if isProd is not nil + if isProd != nil { + query += " AND env.default = ?" 
+ queryParams = append(queryParams, *isProd) + } + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + // Complete the CTE and join with image_scan_deploy_info to get only scanned deployments + // Then fetch vulnerabilities with severity and execution_time + query += ` + ) + SELECT + iser.cve_store_name, + COALESCE(cs.standard_severity, cs.severity) as severity, + iseh.execution_time + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = iseh.id + INNER JOIN cve_store cs ON cs.name = iser.cve_store_name + WHERE ld.rn = 1 + AND isdi.image_scan_execution_history_id[1] != -1 + ` + + _, err := impl.dbConnection.Query(&results, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in getting severity insight data", "err", err, "isProd", isProd) + return nil, err + } + + return results, nil +} + +// GetVulnerabilityTrendDataByFilters returns vulnerability data with severity and execution time +// for calculating time-series vulnerability trend grouped by severity +// Only returns vulnerabilities from the LATEST deployed artifact for each app+env combination +// isProd: nil = all environments, true = prod only, false = non-prod only +func (impl ImageScanResultRepositoryImpl) GetVulnerabilityTrendDataByFilters(from, to *time.Time, isProd *bool) ([]*VulnerabilityTrendData, error) { + var results []*VulnerabilityTrendData + + // Query to get vulnerabilities from latest deployed images per app+env + // Step 1: Get latest deployment per app+env from cd_workflow_runner (source of truth for all deployments) + // Step 2: Join with image_scan_deploy_info to verify if this app+env has scanned image deployed + // image_scan_deploy_info contains env_id mapping and scan_execution_history_id for scanned images + // For object_type='app', the array image_scan_execution_history_id has length 1 (current deployed image's scan) + // Step 3: Get execution_time from image_scan_execution_history for trend analysis + // Step 4: Fetch vulnerabilities with severity from image_scan_execution_result + // Images without scan data (not in image_scan_deploy_info) will not appear in results (zero vulnerabilities) + // Filters by execution_time range for trend analysis + query := ` + WITH LatestDeployments AS ( + SELECT + p.app_id, + p.environment_id, + env.cluster_id, + cia.image, + ROW_NUMBER() OVER (PARTITION BY p.app_id, p.environment_id ORDER BY cwr.id DESC) AS rn + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE 
cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND env.active = true + ` + + var queryParams []interface{} + + // Add prod/non-prod filter only if isProd is not nil + if isProd != nil { + query += " AND env.default = ?" + queryParams = append(queryParams, *isProd) + } + + // Complete the CTE and join with image_scan_deploy_info to get only scanned deployments + // Then fetch vulnerabilities with severity and execution_time, filtered by time range + query += ` + ) + SELECT + iser.cve_store_name, + COALESCE(cs.standard_severity, cs.severity) as severity, + iseh.execution_time + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = iseh.id + INNER JOIN cve_store cs ON cs.name = iser.cve_store_name + WHERE ld.rn = 1 + AND isdi.image_scan_execution_history_id[1] != -1 + AND iseh.execution_time >= ? AND iseh.execution_time <= ? + ` + + queryParams = append(queryParams, from, to) + + _, err := impl.dbConnection.Query(&results, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in getting vulnerability trend data", "err", err, "from", from, "to", to, "isProd", isProd) + return nil, err + } + + return results, nil +} + +func (impl ImageScanResultRepositoryImpl) GetVulnerabilityRawData(cveName string, severities, envIds, clusterIds, appIds, deployInfoIds []int) ([]*VulnerabilityRawData, error) { + var results []*VulnerabilityRawData + + query := ` + WITH LatestDeployments AS ( + SELECT DISTINCT ON (p.app_id, p.environment_id) + p.app_id, + a.app_name, + p.environment_id, + env.environment_name as env_name, + env.cluster_id, + cia.image + FROM cd_workflow_runner cwr + INNER JOIN cd_workflow cw ON cw.id = cwr.cd_workflow_id + INNER JOIN pipeline p ON p.id = cw.pipeline_id + INNER JOIN app a ON a.id = p.app_id + INNER JOIN environment env ON env.id = p.environment_id + INNER JOIN ci_artifact cia ON cia.id = cw.ci_artifact_id + WHERE cwr.workflow_type = 'DEPLOY' + AND p.deleted = false + AND a.active = true + AND env.active = true + ` + + var queryParams []interface{} + + // Add filters to CTE + if len(envIds) > 0 { + query += " AND p.environment_id = ANY(?)" + queryParams = append(queryParams, pg.Array(envIds)) + } + + if len(clusterIds) > 0 { + query += " AND env.cluster_id = ANY(?)" + queryParams = append(queryParams, pg.Array(clusterIds)) + } + + if len(appIds) > 0 { + query += " AND p.app_id = ANY(?)" + queryParams = append(queryParams, pg.Array(appIds)) + } + + query += ` + ORDER BY p.app_id, p.environment_id, cwr.id DESC + ) + SELECT + iser.cve_store_name, + COALESCE(cs.standard_severity, cs.severity) as severity, + ld.app_id, + ld.app_name, + ld.environment_id as env_id, + ld.env_name, + iseh.execution_time, + iser.package, + iser.version as current_version, + iser.fixed_version + FROM LatestDeployments ld + INNER JOIN image_scan_deploy_info isdi + ON isdi.scan_object_meta_id = ld.app_id + AND isdi.env_id = ld.environment_id + AND isdi.object_type = 'app' + AND 
isdi.image_scan_execution_history_id[1] != -1 + INNER JOIN image_scan_execution_history iseh + ON iseh.id = isdi.image_scan_execution_history_id[1] + AND iseh.image = ld.image + INNER JOIN image_scan_execution_result iser + ON iser.image_scan_execution_history_id = iseh.id + ` + + // Add CVE name filter + if cveName != "" { + query += " AND iser.cve_store_name ILIKE ?" + queryParams = append(queryParams, "%"+cveName+"%") + } + + query += ` + INNER JOIN cve_store cs ON cs.name = iser.cve_store_name + ` + + // Add RBAC filter for deploy info IDs + if len(deployInfoIds) > 0 { + query += " WHERE isdi.id = ANY(?)" + queryParams = append(queryParams, pg.Array(deployInfoIds)) + + // Add severity filter with AND since WHERE already exists + if len(severities) > 0 { + query += " AND COALESCE(cs.standard_severity, cs.severity) = ANY(?)" + queryParams = append(queryParams, pg.Array(severities)) + } + } else { + // Add severity filter with WHERE since no deploy info filter + if len(severities) > 0 { + query += " WHERE COALESCE(cs.standard_severity, cs.severity) = ANY(?)" + queryParams = append(queryParams, pg.Array(severities)) + } + } + + _, err := impl.dbConnection.Query(&results, query, queryParams...) 
+ if err != nil { + impl.logger.Errorw("error in getting vulnerability raw data", "err", err, "cveName", cveName, "severities", severities) + return nil, err + } + + return results, nil +} diff --git a/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go b/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go index be4d20a578..9469c12b8a 100644 --- a/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go +++ b/pkg/policyGovernance/security/imageScanning/repository/bean/bean.go @@ -112,12 +112,13 @@ type ImageScanFilter struct { CVEName string `json:"cveName"` AppName string `json:"appName"` // ObjectName deprecated - ObjectName string `json:"objectName"` - EnvironmentIds []int `json:"envIds"` - ClusterIds []int `json:"clusterIds"` - Severity []int `json:"severity"` - SortOrder SortOrder `json:"sortOrder"` - SortBy SortBy `json:"sortBy"` // sort by objectName,envName,lastChecked + ObjectName string `json:"objectName"` + EnvironmentIds []int `json:"envIds"` + ClusterIds []int `json:"clusterIds"` + Severity []int `json:"severity"` + SortOrder SortOrder `json:"sortOrder"` + SortBy SortBy `json:"sortBy"` // sort by objectName,envName,lastChecked + ScanStatus ScanStatusType `json:"scanStatus,omitempty"` } type SortBy string @@ -127,3 +128,12 @@ const ( Asc SortOrder = "ASC" Desc SortOrder = "DESC" ) + +// ScanStatusType represents the scan status filter +type ScanStatusType string + +const ( + ScanStatusAll ScanStatusType = "" // default - show all (scanned + not-scanned) + ScanStatusScanned ScanStatusType = "scanned" + ScanStatusNotScanned ScanStatusType = "not-scanned" +) diff --git a/pkg/team/repository/TeamRepository.go b/pkg/team/repository/TeamRepository.go index d3e0695956..6947024455 100644 --- a/pkg/team/repository/TeamRepository.go +++ b/pkg/team/repository/TeamRepository.go @@ -17,6 +17,8 @@ package repository import ( + "time" + "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" ) @@ -32,6 +34,7 @@ type 
Team struct { type TeamRepository interface { Save(team *Team) error FindAllActive() ([]Team, error) + FindAllActiveInTimeRange(from, to *time.Time) ([]Team, error) FindOne(id int) (Team, error) FindByTeamName(name string) (Team, error) Update(team *Team) error @@ -108,3 +111,18 @@ func (impl TeamRepositoryImpl) FindByIds(ids []*int) ([]*Team, error) { func (impl TeamRepositoryImpl) GetConnection() *pg.DB { return impl.dbConnection } + +func (impl TeamRepositoryImpl) FindAllActiveInTimeRange(from, to *time.Time) ([]Team, error) { + var teams []Team + query := impl.dbConnection.Model(&teams).Where("active = ?", true) + + if from != nil { + query = query.Where("created_on >= ?", from) + } + if to != nil { + query = query.Where("created_on <= ?", to) + } + + err := query.Select() + return teams, err +} diff --git a/releasenotes.md b/releasenotes.md index c575949760..3b44e36bc5 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -1,12 +1,78 @@ -## v1.8.2 +## v2.0.0 +Devtron 2.0 focuses on improving Kubernetes operability at scale. It introduces centralized platform visibility, a restructured UI for faster navigation, and foundational security improvements—reducing operational overhead across CI/CD, infrastructure, and access management. + +--- ## Enhancements -- feat: linked ci should have artifacts of parent ci (#6836) + +### Overview Dashboards +We've introduced centralized overview dashboards to give teams instant visibility across critical dimensions of the system. + +- **Applications Overview** + View application health metrics and CI/CD pipeline activity in one place to quickly assess delivery performance and identify issues early. + +- **Infrastructure Overview** + Gain clear insights into cluster utilization and resource allocation for better visibility into Kubernetes capacity, usage, and optimization opportunities. 
+ +- **Security Overview** + Get an aggregated view of your security posture to identify risks, track vulnerabilities, and monitor overall security status effectively. + +--- + +### Reimagined Platform UI +Devtron's UI has been restructured into logical modules, making the platform more intuitive, discoverable, and easier to navigate. + +- **Application Management** + Deploy and manage applications, access application groups, and apply bulk edits from a unified workspace. + +- **Infrastructure Management** + Manage applications deployed via Helm, Argo CD, and Flux CD, access the chart store, and explore your Kubernetes resources using the resource browser. + +- **Security Centre** + Review vulnerability reports, manage security scans, and enforce security policies from a single control plane. + +- **Automation Enablement** + Configure and schedule job orchestration to power automated workflows and reduce manual operational overhead. + +- **Global Configuration** + Configure SSO, manage clusters and environments, register container registries, and define authorization and access policies centrally. + +--- + +### Command Bar for Faster Navigation +Use the Command Bar (Ctrl + K on Windows/Linux, Cmd + K on macOS) to quickly jump to any screen across the platform and revisit recently accessed items—reducing clicks and speeding up navigation between workflows. 
+ +--- + +### Additional Enhancements +- feat: Rollout 5.2.0 (#6889) +- feat: Added support for tcp in virtual service and changed the apiVersion for externalSecrets (#6892) +- feat: add helm_take_ownership and helm_redeployment_request columns to user_deployment_request table (#6888) +- feat: Added support to override container name (#6880) +- feat: Increase max length for TeamRequest name field (#6876) +- feat: Added namespace support for virtualService and destinationRule (#6868) +- feat: feature flag for encryption (#6856) +- feat: encryption for db credentials (#6852) + +--- + ## Bugs -- fix: clear cached reference charts for Deployment chart versions 4.18.0, 4.19.0, 1.0.0, and 1.1.0 (#6841) -- fix: Custom chart with dependencies are not working in Devtron Applications (#6834) -- fix: Terraform plugin fix in v1.0.1 (#6830) -## Others -- misc: Api specs and validations added (#6807) +- fix: migrate proxy chart dependencies and refactor related functions (#6899) +- fix: enhance validation and error handling in cluster update process (#6887) +- fix: Invalid type casting error for custom charts (#6883) +- fix: validation on team name (#6872) +- fix: sql injection (#6861) +- fix: user manager fix (#6854) +--- +## Others +- misc: Add support for migrating plugin metadata to parent metadata (#6902) +- misc: update UserDeploymentRequestWithAdditionalFields struct to include tableName for PostgreSQL compatibility (#6896) +- chore: rename SQL migration files for consistency (#6885) +- misc: Vc empty ns fix (#6871) +- misc: added validation on create environment (#6859) +- misc: migration unique constraint on mpc (#6851) +- misc: helm app details API spec (#6850) +- misc: api Spec Added for draft (#6849) +- misc: api Specs added for lock config (#6847) diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/.helmignore b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/.helmignore new file mode 100644 index 0000000000..50af031725 --- 
/dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/.image_descriptor_template.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/.image_descriptor_template.json new file mode 100644 index 0000000000..8a99a95664 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/.image_descriptor_template.json @@ -0,0 +1 @@ +{"server":{"deployment":{"image_tag":"{{.Tag}}","image":"{{.Name}}"}},"pipelineName": "{{.PipelineName}}","releaseVersion":"{{.ReleaseVersion}}","deploymentType": "{{.DeploymentType}}", "app": "{{.App}}", "env": "{{.Env}}", "appMetrics": {{.AppMetrics}}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/Chart.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/Chart.yaml new file mode 100644 index 0000000000..d6447056e4 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: deployment-chart_4-22-0 +version: 4.22.0 diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/README.md b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/README.md new file mode 100644 index 0000000000..07f18f2885 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/README.md @@ -0,0 +1,991 @@ + +# Deployment Chart - v4.22.0 + +## 1. 
Yaml File - + +### Container Ports + +This defines ports on which application services will be exposed to other services + +```yaml +ContainerPort: + - envoyPort: 8799 + idleTimeout: + name: app + port: 8080 + servicePort: 80 + nodePort: 32056 + supportStreaming: true + useHTTP2: true + protocol: TCP +``` + +| Key | Description | +| :--- | :--- | +| `envoyPort` | envoy port for the container. | +| `idleTimeout` | the duration of time that a connection is idle before the connection is terminated. | +| `name` | name of the port. | +| `port` | port for the container. | +| `servicePort` | port of the corresponding kubernetes service. | +| `nodePort` | nodeport of the corresponding kubernetes service. | +| `supportStreaming` | Used for high performance protocols like grpc where timeout needs to be disabled. | +| `useHTTP2` | Envoy container can accept HTTP2 requests. | +| `protocol` | Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP"| + +### EnvVariables +```yaml +EnvVariables: [] +``` +To set environment variables for the containers that run in the Pod. +### EnvVariablesFromSecretKeys +```yaml +EnvVariablesFromSecretKeys: + - name: ENV_NAME + secretName: SECRET_NAME + keyName: SECRET_KEY + +``` + It is used to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable. + + ### EnvVariablesFromConfigMapKeys +```yaml +EnvVariablesFromConfigMapKeys: + - name: ENV_NAME + configMapName: CONFIG_MAP_NAME + keyName: CONFIG_MAP_KEY + +``` + It is used to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable. + +### Liveness Probe + +If this check fails, kubernetes restarts the pod. This should return error code in case of non-recoverable error. 
+ +```yaml +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true + grpc: + port: 8080 + service: "" +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the liveness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for liveliness. | +| `periodSeconds` | It defines the time to check a given container for liveness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfil the liveness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as live. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | +| `grpc` | GRPC specifies an action involving a GRPC port. Port is a required field if using gRPC service for health probes. Number must be in the range 1 to 65535. Service (optional) is the name of the service to place in the gRPC HealthCheckRequest. | + + + +### MaxUnavailable + +```yaml + MaxUnavailable: 0 +``` +The maximum number of pods that can be unavailable during the update process. The value of "MaxUnavailable: " can be an absolute number or percentage of the replicas count. The default value of "MaxUnavailable: " is 25%. 
+ +### MaxSurge + +```yaml +MaxSurge: 1 +``` +The maximum number of pods that can be created over the desired number of pods. For "MaxSurge: " also, the value can be an absolute number or percentage of the replicas count. +The default value of "MaxSurge: " is 25%. + +### Min Ready Seconds + +```yaml +MinReadySeconds: 60 +``` +This specifies the minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available. This defaults to 0 (the Pod will be considered available as soon as it is ready). + +### Readiness Probe + +If this check fails, kubernetes stops sending traffic to the application. This should return error code in case of errors which can be recovered from if traffic is stopped. + +```yaml +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true + grpc: + port: 8080 + service: "" +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the readiness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for readiness. | +| `periodSeconds` | It defines the time to check a given container for readiness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfill the readiness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as ready. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. 
+| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | +| `grpc` | GRPC specifies an action involving a GRPC port. Port is a required field if using gRPC service for health probes. Number must be in the range 1 to 65535. Service (optional) is the name of the service to place in the gRPC HealthCheckRequest. | + + +### Pod Disruption Budget + +You can create `PodDisruptionBudget` for each application. A PDB limits the number of pods of a replicated application that are down simultaneously from voluntary disruptions. For example, an application would like to ensure the number of replicas running is never brought below the certain number. + +```yaml +podDisruptionBudget: + minAvailable: 1 +``` + +or + +```yaml +podDisruptionBudget: + maxUnavailable: 50% +``` + +You can specify either `maxUnavailable` or `minAvailable` in a PodDisruptionBudget and it can be expressed as integers or as a percentage + +| Key | Description | +| :--- | :--- | +| `minAvailable` | Evictions are allowed as long as they leave behind 1 or more healthy pods of the total number of desired replicas. | +| `maxUnavailable` | Evictions are allowed as long as at most 1 unhealthy replica among the total number of desired replicas. | + +### Ambassador Mappings + +You can create ambassador mappings to access your applications from outside the cluster. At its core a Mapping resource maps a resource to a service. + +```yaml +ambassadorMapping: + ambassadorId: "prod-emissary" + cors: {} + enabled: true + hostname: devtron.example.com + labels: {} + prefix: / + retryPolicy: {} + rewrite: "" + tls: + context: "devtron-tls-context" + create: false + hosts: [] + secretName: "" +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable ambassador mapping else set false.| +| `ambassadorId` | used to specify id for specific ambassador mappings controller. 
| +| `cors` | used to specify cors policy to access host for this mapping. | +| `weight` | used to specify weight for canary ambassador mappings. | +| `hostname` | used to specify hostname for ambassador mapping. | +| `prefix` | used to specify path for ambassador mapping. | +| `labels` | used to provide custom labels for ambassador mapping. | +| `retryPolicy` | used to specify retry policy for ambassador mapping. | +| `corsPolicy` | Provide cors headers on flagger resource. | +| `rewrite` | used to specify whether to redirect the path of this mapping and where. | +| `tls` | used to create or define ambassador TLSContext resource. | +| `extraSpec` | used to provide extra spec values which not present in deployment template for ambassador resource. | + +### Autoscaling + +This is connected to HPA and controls scaling up and down in response to request load. + +```yaml +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + containerResource: + enabled: true + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + + extraMetrics: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable autoscaling else set false.| +| `MinReplicas` | Minimum number of replicas allowed for scaling. | +| `MaxReplicas` | Maximum number of replicas allowed for scaling. | +| `TargetCPUUtilizationPercentage` | The target CPU utilization that is expected for a container. | +| `TargetMemoryUtilizationPercentage` | The target memory utilization that is expected for a container. | +| `extraMetrics` | Used to give external metrics for autoscaling. | +| `containerResource` | Used to scale resource as per container resource. | + +### Flagger + +You can use flagger for canary releases with deployment objects. It supports flexible traffic routing with istio service mesh as well. 
+ +```yaml +flaggerCanary: + addOtherGateways: [] + addOtherHosts: [] + analysis: + interval: 15s + maxWeight: 50 + stepWeight: 5 + threshold: 5 + annotations: {} + appProtocol: http + corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + createIstioGateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + enabled: false + gatewayRefs: null + headers: + request: + add: + x-some-header: value + labels: {} + loadtest: + enabled: true + url: http://flagger-loadtester.istio-system/ + match: + - uri: + prefix: / + port: 8080 + portDiscovery: true + retries: null + rewriteUri: / + targetPort: 8080 + thresholds: + latency: 500 + successRate: 90 + timeout: null +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable canary releases using flagger else set false.| +| `addOtherGateways` | To provide multiple istio gateways for flagger. | +| `addOtherHosts` | Add multiple hosts for istio service mesh with flagger. | +| `analysis` | Define how the canary release should progresss and at what interval. | +| `annotations` | Annotation to add on flagger resource. | +| `labels` | Labels to add on flagger resource. | +| `appProtocol` | Protocol to use for canary. | +| `corsPolicy` | Provide cors headers on flagger resource. | +| `createIstioGateway` | Set to true if you want to create istio gateway as well with flagger. | +| `headers` | Add headers if any. | +| `loadtest` | Enable load testing for your canary release. | + + + +### Fullname Override + +```yaml +fullnameOverride: app-name +``` +`fullnameOverride` replaces the release fullname created by default by devtron, which is used to construct Kubernetes object names. By default, devtron uses {app-name}-{environment-name} as release fullname. 
+ +### Image + +```yaml +image: + pullPolicy: IfNotPresent +``` + +Image is used to access images in kubernetes, pullpolicy is used to define the instances calling the image, here the image is pulled when the image is not present,it can also be set as "Always". + +### imagePullSecrets + +`imagePullSecrets` contains the docker credentials that are used for accessing a registry. + +```yaml +imagePullSecrets: + - regcred +``` +regcred is the secret that contains the docker credentials that are used for accessing a registry. Devtron will not create this secret automatically, you'll have to create this secret using dt-secrets helm chart in the App store or create one using kubectl. You can follow this documentation Pull an Image from a Private Registry [https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) . + +### Ingress + +This allows public access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + className: nginx + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` +Legacy deployment-template ingress format + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + path: "" + host: "" + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### Ingress Internal + +This allows private access to the url, please 
ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingressInternal: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### additionalBackends + +This defines additional backend path in the ingress . + +```yaml + hosts: + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 +``` + +### Init Containers +```yaml +initContainers: + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + args: + - sleep 300 + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate + + - name: nginx + image: nginx:1.14.2 + securityContext: + privileged: true + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] +``` +Specialized containers that run before app containers in a Pod. Init containers can contain utilities or setup scripts not present in an app image. One can use base image inside initContainer by setting the reuseContainerImage flag to `true`. + +### Istio + +Istio is a service mesh which simplifies observability, traffic management, security and much more with it's virtual services and gateways. 
+ +```yaml +istio: + enable: true + gateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + virtualService: + annotations: {} + enabled: false + gateways: [] + hosts: [] + http: + - corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + headers: + request: + add: + x-some-header: value + match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + retries: + attempts: 2 + perTryTimeout: 3s + rewriteUri: / + route: + - destination: + host: service1 + port: 80 + timeout: 12s + - route: + - destination: + host: service2 + labels: {} +``` + +### Pause For Seconds Before Switch Active +```yaml +pauseForSecondsBeforeSwitchActive: 30 +``` +To wait for given period of time before switch active the container. + +### Resources + +These define minimum and maximum RAM and CPU available to the application. + +```yaml +resources: + limits: + cpu: "1" + memory: "200Mi" + requests: + cpu: "0.10" + memory: "100Mi" +``` + +Resources are required to set CPU and memory usage. + +#### Limits + +Limits make sure a container never goes above a certain value. The container is only allowed to go up to the limit, and then it is restricted. + +#### Requests + +Requests are what the container is guaranteed to get. + +### Service + +This defines annotations and the type of service, optionally can define name also. + +Supports "ClientIP" and "None". Used to maintain session affinity. Enable + client IP based session affinity. + +```yaml + service: + type: ClusterIP + annotations: {} + sessionAffinity: + enabled: true + sessionAffinityConfig: {} +``` + +### Volumes + +```yaml +volumes: + - name: log-volume + emptyDir: {} + - name: logpv + persistentVolumeClaim: + claimName: logpvc +``` + +It is required when some values need to be read from or written to an external disk. 
+ +### Volume Mounts + +```yaml +volumeMounts: + - mountPath: /var/log/nginx/ + name: log-volume + - mountPath: /mnt/logs + name: logpvc + subPath: employee +``` + +It is used to provide mounts to the volume. + +### Affinity and anti-affinity + +```yaml +Spec: + Affinity: + Key: + Values: +``` + +Spec is used to define the desire state of the given container. + +Node Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node. + +Inter-pod affinity allow you to constrain which nodes your pod is eligible to be scheduled based on labels on pods. + +#### Key + +Key part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +#### Values + +Value part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +### Tolerations + +```yaml +tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +``` + +Taints are the opposite, they allow a node to repel a set of pods. + +A given pod can access the given node and avoid the given taint only if the given pod satisfies a given taint. + +Taints and tolerations are a mechanism which work together that allows you to ensure that pods are not placed on inappropriate nodes. Taints are added to nodes, while tolerations are defined in the pod specification. When you taint a node, it will repel all the pods except those that have a toleration for that taint. A node can have one or many taints associated with it. + +### Arguments + +```yaml +args: + enabled: false + value: [] +``` + +This is used to give arguments to command. + +### Command + +```yaml +command: + enabled: false + value: [] +``` + +It contains the commands for the server. + +| Key | Description | +| :--- | :--- | +| `enabled` | To enable or disable the command. | +| `value` | It contains the commands. 
| + + +### Containers +Containers section can be used to run side-car containers along with your main container within same pod. Containers running within same pod can share volumes and IP Address and can address each other @localhost. We can use base image inside container by setting the reuseContainerImage flag to `true`. + +```yaml + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate +``` + +### Prometheus + +```yaml + prometheus: + release: monitoring +``` + +It is a kubernetes monitoring tool and the name of the file to be monitored as monitoring in the given case.It describes the state of the prometheus. + +### rawYaml + +```yaml +rawYaml: + - apiVersion: v1 + kind: Service + metadata: + name: my-service + spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + type: ClusterIP +``` +Accepts an array of Kubernetes objects. You can specify any kubernetes yaml here and it will be applied when your app gets deployed. + +### Grace Period + +```yaml +GracePeriod: 30 +``` +Kubernetes waits for the specified time called the termination grace period before terminating the pods. By default, this is 30 seconds. If your pod usually takes longer than 30 seconds to shut down gracefully, make sure you increase the `GracePeriod`. + +A Graceful termination in practice means that your application needs to handle the SIGTERM message and begin shutting down when it receives it. This means saving all data that needs to be saved, closing down network connections, finishing any work that is left, and other similar tasks. + +There are many reasons why Kubernetes might terminate a perfectly healthy container. 
If you update your deployment with a rolling update, Kubernetes slowly terminates old pods while spinning up new ones. If you drain a node, Kubernetes terminates all pods on that node. If a node runs out of resources, Kubernetes terminates pods to free those resources. It’s important that your application handle termination gracefully so that there is minimal impact on the end user and the time-to-recovery is as fast as possible. + + +### Server + +```yaml +server: + deployment: + image_tag: 1-95a53 + image: "" +``` + +It is used for providing server configurations. + +#### Deployment + +It gives the details for deployment. + +| Key | Description | +| :--- | :--- | +| `image_tag` | It is the image tag | +| `image` | It is the URL of the image | + +### Service Monitor + +```yaml +servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace +``` + +It gives the set of targets to be monitored. + +### Db Migration Config + +```yaml +dbMigrationConfig: + enabled: false +``` + +It is used to configure database migration. + + +### KEDA Autoscaling +[KEDA](https://keda.sh) is a Kubernetes-based Event Driven Autoscaler. With KEDA, you can drive the scaling of any container in Kubernetes based on the number of events needing to be processed. KEDA can be installed into any Kubernetes cluster and can work alongside standard Kubernetes components like the Horizontal Pod Autoscaler(HPA). 
+
+Example for autoscaling with KEDA using Prometheus metrics is given below:
+```yaml
+kedaAutoscaling:
+  enabled: true
+  minReplicaCount: 1
+  maxReplicaCount: 2
+  idleReplicaCount: 0
+  pollingInterval: 30
+  advanced:
+    restoreToOriginalReplicaCount: true
+    horizontalPodAutoscalerConfig:
+      behavior:
+        scaleDown:
+          stabilizationWindowSeconds: 300
+          policies:
+          - type: Percent
+            value: 100
+            periodSeconds: 15
+  triggers:
+    - type: prometheus
+      metadata:
+        serverAddress: http://<prometheus-host>:9090
+        metricName: http_request_total
+        query: envoy_cluster_upstream_rq{appId="300", cluster_name="300-0", container="envoy",}
+        threshold: "50"
+  triggerAuthentication:
+    enabled: false
+    name:
+    spec: {}
+  authenticationRef: {}
+```
+Example for autoscaling with KEDA based on Kafka is given below:
+```yaml
+kedaAutoscaling:
+  enabled: true
+  minReplicaCount: 1
+  maxReplicaCount: 2
+  idleReplicaCount: 0
+  pollingInterval: 30
+  advanced: {}
+  triggers:
+    - type: kafka
+      metadata:
+        bootstrapServers: b-2.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-3.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-1.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092
+        topic: Orders-Service-ESP.info
+        lagThreshold: "100"
+        consumerGroup: oders-remove-delivered-packages
+        allowIdleConsumers: "true"
+  triggerAuthentication:
+    enabled: true
+    name: keda-trigger-auth-kafka-credential
+    spec:
+      secretTargetRef:
+        - parameter: sasl
+          name: keda-kafka-secrets
+          key: sasl
+        - parameter: username
+          name: keda-kafka-secrets
+          key: username
+  authenticationRef:
+    name: keda-trigger-auth-kafka-credential
+```
+
+### Winter-Soldier
+Winter Soldier can be used to:
+- clean up (delete) Kubernetes resources
+- reduce workload pods to 0
+
+**_NOTE:_** After deploying this, you can create the Hibernator object and provide the custom configuration that determines which workloads will be deleted, put to sleep, and more. 
For more information, check [the main repo](https://github.com/devtron-labs/winter-soldier).
+
+Given below are the template values you can provide for winter-soldier:
+```yaml
+winterSoldier:
+  enabled: false
+  apiVersion: pincher.devtron.ai/v1alpha1
+  action: sleep
+  timeRangesWithZone:
+    timeZone: "Asia/Kolkata"
+    timeRanges: []
+  targetReplicas: []
+  fieldSelector: []
+```
+Here,
+| Key | values | Description |
+| :--- | :--- | :--- |
+| `enabled` | `false`,`true` | Decides whether winter-soldier is enabled. |
+| `apiVersion` | `pincher.devtron.ai/v1beta1`, `pincher.devtron.ai/v1alpha1` | Specifies the API version. |
+| `action` | `sleep`,`delete`, `scale` | Specifies the action to be performed. |
+| `timeRangesWithZone`:`timeZone` | eg:- `"Asia/Kolkata"`,`"US/Pacific"` | Used to specify the time zone. (It uses the standard format; please refer to [this](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).) |
+| `timeRangesWithZone`:`timeRanges` | array of [ `timeFrom`, `timeTo`, `weekdayFrom`, `weekdayTo`] | Used to define the time periods/ranges during which the specified action should be performed. You can have multiple timeRanges. 
These settings will take `action` on Sat and Sun from 00:00 to 23:59:59. |
+| `targetReplicas` | `[n]` : n - number of replicas to scale. | This is a mandatory field when the `action` is `scale`. 
Default value is `[]`. |
+| `fieldSelector` | `- AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) ` | This value takes a list of methods used to select the resources on which the specified `action` is performed. |
+
+
+Here is an example:
+```yaml
+winterSoldier:
+  apiVersion: pincher.devtron.ai/v1alpha1
+  enabled: true
+  annotations: {}
+  labels: {}
+  timeRangesWithZone:
+    timeZone: "Asia/Kolkata"
+    timeRanges:
+      - timeFrom: 00:00
+        timeTo: 23:59:59
+        weekdayFrom: Sat
+        weekdayTo: Sun
+      - timeFrom: 00:00
+        timeTo: 08:00
+        weekdayFrom: Mon
+        weekdayTo: Fri
+      - timeFrom: 20:00
+        timeTo: 23:59:59
+        weekdayFrom: Mon
+        weekdayTo: Fri
+  action: scale
+  targetReplicas: [1,1,1]
+  fieldSelector:
+    - AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '10h'), Now())
+```
+The above settings will take action on `Sat` and `Sun` from 00:00 to 23:59:59, and on `Mon`-`Fri` from 00:00 to 08:00 and 20:00 to 23:59:59. If `action: sleep`, it hibernates the workloads at `timeFrom` and unhibernates them at `timeTo`. If `action: delete`, it deletes workloads at `timeFrom` and `timeTo`. Here the action is `scale`, so it scales the number of resource replicas to `targetReplicas: [1,1,1]`. Each element of the `targetReplicas` array is mapped to the corresponding element of the `timeRangesWithZone/timeRanges` array, so make sure both arrays have the same length; otherwise the changes cannot be observed.
+
+The above example will select the application objects which were created more than 10 hours ago, across all namespaces excluding the application's namespace. Winter soldier exposes the following functions to handle time, CPU and memory.
+
+- ParseTime - This function can be used to parse time. For eg to parse creationTimestamp use ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z')
+- AddTime - This can be used to add time. 
For eg AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '-10h') ll add 10h to the time. Use d for day, h for hour, m for minutes and s for seconds. Use negative number to get earlier time. +- Now - This can be used to get current time. +- CpuToNumber - This can be used to compare CPU. For eg any({{spec.containers.#.resources.requests}}, { MemoryToNumber(.memory) < MemoryToNumber('60Mi')}) will check if any resource.requests is less than 60Mi. + + +### Security Context +A security context defines privilege and access control settings for a Pod or Container. + +To add a security context for main container: +```yaml +containerSecurityContext: + allowPrivilegeEscalation: false +``` + +To add a security context on pod level: +```yaml +podSecurityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 +``` + +### Topology Spread Constraints +You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. + +```yaml +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: {} + minDomains: 1 + nodeAffinityPolicy: Ignore +``` + +### Persistent Volume Claim +You can use persistent volume claim to mount volume as per your usecase. + +```yaml +persistentVolumeClaim: + name: my-pvc + storageClassName: default + accessMode: + - ReadWriteOnce + mountPath: /tmp +``` + +### Vertical Pod Autoscaling +This is connected to VPA and controls scaling up and down in response to request load. 
+```yaml +verticalPodScaling: + enabled: true + resourcePolicy: {} + updatePolicy: {} + ``` + +### Scheduler Name + +You can provide you own custom scheduler to schedule your application + +```yaml +schedulerName: "" +``` + +### Deployment Metrics + +It gives the realtime metrics of the deployed applications + +| Key | Description | +| :--- | :--- | +| `Deployment Frequency` | It shows how often this app is deployed to production | +| `Change Failure Rate` | It shows how often the respective pipeline fails. | +| `Mean Lead Time` | It shows the average time taken to deliver a change to production. | +| `Mean Time to Recovery` | It shows the average time taken to fix a failed pipeline. | + +## 2. Show application metrics + +If you want to see application metrics like different HTTP status codes metrics, application throughput, latency, response time. Enable the Application metrics from below the deployment template Save button. After enabling it, you should be able to see all metrics on App detail page. By default it remains disabled. +![](../../../.gitbook/assets/deployment_application_metrics%20%282%29.png) + +Once all the Deployment template configurations are done, click on `Save` to save your deployment configuration. Now you are ready to create [Workflow](workflow/) to do CI/CD. + +### Helm Chart Json Schema + +Helm Chart [json schema](../../../scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json) is used to validate the deployment template values. + +### Other Validations in Json Schema + +The values of CPU and Memory in limits must be greater than or equal to in requests respectively. Similarly, In case of envoyproxy, the values of limits are greater than or equal to requests as mentioned below. 
+``` +resources.limits.cpu >= resources.requests.cpu +resources.limits.memory >= resources.requests.memory +envoyproxy.resources.limits.cpu >= envoyproxy.resources.requests.cpu +envoyproxy.resources.limits.memory >= envoyproxy.resources.requests.memory +``` diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/app-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/app-values.yaml new file mode 100644 index 0000000000..5f8216c0a7 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/app-values.yaml @@ -0,0 +1,530 @@ +# Mandatory configs + +podDisruptionBudget: {} +deploymentLabels: {} +deploymentAnnotations: {} + +containerSpec: + lifecycle: + enabled: false + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +replicaCount: 1 +MinReadySeconds: 60 +GracePeriod: 30 +image: + pullPolicy: IfNotPresent +restartPolicy: Always +service: + type: ClusterIP + # enabled: true + #name: "service-1234567890" + loadBalancerSourceRanges: [] + # loadBalancerSourceRanges: + # - 1.2.3.4/32 + # - 1.2.5.6/23 + annotations: {} + # test1: test2 + # test3: test4 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + protocol: TCP +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +# Optional configs +LivenessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + grpc: {} + + +ReadinessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + grpc: {} + + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + grpc: {} + + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/force-ssl-redirect: 'false' +# nginx.ingress.kubernetes.io/ssl-redirect: 'false' +# kubernetes.io/ingress.class: nginx +# nginx.ingress.kubernetes.io/rewrite-target: /$2 +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: example.com + tls: + enabled: false + secretName: example-secret + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + 
labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + annotation: {} + labels: {} + type: Deployment + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +flaggerCanary: + enabled: false + labels: {} + annotations: {} + createIstioGateway: + enabled: false + labels: {} + annotations: {} + host: + tls: + enabled: false + secretName: + # Istio gateways (optional) + addOtherGateways: [] + # Istio virtual service host names (optional) + addOtherHosts: [] + # Istio gateway refs (optional) + gatewayRefs: + # - name: istio-gateway + # namespace: istio-system + #service port + serviceport: 8080 + #containerPort + targetPort: 8080 + # discover all port open in container + portDiscovery: true + # application protocol (optional) + appProtocol: http + # Istio retry policy (optional) + retries: + # attempts: 3 + # perTryTimeout: 1s + # retryOn: "gateway-error,connect-failure,refused-stream" + # HTTP match conditions (optional) + match: + - uri: + prefix: / + # HTTP rewrite (optional) + rewriteUri: / + # timeout (optional) + timeout: + # Add headers (optional) + headers: + # request: + # add: + # x-some-header: "value" + # cross-origin resource sharing policy (optional) + corsPolicy: + # allowOrigin: + # 
- example.com + # allowMethods: + # - GET + # allowCredentials: false + # allowHeaders: + # - x-some-header + # maxAge: 24h + analysis: + # schedule interval (default 60s) + interval: 15s + # max number of failed metric checks before rollback + threshold: 5 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + thresholds: + # minimum req success rate (non 5xx responses) + # percentage (0-100) + successRate: 90 + # maximum req duration P99 + # milliseconds + latency: 500 + loadtest: + enabled: true + # load tester address + url: http://flagger-loadtester.istio-system/ + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +command: + workingDir: {} + enabled: false + value: [] + +args: + enabled: false + value: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + +#For adding custom labels to pods + +podLabels: {} +# customKey: customValue +podAnnotations: {} +# customKey: customValue + +rawYaml: [] + +topologySpreadConstraints: [] + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +containers: [] + ## Additional containers to run along with application pods. 
+ ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + +dbMigrationConfig: + enabled: false + +tolerations: [] + +podSecurityContext: {} + +containerSecurityContext: {} + +Spec: + Affinity: + Key: "" + # Key: kops.k8s.io/instancegroup + Values: "" + +affinity: + enabled: false + values: {} + +ambassadorMapping: + enabled: false + labels: {} + prefix: / + ambassadorId: "" + hostname: devtron.example.com + rewrite: "" + retryPolicy: {} + cors: {} + tls: + context: "" + create: false + secretName: "" + hosts: [] + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. 
Default: .spec.template.spec.containers[0] + minReplicaCount: 1 + maxReplicaCount: 2 + advanced: {} + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +# kedaHttpScaledObject: +# enabled: false +# minReplicaCount: 1 +# maxReplicaCount: 2 +# targetPendingRequests: +# scaledownPeriod: +# servicePort: 80 # port of the service (required) + +prometheus: + release: monitoring + +server: + deployment: + image_tag: 1-95af053 + image: "" + +servicemonitor: + additionalLabels: {} + +envoyproxy: + image: quay.io/devtron/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. 
+ ## + annotations: {} + +imagePullSecrets: [] + # - test1 + # - test2 +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +# - ip: "10.1.2.3" +# hostnames: +# - "foo.remote" +# - "bar.remote" + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/env-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/env-values.yaml new file mode 100644 index 0000000000..48b794e8f2 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/env-values.yaml @@ -0,0 +1,66 @@ +replicaCount: 1 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 + +Spec: + Affinity: + Key: "" + Values: "" + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# +secret: + enabled: false + data: {} +# my_own_secret: S3ViZXJuZXRlcyBXb3Jrcw== + +EnvVariables: [] +# - name: FLASK_ENV +# value: qa + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: "0.05" + memory: 50Mi + requests: + cpu: "0.01" + memory: 10Mi + + diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/pipeline-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/pipeline-values.yaml new file mode 100644 index 0000000000..dbe4db3e8e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/pipeline-values.yaml @@ -0,0 +1,6 @@ +deployment: + strategy: + recreate: {} + rolling: + maxSurge: "25%" + maxUnavailable: 1 diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/release-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/release-values.yaml new file mode 100644 index 0000000000..48eb3f482c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/release-values.yaml @@ -0,0 +1,14 @@ +server: + deployment: + image_tag: IMAGE_TAG + image: IMAGE_REPO + enabled: false +dbMigrationConfig: + enabled: false + +pauseForSecondsBeforeSwitchActive: 0 +waitForSecondsBeforeScalingDown: 0 +autoPromotionSeconds: 30 + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/schema.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/schema.json new file mode 100644 index 0000000000..6a332631a9 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/schema.json @@ -0,0 +1,1368 @@ + +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "containerExtraSpecs":{ + "type": "object", + "title": "containerExtraSpecs", + "description": "Define container extra specs here" + }, + "ContainerPort": { + "type": "array", + "description": "defines ports on which application services will be exposed to 
other services", + "title": "Container Port", + "items": { + "type": "object", + "properties": { + "envoyPort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "envoy port for the container", + "title": "Envoy Port" + }, + "idleTimeout": { + "type": "string", + "description": "duration of time for which a connection is idle before the connection is terminated", + "title": "Idle Timeout" + }, + "name": { + "type": "string", + "description": "name of the port", + "title": "Name" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Port", + "title": "port for the container" + }, + "servicePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port of the corresponding kubernetes service", + "title": "Service Port" + }, + "nodePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "nodeport of the corresponding kubernetes service", + "title": "Node Port" + }, + "supportStreaming": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "field to enable/disable timeout for high performance protocols like grpc", + "title": "Support Streaming" + }, + "useHTTP2": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": " field for setting if envoy container can accept(or not) HTTP2 requests", + "title": "Use HTTP2" + } + } + } + }, + "EnvVariables": { + "type": "array", + "items": {}, + "description": "contains environment variables needed by the containers", + "title": "Environment Variables" + }, + "EnvVariablesFromFieldPath":{ + "type": "array", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, 
status.podIP, status.podIPs", + "title": "EnvVariablesFromFieldPath", + "items": [ + { + "type": "object", + "properties": { + "name":{ + "type": "string", + "title": "name", + "description": "Env variable name to be" + }, + "fieldPath":{ + "type": "string", + "title": "fieldPath", + "description": "Path of the field to select in the specified API version" + } + } + } + ] + }, + "EnvVariablesFromSecretKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromSecretKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "secretName": { + "type": "string", + "title": "secretName", + "description": "Name of Secret from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "EnvVariablesFromConfigMapKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromConfigMapKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "configMapName": { + "type": "string", + "title": "configMapName", + "description": "Name of configMap from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." 
+ } + } + } + ] + }, + "GracePeriod": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "time for which Kubernetes waits before terminating the pods", + "title": "Grace Period" + }, + "LivenessProbe": { + "type": "object", + "description": "used by the kubelet to know when to restart a container", + "title": "Liveness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the liveness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", + "title": "Failure Threshold" + }, + "httpHeaders": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for liveness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for liveness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). 
Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "MaxSurge": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be created over the desired number of pods", + "title": "Maximum Surge" + }, + "MaxUnavailable": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be unavailable during the update process", + "title": "Maximum Unavailable" + }, + "MinReadySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", + "title": "Minimum Ready Seconds" + }, + "ReadinessProbe": { + "type": "object", + "description": "kubelet uses readiness probes to know when a container is ready to start accepting traffic", + "title": "Readiness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the readiness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to 
perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", + "title": "Failure Threshold" + }, + "httpHeader": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for readiness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for readiness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "networkPolicy": { + "type": "object", + "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "title": "Network Policy", + "properties": { + "enabled":{ + "type":"boolean", + "description": "used to enable or disable NetworkPolicy" + }, + "annotations":{ + "type": "object", + "description": "Annotations for NetworkPolicy" + }, + "labels":{ + "type":"object", + "description": "Labels for NetworkPolicy" + }, + "podSelector":{ + "type": "object", + "description": "Selects the pods to which this NetworkPolicy object applies", + "properties": { + "matchExpressions":{ + "type":"array", + "description": "list of label selector" + }, + "matchLabels":{ + "type":"object", + "description": "map of {key,value} pairs" + } + } + }, + "policyTypes":{ + "type":"array", + "description": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress,Egress." 
+ }, + "ingress":{ + "type":"array", + "description": "List of ingress rules to be applied to the selected pods" + }, + "egress":{ + "type":"array", + "description": "List of egress rules to be applied to the selected pods" + } + } + }, + "istio": { + "type": "object", + "description": "Istio Service mesh", + "title": "Istio" + }, + "flaggerCanary":{ + "type": "object", + "description": "Flagger for canary release with istio service mesh", + "title": "Flagger Canary Release" + }, + "Spec": { + "type": "object", + "description": "used to define the desire state of the given container", + "title": "Spec", + "properties": { + "Affinity": { + "type": "object", + "description": "Node/Inter-pod Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node/pods", + "title": "Affinity", + "properties": { + "Key": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "Key part of the label for node/pod selection", + "title": "Key" + } + ] + }, + "Values": { + "type": "string", + "description": "Value part of the label for node/pod selection", + "title": "Values" + }, + "key": { + "type": "string" + } + } + } + } + }, + "ambassadorMapping": { + "type": "object", + "description": "used to create ambassador mapping resource", + "title": "Mapping", + "properties": { + "ambassadorId": { + "type": "string", + "description": "used to specify id for specific ambassador mappings controller", + "title": "Ambassador ID" + }, + "cors": { + "type": "object", + "description": "used to specify cors policy to access host for this mapping", + "title": "CORS" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify whether to create an ambassador mapping or not", + "title": "Enabled" + }, + "weight": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify weight for 
canary ambassador mappings" + }, + "hostname": { + "type": "string", + "description": "used to specify hostname for ambassador mapping", + "title": "Hostname" + }, + "labels": { + "type": "object", + "description": "used to provide custom labels for ambassador mapping", + "title": "Labels" + }, + "prefix": { + "type": "string", + "description": "used to specify path for ambassador mapping", + "title": "Prefix" + }, + "retryPolicy": { + "type": "object", + "description": "used to specify retry policy for ambassador mapping", + "title": "Retry Policy" + }, + "rewrite": { + "type": "string", + "description": "used to specify whether to redirect the path of this mapping and where", + "title": "Rewrite" + }, + "tls": { + "type": "object", + "description": "used to create or define ambassador TLSContext resource", + "title": "TLS Context" + }, + "extraSpec": { + "type": "object", + "description": "used to provide extra spec values which not present in deployment template for ambassador resource", + "title": "Extra Spec" + } + } + }, + "args": { + "type": "object", + "description": " used to give arguments to command", + "title": "Arguments", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling aruguments", + "title": "Enabled" + }, + "value": { + "type": "array", + "description": "values of the arguments", + "title": "Value", + "items": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + } + } + }, + "autoscaling": { + "type": "object", + "description": "connected to HPA and controls scaling up and down in response to request load", + "title": "Autoscaling", + "properties": { + "MaxReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Maximum number of replicas allowed for scaling", + "title": "Maximum Replicas" + }, + "MinReplicas": { + "type": [ + "integer", 
+ "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Minimum number of replicas allowed for scaling", + "title": "Minimum Replicas" + }, + "TargetCPUUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target CPU utilization that is expected for a container", + "title": "TargetCPUUtilizationPercentage" + }, + "TargetMemoryUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target memory utilization that is expected for a container", + "title": "TargetMemoryUtilizationPercentage" + }, + "behavior": { + "type": "object", + "description": "describes behavior and scaling policies for that behavior", + "title": "Behavior" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling autoscaling", + "title": "Enabled" + }, + "labels": { + "type": "object", + "description": "labels for HPA", + "title": "labels" + }, + "annotations": { + "type": "object", + "description": "used to configure some options for HPA", + "title": "annotations" + }, + "extraMetrics": { + "type": "array", + "items": {}, + "description": "used to give external metrics for autoscaling", + "title": "Extra Metrics" + } + } + }, + "command": { + "type": "object", + "description": "contains the commands for the server", + "title": "Command", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling commands" + }, + "value": { + "type": "array", + "items": {}, + "description": "contains the commands", + "title": "Value" + }, + "workingDir": { + "type": "object", + "items": {}, + "description": "contains the working directory", + "title": "Working directory" + } + } + }, + "containerSecurityContext": { + "type": "object", + 
"description": " defines privilege and access control settings for a Container", + "title": "Container Security Context" + }, + "containers": { + "type": "array", + "items": {}, + "description": " used to run side-car containers along with the main container within same pod" + }, + "dbMigrationConfig": { + "type": "object", + "description": "used to configure database migration", + "title": "Db Migration Config", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling the config", + "title": "Enabled" + } + } + }, + "envoyproxy": { + "type": "object", + "description": "envoy is attached as a sidecar to the application container to collect metrics like 4XX, 5XX, throughput and latency", + "title": "Envoy Proxy", + "properties": { + "configMapName": { + "type": "string", + "description": "configMap containing configuration for Envoy", + "title": "ConfigMap" + }, + "lifecycle":{ + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled":{ + "type": "boolean" + }, + "postStart":{ + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created" + }, + "preStop":{ + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + }, + "image": { + "type": "string", + "description": "image of envoy to be used" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { 
+ "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + } + } + }, + "hostAliases":{ + "type": "array", + "title": "hostAliases", + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file", + "items": [ + { + "type": "object", + "properties": { + "ip":{ + "type": "string", + "title": "IP", + "description": "IP address of the host file entry" + }, + "hostnames":{ + "type": "array", + "description": "Hostnames for the above IP address", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "image": { + "type": "object", + "description": "used to access images in kubernetes", + "title": "Image", + "properties": { + "pullPolicy": { + "type": "string", + "description": "used to define the instances calling the image", + "title": "Pull Policy", + "enum": ["IfNotPresent", "Always"] + } + } + }, + "restartPolicy": { + "type": "string", + "description": "It restarts the docker container based on defined conditions.", + "title": "Restart Policy", + "enum": [ + "Always", + "OnFailure", + "Never" + ] + }, + "imagePullSecrets": { + "type": "array", + "items": {}, + "description": "contains the docker credentials that are used for accessing a registry", + "title": "Image PullSecrets" + }, + "winterSoldier": { + "type": "object", + "description": "allows to scale, sleep or delete the resource based on time.", + "title": "winterSoldier", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending 
on the winterSoldier controller", + "title": "Annotations" + }, + "labels": { + "type": "object", + "description": "labels for winterSoldier", + "title": "winterSoldier labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "apiVersion": { + "type": "string", + "description": "Api version for winterSoldier", + "title": "winterSoldier apiVersion", + "default": "pincher.devtron.ai/v1alpha1" + }, + "timeRangesWithZone": { + "type": "object", + "description": "describe time zone and time ranges to input in the winterSoldier", + "title": "Time Ranges With Zone", + "timeZone": { + "type": "string", + "description": "describe time zone, and follow standard format", + "title": "Time Zone" + }, + "timeRanges": { + "type": "array", + "items": {}, + "description": "used to take array of time ranges in which each element contains timeFrom, timeTo, weekdayFrom and weekdayTo.", + "title": "Time Ranges" + } + }, + "type": { + "type": "string", + "description": "describe the type of application Rollout/deployment.", + "title": "Type" + }, + "action": { + "type": "string", + "description": "describe the action to be performed by winterSoldier.", + "title": "Action" + }, + "targetReplicas": { + "type": "array", + "description": "describe the number of replicas to which the resource should scale up or down.", + "title": "Target Replicas" + }, + "fieldSelector": { + "type": "array", + "description": "it takes arrays of methods to select specific fields.", + "title": "Field Selector" + } + } + }, + "ingress": { + "type": "object", + "description": "allows public access to URLs", + "title": "Ingress", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name 
of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx" + }, + "labels": { + "type": "object", + "description": "labels for ingress", + "title": "Ingress labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "ingressInternal": { + "type": "object", + "description": "allows private access to the URLs", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx-internal" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", 
+ "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "initContainers": { + "type": "array", + "items": {}, + "description": "specialized containers that run before app containers in a Pod, can contain utilities or setup scripts not present in an app image", + "title": "Init Containers" + }, + "kedaAutoscaling": { + "type": "object", + "description": "Kubernetes-based event driven autoscaler. With KEDA, one can drive the scaling of any container in Kubernetes based on the no. of events needing to be processed", + "title": "KEDA Autoscaling", + "properties": { + "advanced": { + "type": "object" + }, + "authenticationRef": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "envSourceContainerName": { + "type": "string" + }, + "maxReplicaCount": { + "type": "integer" + }, + "minReplicaCount": { + "type": "integer" + }, + "triggerAuthentication": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "spec": { + "type": "object" + } + } + }, + "triggers": { + "type": "array", + "items": {} + } + } + }, + "containerSpec": { + "type":"object", + "description": "define the container specic configuration", + "title": "containerSpec", + "properties": { + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled":{ + "type": "boolean" + }, + "postStart":{ + "type": "object", + "title": "postStart", + 
"description": "PostStart is called immediately after a container is created.You could use this event to check that a required API is available before the container’s main work begins" + }, + "preStop":{ + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + } + } + }, + "pauseForSecondsBeforeSwitchActive": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "tell how much to wait for given period of time before switch active the container", + "title": "Pause For Seconds Before SwitchActive" + }, + "podAnnotations": { + "type":"object", + "description": "used to attach metadata and configs in Kubernetes", + "title": "Pod Annotations" + }, + "podDisruptionBudget": { + "type": "object", + "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "properties": { + "minAvailable":{ + "type": "string", + "title": "minAvailable", + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod" + }, + "maxUnavailable":{ + "type": "string", + "title": "maxUnavailable", + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod." 
+ } + } + }, + "deploymentLabels": { + "type": "object", + "description": "deploymentLabels is an object to define the label on deployment.", + "title": "DeploymentLabels" + }, + "deploymentAnnotations": { + "type": "object", + "description": "deploymentAnnotations is an object to define the annotations on deployment.", + "title": "DeploymentAnnotations" + }, + "podExtraSpecs":{ + "type": "object", + "description": "ExtraSpec for the pods to be configured", + "title": "podExtraSpecs" + }, + "podLabels": { + "type":"object", + "description": "key/value pairs that are attached to pods, are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system", + "title": "Pod Labels" + }, + "podSecurityContext": { + "type":"object", + "description": "defines privilege and access control settings for a Pod or Container", + "title": "Pod Security Context" + }, + "prometheus": { + "type": "object", + "description": "a kubernetes monitoring tool", + "title": "Prometheus", + "properties": { + "release": { + "type": "string", + "description": "name of the file to be monitored, describes the state of prometheus" + } + } + }, + "rawYaml": { + "type": "array", + "items": {}, + "description": "Accepts an array of Kubernetes objects. 
One can specify any kubernetes yaml here & it will be applied when a app gets deployed.", + "title": "Raw YAML" + }, + "replicaCount": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "count of Replicas of pod", + "title": "REplica Count" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + }, + "secret": { + "type": "object", + "properties": { + "data": { + "type": "object" + }, + "enabled": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "description": "used for providing server configurations.", + "title": "Server", + "properties": { + "deployment": { + "type": "object", + "description": "gives the details for deployment", + "title": "Deployment", + "properties": { + "image": { + "type": "string", + "description": "URL of the image", + "title": "Image" + }, + "image_tag": { + "type": "string", + "description": "tag of the image", + "title": "Image Tag" + } + } + } + } + }, + "service": { + "type": "object", + "description": "defines annotations and the type of service", + "title": "Service", + "properties": { + "annotations": 
{ + "type": "object", + "title": "Annotations", + "description": "annotations of service" + }, + "type": { + "type": "string", + "description": "type of service", + "title": "Type", + "enum": [ + "ClusterIP", + "LoadBalancer", + "NodePort", + "ExternalName" + ] + } + } + }, + "serviceAccount": { + "type": "object", + "description": "defines service account for pods", + "title": "Service Account", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service account" + }, + "name": { + "type": "string", + "description": "name of service account", + "title": "Name" + }, + "create": { + "type": "boolean" + } + } + }, + "servicemonitor": { + "type": "object", + "description": "gives the set of targets to be monitored", + "title": "Service Monitor", + "properties": { + "additionalLabels": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array", + "items": {}, + "description": "a mechanism which work together with Taints which ensures that pods are not placed on inappropriate nodes", + "title": "Tolerations" + }, + "topologySpreadConstraints": { + "type": "array", + "items": {}, + "description": "used to control how Pods are spread across a cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains", + "title": "Topology Spread Constraints" + }, + "volumeMounts": { + "type": "array", + "items": {}, + "description": "used to provide mounts to the volume", + "title": "Volume Mounts" + }, + "volumes": { + "type": "array", + "items": {}, + "description": "required when some values need to be read from or written to an external disk", + "title": "Volumes" + }, + "waitForSecondsBeforeScalingDown": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Wait for given period of time before scaling down the container", + "title": "Wait For Seconds Before Scaling Down" + } + } +} + diff --git 
a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/secrets-test-values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/secrets-test-values.yaml new file mode 100644 index 0000000000..4a20404db8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/secrets-test-values.yaml @@ -0,0 +1 @@ +{"ConfigSecrets":{"enabled":true,"secrets":[{"data":{"standard_key":"c3RhbmRhcmQtdmFsdWU="},"external":false,"externalType":"","mountPath":"/test","name":"normal-secret","type":"volume"},{"data":{"secret_key":"U0VDUkVUIERBVEE="},"external":true,"externalType":"AWSSecretsManager","mountPath":"","name":"external-secret-3","type":"environment"}]}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/NOTES.txt b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/NOTES.txt new file mode 100644 index 0000000000..2b14478168 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range $.Values.ingress.paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".Chart.Name .fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include ".Chart.Name .fullname" . 
}}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".Chart.Name .fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".Chart.Name .name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/_helpers.tpl new file mode 100644 index 0000000000..10de322e2a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/_helpers.tpl @@ -0,0 +1,167 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define ".Chart.Name .name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create service name +*/}} +{{- define ".servicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 55 | trimSuffix "-" -}}-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create preview service name +*/}} +{{- define ".previewservicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 55 | trimSuffix "-" -}}-preview +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- $name := default 
.Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define ".Chart.Name .fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define ".Chart.Name .chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define ".Chart.Name .color" -}} +{{- $active0 := (index .Values.server.deployment 0).enabled -}} +{{/* +{{- $active1 := (index .Values.server.deployment 1).enabled -}} +*/}} +{{- $active1 := include "safeenabledcheck" . 
-}} +{{- $active := and $active0 $active1 -}} +{{- $active -}} +{{- end -}} + +{{- define "safeenabledcheck" -}} +{{- if (eq (len .Values.server.deployment) 2) -}} + {{- if (index .Values.server.deployment 1).enabled -}} + {{- $active := true -}} + {{- $active -}} + {{- else -}} + {{- $active := false -}} + {{- $active -}} + {{- end -}} +{{- else -}} + {{- $active := false -}} + {{- $active -}} +{{- end -}} +{{- end -}} + + +{{- define "isCMVolumeExists" -}} + {{- $isCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $isCMVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isCMVolumeExists -}} +{{- end -}} + +{{- define "isSecretVolumeExists" -}} + {{- $isSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $isSecretVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isSecretVolumeExists -}} +{{- end -}} + + + + +{{- define "serviceMonitorEnabled" -}} + {{- $SMenabled := false -}} + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if and .servicemonitor.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- end }} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{- define "VerticalPodAutoScalingEnabled" -}} + {{- $SMenabled := false -}} + {{- if and .Values.verticalPodScaling.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* Create the name of the service account to use */}} +{{- define "serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include ".Chart.Name .fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* Check for app/release labels in customPodLabels and replace accordingly */}} +{{- define "customPodLabelsContainsApp" -}} + {{- $LabelsContain := false -}} + {{- if hasKey .Values.customPodLabels "app" }} + {{- $LabelsContain = true -}} + {{- end }} + {{- $LabelsContain -}} +{{- end -}} + +{{- define "customPodLabelsContainsRelease" -}} + {{- $LabelsContain := false -}} + {{- if hasKey .Values.customPodLabels "release" }} + {{- $LabelsContain = true -}} + {{- end }} + {{- $LabelsContain -}} +{{- end -}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/ambassador.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/ambassador.yaml new file mode 100644 index 0000000000..9d4a431c26 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/ambassador.yaml @@ -0,0 +1,94 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ambassadorMapping.enabled }} +{{- with $.Values.ambassadorMapping }} +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + {{- if .name }} + name: {{ .name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }}-mapping + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .labels }} +{{ toYaml .labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .ambassadorId }} + ambassador_id: {{ .ambassadorId }} + {{- end }} + {{- if .hostname }} + hostname: {{ .hostname | quote }} + {{- end }} + prefix: {{ .prefix }} + {{- if .rewrite }} + rewrite: {{ .rewrite }} + {{- end }} + service: {{ $svcName }}.{{ $.Release.Namespace }}:{{ $svcPort }} + {{- if .retryPolicy }} + retry_policy: +{{ toYaml .retryPolicy | indent 4 }} + {{- end }} + {{- if .cors }} + cors: +{{ toYaml .cors | indent 4 }} + {{- end }} + {{- if .weight }} + weight: {{ .weight }} + {{- end }} + {{- if .method }} + method: {{ .method }} + {{- end }} + {{- if .extraSpec }} +{{ toYaml .extraSpec | indent 2 }} + {{- end }} + {{- if .tls }} + {{- if .tls.context }} + tls: {{ .tls.context }} +{{- if .tls.create }} +--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .tls.context }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .tls.labels }} +{{ toYaml .tls.labels | 
nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .tls.secretName }} + secret: {{ .tls.secretName }} + {{- end }} + {{- if .tls.hosts }} + hosts: +{{ toYaml .tls.hosts | nindent 4 }} + {{- end }} + {{- if .tls.extraSpec }} +{{ toYaml .tls.extraSpec | indent 2 }} + {{- end }} +{{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/configmap.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/configmap.yaml new file mode 100644 index 0000000000..4e7879665e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/configmap.yaml @@ -0,0 +1,22 @@ +{{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +data: +{{ toYaml .data | trim | indent 2 }} + {{- end}} + {{- end}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/deployment.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/deployment.yaml new file mode 100644 index 0000000000..eed0b4acc3 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/deployment.yaml @@ -0,0 +1,1264 @@ + {{- $hasCMEnvExists := false -}} + {{- $hasCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $hasCMVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} 
+ {{- $hasCMEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + {{- $hasPVCExists := false -}} + {{- if .Values.persistentVolumeClaim.name }} + {{- $hasPVCExists = true }} + {{- end }} + + {{- $hasSecretEnvExists := false -}} + {{- $hasSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $hasSecretVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasSecretEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{ $CustomLabelsApp:= include "customPodLabelsContainsApp" . }} + {{ $CustomLabelsRelease:= include "customPodLabelsContainsRelease" . }} + + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +{{- if .Values.deploymentLabels }} +{{ toYaml .Values.deploymentLabels | indent 4 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + +{{- if .Values.deploymentAnnotations }} + annotations: +{{ toYaml .Values.deploymentAnnotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: +{{- if .Values.customMatchLabels }} +{{ toYaml .Values.customMatchLabels | indent 6 }} +{{- else }} + app: {{ .Values.customPodLabels.app | default (include ".Chart.Name .name" $) }} + release: {{ .Values.customPodLabels.release | default $.Release.Name }} +{{- end }} + replicas: {{ $.Values.replicaCount }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- if not (eq "true" $CustomLabelsApp) }} + app: {{ 
.Values.customPodLabels.app | default (include ".Chart.Name .name" $) }} + {{- end }} + {{- if not (eq "true" $CustomLabelsRelease) }} + release: {{ .Values.customPodLabels.release |default $.Release.Name }} + {{- end }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +{{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 8 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} +{{- end }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + spec: +{{- if $.Values.podExtraSpecs }} +{{ toYaml .Values.podExtraSpecs | indent 6 }} +{{- end }} + terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} +{{- if $.Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 8 }} +{{- end }} +{{- if and $.Values.Spec.Affinity.Key $.Values.Spec.Affinity.Values }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ $.Values.Spec.Affinity.Key }} + operator: In + values: + - {{ $.Values.Spec.Affinity.Values | default "nodes" }} +{{- else if $.Values.affinity.enabled }} + affinity: +{{ toYaml .Values.affinity.values | indent 8 }} +{{- end }} +{{- if $.Values.serviceAccountName }} + serviceAccountName: {{ $.Values.serviceAccountName }} +{{- else }} + serviceAccountName: {{ template "serviceAccountName" . }} +{{- end }} +{{- if $.Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} +{{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- if $.Values.imagePullSecrets}} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . 
}} + {{- end }} +{{- end}} +{{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + {{- if semverCompare "<=1.30-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if .minDomains }} + minDomains: {{ .minDomains }} + {{- end }} + {{- end }} + {{- if .nodeAffinityPolicy }} + nodeAffinityPolicy: {{ .nodeAffinityPolicy }} + {{- end }} + {{- if .nodeTaintsPolicy }} + nodeTaintsPolicy: {{ .nodeTaintsPolicy }} + {{- end }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.topologySpreadConstraint }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraint }} +{{- end }} +{{- if $.Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if $.Values.restartPolicy }} + restartPolicy: {{ $.Values.restartPolicy }} +{{- else }} + restartPolicy: Always +{{- end }} +{{- if $.Values.initContainers}} + initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} +{{- if 
.resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: + - name: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} +{{- if $.Values.resizePolicy }} + resizePolicy: +{{ toYaml .Values.resizePolicy | indent 12 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: {{ .protocol | default "TCP" }} + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml $.Values.command.value | indent 12 -}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 
-}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end}} + {{- range $.Values.EnvVariables }} + {{- if and .name .value }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromConfigMapKeys }} + {{- if and .name .configMapName .keyName }} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + 
{{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp $.Values.LivenessProbe.grpc }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} + {{- if $.Values.LivenessProbe.grpc }} + grpc: +{{ toYaml .Values.LivenessProbe.grpc | indent 14 }} + {{- end }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp $.Values.ReadinessProbe.grpc }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + scheme: {{ $.Values.ReadinessProbe.scheme }} + {{- if $.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range 
$.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ $.Values.ReadinessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} + {{- if $.Values.ReadinessProbe.grpc }} + grpc: +{{ toYaml .Values.ReadinessProbe.grpc | indent 14 }} + {{- end}} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp $.Values.StartupProbe.grpc }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.StartupProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.StartupProbe.periodSeconds }} + successThreshold: {{ $.Values.StartupProbe.successThreshold }} + timeoutSeconds: {{ $.Values.StartupProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.StartupProbe.failureThreshold }} + {{- if $.Values.StartupProbe.grpc }} + grpc: +{{ toYaml .Values.StartupProbe.grpc | indent 14 }} + {{- end}} 
+{{- end }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 12 }} +{{- end }} +{{- if $.Values.persistentVolumeClaim.name }} + - name: {{ .Values.persistentVolumeClaim.name }}-vol + mountPath: {{ .Values.persistentVolumeClaim.mountPath | default "/tmp" }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . }} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for others secrets the mount path will be .data[i].secretKey + - name: {{ $cmName | replace "." 
"-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: envoy-{{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/ +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + livenessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := 
.Values.containers }}
+{{- if .reuseContainerImage}}
+        - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }}
+          image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}"
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+{{- if .env }}
+          env:
+{{ toYaml .env | indent 12 }}
+{{- end }}
+ {{- if .envFrom }}
+          envFrom:
+{{ toYaml .envFrom | indent 12 }}
+{{- end }}
+{{- if .securityContext }}
+          securityContext:
+{{ toYaml .securityContext | indent 12 }}
+{{- end }}
+{{- if .command}}
+          command:
+{{ toYaml .command | indent 12 -}}
+{{- end}}
+{{- if .resizePolicy }}
+          resizePolicy:
+{{ toYaml .resizePolicy | indent 12}}
+{{- end }}
+{{- if .resources}}
+          resources:
+{{ toYaml .resources | indent 12 -}}
+{{- end}}
+{{- if .volumeMounts}}
+          volumeMounts:
+{{ toYaml .volumeMounts | indent 12 -}}
+{{- end}}
+{{- else}}
+        -
+{{ toYaml . | indent 10 }}
+{{- end}}
+{{- end}}
+{{- end}}
+
+
+      volumes:
+  {{- if $.Values.appMetrics }}
+        - name: envoy-config-volume
+          configMap:
+            name: sidecar-config-{{ template ".Chart.Name .name" $ }}
+  {{- end }}
+{{- with .Values.volumes }}
+{{ toYaml . | trim | indent 8 }}
+{{- end }}
+{{- if .Values.persistentVolumeClaim.name }}
+        - name: {{.Values.persistentVolumeClaim.name}}-vol
+          persistentVolumeClaim:
+            claimName: {{.Values.persistentVolumeClaim.name }}
+{{- end}}
+      {{- if .Values.ConfigMaps.enabled }}
+      {{- range .Values.ConfigMaps.maps }}
+      {{- if eq .type "volume"}}
+        - name: {{ .name | replace "." "-"}}-vol
+          configMap:
+            {{- if eq .external true }}
+            name: {{ .name }}
+            {{- else if eq .external false }}
+            name: {{ .name}}-{{ $.Values.app }}
+            {{- end }}
+            {{- if eq (len .filePermission) 0 }}
+            {{- else }}
+            defaultMode: {{ .filePermission}}
+            {{- end }}
+      {{- end }}
+      {{- end }}
+      {{- end }}
+
+      {{- if .Values.ConfigSecrets.enabled }}
+      {{- range .Values.ConfigSecrets.secrets }}
+      {{- if eq .type "volume"}}
+        - name: {{ .name | replace "." 
"-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) (eq (.Values.appMetrics) false) }} []{{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) (eq (.Values.appMetrics) false) }} [] {{- end }} + + revisionHistoryLimit: 3 +## pauseForSecondsBeforeSwitchActive: {{ $.Values.pauseForSecondsBeforeSwitchActive }} +# waitForSecondsBeforeScalingDown: {{ $.Values.waitForSecondsBeforeScalingDown }} + strategy: + {{- if eq .Values.deploymentType "ROLLING" }} + type: "RollingUpdate" + rollingUpdate: + maxSurge: {{ $.Values.deployment.strategy.rolling.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.rolling.maxUnavailable }} + {{- end }} + {{- if eq .Values.deploymentType "RECREATE" }} + type: "Recreate" + {{- end }} +{{- if $.Values.secondaryWorkload.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include ".Chart.Name .fullname" $ }}-{{ $.Values.secondaryWorkload.postfix | default "sec" }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +{{- if .Values.deploymentLabels }} +{{ toYaml .Values.deploymentLabels | indent 4 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + +{{- if .Values.deploymentAnnotations }} + annotations: +{{ 
toYaml .Values.deploymentAnnotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: +{{- if .Values.customMatchLabels }} +{{ toYaml .Values.customMatchLabels | indent 6 }} +{{- else }} + app: {{ .Values.customPodLabels.app | default (include ".Chart.Name .name" $) }} + release: {{ .Values.customPodLabels.release | default $.Release.Name }} +{{- end }} + replicas: {{ $.Values.secondaryWorkload.replicaCount | default 1 }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- if not (eq "true" $CustomLabelsApp) }} + app: {{ .Values.customPodLabels.app | default (include ".Chart.Name .name" $) }} + {{- end }} + {{- if not (eq "true" $CustomLabelsRelease) }} + release: {{ .Values.customPodLabels.release |default $.Release.Name }} + {{- end }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +{{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 8 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} +{{- end }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} + spec: +{{- if $.Values.podExtraSpecs }} +{{ toYaml .Values.podExtraSpecs | indent 6 }} +{{- end }} + terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} +{{- if $.Values.hostAliases }} + hostAliases: +{{ toYaml .Values.hostAliases | indent 8 }} +{{- end }} +{{- with $.Values.secondaryWorkload }} +{{- if and .Spec.Affinity.Key .Spec.Affinity.Values }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Spec.Affinity.Key }} + operator: In + values: + - {{ .Spec.Affinity.Values | default "nodes" }} +{{- else if .affinity.enabled }} + affinity: +{{ toYaml .affinity.values | indent 8 }} +{{- end }} +{{- end 
}} +{{- if $.Values.serviceAccountName }} + serviceAccountName: {{ $.Values.serviceAccountName }} +{{- else }} + serviceAccountName: {{ template "serviceAccountName" . }} +{{- end }} +{{- if $.Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} +{{- end }} + {{- if $.Values.secondaryWorkload.tolerations }} + tolerations: +{{ toYaml $.Values.secondaryWorkload.tolerations | indent 8 }} + {{- end }} +{{- if $.Values.imagePullSecrets}} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end}} +{{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + {{- if semverCompare "<=1.30-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if .minDomains }} + minDomains: {{ .minDomains }} + {{- end }} + {{- end }} + {{- if .nodeAffinityPolicy }} + nodeAffinityPolicy: {{ .nodeAffinityPolicy }} + {{- end }} + {{- if .nodeTaintsPolicy }} + nodeTaintsPolicy: {{ .nodeTaintsPolicy }} + {{- end }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.topologySpreadConstraint }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraint }} +{{- end }} +{{- if $.Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if $.Values.restartPolicy }} + restartPolicy: {{ $.Values.restartPolicy }} +{{- else }} + restartPolicy: Always +{{- end }} +{{- if $.Values.initContainers}} + 
initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: + - name: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} +{{- if $.Values.resizePolicy }} + resizePolicy: +{{ toYaml .Values.resizePolicy | 
indent 12 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: {{ .protocol | default "TCP" }} + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml $.Values.command.value | indent 12 -}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end}} + {{- range $.Values.EnvVariables }} + {{- if and .name .value }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromConfigMapKeys }} + {{- if and .name .configMapName .keyName 
}} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp $.Values.LivenessProbe.grpc }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} + {{- if 
$.Values.LivenessProbe.grpc }} + grpc: +{{ toYaml .Values.LivenessProbe.grpc | indent 14 }} + {{- end }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp $.Values.ReadinessProbe.grpc }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + scheme: {{ $.Values.ReadinessProbe.scheme }} + {{- if $.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ $.Values.ReadinessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} + {{- if $.Values.ReadinessProbe.grpc }} + grpc: +{{ toYaml .Values.ReadinessProbe.grpc | indent 14 }} + {{- end}} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp $.Values.StartupProbe.grpc }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and 
$.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.StartupProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.StartupProbe.periodSeconds }} + successThreshold: {{ $.Values.StartupProbe.successThreshold }} + timeoutSeconds: {{ $.Values.StartupProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.StartupProbe.failureThreshold }} + {{- if $.Values.StartupProbe.grpc }} + grpc: +{{ toYaml .Values.StartupProbe.grpc | indent 14 }} + {{- end}} +{{- end }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 12 }} +{{- end }} +{{- if $.Values.persistentVolumeClaim.name }} + - name: {{ .Values.persistentVolumeClaim.name }}-vol + mountPath: {{ .Values.persistentVolumeClaim.mountPath | default "/tmp" }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . 
}} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for others secrets the mount path will be .data[i].secretKey + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: envoy-{{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: 
/etc/envoy-config/ +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + livenessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := .Values.containers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .env }} + env: +{{ toYaml .env | indent 12 }} +{{- end }} + {{- if .envFrom }} + envFrom: +{{ toYaml .envFrom | indent 12 }} +{{- end }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resizePolicy }} + resizePolicy: +{{ toYaml .resizePolicy | indent 12}} +{{- end }} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + + + volumes: + {{- if $.Values.appMetrics }} + - name: envoy-config-volume + configMap: + name: sidecar-config-{{ template ".Chart.Name .name" $ }} + {{- end }} +{{- with .Values.volumes }} +{{ toYaml . | trim | indent 8 }} +{{- end }} +{{- if .Values.persistentVolumeClaim.name }} + - name: {{.Values.persistentVolumeClaim.name}}-vol + persistentVolumeClaim: + claimName: {{.Values.persistentVolumeClaim.name }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." 
"-"}}-vol + configMap: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) (eq (.Values.appMetrics) false) }} []{{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) (eq (.Values.appMetrics) false) }} [] {{- end }} + + revisionHistoryLimit: 3 +## pauseForSecondsBeforeSwitchActive: {{ $.Values.pauseForSecondsBeforeSwitchActive }} +# waitForSecondsBeforeScalingDown: {{ $.Values.waitForSecondsBeforeScalingDown }} + strategy: + {{- if eq .Values.deploymentType "ROLLING" }} + type: "RollingUpdate" + rollingUpdate: + maxSurge: {{ $.Values.deployment.strategy.rolling.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.rolling.maxUnavailable }} + {{- end }} + {{- if eq .Values.deploymentType "RECREATE" }} + type: "Recreate" + {{- end }} +{{- end }} + diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/externalsecrets.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/externalsecrets.yaml new file mode 
100644 index 0000000000..6b6682c0a6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/externalsecrets.yaml @@ -0,0 +1,84 @@ +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external true }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} +{{- if .esoSecretData.secretStore }} +--- +{{- if $.Capabilities.APIVersions.Has "external-secrets.io/v1" }} +apiVersion: external-secrets.io/v1 +{{- else }} +apiVersion: external-secrets.io/v1beta1 +{{- end }} +kind: SecretStore +metadata: + name: {{ .name}} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + provider: + {{- toYaml .esoSecretData.secretStore | nindent 4 }} +{{- end }} +--- +{{- if $.Capabilities.APIVersions.Has "external-secrets.io/v1" }} +apiVersion: external-secrets.io/v1 +{{- else }} +apiVersion: external-secrets.io/v1beta1 +{{- end }} +kind: ExternalSecret +metadata: + name: {{ .name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .esoSecretData.refreshInterval }} + refreshInterval: {{ .esoSecretData.refreshInterval }} + {{- else }} + refreshInterval: 1h + {{- end}} + {{- if and .esoSecretData.secretStoreRef (not .esoSecretData.secretStore) }} + secretStoreRef: +{{ toYaml .esoSecretData.secretStoreRef | indent 4 }} + {{- else }} + secretStoreRef: + name: {{ .name}} + kind: 
SecretStore + {{- end }} + target: + name: {{ .name}} + {{- if .esoSecretData.template }} + template: + {{- toYaml .esoSecretData.template | nindent 6 }} + {{- end }} + creationPolicy: Owner + {{- if .esoSecretData.esoDataFrom }} + dataFrom: + {{- toYaml .esoSecretData.esoDataFrom | nindent 4 }} + {{- else }} + data: + {{- range .esoSecretData.esoData }} + - secretKey: {{ .secretKey }} + remoteRef: + key: {{ .key }} + {{- if .property }} + property: {{ .property }} + {{- end }} + {{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/flagger.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/flagger.yaml new file mode 100644 index 0000000000..766098fb61 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/flagger.yaml @@ -0,0 +1,164 @@ +{{- if .Values.flaggerCanary.enabled }} +{{ if .Values.flaggerCanary.createIstioGateway.enabled -}} +{{- with .Values.flaggerCanary.createIstioGateway }} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .labels }} +{{ toYaml .labels | indent 4 }} + {{- end }} +{{- if .annotations }} + annotations: +{{ toYaml .annotations | indent 4 }} +{{- end }} +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - {{ .host | quote -}} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: + - {{ .host | quote }} + tls: + mode: SIMPLE + credentialName: {{ 
.tls.secretName }} +{{ end }} +{{ end }} +{{ end }} +{{ end }} +--- +{{- if .Values.flaggerCanary.enabled }} +{{- with .Values.flaggerCanary }} +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-canary + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .labels }} +{{ toYaml .labels | indent 4 }} + {{- end }} +{{- if .annotations }} + annotations: +{{ toYaml .annotations | indent 4 }} +{{- end }} +spec: + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} +{{- if $.Values.autoscaling.enabled }} + autoscalerRef: + apiVersion: autoscaling/v1 + kind: HorizontalPodAutoscaler + name: {{ template ".Chart.Name .fullname" $ }}-hpa +{{- end }} + service: + portDiscovery: {{ .portDiscovery }} + port: {{ .serviceport }} + targetPort: {{ .targetPort }} + {{- if .appProtocol }} + appProtocol: {{ .appProtocol }} + {{- end }} +{{- if $.Values.flaggerCanary.gatewayRefs }} + gatewayRefs: +{{ toYaml $.Values.flaggerCanary.gatewayRefs | indent 6 }} +{{- end }} + {{- if or .createIstioGateway.enabled .addOtherGateways }} + gateways: + {{- if .createIstioGateway.enabled }} + - {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- if .addOtherGateways }} + {{- range .addOtherGateways }} + - {{ . }} + {{- end }} + {{- end }} + {{- end }} + {{- if or .createIstioGateway.enabled .addOtherHosts }} + hosts: + {{- if .createIstioGateway.enabled }} + - {{ .createIstioGateway.host | quote }} + {{- end }} + {{- if .addOtherHosts }} + {{- range .addOtherHosts }} + - {{ . 
| quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .retries }} + retries: +{{ toYaml .retries | indent 6 }} + {{- end }} + {{- if .match }} + match: + {{- range .match }} + - uri: + prefix: {{ .uri.prefix }} + {{- end }} + {{- end }} + {{- if .rewriteUri }} + rewrite: + uri: {{ .rewriteUri }} + {{- end }} + {{- if .timeout }} + timeout: {{ .timeout }} + {{- end }} +{{- if $.Values.flaggerCanary.headers }} + headers: +{{ toYaml $.Values.flaggerCanary.headers | indent 6 }} +{{- end }} +{{- if $.Values.flaggerCanary.corsPolicy }} + corsPolicy: +{{ toYaml $.Values.flaggerCanary.corsPolicy | indent 6 }} +{{- end }} + analysis: + interval: {{ .analysis.interval }} + threshold: {{ .analysis.threshold }} + maxWeight: {{ .analysis.maxWeight }} + stepWeight: {{ .analysis.stepWeight }} + metrics: + - name: request-success-rate + threshold: {{ .thresholds.successRate }} + interval: 1m + - name: request-duration + threshold: {{ .thresholds.latency }} + interval: 1m + webhooks: + {{- if .loadtest.enabled }} + - name: load-test + url: {{ .loadtest.url }} + timeout: 5s + metadata: + cmd: "hey -z 1m -q 10 -c 2 http://{{ include ".Chart.Name .fullname" $ }}-canary.{{ $.Release.Namespace }}:{{ $.Values.flaggerCanary.targetPort }}/" + {{- end }} +{{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/generic.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/generic.yaml new file mode 100644 index 0000000000..db95e84267 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/generic.yaml @@ -0,0 +1,4 @@ +{{- range .Values.rawYaml }} +--- +{{ toYaml . 
}} + {{- end -}} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/hpa.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/hpa.yaml new file mode 100644 index 0000000000..78c89c4c2c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/hpa.yaml @@ -0,0 +1,177 @@ +{{- if $.Values.autoscaling.enabled }} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + {{- if $.Values.autoscaling.name }} + name: {{ $.Values.autoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-hpa + {{- end }} + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + {{- if .Values.autoscaling.labels }} +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} + minReplicas: {{ $.Values.autoscaling.MinReplicas }} + maxReplicas: {{ $.Values.autoscaling.MaxReplicas }} + metrics: + {{- if $.Values.autoscaling.containerResource.enabled }} + {{- with $.Values.autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + type: Utilization + 
averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and $.Values.autoscaling.extraMetrics (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} + {{- toYaml $.Values.autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and $.Values.autoscaling.behavior (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml $.Values.autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} +{{- if and $.Values.secondaryWorkload.enabled $.Values.secondaryWorkload.autoscaling.enabled }} +--- +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} 
+apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-{{ $.Values.secondaryWorkload.postfix | default "sec" }}-hpa + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + {{- if .Values.autoscaling.labels }} + labels: +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }}-{{ $.Values.secondaryWorkload.postfix | default "sec" }} + {{- with $.Values.secondaryWorkload }} + minReplicas: {{ .autoscaling.MinReplicas }} + maxReplicas: {{ .autoscaling.MaxReplicas }} + metrics: + {{- if .autoscaling.containerResource.enabled }} + {{- with .autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + type: Utilization + averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} + {{- if .autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" $.Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ .autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if .autoscaling.TargetCPUUtilizationPercentage }} + - type: 
Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" $.Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ .autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and .autoscaling.extraMetrics (semverCompare ">=1.16-0" $.Capabilities.KubeVersion.GitVersion) }} + {{- toYaml .autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and .autoscaling.behavior (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml .autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/ingress.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/ingress.yaml new file mode 100644 index 0000000000..d9a2543e98 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/ingress.yaml @@ -0,0 +1,188 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ingress.enabled -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- if and .Values.ingressInternal.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingressInternal.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingressInternal.annotations "kubernetes.io/ingress.class" .Values.ingressInternal.className}} + {{- end }} +{{- end }} +{{- end }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingress.name }} + name: {{ $.Values.ingress.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} + {{- end }} +{{- if .Values.ingress.annotations }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + {{- if or .Values.ingress.host .Values.ingress.path }} + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ .Values.ingress.path }} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingress.pathType | default "ImplementationSpecific" }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingress.hosts) (not ($.Values.ingress.host )) }} + {{- range .Values.ingress.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + + {{- end }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end }} +{{- if $.Values.ingressInternal.enabled }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{ else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{ else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingressInternal.name }} + name: {{ $.Values.ingressInternal.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress-internal + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.ingressInternal.annotations }} + annotations: +{{ toYaml .Values.ingressInternal.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingressInternal.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingressInternal.className }} + {{- end }} + rules: + {{- if or .Values.ingressInternal.host .Values.ingressInternal.path }} + - host: {{ .Values.ingressInternal.host }} + http: + paths: + - path: {{ .Values.ingressInternal.path }} + {{- if and .Values.ingressInternal.pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingressInternal.pathType | default "Prefix" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingressInternal.hosts) (not ($.Values.ingressInternal.host )) }} + {{- range .Values.ingressInternal.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingressInternal.tls }} + tls: +{{ toYaml .Values.ingressInternal.tls | indent 4 }} + {{- end -}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-authorizationpolicy.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-authorizationpolicy.yaml new file mode 100644 index 0000000000..8340555ff3 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-authorizationpolicy.yaml @@ -0,0 +1,41 @@ +{{- with .Values.istio }} +{{- if and .enable .authorizationPolicy.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: AuthorizationPolicy +metadata: + {{- if .authorizationPolicy.name }} + name: {{ .authorizationPolicy.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .authorizationPolicy.labels }} +{{ toYaml .authorizationPolicy.labels | indent 4 }} + {{- end }} +{{- if .authorizationPolicy.annotations }} + annotations: +{{ toYaml .authorizationPolicy.annotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name 
.fullname" $ }} + action: {{ .authorizationPolicy.action }} +{{- if $.Values.istio.authorizationPolicy.provider }} + provider: +{{ toYaml $.Values.istio.authorizationPolicy.provider | indent 4 }} +{{- end }} +{{- if $.Values.istio.authorizationPolicy.rules }} + rules: +{{ toYaml $.Values.istio.authorizationPolicy.rules | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-destinationrule.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-destinationrule.yaml new file mode 100644 index 0000000000..6fe9ae01c6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-destinationrule.yaml @@ -0,0 +1,41 @@ +{{- with .Values.istio }} +{{- if and .enable .destinationRule.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: DestinationRule +metadata: + {{- if .destinationRule.name }} + name: {{ .destinationRule.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-destinationrule + {{- end }} + {{- if .destinationRule.namespace }} + namespace: {{ .destinationRule.namespace }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .destinationRule.labels }} +{{ toYaml .destinationRule.labels | indent 4 }} + {{- end }} +{{- if .destinationRule.annotations }} + annotations: +{{ toYaml .destinationRule.annotations | indent 4 }} +{{- end }} +spec: + host: "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- if $.Values.istio.destinationRule.subsets }} + subsets: +{{ toYaml $.Values.istio.destinationRule.subsets | indent 4 }} +{{- end }} +{{- if $.Values.istio.destinationRule.trafficPolicy 
}} + trafficPolicy: +{{ toYaml $.Values.istio.destinationRule.trafficPolicy | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-gateway.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-gateway.yaml new file mode 100644 index 0000000000..b145d6c569 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-gateway.yaml @@ -0,0 +1,71 @@ +{{- if and .Values.istio.enable .Values.istio.gateway.enabled -}} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + {{- if .Values.istio.gateway.name }} + name: {{ .Values.istio.gateway.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.istio.gateway.labels }} +{{ toYaml $.Values.istio.gateway.labels | indent 4 }} + {{- end }} +{{- if $.Values.istio.gateway.annotations }} + annotations: +{{ toYaml $.Values.istio.gateway.annotations | indent 4 }} +{{- end }} +spec: +{{ with $.Values.istio.gateway }} + selector: + {{- if .selector }} + {{- toYaml .selector | nindent 4 }} + {{- else }} + istio: ingressgateway + {{- end }} + servers: + {{- if .servers }} + {{- toYaml .servers | nindent 2 }} + {{- else }} + - port: + number: 80 + name: http + protocol: HTTP + hosts: +{{- if .host }} + - {{ .host | quote -}} +{{- else if .hosts }} +{{- range .hosts }} + - {{ . 
| quote }} +{{- end }} +{{- end }} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: +{{- if .host }} + - {{ .host | quote }} +{{- else if .hosts }} +{{- range .hosts }} + - {{ . | quote }} +{{- end }} +{{- end }} + tls: + mode: SIMPLE + credentialName: {{ .tls.secretName }} +{{ end }} +{{ end }} +{{ end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-peerauthentication.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-peerauthentication.yaml new file mode 100644 index 0000000000..dedd971c6d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-peerauthentication.yaml @@ -0,0 +1,40 @@ +{{- with .Values.istio }} +{{- if and .enable .peerAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + {{- if .peerAuthentication.name }} + name: {{ .peerAuthentication.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .peerAuthentication.labels }} +{{ toYaml .peerAuthentication.labels | indent 4 }} + {{- end }} +{{- if .peerAuthentication.annotations }} + annotations: +{{ toYaml .peerAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .peerAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} + mtls: + mode: {{ .peerAuthentication.mtls.mode }} +{{- if $.Values.istio.peerAuthentication.portLevelMtls }} + portLevelMtls: +{{ toYaml 
$.Values.istio.peerAuthentication.portLevelMtls | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-requestauthentication.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-requestauthentication.yaml new file mode 100644 index 0000000000..49bb89552d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-requestauthentication.yaml @@ -0,0 +1,38 @@ +{{- with .Values.istio }} +{{- if and .enable .requestAuthentication.enabled }} +apiVersion: security.istio.io/v1beta1 +kind: RequestAuthentication +metadata: + {{- if .requestAuthentication.name }} + name: {{.requestAuthentication.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .requestAuthentication.labels }} +{{ toYaml .requestAuthentication.labels | indent 4 }} + {{- end }} +{{- if .requestAuthentication.annotations }} + annotations: +{{ toYaml .requestAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .requestAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} +{{- if $.Values.istio.requestAuthentication.jwtRules }} + jwtRules: +{{ toYaml $.Values.istio.requestAuthentication.jwtRules | indent 2 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-virtualservice.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-virtualservice.yaml new file mode 100644 index 
0000000000..24193470ae --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/istio-virtualservice.yaml @@ -0,0 +1,75 @@ +{{- with .Values.istio }} +{{- if and .enable .virtualService.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + {{- if .virtualService.name }} + name: {{ .virtualService.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-virtualservice + {{- end }} + {{- if .virtualService.namespace }} + namespace: {{ .virtualService.namespace }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .virtualService.labels }} +{{ toYaml .virtualService.labels | indent 4 }} + {{- end }} +{{- if .virtualService.annotations }} + annotations: +{{ toYaml .virtualService.annotations | indent 4 }} +{{- end }} +spec: +{{- if or .gateway.enabled .virtualService.gateways }} + gateways: + {{- if .gateway.enabled }} + {{- if .gateway.name }} + - {{ .gateway.name }} + {{- else }} + - {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- end }} + {{- range .virtualService.gateways }} + - {{ . | quote }} + {{- end }} +{{- end }} +{{- if or .gateway.enabled .virtualService.hosts }} + hosts: + {{- if .gateway.enabled }} + {{- if .gateway.host }} + - {{ .gateway.host | quote }} + {{- else if .gateway.hosts }} +{{- range .gateway.hosts }} + - {{ . | quote }} +{{- end }} + {{- end }} + {{- end }} + {{- range .virtualService.hosts }} + - {{ . 
| quote }} + {{- end }} +{{- else }} + hosts: + {{- if $.Values.service.name }} + - {{ $.Values.service.name }}.{{ $.Release.Namespace }}.svc.cluster.local + {{- else }} + - "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" + {{- end }} +{{- end }} +{{- if $.Values.istio.virtualService.http }} + http: +{{ toYaml $.Values.istio.virtualService.http | indent 4 }} +{{- end }} +{{- if $.Values.istio.virtualService.tcp }} + tcp: +{{ toYaml $.Values.istio.virtualService.tcp | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/keda-autoscaling.yaml new file mode 100644 index 0000000000..780afa73b1 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/keda-autoscaling.yaml @@ -0,0 +1,77 @@ +{{- if $.Values.kedaAutoscaling.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + {{- if $.Values.kedaAutoscaling.name }} + name: {{ $.Values.kedaAutoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-keda + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.labels }} +{{ toYaml .Values.kedaAutoscaling.labels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.annotations }} + annotations: +{{ toYaml .Values.kedaAutoscaling.annotations | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} +{{- if $.Values.kedaAutoscaling.envSourceContainerName }} + 
envSourceContainerName: {{ $.Values.kedaAutoscaling.envSourceContainerName }} +{{- end }} +{{- if $.Values.kedaAutoscaling.pollingInterval }} + pollingInterval: {{ $.Values.kedaAutoscaling.pollingInterval }} +{{- end }} +{{- if $.Values.kedaAutoscaling.cooldownPeriod }} + cooldownPeriod: {{ $.Values.kedaAutoscaling.cooldownPeriod }} +{{- end }} +{{- if $.Values.kedaAutoscaling.idleReplicaCount }} + idleReplicaCount: {{ $.Values.kedaAutoscaling.idleReplicaCount }} +{{- end }} + minReplicaCount: {{ $.Values.kedaAutoscaling.minReplicaCount }} + maxReplicaCount: {{ $.Values.kedaAutoscaling.maxReplicaCount }} +{{- if $.Values.kedaAutoscaling.fallback }} + fallback: +{{ toYaml $.Values.kedaAutoscaling.fallback | indent 4 }} +{{- end }} +{{- if $.Values.kedaAutoscaling.advanced }} + advanced: +{{ toYaml $.Values.kedaAutoscaling.advanced | indent 4 }} +{{- end }} + triggers: +{{ toYaml .Values.kedaAutoscaling.triggers | indent 2}} +{{- if $.Values.kedaAutoscaling.authenticationRef }} + authenticationRef: +{{ toYaml $.Values.kedaAutoscaling.authenticationRef | indent 6 }} +{{- end }} +--- +{{- if $.Values.kedaAutoscaling.triggerAuthentication.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{ $.Values.kedaAutoscaling.triggerAuthentication.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} +spec: +{{ toYaml $.Values.kedaAutoscaling.triggerAuthentication.spec | indent 2 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/metrics-service-monitor.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/metrics-service-monitor.yaml new file mode 100644 index 0000000000..fa5321d303 --- 
/dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/metrics-service-monitor.yaml @@ -0,0 +1,35 @@ +{{- if $.Values.appMetrics -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: + jobLabel: {{ template ".Chart.Name .name" $ }} + endpoints: + - port: envoy-admin + interval: 30s + path: /stats/prometheus + relabelings: + - action: replace + sourceLabels: + - __meta_kubernetes_pod_label_pod_template_hash + targetLabel: devtron_app_hash + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} + podTargetLabels: + - appId + - envId + - devtron_app_hash +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/networkpolicy.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/networkpolicy.yaml new file mode 100644 index 0000000000..ee8bdaf8be --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- if .Values.networkPolicy.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + {{- if .Values.networkPolicy.name }} + name: {{ .Values.networkPolicy.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-networkpolicy + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if 
$.Values.networkPolicy.labels }} +{{ toYaml $.Values.networkPolicy.labels | indent 4 }} + {{- end }} +{{- if $.Values.networkPolicy.annotations }} + annotations: +{{ toYaml $.Values.networkPolicy.annotations | indent 4 }} +{{- end }} +spec: + podSelector: +{{- if $.Values.networkPolicy.podSelector.matchExpressions }} + matchExpressions: +{{ toYaml $.Values.networkPolicy.podSelector.matchExpressions | indent 6 }} +{{- end }} +{{- if $.Values.networkPolicy.podSelector.matchLabels }} + matchLabels: +{{ toYaml $.Values.networkPolicy.podSelector.matchLabels | indent 6 }} +{{- else }} + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} +{{- end }} +{{- if $.Values.networkPolicy.policyTypes }} + policyTypes: +{{ toYaml $.Values.networkPolicy.policyTypes | indent 4 }} +{{- end }} +{{- if $.Values.networkPolicy.ingress }} + ingress: +{{ toYaml $.Values.networkPolicy.ingress | indent 4 }} +{{- end }} +{{- if $.Values.networkPolicy.egress }} + egress: +{{ toYaml $.Values.networkPolicy.egress | indent 4}} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/persistent-volume-claim.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/persistent-volume-claim.yaml new file mode 100644 index 0000000000..bf4e6dfb71 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/persistent-volume-claim.yaml @@ -0,0 +1,24 @@ +{{- if .Values.persistentVolumeClaim.name }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{.Values.persistentVolumeClaim.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- with .Values.persistentVolumeClaim }} +spec: + accessModes: +{{- range .accessMode }} + - {{ . 
}} +{{- end }} + resources: + requests: + storage: {{ .storage | default "5Gi" }} + storageClassName: {{ .storageClassName | default "default" }} + volumeMode: {{ .volumeMode | default "Filesystem" }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/poddisruptionbudget.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000000..9094fd82e6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/poddisruptionbudget.yaml @@ -0,0 +1,38 @@ +{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + {{- if .Values.podDisruptionBudget.name }} + name: {{ .Values.podDisruptionBudget.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 6 }} + {{- else }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/pre-sync-job.yaml 
b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/pre-sync-job.yaml new file mode 100644 index 0000000000..54c9f636ee --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/pre-sync-job.yaml @@ -0,0 +1,29 @@ +{{- if $.Values.dbMigrationConfig.enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-migrator + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + annotations: + argocd.argoproj.io/hook: PreSync +# argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + template: + spec: + containers: + - name: migrator + image: 686244538589.dkr.ecr.us-east-2.amazonaws.com/migrator:0.0.1-rc14 + env: + {{- range $.Values.dbMigrationConfig.envValues }} + - name: {{ .key}} + value: {{ .value | quote }} + {{- end}} + restartPolicy: Never + backoffLimit: 0 +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/prometheusrules.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/prometheusrules.yaml new file mode 100644 index 0000000000..c285de1388 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/prometheusrules.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + {{- if .Values.prometheusRule.name }} + name: {{ .Values.prometheusRule.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . 
}} + {{- end }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: + kind: Prometheus + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ .Values.prometheus.release }} + {{- if .Values.prometheusRule.additionalLabels }} +{{ toYaml .Values.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.prometheusRule.rules }} + groups: + {{- if $.Values.prometheusRule.name }} + - name: {{ $.Values.prometheusRule.name }} + {{- else }} + - name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + rules: {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/secret.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/secret.yaml new file mode 100644 index 0000000000..5ac3ae1410 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/secret.yaml @@ -0,0 +1,84 @@ +{{- if $.Values.secret.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secret + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml $.Values.secret.data | indent 2 }} +{{- end }} + + +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + chart: {{ template ".Chart.Name 
.chart" $ }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml .data | trim | indent 2 }} +{{- end}} + {{if eq .external true }} + {{if (or (eq .externalType "AWSSecretsManager") (eq .externalType "AWSSystemManager") (eq .externalType "HashiCorpVault"))}} +--- +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: {{ .name}} +{{- if $.Values.appLabels }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .roleARN }} + roleArn: {{ .roleARN }} + {{- end}} + {{- if eq .externalType "AWSSecretsManager"}} + backendType: secretsManager + {{- end}} + {{- if eq .externalType "AWSSystemManager"}} + backendType: systemManager + {{- end}} + {{- if eq .externalType "HashiCorpVault"}} + backendType: vault + {{- end}} + data: + {{- range .secretData }} + - key: {{.key}} + name: {{.name}} + {{- if .property }} + property: {{.property}} + {{- end}} + isBinary: {{.isBinary}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/service.yaml new file mode 100644 index 0000000000..99293e4e6e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/service.yaml @@ -0,0 +1,114 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".servicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end}} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} +{{- with .Values.service.extraSpec }} + {{- toYaml . | nindent 2 }} + {{- end }} +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . }} + {{- end }} + {{- end }} +{{- end }} + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else if $.Values.appMetrics }} + targetPort: envoy-{{ .name }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + protocol: {{ .protocol | default "TCP" }} + {{- if (and (eq $.Values.service.type "NodePort") .nodePort ) }} + nodePort: {{ .nodePort }} + {{- end }} + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- if .Values.service.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.service.sessionAffinityConfig | indent 4 }} +{{- end }} +{{- if eq .Values.deploymentType "BLUE-GREEN" }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".previewservicename" . }} + labels: + app: {{ template ".Chart.Name .name" . 
}} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else if $.Values.appMetrics }} + targetPort: envoy-{{ .name }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/serviceaccount.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/serviceaccount.yaml new file mode 100644 index 0000000000..f337548e94 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if $.Values.serviceAccount }} +{{- if $.Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "serviceAccountName" . 
}} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/servicemonitor.yaml new file mode 100644 index 0000000000..9b920388d2 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/servicemonitor.yaml @@ -0,0 +1,117 @@ +{{ $serviceMonitorEnabled := include "serviceMonitorEnabled" . }} +{{- if eq "true" $serviceMonitorEnabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + {{- if .Values.servicemonitor.name }} + name: {{ .Values.servicemonitor.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-sm + {{- end }} + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} + {{- if .Values.servicemonitor.additionalLabels }} +{{ toYaml .Values.servicemonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout | quote }} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range .Values.containers }} + {{- range .ports }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ 
.servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.servicemonitor.namespaceSelector }} + namespaceSelector: + matchNames: + {{- toYaml .Values.servicemonitor.namespaceSelector | nindent 6 }} + {{- end }} + selector: + matchLabels: + {{- if .Values.servicemonitor.matchLabels }} + {{- toYaml .Values.servicemonitor.matchLabels | nindent 6 }} + {{- else }} + app: {{ template ".Chart.Name .name" $ }} +{{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/sidecar-configmap.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/sidecar-configmap.yaml new file mode 100644 index 0000000000..cf32679409 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/sidecar-configmap.yaml @@ -0,0 +1,169 @@ +{{- if .Values.appMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: sidecar-config-{{ template ".Chart.Name .name" $ }} +data: + envoy-config.json: | + { + "stats_config": { + "use_all_default_tags": false, + "stats_tags": [ + { + "tag_name": "cluster_name", + "regex": "^cluster\\.((.+?(\\..+?\\.svc\\.cluster\\.local)?)\\.)" + }, + { + "tag_name": "tcp_prefix", + "regex": "^tcp\\.((.*?)\\.)\\w+?$" + }, + { + "tag_name": "response_code", + "regex": "_rq(_(\\d{3}))$" + }, + { + "tag_name": "response_code_class", + "regex": ".*_rq(_(\\dxx))$" + }, + { + "tag_name": "http_conn_manager_listener_prefix", + 
"regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "http_conn_manager_prefix", + "regex": "^http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "listener_address", + "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "mongo_prefix", + "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$" + } + ], + "stats_matcher": { + "inclusion_list": { + "patterns": [ + { + "regex": ".*_rq_\\dxx$" + }, + { + "regex": ".*_rq_time$" + }, + { + "regex": "cluster.*" + }, + ] + } + } + }, + "admin": { + "access_log_path": "/dev/null", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 9901 + } + } + }, + "static_resources": { + "clusters": [ + {{- range $index, $element := .Values.ContainerPort }} + { + "name": "{{ $.Values.app }}-{{ $index }}", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", +{{- if $element.idleTimeout }} + "common_http_protocol_options": { + "idle_timeout": {{ $element.idleTimeout | quote }} + }, +{{- end }} +{{- if or $element.useHTTP2 $element.useGRPC }} + "http2_protocol_options": {}, +{{- end }} +{{- if and (not $element.useGRPC) (not $element.supportStreaming) }} + "max_requests_per_connection": "1", +{{- end }} + "load_assignment": { + "cluster_name": "9", + "endpoints": { + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "127.0.0.1", + "port_value": {{ $element.port }} + } + } + } + } + ] + } + } + }, + {{- end }} + ], + "listeners":[ + {{- range $index, $element := .Values.ContainerPort }} + { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + "port_value": {{ $element.envoyPort | default (add 8790 $index) }} + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + 
"config": { + "codec_type": "AUTO", + "stat_prefix": "stats", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { +{{- if $element.supportStreaming }} + "timeout": "0s", +{{- end }} +{{- if and ($element.envoyTimeout) (not $element.supportStreaming) }} + "timeout": "{{ $element.envoyTimeout }}", +{{- end }} + "cluster": "{{ $.Values.app }}-{{ $index }}" + } + } + ] + } + ] + }, + "http_filters": { + "name": "envoy.filters.http.router" + } + } + } + ] + } + ] + }, + {{- end }} + ] + } + } +--- +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/vertical-pod-autoscaler.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/vertical-pod-autoscaler.yaml new file mode 100644 index 0000000000..ffbf24d823 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/vertical-pod-autoscaler.yaml @@ -0,0 +1,34 @@ +{{ $VerticalPodAutoScalingEnabled := include "VerticalPodAutoScalingEnabled" . }} +{{- if eq "true" $VerticalPodAutoScalingEnabled -}} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + {{- if .Values.verticalPodScaling.name }} + name: {{ .Values.verticalPodScaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-vpa + {{- end }} + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} +spec: +{{- if .Values.verticalPodScaling.resourcePolicy }} + resourcePolicy: +{{ toYaml .Values.verticalPodScaling.resourcePolicy}} +{{- end }} +{{- if .Values.verticalPodScaling.updatePolicy }} + updatePolicy: +{{ toYaml .Values.verticalPodScaling.updatePolicy}} +{{- end }} + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include ".Chart.Name .fullname" $ }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/winter-soldier.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/winter-soldier.yaml new file mode 100644 index 0000000000..314f0c6db0 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/templates/winter-soldier.yaml @@ -0,0 +1,45 @@ +{{- if .Values.winterSoldier.enabled }} +apiVersion: {{ $.Values.winterSoldier.apiVersion }} +kind: Hibernator +metadata: + {{- if .Values.winterSoldier.name }} + name: {{ .Values.winterSoldier.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-hibernator + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.winterSoldier.labels }} +{{ toYaml .Values.winterSoldier.labels | indent 4 }} + {{- end }} +{{- if .Values.winterSoldier.annotations }} + annotations: +{{ toYaml .Values.winterSoldier.annotations | indent 4 }} +{{- end }} +spec: + timeRangesWithZone: +{{ toYaml $.Values.winterSoldier.timeRangesWithZone | indent 4}} + selectors: + - inclusions: + - objectSelector: + name: {{ include ".Chart.Name .fullname" $ }} + type: {{ .Values.winterSoldier.type | quote }} + 
fieldSelector: +{{toYaml $.Values.winterSoldier.fieldSelector | indent 14 }} + namespaceSelector: + name: {{ $.Release.Namespace }} + exclusions: [] + action: {{ $.Values.winterSoldier.action }} + {{- if eq .Values.winterSoldier.action "scale" }} + {{- if .Values.winterSoldier.targetReplicas }} + targetReplicas: {{ $.Values.winterSoldier.targetReplicas }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/test-values.json b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/test-values.json new file mode 100644 index 0000000000..a26806cb91 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/test-values.json @@ -0,0 +1,292 @@ +{ + "ConfigMaps": { + "enabled": true, + "maps": [ + { + "data": { + "a": "b" + }, + "esoSecretData": {}, + "external": false, + "externalType": "", + "filePermission": "", + "mountPath": "", + "name": "abc", + "roleARN": "", + "subPath": false, + "type": "environment" + } + ] + }, + "ConfigSecrets": { + "enabled": true, + "secrets": [ + { + "data": { + "access-key": "QUtJQVdQVENFV0w1Wk4zVFBSRzY=", + "secret-access-key": "dkJ1bXRJL1YyZFUrQmVrSnM4QkVsblJnQzlRbEZueVZqL0dEdUc4Ng==" + }, + "esoSecretData": {}, + "external": false, + "externalType": "", + "filePermission": "", + "mountPath": "", + "name": "auth-aws", + "roleARN": "", + "subPath": false, + "type": "environment" + }, + { + "esoSecretData": { + "esoData": [ + { + "key": "ajay-secret-aws", + "property": "mob", + "secretKey": "mymob" + }, + { + "key": "ajay-secret-aws", + "property": "pin", + "secretKey": "mypin" + } + ], + "secretStore": { + "aws": { + "auth": { + "secretRef": { + "accessKeyIDSecretRef": { + "key": "access-key", + "name": "auth-aws-1" + }, + "secretAccessKeySecretRef": { + "key": "secret-access-key", + "name": "auth-aws-1" + } + } + }, + "region": "ap-south-1", + "service": "SecretsManager" + } + } + }, + "external": true, + "externalType": "ESO_AWSSecretsManager", + 
"filePermission": "", + "mountPath": "", + "name": "external-secret-aws", + "roleARN": "", + "subPath": false, + "type": "environment" + } + ] + }, + "ContainerPort": [ + { + "envoyPort": 8799, + "idleTimeout": "1800s", + "name": "app", + "port": 80, + "servicePort": 80, + "supportStreaming": false, + "useHTTP2": false + } + ], + "EnvVariables": [], + "GracePeriod": 30, + "LivenessProbe": { + "Path": "", + "command": [], + "failureThreshold": 3, + "httpHeaders": [], + "initialDelaySeconds": 20, + "periodSeconds": 10, + "port": 8080, + "scheme": "", + "successThreshold": 1, + "tcp": false, + "timeoutSeconds": 5 + }, + "MaxSurge": 1, + "MaxUnavailable": 0, + "MinReadySeconds": 60, + "ReadinessProbe": { + "Path": "", + "command": [], + "failureThreshold": 3, + "httpHeaders": [], + "initialDelaySeconds": 20, + "periodSeconds": 10, + "port": 8080, + "scheme": "", + "successThreshold": 1, + "tcp": false, + "timeoutSeconds": 5 + }, + "Spec": { + "Affinity": { + "Values": "nodes", + "key": "" + } + }, + "app": "1", + "appLabels": {}, + "appMetrics": false, + "args": { + "enabled": false, + "value": [ + "/bin/sh", + "-c", + "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600" + ] + }, + "autoscaling": { + "MaxReplicas": 2, + "MinReplicas": 1, + "TargetCPUUtilizationPercentage": 90, + "TargetMemoryUtilizationPercentage": 80, + "annotations": {}, + "behavior": {}, + "enabled": false, + "extraMetrics": [], + "labels": {} + }, + "command": { + "enabled": false, + "value": [], + "workingDir": {} + }, + "containerSecurityContext": {}, + "containers": [], + "dbMigrationConfig": { + "enabled": false + }, + "deployment": { + "strategy": { + "blueGreen": { + "autoPromotionEnabled": false, + "autoPromotionSeconds": 30, + "previewReplicaCount": 1, + "scaleDownDelaySeconds": 30 + } + } + }, + "deploymentType": "BLUE-GREEN", + "env": "1", + "envoyproxy": { + "configMapName": "", + "image": "quay.io/devtron/envoy:v1.14.1", + "resources": { + "limits": { + "cpu": "50m", + 
"memory": "50Mi" + }, + "requests": { + "cpu": "50m", + "memory": "50Mi" + } + } + }, + "hostAliases": [], + "image": { + "pullPolicy": "IfNotPresent" + }, + "imagePullSecrets": [], + "ingress": { + "annotations": {}, + "className": "", + "enabled": false, + "hosts": [ + { + "host": "chart-example1.local", + "pathType": "ImplementationSpecific", + "paths": [ + "/example1" + ] + } + ], + "labels": {}, + "tls": [] + }, + "ingressInternal": { + "annotations": {}, + "className": "", + "enabled": false, + "hosts": [ + { + "host": "chart-example1.internal", + "pathType": "ImplementationSpecific", + "paths": [ + "/example1" + ] + }, + { + "host": "chart-example2.internal", + "pathType": "ImplementationSpecific", + "paths": [ + "/example2", + "/example2/healthz" + ] + } + ], + "tls": [] + }, + "initContainers": [], + "kedaAutoscaling": { + "advanced": {}, + "authenticationRef": {}, + "enabled": false, + "envSourceContainerName": "", + "maxReplicaCount": 2, + "minReplicaCount": 1, + "triggerAuthentication": { + "enabled": false, + "name": "", + "spec": {} + }, + "triggers": [] + }, + "pauseForSecondsBeforeSwitchActive": 30, + "pipelineName": "cd-1-fpji", + "podAnnotations": {}, + "podLabels": {}, + "podSecurityContext": {}, + "prometheus": { + "release": "monitoring" + }, + "rawYaml": [], + "releaseVersion": "6", + "replicaCount": 1, + "resources": { + "limits": { + "cpu": "0.05", + "memory": "50Mi" + }, + "requests": { + "cpu": "0.01", + "memory": "10Mi" + } + }, + "secret": { + "data": {}, + "enabled": false + }, + "server": { + "deployment": { + "image": "aju121/test12", + "image_tag": "63118bf2-1-1" + } + }, + "service": { + "annotations": {}, + "loadBalancerSourceRanges": [], + "type": "ClusterIP" + }, + "servicemonitor": { + "additionalLabels": {} + }, + "tolerations": [], + "topologySpreadConstraints": [], + "volumeMounts": [], + "volumes": [], + "waitForSecondsBeforeScalingDown": 30 +} \ No newline at end of file diff --git 
a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/test_values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/test_values.yaml new file mode 100644 index 0000000000..1f9f739b61 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/test_values.yaml @@ -0,0 +1,787 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +kedaAutoscaling: + enabled: true + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 # Optional. Must be less than minReplicaCount + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. 
Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: + - type: kubernetes-workload + name: trig_one + metadata: + podSelector: 'pod=workload-test' + - type: metrics-api + name: trig_two + metadata: + url: "https://mockbin.org/bin/336a8d99-9e09-4f1f-979d-851a6d1b1423" + valueLocation: "tasks" + + triggerAuthentication: + enabled: true + name: "trigger-test" + spec: {} + authenticationRef: {} + +deploymentLabels: + name: kunalverma + Company: Devtron + Job: DevRel + +deploymentAnnotations: + name: kunalverma + Company: Devtron + Job: DevRel + +containerSpec: + lifecycle: + enabled: true + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +imagePullSecrets: + - test1 + - test2 +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyTimeout: 15 + targetPort: 8080 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace + + - name: app1 + port: 8090 + targetPort: 1234 + servicePort: 8080 + useGRPC: true + servicemonitor: + enabled: true + - name: app2 + port: 8091 + servicePort: 8081 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +autoscaling: + enabled: true + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# 
value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +secret: + enabled: false + +service: + type: ClusterIP + # name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # loadBalancerIP: "98.73.123.45" + # loadBalancerSourceRanges: ["127.0.0.1","127.0.0.1","127.0.0.1"] + extraSpec: {} +# externalTrafficPolicy: Local + # test1: test2 + # test3: test4 + +istio: + enable: true + gateway: + enabled: true + labels: {} + annotations: {} + # host: example.com + hosts: + - "example4.com" + tls: + enabled: true + secretName: example-tls-secret + virtualService: + enabled: true + labels: {} + annotations: {} + gateways: [] + hosts: + - example1.local + tcp: + http: + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # rewriteUri: / + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + # route: + # - destination: + # host: service1 + # port: 80 + # - route: + # - destination: + # host: service2 + +flaggerCanary: + enabled: false + labels: {} + annotations: {} + createIstioGateway: + enabled: false + labels: {} + annotations: {} + host: example.com + tls: + enabled: false + secretName: example-tls-secret + # Istio gateways (optional) + addOtherGateways: [] + # Istio virtual service host names (optional) + addOtherHosts: [] + # Istio gateway refs (optional) + gatewayRefs: + # - name: istio-gateway + # namespace: istio-system + #service port + port: 80 + #containerPort + targetPort: 8080 + # discover all port open in container + portDiscovery: false 
+ # application protocol (optional) + appProtocol: + # Istio retry policy (optional) + retries: + # attempts: 3 + # perTryTimeout: 1s + # retryOn: "gateway-error,connect-failure,refused-stream" + # HTTP match conditions (optional) + match: + - uri: + prefix: / + # HTTP rewrite (optional) + rewriteUri: + # timeout (optional) + timeout: + # Add headers (optional) + headers: + # request: + # add: + # x-some-header: "value" + # cross-origin resource sharing policy (optional) + corsPolicy: + # allowOrigin: + # - example.com + # allowMethods: + # - GET + # allowCredentials: false + # allowHeaders: + # - x-some-header + # maxAge: 24h + analysis: + # schedule interval (default 60s) + interval: 5s + # max number of failed metric checks before rollback + threshold: 10 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + thresholds: + # minimum req success rate (non 5xx responses) + # percentage (0-100) + successRate: 90 + # maximum req duration P99 + # milliseconds + latency: 500 + loadtest: + enabled: true + # load tester address + url: http://flagger-loadtester.test/ + +server: + deployment: + image_tag: 1-95af053 + image: "" +deploymentType: "RECREATE" + +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: + foo: bar + +EnvVariables: + - name: FLASK_ENV + value: qa + +LivenessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + - name: Custom-Header2 + value: xyz + +ReadinessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + +StartupProbe: + Path: "/" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 
10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + +prometheusRule: + enabled: true + additionalLabels: {} + namespace: "" + rules: + # These are just examples rules, please adapt them to your needs + - alert: TooMany500s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 5XXs + summary: More than 5% of the all requests did return 5XX, this require your attention + - alert: TooMany400s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 4XXs + summary: More than 5% of the all requests did return 4XX, this require your attention + + +ingress: + enabled: true + className: nginx + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" +# Old Ingress Format +# host: "ingress-example.com" +# path: "/app" + +# New Ingress Format + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 + + tls: [] +### Legacy Ingress Format ## +# host: abc.com +# path: "/" +# pathType: "ImplementationSpecific" + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: true + className: 
nginx-internal + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + additionalBackends: + - path: /internal + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-internal + port: + number: 80 + - path: /internal-01 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-internal + port: + number: 80 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enabled: true + annotations: {} + labels: {} + type: Deployment + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: Fri + action: scale + targetReplicas: [1,1,1] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + + +dbMigrationConfig: + enabled: false + +command: + workingDir: /app + enabled: false + value: ["ls"] + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# filePermission: 0400 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: true + secrets: + - name: config-secret-1 + type: environment + external: true + externalType: AWSSecretsManager + esoSecretData: + secretStore: + aws: + service: SecretsManager + region: us-east-1 + auth: + secretRef: + accessKeyIDSecretRef: + name: awssm-secret + key: access-key + secretAccessKeySecretRef: + name: awssm-secret + key: secret-access-key + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + - name: config-secret-2 + type: environment + external: false + externalType: ESO_HashiCorpVault + esoSecretData: + secretStore: + vault: + server: "http://my.vault.server:8200" + path: "secret" + version: "v2" + auth: + tokenSecretRef: + name: vault-token + key: token + esoData: 
+ - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + - command: ["sh", "-c", "chown -R 1000:1000 logs"] + reuseContainerImage: true + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + privileged: true + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + - name: init-migrate + image: busybox:latest + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + capabilities: + drop: + - ALL + +containers: + # Additional init containers to run before the Scheduler pods. + # for example, be used to run a sidecar that chown Logs storage . 
+ - name: volume-mount-hack + image: busybox + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + + +rawYaml: + - apiVersion: v1 + kind: Service + metadata: + annotations: + labels: + app: sample-metrics-app + name: sample-metrics-app + namespace: default + spec: + ports: + - name: web + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: sample-metrics-app + sessionAffinity: None + type: ClusterIP + - apiVersion: v1 + kind: Service + metadata: + annotations: + labels: + app: sample-metrics-app + name: sample-metrics-app + namespace: default + spec: + ports: + - name: web + port: 80 + protocol: TCP + targetPort: 8080 + selector: + app: sample-metrics-app + sessionAffinity: None + type: ClusterIP + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +podDisruptionBudget: + minAvailable: 1 + maxUnavailable: 1 + + # Node tolerations for server scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +# + +tolerations: + - key: "key" + operator: "Equal|Exists" + value: "value" + effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +appMetrics: true +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "test1" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: + kubernetes.io/service-account.name: build-robot +containerSecurityContext: + allowPrivilegeEscalation: false +privileged: true +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" + + +affinity: + enabled: false + values: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: security + operator: In + values: + - S1 + topologyKey: topology.kubernetes.io/zone + +secondaryWorkload: + enabled: false + postfix: "od" + replicaCount: 1 + affinity: {} + tolerations: [] + autoscaling: + enabled: true + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/values.yaml b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/values.yaml new file mode 100644 index 0000000000..d0f96f7dd8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/deployment-chart_4-22-0/values.yaml @@ -0,0 +1,723 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + envoyTimeout: 15s + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + protocol: TCP +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace + + - name: app1 + port: 8090 + servicePort: 8080 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +# Optional override for the main container name. If provided, this name will be used +# for the primary container instead of the default chart-derived name. +containerName: "" + +Spec: + Affinity: + Key: "" +# Key: kops.k8s.io/instancegroup + Values: "" + +affinity: {} + +image: + pullPolicy: IfNotPresent + +restartPolicy: Always + +ambassadorMapping: + enabled: false + # labels: + # key1: value1 + # prefix: / + # ambassadorId: 1234 + # hostname: devtron.example.com + # rewrite: /foo/ + # retryPolicy: + # retry_on: "5xx" + # num_retries: 10 + # cors: + # origins: http://foo.example,http://bar.example + # methods: POST, GET, OPTIONS + # headers: Content-Type + # credentials: true + # exposed_headers: X-Custom-Header + # max_age: "86400" + # weight: 10 + # method: GET + # extraSpec: + # method_regex: true + # headers: + # x-quote-mode: backend + # x-random-header: devtron + # tls: + # context: httpd-context + # create: true + # secretName: httpd-secret + # hosts: + # - anything.example.info + # - devtron.example.com + # extraSpec: + # min_tls_version: v1.2 + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# 
periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. 
Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +# kedaHttpScaledObject: +# enabled: false +# minReplicaCount: 1 +# maxReplicaCount: 2 +# targetPendingRequests: +# scaledownPeriod: +# servicePort: 80 # port of the service (required) + +secret: + enabled: false + +service: + type: ClusterIP + enabled: true +# name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + sessionAffinity: + enabled: false + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: "" + tls: + enabled: false + secretName: "" + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +flaggerCanary: + enabled: false + labels: {} + annotations: {} + createIstioGateway: + enabled: false + labels: {} + annotations: {} + host: "" + tls: + enabled: false + secretName: "" + # Istio gateways (optional) + addOtherGateways: [] + # Istio virtual service host names (optional) + addOtherHosts: [] + # Istio gateway refs (optional) + gatewayRefs: + # - name: istio-gateway + # namespace: 
istio-system + #service port + serviceport: 8080 + #containerPort + targetPort: 8080 + # discover all port open in container + portDiscovery: true + # application protocol (optional) + appProtocol: + # Istio retry policy (optional) + retries: + attempts: 3 + perTryTimeout: 1s + retryOn: "gateway-error,connect-failure,refused-stream" + # HTTP match conditions (optional) + match: + - uri: + prefix: / + # HTTP rewrite (optional) + rewriteUri: / + # timeout (optional) + timeout: + # Add headers (optional) + headers: + # request: + # add: + # x-some-header: "value" + # cross-origin resource sharing policy (optional) + corsPolicy: + # allowOrigin: + # - example.com + # allowMethods: + # - GET + # allowCredentials: false + # allowHeaders: + # - x-some-header + # maxAge: 24h + analysis: + # schedule interval (default 60s) + interval: 15s + # max number of failed metric checks before rollback + threshold: 5 + # max traffic percentage routed to canary + # percentage (0-100) + maxWeight: 50 + # canary increment step + # percentage (0-100) + stepWeight: 5 + thresholds: + # minimum req success rate (non 5xx responses) + # percentage (0-100) + successRate: 90 + # maximum req duration P99 + # milliseconds + latency: 500 + loadtest: + enabled: true + # load tester address + url: http://flagger-loadtester.istio-system/ + + +server: + deployment: + image_tag: 1-95af053 + image: "" + +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name + +EnvVariables: [] + # - name: FLASK_ENV + # value: qa + +EnvVariablesFromSecretKeys: [] + # - name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromConfigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY + +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + grpc: {} + + 
+ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + grpc: {} + + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + grpc: {} + + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" +# rules: +# # These are just examples rules, please adapt them to your needs +# - alert: TooMany500s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 5XXs +# summary: More than 5% of the all requests did return 5XX, this require your attention +# - alert: TooMany400s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 4XXs +# summary: More than 5% of the all requests did return 4XX, this require your attention +# + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + 
enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + labels: {} + annotations: {} + type: Deployment + timeRangesWithZone: {} + # timeZone: "Asia/Kolkata" + # timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: [] + # - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +dbMigrationConfig: + enabled: false + +command: + enabled: false + value: [] + +args: + enabled: false + value: [] + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: [] + # data: + # key1: key1value-1 + # key2: key2value-1 + # key3: key3value-1 +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + # # Uncomment below line ONLY IF you want to reuse the container image. + # # This will assign your application's docker image to init container. + # reuseContainerImage: true + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: zone + # whenUnsatisfiable: DoNotSchedule + # autoLabelSelector: true + # minDomain: 1 + # nodeTaintsPolicy: Honor + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + lifecycle: {} + configMapName: "" + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +containerSpec: + lifecycle: + enabled: false + preStop: {} +# exec: +# command: ["sleep","10"] + postStart: {} +# httpGet: +# host: example.com +# path: /example +# port: 90 + +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + +podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 3000 + # fsGroup: 2000 + +containerSecurityContext: {} + # allowPrivilegeEscalation: false +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: {} + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +imagePullSecrets: [] + # - test1 + # - test2 +persistentVolumeClaim: {} + +verticalPodScaling: + enabled: false + +customPodLabels: {} + +secondaryWorkload: + enabled: false + # Optional override for secondary workload's main container name + containerName: "" + Spec: + Affinity: + Key: "" + Values: "" + replicaCount: 1 + affinity: {} + tolerations: [] + autoscaling: + enabled: false + containerResource: + enabled: false diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/.helmignore b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/.helmignore new file mode 100644 index 0000000000..50af031725 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/.image_descriptor_template.json b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/.image_descriptor_template.json new file mode 100644 index 0000000000..bd2472da07 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/.image_descriptor_template.json @@ -0,0 +1 @@ +{"server":{"deployment":{"image_tag":"{{.Tag}}","image":"{{.Name}}"}},"pipelineName": "{{.PipelineName}}","releaseVersion":"{{.ReleaseVersion}}","deploymentType": "{{.DeploymentType}}", "app": "{{.App}}", "env": "{{.Env}}", "appMetrics": {{.AppMetrics}}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/Chart.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/Chart.yaml new file mode 100644 index 0000000000..a6e8dec01c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: reference-chart_5-2-0 +version: 5.2.0 \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/README.md b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/README.md new file mode 100644 index 0000000000..8ce0aa6917 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/README.md @@ -0,0 +1,863 @@ + +# Rollout Deployment Chart - v5.2 + +## 1. 
Yaml File - + +### Container Ports + +This defines ports on which application services will be exposed to other services + +```yaml +ContainerPort: + - envoyPort: 8799 + idleTimeout: + name: app + port: 8080 + servicePort: 80 + nodePort: 32056 + supportStreaming: true + useHTTP2: true +``` + +| Key | Description | +| :--- | :--- | +| `envoyPort` | envoy port for the container. | +| `idleTimeout` | the duration of time that a connection is idle before the connection is terminated. | +| `name` | name of the port. | +| `port` | port for the container. | +| `servicePort` | port of the corresponding kubernetes service. | +| `nodePort` | nodeport of the corresponding kubernetes service. | +| `supportStreaming` | Used for high performance protocols like grpc where timeout needs to be disabled. | +| `useHTTP2` | Envoy container can accept HTTP2 requests. | + +### EnvVariables +```yaml +EnvVariables: [] +``` +To set environment variables for the containers that run in the Pod. + +### EnvVariablesFromSecretKeys +```yaml +EnvVariablesFromSecretKeys: + - name: ENV_NAME + secretName: SECRET_NAME + keyName: SECRET_KEY + +``` + It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable. + + ### EnvVariablesFromConfigMapKeys +```yaml +EnvVariablesFromConfigMapKeys: + - name: ENV_NAME + configMapName: CONFIG_MAP_NAME + keyName: CONFIG_MAP_KEY + +``` + It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable. + +### Liveness Probe + +If this check fails, kubernetes restarts the pod. This should return error code in case of non-recoverable error. 
+ +```yaml +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the liveness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for liveliness. | +| `periodSeconds` | It defines the time to check a given container for liveness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfil the liveness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as live. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + + +### MaxUnavailable + +```yaml + MaxUnavailable: 0 +``` +The maximum number of pods that can be unavailable during the update process. The value of "MaxUnavailable: " can be an absolute number or percentage of the replicas count. The default value of "MaxUnavailable: " is 25%. + +### MaxSurge + +```yaml +MaxSurge: 1 +``` +The maximum number of pods that can be created over the desired number of pods. For "MaxSurge: " also, the value can be an absolute number or percentage of the replicas count. +The default value of "MaxSurge: " is 25%. 
+ +### Min Ready Seconds + +```yaml +MinReadySeconds: 60 +``` +This specifies the minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available. This defaults to 0 (the Pod will be considered available as soon as it is ready). + +### Readiness Probe + +If this check fails, kubernetes stops sending traffic to the application. This should return error code in case of errors which can be recovered from if traffic is stopped. + +```yaml +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the readiness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for readiness. | +| `periodSeconds` | It defines the time to check a given container for readiness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfill the readiness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as ready. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + +### Pod Disruption Budget + +You can create `PodDisruptionBudget` for each application. 
A PDB limits the number of pods of a replicated application that are down simultaneously from voluntary disruptions. For example, an application would like to ensure the number of replicas running is never brought below the certain number. + +```yaml +podDisruptionBudget: + minAvailable: 1 +``` + +or + +```yaml +podDisruptionBudget: + maxUnavailable: 50% +``` + +You can specify either `maxUnavailable` or `minAvailable` in a PodDisruptionBudget and it can be expressed as integers or as a percentage + +| Key | Description | +| :--- | :--- | +| `minAvailable` | Evictions are allowed as long as they leave behind 1 or more healthy pods of the total number of desired replicas. | +| `maxUnavailable` | Evictions are allowed as long as at most 1 unhealthy replica among the total number of desired replicas. | + +### Ambassador Mappings + +You can create ambassador mappings to access your applications from outside the cluster. At its core a Mapping resource maps a resource to a service. + +```yaml +ambassadorMapping: + ambassadorId: "prod-emissary" + cors: {} + enabled: true + hostname: devtron.example.com + labels: {} + prefix: / + retryPolicy: {} + rewrite: "" + tls: + context: "devtron-tls-context" + create: false + hosts: [] + secretName: "" +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable ambassador mapping else set false.| +| `ambassadorId` | used to specify id for specific ambassador mappings controller. | +| `cors` | used to specify cors policy to access host for this mapping. | +| `weight` | used to specify weight for canary ambassador mappings. | +| `hostname` | used to specify hostname for ambassador mapping. | +| `prefix` | used to specify path for ambassador mapping. | +| `labels` | used to provide custom labels for ambassador mapping. | +| `retryPolicy` | used to specify retry policy for ambassador mapping. | +| `corsPolicy` | Provide cors headers on flagger resource. 
| +| `rewrite` | used to specify whether to redirect the path of this mapping and where. | +| `tls` | used to create or define ambassador TLSContext resource. | +| `extraSpec` | used to provide extra spec values which not present in deployment template for ambassador resource. | + +### Autoscaling + +This is connected to HPA and controls scaling up and down in response to request load. + +```yaml +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + extraMetrics: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable autoscaling else set false.| +| `MinReplicas` | Minimum number of replicas allowed for scaling. | +| `MaxReplicas` | Maximum number of replicas allowed for scaling. | +| `TargetCPUUtilizationPercentage` | The target CPU utilization that is expected for a container. | +| `TargetMemoryUtilizationPercentage` | The target memory utilization that is expected for a container. | +| `extraMetrics` | Used to give external metrics for autoscaling. | + +### Fullname Override + +```yaml +fullnameOverride: app-name +``` +`fullnameOverride` replaces the release fullname created by default by devtron, which is used to construct Kubernetes object names. By default, devtron uses {app-name}-{environment-name} as release fullname. + +### Image + +```yaml +image: + pullPolicy: IfNotPresent +``` + +Image is used to access images in kubernetes, pullpolicy is used to define the instances calling the image, here the image is pulled when the image is not present,it can also be set as "Always". + +### imagePullSecrets + +`imagePullSecrets` contains the docker credentials that are used for accessing a registry. + +```yaml +imagePullSecrets: + - regcred +``` +regcred is the secret that contains the docker credentials that are used for accessing a registry. 
Devtron will not create this secret automatically, you'll have to create this secret using dt-secrets helm chart in the App store or create one using kubectl. You can follow this documentation Pull an Image from a Private Registry [https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) . + +### Ingress + +This allows public access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + className: nginx + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` +Legacy deployment-template ingress format + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + path: "" + host: "" + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | +### additionalBackends + +This defines additional backend path in the ingress . 
+ +```yaml + hosts: + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 +``` +### Ingress Internal + +This allows private access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingressInternal: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### Init Containers +```yaml +initContainers: + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate + + - name: nginx + image: nginx:1.14.2 + securityContext: + privileged: true + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] +``` +Specialized containers that run before app containers in a Pod. Init containers can contain utilities or setup scripts not present in an app image. One can use base image inside initContainer by setting the reuseContainerImage flag to `true`. + +### Istio + +Istio is a service mesh which simplifies observability, traffic management, security and much more with it's virtual services and gateways. 
+ +```yaml +istio: + enable: true + gateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + virtualService: + annotations: {} + enabled: false + gateways: [] + hosts: [] + http: + - corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + headers: + request: + add: + x-some-header: value + match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + retries: + attempts: 2 + perTryTimeout: 3s + rewriteUri: / + route: + - destination: + host: service1 + port: 80 + timeout: 12s + - route: + - destination: + host: service2 + labels: {} +``` + +### Pause For Seconds Before Switch Active +```yaml +pauseForSecondsBeforeSwitchActive: 30 +``` +To wait for given period of time before switch active the container. + + +### Winter-Soldier +Winter Soldier can be used to +- cleans up (delete) Kubernetes resources +- reduce workload pods to 0 + +**_NOTE:_** After deploying this we can create the Hibernator object and provide the custom configuration by which workloads going to delete, sleep and many more. for more information check [the main repo](https://github.com/devtron-labs/winter-soldier) + +Given below is template values you can give in winter-soldier: +```yaml +winterSoilder: + enable: false + apiVersion: pincher.devtron.ai/v1alpha1 + action: sleep + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + targetReplicas: [] + fieldSelector: [] +``` +Here, +| Key | values | Description | +| :--- | :--- | :--- | +| `enable` | `fasle`,`true` | decide the enabling factor | +| `apiVersion` | `pincher.devtron.ai/v1beta1`, `pincher.devtron.ai/v1alpha1` | specific api version | +| `action` | `sleep`,`delete`, `scale` | This specify the action need to perform. | +| `timeRangesWithZone`:`timeZone` | eg:- `"Asia/Kolkata"`,`"US/Pacific"` | It use to specify the timeZone used. (It uses standard format. 
please refer [this](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)) | +| `timeRangesWithZone`:`timeRanges` | array of [ `timeFrom`, `timeTo`, `weekdayFrom`, `weekdayTo`] | It is used to define the time period/range on which the user needs to perform the specified action. You can have multiple timeRanges.
These settings will take `action` on Sat and Sun from 00:00 to 23:59:59, | +| `targetReplicas` | `[n]` : n - number of replicas to scale. | This is a mandatory field when the `action` is `scale`
Default value is `[]`. | +| `fieldSelector` | `- AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) ` | This value will take a list of methods to select the resources on which we perform the specified `action` . | + + +here is an example, +```yaml +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enable: true + annotations: {} + labels: {} + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: Fri + action: scale + targetReplicas: [1,1,1] + fieldSelector: + - AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '10h'), Now()) +``` +Above settings will take action on `Sat` and `Sun` from 00:00 to 23:59:59, and on `Mon`-`Fri` from 00:00 to 08:00 and 20:00 to 23:59:59. If `action:sleep` then runs hibernate at timeFrom and unhibernate at `timeTo`. If `action: delete` then it will delete workloads at `timeFrom` and `timeTo`. Here the `action:scale` thus it scales the number of resource replicas to `targetReplicas: [1,1,1]`. Here each element of `targetReplicas` array is mapped with the corresponding elements of array `timeRangesWithZone/timeRanges`. Thus make sure the length of both arrays is equal, otherwise the changes cannot be observed. + +The above example will select the application objects which have been created 10 hours ago across all namespaces excluding application's namespace. Winter soldier exposes following functions to handle time, cpu and memory. + +- ParseTime - This function can be used to parse time. For eg to parse creationTimestamp use ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z') +- AddTime - This can be used to add time. 
For eg AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '-10h') ll add 10h to the time. Use d for day, h for hour, m for minutes and s for seconds. Use negative number to get earlier time. +- Now - This can be used to get current time. +- CpuToNumber - This can be used to compare CPU. For eg any({{spec.containers.#.resources.requests}}, { MemoryToNumber(.memory) < MemoryToNumber('60Mi')}) will check if any resource.requests is less than 60Mi. + + + +### Resources + +These define minimum and maximum RAM and CPU available to the application. + +```yaml +resources: + limits: + cpu: "1" + memory: "200Mi" + requests: + cpu: "0.10" + memory: "100Mi" +``` + +Resources are required to set CPU and memory usage. + +#### Limits + +Limits make sure a container never goes above a certain value. The container is only allowed to go up to the limit, and then it is restricted. + +#### Requests + +Requests are what the container is guaranteed to get. + +### Service + +This defines annotations and the type of service, optionally can define name also. + +```yaml + service: + type: ClusterIP + annotations: {} +``` + +### Volumes + +```yaml +volumes: + - name: log-volume + emptyDir: {} + - name: logpv + persistentVolumeClaim: + claimName: logpvc +``` + +It is required when some values need to be read from or written to an external disk. + +### Volume Mounts + +```yaml +volumeMounts: + - mountPath: /var/log/nginx/ + name: log-volume + - mountPath: /mnt/logs + name: logpvc + subPath: employee +``` + +It is used to provide mounts to the volume. + +### Affinity and anti-affinity + +```yaml +Spec: + Affinity: + Key: + Values: +``` + +Spec is used to define the desire state of the given container. + +Node Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node. + +Inter-pod affinity allow you to constrain which nodes your pod is eligible to be scheduled based on labels on pods. 
+ +#### Key + +Key part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +#### Values + +Value part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +### Tolerations + +```yaml +tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +``` + +Taints are the opposite, they allow a node to repel a set of pods. + +A given pod can access the given node and avoid the given taint only if the given pod satisfies a given taint. + +Taints and tolerations are a mechanism which work together that allows you to ensure that pods are not placed on inappropriate nodes. Taints are added to nodes, while tolerations are defined in the pod specification. When you taint a node, it will repel all the pods except those that have a toleration for that taint. A node can have one or many taints associated with it. + +### Arguments + +```yaml +args: + enabled: false + value: [] +``` + +This is used to give arguments to command. + +### Command + +```yaml +command: + enabled: false + value: [] +``` + +It contains the commands for the server. + +| Key | Description | +| :--- | :--- | +| `enabled` | To enable or disable the command. | +| `value` | It contains the commands. | + + +### Containers +Containers section can be used to run side-car containers along with your main container within same pod. Containers running within same pod can share volumes and IP Address and can address each other @localhost. We can use base image inside container by setting the reuseContainerImage flag to `true`. 
+ +```yaml + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate +``` + +### Prometheus + +```yaml + prometheus: + release: monitoring +``` + +It is a kubernetes monitoring tool and the name of the file to be monitored as monitoring in the given case.It describes the state of the prometheus. + +### rawYaml + +```yaml +rawYaml: + - apiVersion: v1 + kind: Service + metadata: + name: my-service + spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + type: ClusterIP +``` +Accepts an array of Kubernetes objects. You can specify any kubernetes yaml here and it will be applied when your app gets deployed. + +### Grace Period + +```yaml +GracePeriod: 30 +``` +Kubernetes waits for the specified time called the termination grace period before terminating the pods. By default, this is 30 seconds. If your pod usually takes longer than 30 seconds to shut down gracefully, make sure you increase the `GracePeriod`. + +A Graceful termination in practice means that your application needs to handle the SIGTERM message and begin shutting down when it receives it. This means saving all data that needs to be saved, closing down network connections, finishing any work that is left, and other similar tasks. + +There are many reasons why Kubernetes might terminate a perfectly healthy container. If you update your deployment with a rolling update, Kubernetes slowly terminates old pods while spinning up new ones. If you drain a node, Kubernetes terminates all pods on that node. If a node runs out of resources, Kubernetes terminates pods to free those resources. 
It’s important that your application handle termination gracefully so that there is minimal impact on the end user and the time-to-recovery is as fast as possible. + + +### Server + +```yaml +server: + deployment: + image_tag: 1-95a53 + image: "" +``` + +It is used for providing server configurations. + +#### Deployment + +It gives the details for deployment. + +| Key | Description | +| :--- | :--- | +| `image_tag` | It is the image tag | +| `image` | It is the URL of the image | + +### Service Monitor + +```yaml +servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace +``` + +It gives the set of targets to be monitored. + +### Db Migration Config + +```yaml +dbMigrationConfig: + enabled: false +``` + +It is used to configure database migration. + + +### KEDA Autoscaling +[KEDA](https://keda.sh) is a Kubernetes-based Event Driven Autoscaler. With KEDA, you can drive the scaling of any container in Kubernetes based on the number of events needing to be processed. KEDA can be installed into any Kubernetes cluster and can work alongside standard Kubernetes components like the Horizontal Pod Autoscaler(HPA). 
+ +Example for autosccaling with KEDA using Prometheus metrics is given below: +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: + restoreToOriginalReplicaCount: true + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: prometheus + metadata: + serverAddress: http://:9090 + metricName: http_request_total + query: envoy_cluster_upstream_rq{appId="300", cluster_name="300-0", container="envoy",} + threshold: "50" + triggerAuthentication: + enabled: false + name: + spec: {} + authenticationRef: {} +``` +Example for autosccaling with KEDA based on kafka is given below : +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: {} + triggers: + - type: kafka + metadata: + bootstrapServers: b-2.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-3.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-1.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092 + topic: Orders-Service-ESP.info + lagThreshold: "100" + consumerGroup: oders-remove-delivered-packages + allowIdleConsumers: "true" + triggerAuthentication: + enabled: true + name: keda-trigger-auth-kafka-credential + spec: + secretTargetRef: + - parameter: sasl + name: keda-kafka-secrets + key: sasl + - parameter: username + name: keda-kafka-secrets + key: username + authenticationRef: + name: keda-trigger-auth-kafka-credential +``` + +### Security Context +A security context defines privilege and access control settings for a Pod or Container. 
+ +To add a security context for main container: +```yaml +containerSecurityContext: + allowPrivilegeEscalation: false +``` + +To add a security context on pod level: +```yaml +podSecurityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 +``` + +### Topology Spread Constraints +You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. + +```yaml +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: {} +``` + +### Deployment Metrics + +It gives the realtime metrics of the deployed applications + +| Key | Description | +| :--- | :--- | +| `Deployment Frequency` | It shows how often this app is deployed to production | +| `Change Failure Rate` | It shows how often the respective pipeline fails. | +| `Mean Lead Time` | It shows the average time taken to deliver a change to production. | +| `Mean Time to Recovery` | It shows the average time taken to fix a failed pipeline. | + +## 2. Show application metrics + +If you want to see application metrics like different HTTP status codes metrics, application throughput, latency, response time. Enable the Application metrics from below the deployment template Save button. After enabling it, you should be able to see all metrics on App detail page. By default it remains disabled. +![](../../../.gitbook/assets/deployment_application_metrics%20%282%29.png) + +Once all the Deployment template configurations are done, click on `Save` to save your deployment configuration. Now you are ready to create [Workflow](workflow/) to do CI/CD. 
+ +### Helm Chart Json Schema + +Helm Chart [json schema](../../../scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json) is used to validate the deployment template values. + +### Other Validations in Json Schema + +The values of CPU and Memory in limits must be greater than or equal to in requests respectively. Similarly, In case of envoyproxy, the values of limits are greater than or equal to requests as mentioned below. +``` +resources.limits.cpu >= resources.requests.cpu +resources.limits.memory >= resources.requests.memory +envoyproxy.resources.limits.cpu >= envoyproxy.resources.requests.cpu +envoyproxy.resources.limits.memory >= envoyproxy.resources.requests.memory +``` diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/app-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/app-values.yaml new file mode 100644 index 0000000000..d579b2259d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/app-values.yaml @@ -0,0 +1,448 @@ +# Mandatory configs +podDisruptionBudget: {} +rolloutLabels: {} +rolloutAnnotations: {} + +containerSpec: + lifecycle: + enabled: false + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +replicaCount: 1 +MinReadySeconds: 60 +GracePeriod: 30 +image: + pullPolicy: IfNotPresent +restartPolicy: Always +service: + # enabled: true + type: ClusterIP + #name: "service-1234567890" + loadBalancerSourceRanges: [] + # loadBalancerSourceRanges: + # - 1.2.3.4/32 + # - 1.2.5.6/23 + annotations: {} + # test1: test2 + # test3: test4 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + protocol: TCP + resizePolicy: [] +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: 
myapp +# targetLabel: target_namespace +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +# Optional configs +LivenessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +ReadinessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/force-ssl-redirect: 'false' +# nginx.ingress.kubernetes.io/ssl-redirect: 'false' +# kubernetes.io/ingress.class: nginx +# nginx.ingress.kubernetes.io/rewrite-target: /$2 +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # 
kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +command: + workingDir: {} + enabled: false + value: [] + +args: + enabled: false + value: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + +#For adding custom labels to pods + +podLabels: {} +# customKey: customValue +podAnnotations: {} +# customKey: customValue + +rawYaml: [] + +topologySpreadConstraints: [] + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +containers: [] + ## Additional containers to run along with application pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + +dbMigrationConfig: + enabled: false + +tolerations: [] + +podSecurityContext: {} + +containerSecurityContext: {} + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + +affinity: + enabled: false + values: {} + +ambassadorMapping: + enabled: false + labels: {} + prefix: / + ambassadorId: "" + hostname: devtron.example.com + rewrite: "" + retryPolicy: {} + cors: {} + tls: + context: "" + create: false + secretName: "" + hosts: [] + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. 
Default: .spec.template.spec.containers[0] + minReplicaCount: 1 + maxReplicaCount: 2 + advanced: {} + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +prometheus: + release: monitoring + +server: + deployment: + image_tag: 1-95af053 + image: "" + +servicemonitor: + additionalLabels: {} + +envoyproxy: + image: quay.io/devtron/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + selector: {} + gatewayExtraSpec: {} + host: "example.com" + tls: + enabled: false + secretName: secret-name + virtualService: + enabled: false + labels: {} + annotations: {} + virtualServiceExtraSpec: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + annotation: {} + labels: {} + type: Rollout + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, 
'2006-01-02T15:04:05Z'), '5m'), Now()) + + + + +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: {} + +imagePullSecrets: [] + # - test1 + # - test2 +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +# - ip: "10.1.2.3" +# hostnames: +# - "foo.remote" +# - "bar.remote" +peristentVolumeClaim: {} + +analysisTemplate: + enabled: false + templates: [] + +verticalPodScaling: + enabled: false diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/env-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/env-values.yaml new file mode 100644 index 0000000000..5cd07c0269 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/env-values.yaml @@ -0,0 +1,66 @@ +replicaCount: 1 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 + +Spec: + Affinity: + key: "" + Values: nodes + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: 
pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# +secret: + enabled: false + data: {} +# my_own_secret: S3ViZXJuZXRlcyBXb3Jrcw== + +EnvVariables: [] +# - name: FLASK_ENV +# value: qa + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: "0.05" + memory: 50Mi + requests: + cpu: "0.01" + memory: 10Mi + + diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/pipeline-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/pipeline-values.yaml new file mode 100644 index 0000000000..da8360dd02 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/pipeline-values.yaml @@ -0,0 +1,26 @@ +deployment: + strategy: + blueGreen: + autoPromotionSeconds: 30 + scaleDownDelaySeconds: 30 + previewReplicaCount: 1 + autoPromotionEnabled: true + rolling: + maxSurge: "25%" + maxUnavailable: 1 + canary: + maxSurge: "25%" + maxUnavailable: 1 + steps: + - setWeight: 25 + - pause: + duration: 15 # 1 min + - setWeight: 50 + - pause: + duration: 15 # 1 min + - setWeight: 75 + - pause: + duration: 15 # 1 min + recreate: + maxSurge: "0%" + maxUnavailable: "100%" \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/release-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/release-values.yaml new file mode 100644 index 0000000000..48eb3f482c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/release-values.yaml @@ -0,0 +1,14 @@ +server: + deployment: + image_tag: 
IMAGE_TAG + image: IMAGE_REPO + enabled: false +dbMigrationConfig: + enabled: false + +pauseForSecondsBeforeSwitchActive: 0 +waitForSecondsBeforeScalingDown: 0 +autoPromotionSeconds: 30 + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/schema.json b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/schema.json new file mode 100644 index 0000000000..2a43e937cd --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/schema.json @@ -0,0 +1,1363 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "containerExtraSpecs": { + "type": "object", + "title": "containerExtraSpecs", + "description": "Define container extra specs here" + }, + "ContainerPort": { + "type": "array", + "description": "defines ports on which application services will be exposed to other services", + "title": "Container Port", + "items": { + "type": "object", + "properties": { + "envoyPort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "envoy port for the container", + "title": "Envoy Port" + }, + "idleTimeout": { + "type": "string", + "description": "duration of time for which a connection is idle before the connection is terminated", + "title": "Idle Timeout" + }, + "name": { + "type": "string", + "description": "name of the port", + "title": "Name" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Port", + "title": "port for the container" + }, + "servicePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port of the corresponding kubernetes service", + "title": "Service Port" + }, + "nodePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "nodeport of the corresponding 
kubernetes service", + "title": "Node Port" + }, + "supportStreaming": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "field to enable/disable timeout for high performance protocols like grpc", + "title": "Support Streaming" + }, + "useHTTP2": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": " field for setting if envoy container can accept(or not) HTTP2 requests", + "title": "Use HTTP2" + } + } + } + }, + "EnvVariables": { + "type": "array", + "items": {}, + "description": "contains environment variables needed by the containers", + "title": "Environment Variables" + }, + "EnvVariablesFromFieldPath": { + "type": "array", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs", + "title": "EnvVariablesFromFieldPath", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be" + }, + "fieldPath": { + "type": "string", + "title": "fieldPath", + "description": "Path of the field to select in the specified API version" + } + } + } + ] + }, + "EnvVariablesFromSecretKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromSecretKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "secretName": { + "type": "string", + "title": "secretName", + "description": "Name of Secret from which we are taking the value." 
+ }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "EnvVariablesFromConfigMapKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromConfigMapKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "configMapName": { + "type": "string", + "title": "configMapName", + "description": "Name of configMap from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "GracePeriod": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "time for which Kubernetes waits before terminating the pods", + "title": "Grace Period" + }, + "LivenessProbe": { + "type": "object", + "description": "used by the kubelet to know when to restart a container", + "title": "Liveness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the liveness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", + "title": "Failure Threshold" + }, + "httpHeaders": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + 
"title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for liveness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for liveness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "MaxSurge": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be created over the desired number of pods", + "title": "Maximum Surge" + }, + "MaxUnavailable": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be unavailable during the update process", + "title": "Maximum Unavailable" + }, + "MinReadySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", + "title": "Minimum Ready Seconds" + }, + "ReadinessProbe": { + "type": "object", + "description": "kubelet uses readiness probes to know when a container is ready to start accepting traffic", + "title": "Readiness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the readiness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", + "title": "Failure Threshold" + }, + "httpHeader": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + 
}, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for readiness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for readiness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "Spec": { + "type": "object", + "description": "used to define the desire state of the given container", + "title": "Spec", + "properties": { + "Affinity": { + "type": "object", + "description": "Node/Inter-pod Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node/pods", + "title": "Affinity", + "properties": { + "Key": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "Key part of the label for node/pod selection", + "title": "Key" + } + ] + }, + "Values": { + "type": "string", + "description": "Value part of the label for node/pod selection", + "title": "Values" + }, + "key": { + "type": "string" + } + } + } + } + }, + "ambassadorMapping": { + "type": "object", + "description": "used to create ambassador mapping resource", + "title": "Mapping", + "properties": { + "ambassadorId": { + "type": "string", + "description": "used to specify id for specific ambassador mappings controller", + "title": "Ambassador ID" + }, + "cors": { + "type": "object", + "description": "used to specify cors policy to access host for this mapping", + "title": "CORS" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify whether to create an ambassador mapping or not", + "title": "Enabled" + }, + "weight": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify weight for canary ambassador mappings" + }, + "hostname": { + "type": "string", + "description": "used to specify hostname for ambassador mapping", + "title": "Hostname" + }, + "labels": { + "type": 
"object", + "description": "used to provide custom labels for ambassador mapping", + "title": "Labels" + }, + "prefix": { + "type": "string", + "description": "used to specify path for ambassador mapping", + "title": "Prefix" + }, + "retryPolicy": { + "type": "object", + "description": "used to specify retry policy for ambassador mapping", + "title": "Retry Policy" + }, + "rewrite": { + "type": "string", + "description": "used to specify whether to redirect the path of this mapping and where", + "title": "Rewrite" + }, + "tls": { + "type": "object", + "description": "used to create or define ambassador TLSContext resource", + "title": "TLS Context" + }, + "extraSpec": { + "type": "object", + "description": "used to provide extra spec values which not present in deployment template for ambassador resource", + "title": "Extra Spec" + } + } + }, + "args": { + "type": "object", + "description": " used to give arguments to command", + "title": "Arguments", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling aruguments", + "title": "Enabled" + }, + "value": { + "type": "array", + "description": "values of the arguments", + "title": "Value", + "items": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + } + } + }, + "autoscaling": { + "type": "object", + "description": "connected to HPA and controls scaling up and down in response to request load", + "title": "Autoscaling", + "properties": { + "MaxReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Maximum number of replicas allowed for scaling", + "title": "Maximum Replicas" + }, + "MinReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Minimum number of replicas allowed for scaling", + "title": "Minimum Replicas" + }, + 
"TargetCPUUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target CPU utilization that is expected for a container", + "title": "TargetCPUUtilizationPercentage" + }, + "TargetMemoryUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target memory utilization that is expected for a container", + "title": "TargetMemoryUtilizationPercentage" + }, + "behavior": { + "type": "object", + "description": "describes behavior and scaling policies for that behavior", + "title": "Behavior" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling autoscaling", + "title": "Enabled" + }, + "labels": { + "type": "object", + "description": "labels for HPA", + "title": "labels" + }, + "annotations": { + "type": "object", + "description": "used to configure some options for HPA", + "title": "annotations" + }, + "extraMetrics": { + "type": "array", + "items": {}, + "description": "used to give external metrics for autoscaling", + "title": "Extra Metrics" + } + } + }, + "command": { + "type": "object", + "description": "contains the commands for the server", + "title": "Command", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling commands" + }, + "value": { + "type": "array", + "items": {}, + "description": "contains the commands", + "title": "Value" + }, + "workingDir": { + "type": "object", + "items": {}, + "description": "contains the working directory", + "title": "Working directory" + } + } + }, + "containerSecurityContext": { + "type": "object", + "description": " defines privilege and access control settings for a Container", + "title": "Container Security Context" + }, + "containers": { + "type": "array", + 
"items": {}, + "description": " used to run side-car containers along with the main container within same pod" + }, + "dbMigrationConfig": { + "type": "object", + "description": "used to configure database migration", + "title": "Db Migration Config", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling the config", + "title": "Enabled" + } + } + }, + "envoyproxy": { + "type": "object", + "description": "envoy is attached as a sidecar to the application container to collect metrics like 4XX, 5XX, throughput and latency", + "title": "Envoy Proxy", + "properties": { + "configMapName": { + "type": "string", + "description": "configMap containing configuration for Envoy", + "title": "ConfigMap" + }, + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled": { + "type": "boolean" + }, + "postStart": { + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created" + }, + "preStop": { + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + }, + "image": { + "type": "string", + "description": "image of envoy to be used" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + 
"description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + } + } + }, + "hostAliases": { + "type": "array", + "title": "hostAliases", + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file", + "items": [ + { + "type": "object", + "properties": { + "ip": { + "type": "string", + "title": "IP", + "description": "IP address of the host file entry" + }, + "hostnames": { + "type": "array", + "description": "Hostnames for the above IP address", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "image": { + "type": "object", + "description": "used to access images in kubernetes", + "title": "Image", + "properties": { + "pullPolicy": { + "type": "string", + "description": "used to define the instances calling the image", + "title": "Pull Policy", + "enum": [ + "IfNotPresent", + "Always" + ] + } + } + }, + "restartPolicy": { + "type": "string", + "description": "It restarts the docker container based on defined conditions.", + "title": "Restart Policy", + "enum": [ + "Always", + "OnFailure", + "Never" + ] + }, + "imagePullSecrets": { + "type": "array", + "items": {}, + "description": "contains the docker credentials that are used for accessing a registry", + "title": "Image PullSecrets" + }, + "winterSoldier": { + "type": "object", + "description": "allows to scale, sleep or delete the resource based on time.", + "title": "winterSoldier", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the winterSoldier controller", + "title": "Annotations" + }, + "labels": { + "type": "object", + "description": "labels for 
winterSoldier", + "title": "winterSoldier labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable winterSoldier", + "title": "Enabled" + }, + "apiVersion": { + "type": "string", + "description": "Api version for winterSoldier", + "title": "winterSoldier apiVersion", + "default": "pincher.devtron.ai/v1alpha1" + }, + "timeRangesWithZone": { + "type": "object", + "description": "describe time zone and time ranges to input in the winterSoldier", + "title": "Time Ranges With Zone", + "timeZone": { + "type": "string", + "description": "describe time zone, and follow standard format", + "title": "Time Zone" + }, + "timeRanges": { + "type": "array", + "items": {}, + "description": "used to take array of time ranges in which each element contains timeFrom, timeTo, weekdayFrom and weekdayTo.", + "title": "Time Ranges" + } + }, + "type": { + "type": "string", + "description": "describe the type of application Rollout/deployment.", + "title": "Type" + }, + "action": { + "type": "string", + "description": "describe the action to be performed by winterSoldier.", + "title": "Action" + }, + "targetReplicas": { + "type": "array", + "description": "describe the number of replicas to which the resource should scale up or down.", + "title": "Target Replicas" + }, + "fieldSelector": { + "type": "array", + "description": "it takes arrays of methods to select specific fields.", + "title": "Field Selector" + } + } + }, + "ingress": { + "type": "object", + "description": "allows public access to URLs", + "title": "Ingress", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the 
controller", + "title": "Ingress class name", + "default": "nginx" + }, + "labels": { + "type": "object", + "description": "labels for ingress", + "title": "Ingress labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "ingressInternal": { + "type": "object", + "description": "allows private access to the URLs", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx-internal" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + 
"type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "networkPolicy":{ + "type": "object", + "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "title": "Network Policy", + "properties": { + "enabled":{ + "type":"boolean", + "description": "used to enable or disable NetworkPolicy" + }, + "annotations":{ + "type": "object", + "description": "Annotations for NetworkPolicy" + }, + "labels":{ + "type":"object", + "description": "Labels for NetworkPolicy" + }, + "podSelector":{ + "type": "object", + "description": "Selects the pods to which this NetworkPolicy object applies", + "properties": { + "matchExpressions":{ + "type":"array", + "description": "list of label selector" + }, + "matchLabels":{ + "type":"object", + "description": "map of {key,value} pairs" + } + } + }, + "policyTypes":{ + "type":"array", + "description": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress,Egress." + }, + "ingress":{ + "type":"array", + "description": "List of ingress rules to be applied to the selected pods" + }, + "egress":{ + "type":"array", + "description": "List of egress rules to be applied to the selected pods" + } + } + }, + "istio":{ + "type": "object", + "description": "Istio Service mesh", + "title": "Istio" + }, + "initContainers": { + "type": "array", + "items": {}, + "description": "specialized containers that run before app containers in a Pod, can contain utilities or setup scripts not present in an app image", + "title": "Init Containers" + }, + "kedaAutoscaling": { + "type": "object", + "description": "Kubernetes-based event driven autoscaler. 
With KEDA, one can drive the scaling of any container in Kubernetes based on the no. of events needing to be processed", + "title": "KEDA Autoscaling", + "properties": { + "advanced": { + "type": "object" + }, + "authenticationRef": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "envSourceContainerName": { + "type": "string" + }, + "maxReplicaCount": { + "type": "integer" + }, + "minReplicaCount": { + "type": "integer" + }, + "triggerAuthentication": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "spec": { + "type": "object" + } + } + }, + "triggers": { + "type": "array", + "items": {} + } + } + }, + "containerSpec": { + "type": "object", + "description": "define the container specific configuration", + "title": "containerSpec", + "properties": { + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled": { + "type": "boolean" + }, + "postStart": { + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created. You could use this event to check that a required API is available before the container’s main work begins" + }, + "preStop": { + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + } + } + }, + "pauseForSecondsBeforeSwitchActive": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "tell how much to wait for given period of time before switch active the container", + "title": "Pause For Seconds Before SwitchActive" + }, + "podAnnotations": { + "type": "object", + "description": "used to attach metadata and configs in Kubernetes", + "title": "Pod Annotations" + }, + "podDisruptionBudget": { + "type": "object", + "description": 
"PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "properties": { + "minAvailable": { + "type": "string", + "title": "minAvailable", + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod" + }, + "maxUnavailable": { + "type": "string", + "title": "maxUnavailable", + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod." + } + } + }, + "podExtraSpecs": { + "type": "object", + "description": "ExtraSpec for the pods to be configured", + "title": "podExtraSpecs" + }, + "podLabels": { + "type": "object", + "description": "key/value pairs that are attached to pods, are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system", + "title": "Pod Labels" + }, + "podSecurityContext": { + "type": "object", + "description": "defines privilege and access control settings for a Pod or Container", + "title": "Pod Security Context" + }, + "prometheus": { + "type": "object", + "description": "a kubernetes monitoring tool", + "title": "Prometheus", + "properties": { + "release": { + "type": "string", + "description": "name of the file to be monitored, describes the state of prometheus" + } + } + }, + "rawYaml": { + "type": "array", + "items": {}, + "description": "Accepts an array of Kubernetes objects. 
One can specify any kubernetes yaml here & it will be applied when an app gets deployed.", + "title": "Raw YAML" + }, + "replicaCount": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "count of Replicas of pod", + "title": "Replica Count" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + }, + "secret": { + "type": "object", + "properties": { + "data": { + "type": "object" + }, + "enabled": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "description": "used for providing server configurations.", + "title": "Server", + "properties": { + "deployment": { + "type": "object", + "description": "gives the details for deployment", + "title": "Deployment", + "properties": { + "image": { + "type": "string", + "description": "URL of the image", + "title": "Image" + }, + "image_tag": { + "type": "string", + "description": "tag of the image", + "title": "Image Tag" + } + } + } + } + }, + "service": { + "type": "object", + "description": "defines annotations and the type of service", + "title": "Service", + "properties": { + "annotations": 
{ + "type": "object", + "title": "Annotations", + "description": "annotations of service" + }, + "type": { + "type": "string", + "description": "type of service", + "title": "Type", + "enum": [ + "ClusterIP", + "LoadBalancer", + "NodePort", + "ExternalName" + ] + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable service", + "title": "Enabled" + } + } + }, + "serviceAccount": { + "type": "object", + "description": "defines service account for pods", + "title": "Service Account", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service account" + }, + "name": { + "type": "string", + "description": "name of service account", + "title": "Name" + }, + "create": { + "type": "boolean" + } + } + }, + "servicemonitor": { + "type": "object", + "description": "gives the set of targets to be monitored", + "title": "Service Monitor", + "properties": { + "additionalLabels": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array", + "items": {}, + "description": "a mechanism which work together with Taints which ensures that pods are not placed on inappropriate nodes", + "title": "Tolerations" + }, + "topologySpreadConstraints": { + "type": "array", + "items": {}, + "description": "used to control how Pods are spread across a cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains", + "title": "Topology Spread Constraints" + }, + "volumeMounts": { + "type": "array", + "items": {}, + "description": "used to provide mounts to the volume", + "title": "Volume Mounts" + }, + "volumes": { + "type": "array", + "items": {}, + "description": "required when some values need to be read from or written to an external disk", + "title": "Volumes" + }, + "waitForSecondsBeforeScalingDown": { + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Wait for given period of time before scaling down the container", + "title": "Wait For Seconds Before Scaling Down" + } + } +} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/secrets-test-values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/secrets-test-values.yaml new file mode 100644 index 0000000000..4a20404db8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/secrets-test-values.yaml @@ -0,0 +1 @@ +{"ConfigSecrets":{"enabled":true,"secrets":[{"data":{"standard_key":"c3RhbmRhcmQtdmFsdWU="},"external":false,"externalType":"","mountPath":"/test","name":"normal-secret","type":"volume"},{"data":{"secret_key":"U0VDUkVUIERBVEE="},"external":true,"externalType":"AWSSecretsManager","mountPath":"","name":"external-secret-3","type":"environment"}]}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/NOTES.txt b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/NOTES.txt new file mode 100644 index 0000000000..2b14478168 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range $.Values.ingress.paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".Chart.Name .fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include ".Chart.Name .fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".Chart.Name .fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".Chart.Name .name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/_helpers.tpl new file mode 100644 index 0000000000..813a7186ff --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/_helpers.tpl @@ -0,0 +1,167 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define ".Chart.Name .name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create service name +*/}} +{{- define ".servicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 55 | trimSuffix "-" -}}-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create preview service name +*/}} +{{- define ".previewservicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 55 | trimSuffix "-" -}}-preview +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define ".Chart.Name .fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define ".Chart.Name .chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define ".Chart.Name .color" -}} +{{- $active0 := (index .Values.server.deployment 0).enabled -}} +{{/* +{{- $active1 := (index .Values.server.deployment 1).enabled -}} +*/}} +{{- $active1 := include "safeenabledcheck" . -}} +{{- $active := and $active0 $active1 -}} +{{- $active -}} +{{- end -}} + +{{- define "safeenabledcheck" -}} +{{- if (eq (len .Values.server.deployment) 2) -}} + {{- if (index .Values.server.deployment 1).enabled -}} + {{- $active := true -}} + {{- $active -}} + {{- else -}} + {{- $active := false -}} + {{- $active -}} + {{- end -}} +{{- else -}} + {{- $active := false -}} + {{- $active -}} +{{- end -}} +{{- end -}} + + +{{- define "isCMVolumeExists" -}} + {{- $isCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $isCMVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isCMVolumeExists -}} +{{- end -}} + +{{- define "isSecretVolumeExists" -}} + {{- $isSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $isSecretVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isSecretVolumeExists -}} +{{- end -}} + + + + +{{- define "serviceMonitorEnabled" -}} + {{- $SMenabled := false -}} 
+ {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if and .servicemonitor.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- end }} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* Create the name of the service account to use */}} +{{- define "serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include ".Chart.Name .fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "VerticalPodAutoScalingEnabled" -}} + {{- $SMenabled := false -}} + {{- if and .Values.verticalPodScaling.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* Check for app/release labels in customPodLabels and replace accordingly */}} +{{- define "customPodLabelsContainsApp" -}} + {{- $LabelsContain := false -}} + {{- if hasKey .Values.customPodLabels "app" }} + {{- $LabelsContain = true -}} + {{- end }} + {{- $LabelsContain -}} +{{- end -}} + +{{- define "customPodLabelsContainsRelease" -}} + {{- $LabelsContain := false -}} + {{- if hasKey .Values.customPodLabels "release" }} + {{- $LabelsContain = true -}} + {{- end }} + {{- $LabelsContain -}} +{{- end -}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/ambassador.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/ambassador.yaml new file mode 100644 index 0000000000..7c374a70e8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/ambassador.yaml @@ -0,0 +1,86 @@ +{{ $svcName := include ".servicename" . 
}} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ambassadorMapping.enabled }} +{{- with $.Values.ambassadorMapping }} +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + name: {{ include ".Chart.Name .fullname" $ }}-mapping + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .labels }} +{{ toYaml .labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .ambassadorId }} + ambassador_id: {{ .ambassadorId }} + {{- end }} + {{- if .hostname }} + hostname: {{ .hostname | quote }} + {{- end }} + prefix: {{ .prefix }} + {{- if .rewrite }} + rewrite: {{ .rewrite }} + {{- end }} + service: {{ $svcName }}.{{ $.Release.Namespace }}:{{ $svcPort }} + {{- if .retryPolicy }} + retry_policy: +{{ toYaml .retryPolicy | indent 4 }} + {{- end }} + {{- if .cors }} + cors: +{{ toYaml .cors | indent 4 }} + {{- end }} + {{- if .weight }} + weight: {{ .weight }} + {{- end }} + {{- if .method }} + method: {{ .method }} + {{- end }} + {{- if .extraSpec }} +{{ toYaml .extraSpec | indent 2 }} + {{- end }} + {{- if .tls }} + {{- if .tls.context }} + tls: {{ .tls.context }} +{{- if .tls.create }} +--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .tls.context }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .tls.labels }} +{{ toYaml .tls.labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .tls.secretName }} + secret: {{ .tls.secretName }} + {{- end }} + {{- if .tls.hosts }} 
+ hosts: +{{ toYaml .tls.hosts | nindent 4 }} + {{- end }} + {{- if .tls.extraSpec }} +{{ toYaml .tls.extraSpec | indent 2 }} + {{- end }} +{{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/analysis-template.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/analysis-template.yaml new file mode 100644 index 0000000000..53ff5f6909 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/analysis-template.yaml @@ -0,0 +1,34 @@ +{{- if .Values.analysisTemplate.enabled }} +{{- range .Values.analysisTemplate.templates }} +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + {{- if .annotations }} + annotations: +{{ toYaml .annotations | indent 4 }} + {{- end }} + name: {{ .name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + pipelineName: {{ $.Values.pipelineName }} + {{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} + {{- end }} + {{- if .labels }} +{{ toYaml .labels | indent 4 }} + {{- end }} +spec: + {{- if .args }} + args: +{{ toYaml .args | indent 2 }} + {{- end }} + {{- if .measurementRetention }} + measurementRetention: +{{ toYaml .measurementRetention | indent 2 }} + {{- end }} + metrics: +{{ toYaml .metrics | indent 2 }} +--- +{{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/configmap.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/configmap.yaml new file mode 100644 index 0000000000..4e7879665e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/configmap.yaml @@ -0,0 +1,22 @@ +{{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + 
appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +data: +{{ toYaml .data | trim | indent 2 }} + {{- end}} + {{- end}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/deployment.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/deployment.yaml new file mode 100644 index 0000000000..f3f03dbe15 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/deployment.yaml @@ -0,0 +1,761 @@ + {{- $hasCMEnvExists := false -}} + {{- $hasCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $hasCMVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasCMEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + {{- $hasPVCExists := false -}} + {{- if .Values.persistentVolumeClaim.name }} + {{- $hasPVCExists = true }} + {{- end }} + + {{- $hasSecretEnvExists := false -}} + {{- $hasSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $hasSecretVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasSecretEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{ $CustomLabelsApp:= include "customPodLabelsContainsApp" . }} + {{ $CustomLabelsRelease:= include "customPodLabelsContainsRelease" . 
}} + + +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: {{ include ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ .Values.pipelineName }} +{{- if .Values.rolloutLabels }} +{{ toYaml .Values.rolloutLabels | indent 4 }} +{{- end }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.rolloutAnnotations }} + annotations: +{{ toYaml .Values.rolloutAnnotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.workloadRef }} +{{ toYaml .Values.workloadRef | indent 4 }} +{{- end }} +{{- if .Values.rollbackWindow.revisions }} + rollbackWindow: + revisions: {{ .Values.rollbackWindow.revisions }} +{{- end }} + {{- if .Values.analysis }} + analysis: +{{ toYaml .Values.analysis | indent 4 }} + {{- end }} + selector: + matchLabels: +{{- if .Values.customMatchLabels }} +{{ toYaml .Values.customMatchLabels | indent 6 }} +{{- else }} + app: {{ .Values.customPodLabels.app | default (include ".Chart.Name .name" $) }} + release: {{ .Values.customPodLabels.release | default $.Release.Name }} +{{- end }} + replicas: {{ $.Values.replicaCount }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- if not (eq "true" $CustomLabelsApp) }} + app: {{ .Values.customPodLabels.app | default (include ".Chart.Name .name" $) }} + {{- end }} + {{- if not (eq "true" $CustomLabelsRelease) }} + release: {{ .Values.customPodLabels.release |default $.Release.Name }} + {{- end }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +{{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 8 }} +{{- end }} +{{- if 
.Values.appLabels }}
+{{ toYaml .Values.appLabels | indent 8 }}
+{{- end }}
+{{- if .Values.podLabels }}
+{{ toYaml .Values.podLabels | indent 8 }}
+{{- end }}
+    spec:
+{{- if $.Values.schedulingGates.name }}
+      schedulingGates:
+        - name: {{ $.Values.schedulingGates.name }}
+{{- end }}
+{{- if $.Values.podExtraSpecs }}
+{{ toYaml .Values.podExtraSpecs | indent 6 }}
+{{- end }}
+      terminationGracePeriodSeconds: {{ $.Values.GracePeriod }}
+{{- if $.Values.hostAliases }}
+      hostAliases:
+{{ toYaml .Values.hostAliases | indent 8 }}
+{{- end }}
+{{- if and $.Values.Spec.Affinity.Key $.Values.Spec.Affinity.Values }}
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: {{ $.Values.Spec.Affinity.Key }}
+                operator: In
+                values:
+                - {{ $.Values.Spec.Affinity.Values | default "nodes" }}
+{{- else if $.Values.affinity.enabled }}
+      affinity:
+{{ toYaml .Values.affinity.values | indent 8 }}
+{{- end }}
+{{- if $.Values.serviceAccountName }}
+      serviceAccountName: {{ $.Values.serviceAccountName }}
+{{- else }}
+      serviceAccountName: {{ template "serviceAccountName" . }}
+{{- end }}
+{{- if $.Values.schedulerName }}
+      schedulerName: {{ .Values.schedulerName }}
+{{- end }}
+      {{- if .Values.tolerations }}
+      tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+      {{- end }}
+{{- if $.Values.imagePullSecrets}}
+      imagePullSecrets:
+      {{- range .Values.imagePullSecrets }}
+        - name: {{ . 
}}
+      {{- end }}
+{{- end}}
+{{- if $.Values.topologySpreadConstraints }}
+      topologySpreadConstraints:
+{{- range $.Values.topologySpreadConstraints }}
+      - maxSkew: {{ .maxSkew }}
+        topologyKey: {{ .topologyKey }}
+        whenUnsatisfiable: {{ .whenUnsatisfiable }}
+        {{- if semverCompare "<=1.30-0" $.Capabilities.KubeVersion.GitVersion }}
+        {{- if .minDomains }}
+        minDomains: {{ .minDomains }}
+        {{- end }}
+        {{- end }}
+        {{- if .nodeAffinityPolicy }}
+        nodeAffinityPolicy: {{ .nodeAffinityPolicy }}
+        {{- end }}
+        {{- if .nodeTaintsPolicy }}
+        nodeTaintsPolicy: {{ .nodeTaintsPolicy }}
+        {{- end }}
+        labelSelector:
+          matchLabels:
+          {{- if and .autoLabelSelector .customLabelSelector }}
+{{ toYaml .customLabelSelector | indent 12 }}
+          {{- else if .autoLabelSelector }}
+            app: {{ template ".Chart.Name .name" $ }}
+            appId: {{ $.Values.app | quote }}
+            envId: {{ $.Values.env | quote }}
+            release: {{ $.Release.Name }}
+          {{- else if .customLabelSelector }}
+{{ toYaml .customLabelSelector | indent 12 }}
+          {{- end }}
+{{- end }}
+{{- end }}
+{{- if $.Values.topologySpreadConstraint }}
+      topologySpreadConstraints:
+{{ toYaml .Values.topologySpreadConstraint }}
+{{- end }}
+{{- if $.Values.podSecurityContext }}
+      securityContext:
+{{ toYaml .Values.podSecurityContext | indent 8 }}
+{{- end }}
+{{- if $.Values.restartPolicy }}
+      restartPolicy: {{ $.Values.restartPolicy }}
+{{- else }}
+      restartPolicy: Always
+{{- end }}
+{{- if $.Values.initContainers}}
+      initContainers:
+{{- range $i, $c := .Values.initContainers }}
+{{- if .reuseContainerImage}}
+        - name: {{ $.Chart.Name }}-init-{{ add1 $i }}
+          image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}"
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+{{- if .securityContext }}
+          securityContext:
+{{ toYaml .securityContext | indent 12 }}
+{{- end }}
+{{- if .command}}
+          command: 
+{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: + - name: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} +{{- if $.Values.resizePolicy }} + resizePolicy: +{{ toYaml .Values.resizePolicy | indent 12 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: {{ .protocol | default "TCP" }} + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml $.Values.command.value | indent 12 
-}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} + - name: SECRET_HASH + value: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + {{- if and .name .fieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariables }} + {{- if and .name .value }} + - name: {{ .name }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromConfigMapKeys }} + {{- if and .name .configMapName .keyName }} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" 
}} + - configMapRef: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} +{{- if and $.Values.LivenessProbe.grpc }} + grpc: + port: {{ $.Values.LivenessProbe.port }} + service: {{ $.Values.service.name }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + scheme: {{ 
$.Values.ReadinessProbe.scheme }} + {{- if $.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ $.Values.ReadinessProbe.port }} +{{- end}} +{{- if and $.Values.ReadinessProbe.grpc }} + grpc: + port: {{ $.Values.ReadinessProbe.port }} + service: {{ $.Values.service.name }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.StartupProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.StartupProbe.periodSeconds }} + successThreshold: {{ $.Values.StartupProbe.successThreshold }} + timeoutSeconds: {{ $.Values.StartupProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.StartupProbe.failureThreshold }} 
+{{- end }} + volumeMounts: +{{- with .Values.volumeMounts }} +{{ toYaml . | trim | indent 12 }} +{{- end }} +{{- if $.Values.persistentVolumeClaim.name }} + - name: {{ .Values.persistentVolumeClaim.name }}-vol + mountPath: {{ .Values.persistentVolumeClaim.mountPath | default "/tmp" }} +{{- end}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . }} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for others secrets the mount path will be .data[i].secretKey + - name: {{ $cmName | replace "." 
"-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: envoy-{{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/ +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + livenessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := 
.Values.containers }}
+{{- if .reuseContainerImage}}
+        - name: {{ $.Chart.Name }}-sidecontainer-{{ add1 $i }}
+          image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}"
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+{{- if .env }}
+          env:
+{{ toYaml .env | indent 12 }}
+{{- end }}
+ {{- if .envFrom }}
+          envFrom:
+{{ toYaml .envFrom | indent 12 }}
+{{- end }}
+{{- if .securityContext }}
+          securityContext:
+{{ toYaml .securityContext | indent 12 }}
+{{- end }}
+{{- if .command}}
+          command:
+{{ toYaml .command | indent 12 -}}
+{{- end}}
+{{- if .resizePolicy }}
+          resizePolicy:
+{{ toYaml .resizePolicy | indent 12}}
+{{- end }}
+{{- if .resources}}
+          resources:
+{{ toYaml .resources | indent 12 -}}
+{{- end}}
+{{- if .volumeMounts}}
+          volumeMounts:
+{{ toYaml .volumeMounts | indent 12 -}}
+{{- end}}
+{{- else}}
+        -
+{{ toYaml . | indent 10 }}
+{{- end}}
+{{- end}}
+{{- end}}
+      volumes:
+  {{- if $.Values.appMetrics }}
+        - name: envoy-config-volume
+          configMap:
+            name: sidecar-config-{{ template ".Chart.Name .name" $ }}
+  {{- end }}
+{{- if .Values.persistentVolumeClaim.name }}
+        - name: {{.Values.persistentVolumeClaim.name}}-vol
+          persistentVolumeClaim:
+            claimName: {{.Values.persistentVolumeClaim.name }}
+{{- end}}
+{{- with .Values.volumes }}
+{{ toYaml . | trim | indent 8 }}
+{{- end }}
+      {{- if .Values.ConfigMaps.enabled }}
+      {{- range .Values.ConfigMaps.maps }}
+      {{- if eq .type "volume"}}
+        - name: {{ .name | replace "." "-"}}-vol
+          configMap:
+            {{- if eq .external true }}
+            name: {{ .name }}
+            {{- else if eq .external false }}
+            name: {{ .name}}-{{ $.Values.app }}
+            {{- end }}
+            {{- if eq (len .filePermission) 0 }}
+            {{- else }}
+            defaultMode: {{ .filePermission}}
+            {{- end }}
+      {{- end }}
+      {{- end }}
+      {{- end }}
+
+      {{- if .Values.ConfigSecrets.enabled }}
+      {{- range .Values.ConfigSecrets.secrets }}
+      {{- if eq .type "volume"}}
+        - name: {{ .name | replace "." 
"-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) (eq (.Values.appMetrics) false) }} []{{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq ($hasPVCExists) false) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) (eq (.Values.appMetrics) false) }} []{{- end }} + revisionHistoryLimit: 3 +## pauseForSecondsBeforeSwitchActive: {{ $.Values.pauseForSecondsBeforeSwitchActive }} +# waitForSecondsBeforeScalingDown: {{ $.Values.waitForSecondsBeforeScalingDown }} + strategy: + {{- if eq .Values.deploymentType "BLUE-GREEN" }} + blueGreen: # A new field that used to provide configurable options for a BlueGreenUpdate strategy + previewService: {{ template ".previewservicename" . }} # Reference to a service that can serve traffic to a new image before it receives the active traffic + activeService: {{ template ".servicename" . 
}} # Reference to a service that serves end-user traffic to the replica set + autoPromotionSeconds: {{ $.Values.deployment.strategy.blueGreen.autoPromotionSeconds }} + scaleDownDelaySeconds: {{ $.Values.deployment.strategy.blueGreen.scaleDownDelaySeconds }} + previewReplicaCount: {{ $.Values.deployment.strategy.blueGreen.previewReplicaCount }} + autoPromotionEnabled: {{ $.Values.deployment.strategy.blueGreen.autoPromotionEnabled }} + {{- else if eq .Values.deploymentType "RECREATE" }} + canary: + maxSurge: {{ $.Values.deployment.strategy.recreate.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.recreate.maxUnavailable }} + {{- else if eq .Values.deploymentType "ROLLING" }} + canary: + stableService: {{ template ".servicename" . }} # Reference to a service that serves end-user traffic to the replica set + maxSurge: {{ $.Values.deployment.strategy.rolling.maxSurge }} + maxUnavailable: {{ $.Values.deployment.strategy.rolling.maxUnavailable }} + {{- else if eq .Values.deploymentType "CANARY" }} + canary: + {{- if .Values.deployment.strategy.canary.antiAffinity }} + antiAffinity: +{{ toYaml .Values.deployment.strategy.canary.antiAffinity | indent 8 }} + {{- end }} + {{- if .Values.deployment.strategy.canary.canaryMetadata }} + canaryMetadata: +{{ toYaml .Values.deployment.strategy.canary.canaryMetadata | indent 8 }} + {{- end }} + {{- if .Values.deployment.strategy.canary.pingPong }} + pingPong: +{{ toYaml .Values.deployment.strategy.canary.pingPong | indent 8 }} + {{- end }} + {{- if .Values.deployment.strategy.canary.stableMetadata }} + stableMetadata: +{{ toYaml .Values.deployment.strategy.canary.stableMetadata | indent 8 }} + {{- end }} + {{- if .Values.deployment.strategy.canary.analysis }} + analysis: +{{ toYaml .Values.deployment.strategy.canary.analysis | indent 8 }} + {{- end }} + stableService: {{ template ".servicename" . 
}} # Reference to a service that serves end-user traffic to the replica set
+      {{- if .Values.deployment.strategy.canary.canaryService }}
+      canaryService: {{ $.Values.deployment.strategy.canary.canaryService }}
+      {{- else }}
+      canaryService: {{ template ".previewservicename" . }}
+      {{- end }}
+      maxSurge: {{ $.Values.deployment.strategy.canary.maxSurge }}
+      maxUnavailable: {{ $.Values.deployment.strategy.canary.maxUnavailable }}
+      steps:
+{{ toYaml .Values.deployment.strategy.canary.steps | indent 8 }}
+      {{- if .Values.deployment.strategy.canary.trafficRouting }}
+      trafficRouting:
+        {{- if .Values.deployment.strategy.canary.trafficRouting.smi }}
+        smi:
+          {{- if .Values.deployment.strategy.canary.trafficRouting.smi.rootService }}
+          rootService: {{ .Values.deployment.strategy.canary.trafficRouting.smi.rootService }}
+          {{- else }}
+          rootService: {{ template ".servicename" . }}
+          {{- end }}
+          {{- if .Values.deployment.strategy.canary.trafficRouting.smi.trafficSplitName }}
+          trafficSplitName: {{ .Values.deployment.strategy.canary.trafficRouting.smi.trafficSplitName }}
+          {{- else }}
+          trafficSplitName: {{ template ".Chart.Name .fullname" $ }}-traffic-split
+          {{- end }}
+        {{- else if .Values.deployment.strategy.canary.trafficRouting.istio }}
+        istio:
+          {{ toYaml .Values.deployment.strategy.canary.trafficRouting.istio | indent 10 }}
+        {{- else if .Values.deployment.strategy.canary.trafficRouting.alb }}
+        alb:
+          {{- if .Values.deployment.strategy.canary.trafficRouting.alb.ingress }}
+          ingress: {{ .Values.deployment.strategy.canary.trafficRouting.alb.ingress }}
+          {{- else if $.Values.ingress.name }}
+          ingress: {{ $.Values.ingress.name }}
+          {{- else }}
+          ingress: {{ template ".Chart.Name .fullname" . }}-ingress
+          {{- end }}
+          {{- if .Values.deployment.strategy.canary.trafficRouting.alb.rootService }}
+          rootService: {{ .Values.deployment.strategy.canary.trafficRouting.alb.rootService }}
+          {{- else }}
+          rootService: {{ template ".servicename" . 
}} + {{- end }} + {{- if .Values.deployment.strategy.canary.trafficRouting.alb.annotationPrefix }} + annotationPrefix: {{ .Values.deployment.strategy.canary.trafficRouting.alb.annotationPrefix }} + {{- end }} + {{- if .Values.deployment.strategy.canary.trafficRouting.alb.servicePort }} + servicePort: {{ .Values.deployment.strategy.canary.trafficRouting.alb.servicePort }} + {{- else }} + {{- with index .Values.ContainerPort 0 }} + servicePort: {{ .servicePort }} + {{- end }} + {{- end }} + {{- if .Values.deployment.strategy.canary.trafficRouting.alb.stickinessConfig }} + stickinessConfig: +{{ toYaml .Values.deployment.strategy.canary.trafficRouting.alb.stickinessConfig | nindent 12 }} + {{- end }} + {{- if .Values.deployment.strategy.canary.trafficRouting.alb.ingresses }} + ingresses: + {{- range .Values.deployment.strategy.canary.trafficRouting.alb.ingresses }} + - {{ . }} + {{- end }} + {{- end }} + {{- else if .Values.deployment.strategy.canary.trafficRouting.customTrafficRouting }} +{{ toYaml .Values.deployment.strategy.canary.trafficRouting.customTrafficRouting | indent 8 }} + {{- end }} + {{- if .Values.deployment.strategy.canary.trafficRouting.managedRoutes }} + managedRoutes: +{{ toYaml .Values.deployment.strategy.canary.trafficRouting.managedRoutes | indent 10 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/externalsecrets.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/externalsecrets.yaml new file mode 100644 index 0000000000..6b6682c0a6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/externalsecrets.yaml @@ -0,0 +1,84 @@ +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external true }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType 
"ESO_AzureSecretsManager"))}} +{{- if .esoSecretData.secretStore }} +--- +{{- if $.Capabilities.APIVersions.Has "external-secrets.io/v1" }} +apiVersion: external-secrets.io/v1 +{{- else }} +apiVersion: external-secrets.io/v1beta1 +{{- end }} +kind: SecretStore +metadata: + name: {{ .name}} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + provider: + {{- toYaml .esoSecretData.secretStore | nindent 4 }} +{{- end }} +--- +{{- if $.Capabilities.APIVersions.Has "external-secrets.io/v1" }} +apiVersion: external-secrets.io/v1 +{{- else }} +apiVersion: external-secrets.io/v1beta1 +{{- end }} +kind: ExternalSecret +metadata: + name: {{ .name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .esoSecretData.refreshInterval }} + refreshInterval: {{ .esoSecretData.refreshInterval }} + {{- else }} + refreshInterval: 1h + {{- end}} + {{- if and .esoSecretData.secretStoreRef (not .esoSecretData.secretStore) }} + secretStoreRef: +{{ toYaml .esoSecretData.secretStoreRef | indent 4 }} + {{- else }} + secretStoreRef: + name: {{ .name}} + kind: SecretStore + {{- end }} + target: + name: {{ .name}} + {{- if .esoSecretData.template }} + template: + {{- toYaml .esoSecretData.template | nindent 6 }} + {{- end }} + creationPolicy: Owner + {{- if .esoSecretData.esoDataFrom }} + dataFrom: + {{- toYaml .esoSecretData.esoDataFrom | nindent 4 }} + {{- else }} + data: + {{- range .esoSecretData.esoData }} + - secretKey: {{ .secretKey }} + remoteRef: + key: {{ .key }} + {{- if .property }} + 
property: {{ .property }} + {{- end }} + {{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/generic.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/generic.yaml new file mode 100644 index 0000000000..db95e84267 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/generic.yaml @@ -0,0 +1,4 @@ +{{- range .Values.rawYaml }} +--- +{{ toYaml . }} + {{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/hpa.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/hpa.yaml new file mode 100644 index 0000000000..1c5c636b9f --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/hpa.yaml @@ -0,0 +1,93 @@ +{{- if $.Values.autoscaling.enabled }} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + {{- if $.Values.autoscaling.name }} + name: {{ $.Values.autoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-hpa + {{- end }} + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + {{- if .Values.autoscaling.labels }} +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + scaleTargetRef: + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + 
name: {{ include ".Chart.Name .fullname" $ }} + minReplicas: {{ $.Values.autoscaling.MinReplicas }} + maxReplicas: {{ $.Values.autoscaling.MaxReplicas }} + metrics: + {{- if $.Values.autoscaling.containerResource.enabled }} + {{- with $.Values.autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + type: Utilization + averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and $.Values.autoscaling.extraMetrics (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} + {{- toYaml $.Values.autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and 
$.Values.autoscaling.behavior (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml $.Values.autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/ingress.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/ingress.yaml new file mode 100644 index 0000000000..1d47899d0b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/ingress.yaml @@ -0,0 +1,187 @@ +{{ $svcName := include ".servicename" . }} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ingress.enabled -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if and .Values.ingressInternal.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingressInternal.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingressInternal.annotations "kubernetes.io/ingress.class" .Values.ingressInternal.className}} + {{- end }} +{{- end }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingress.name }} + name: {{ $.Values.ingress.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . 
}} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} + {{- end }} +{{- if .Values.ingress.annotations }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + {{- if or .Values.ingress.host .Values.ingress.path }} + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ .Values.ingress.path }} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingress.pathType | default "ImplementationSpecific" }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingress.hosts) (not ($.Values.ingress.host )) }} + {{- range .Values.ingress.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end }} +{{- if $.Values.ingressInternal.enabled }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{ else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{ else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingressInternal.name }} + name: {{ $.Values.ingressInternal.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-ingress-internal + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.ingressInternal.annotations }} + annotations: +{{ toYaml .Values.ingressInternal.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingressInternal.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingressInternal.className }} + {{- end }} + rules: + {{- if or .Values.ingressInternal.host .Values.ingressInternal.path }} + - host: {{ .Values.ingressInternal.host }} + http: + paths: + - path: {{ .Values.ingressInternal.path }} + {{- if and .Values.ingressInternal.pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingressInternal.pathType | default "Prefix" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingressInternal.hosts) (not ($.Values.ingressInternal.host )) }} + {{- range .Values.ingressInternal.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingressInternal.tls }} + tls: +{{ toYaml .Values.ingressInternal.tls | indent 4 }} + {{- end -}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-authorizationpolicy.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-authorizationpolicy.yaml new file mode 100644 index 0000000000..df063920a7 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-authorizationpolicy.yaml @@ -0,0 +1,45 @@ +{{- with .Values.istio }} +{{- if and .enable .authorizationPolicy.enabled }} +{{ if semverCompare ">=1.22-0" $.Capabilities.KubeVersion.GitVersion -}} +apiVersion: security.istio.io/v1 +{{- else -}} +apiVersion: security.istio.io/v1beta1 +{{- end }} +kind: AuthorizationPolicy +metadata: + {{- if .authorizationPolicy.name }} + name: {{ .authorizationPolicy.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .authorizationPolicy.labels }} +{{ toYaml .authorizationPolicy.labels | indent 4 }} + {{- end }} +{{- if .authorizationPolicy.annotations }} + annotations: +{{ toYaml 
.authorizationPolicy.annotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} + action: {{ .authorizationPolicy.action }} +{{- if $.Values.istio.authorizationPolicy.provider }} + provider: +{{ toYaml $.Values.istio.authorizationPolicy.provider | indent 4 }} +{{- end }} +{{- if $.Values.istio.authorizationPolicy.rules }} + rules: +{{ toYaml $.Values.istio.authorizationPolicy.rules | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-destinationrule.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-destinationrule.yaml new file mode 100644 index 0000000000..b1eb429257 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-destinationrule.yaml @@ -0,0 +1,45 @@ +{{- with .Values.istio }} +{{- if and .enable .destinationRule.enabled }} +{{ if semverCompare ">=1.22-0" $.Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.istio.io/v1 +{{ else }} +apiVersion: networking.istio.io/v1beta1 +{{- end }} +kind: DestinationRule +metadata: + {{- if .destinationRule.name }} + name: {{ .destinationRule.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-destinationrule + {{- end }} + {{- if .destinationRule.namespace }} + namespace: {{ .destinationRule.namespace }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .destinationRule.labels }} +{{ toYaml .destinationRule.labels | indent 4 }} + {{- end }} +{{- if .destinationRule.annotations }} + annotations: +{{ toYaml .destinationRule.annotations | indent 4 }} +{{- end }} +spec: + 
host: "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- if $.Values.istio.destinationRule.subsets }} + subsets: +{{ toYaml $.Values.istio.destinationRule.subsets | indent 4 }} +{{- end }} +{{- if $.Values.istio.destinationRule.trafficPolicy }} + trafficPolicy: +{{ toYaml $.Values.istio.destinationRule.trafficPolicy | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-gateway.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-gateway.yaml new file mode 100644 index 0000000000..425cc48c12 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-gateway.yaml @@ -0,0 +1,73 @@ +{{- if and .Values.istio.enable .Values.istio.gateway.enabled -}} +{{ if semverCompare ">=1.22-0" $.Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.istio.io/v1 +{{ else }} +apiVersion: networking.istio.io/v1beta1 +{{- end }} +kind: Gateway +metadata: + {{- if .Values.istio.gateway.name }} + name: {{ .Values.istio.gateway.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.istio.gateway.labels }} +{{ toYaml $.Values.istio.gateway.labels | indent 4 }} + {{- end }} +{{- if $.Values.istio.gateway.annotations }} + annotations: +{{ toYaml $.Values.istio.gateway.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.istio.gateway.gatewayExtraSpec }} +{{ toYaml $.Values.istio.gateway.gatewayExtraSpec | indent 2}} +{{- else}} +{{- if .Values.istio.gateway.selector }} + selector: +{{ toYaml $.Values.istio.gateway.selector | 
indent 4}} +{{- end }} + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: +{{- if .Values.istio.gateway.host }} + - {{ .Values.istio.gateway.host | quote -}} +{{- else if .Values.istio.gateway.hosts }} +{{- range .Values.istio.gateway.hosts }} + - {{ . | quote }} +{{- end }} +{{- end }} +{{ with .Values.istio.gateway }} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: +{{- if .host }} + - {{ .host | quote }} +{{- else if .hosts }} +{{- range .hosts }} + - {{ . | quote }} +{{- end }} +{{- end }} + tls: + mode: SIMPLE + credentialName: {{ .tls.secretName }} +{{ end }} +{{ end }} +{{ end }} +{{ end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-peerauthentication.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-peerauthentication.yaml new file mode 100644 index 0000000000..5e143c8530 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-peerauthentication.yaml @@ -0,0 +1,44 @@ +{{- with .Values.istio }} +{{- if and .enable .peerAuthentication.enabled }} +{{ if semverCompare ">=1.22-0" $.Capabilities.KubeVersion.GitVersion -}} +apiVersion: security.istio.io/v1 +{{- else -}} +apiVersion: security.istio.io/v1beta1 +{{- end }} +kind: PeerAuthentication +metadata: + {{- if .peerAuthentication.name }} + name: {{ .peerAuthentication.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .peerAuthentication.labels }} +{{ toYaml .peerAuthentication.labels | indent 4 }} + {{- end }} +{{- if 
.peerAuthentication.annotations }} + annotations: +{{ toYaml .peerAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .peerAuthentication.selector.enabled }} + selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} + mtls: + mode: {{ .peerAuthentication.mtls.mode }} +{{- if $.Values.istio.peerAuthentication.portLevelMtls }} + portLevelMtls: +{{ toYaml $.Values.istio.peerAuthentication.portLevelMtls | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-requestauthentication.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-requestauthentication.yaml new file mode 100644 index 0000000000..d8082bd34d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-requestauthentication.yaml @@ -0,0 +1,42 @@ +{{- with .Values.istio }} +{{- if and .enable .requestAuthentication.enabled }} +{{ if semverCompare ">=1.22-0" $.Capabilities.KubeVersion.GitVersion -}} +apiVersion: security.istio.io/v1 +{{- else -}} +apiVersion: security.istio.io/v1beta1 +{{- end }} +kind: RequestAuthentication +metadata: + {{- if .requestAuthentication.name }} + name: {{ .requestAuthentication.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .requestAuthentication.labels }} +{{ toYaml .requestAuthentication.labels | indent 4 }} + {{- end }} +{{- if .requestAuthentication.annotations }} + annotations: +{{ toYaml .requestAuthentication.annotations | indent 4 }} +{{- end }} +spec: +{{- if .requestAuthentication.selector.enabled }} 
+ selector: + matchLabels: + app.kubernetes.io/name: {{ template ".Chart.Name .fullname" $ }} +{{- end }} +{{- if $.Values.istio.requestAuthentication.jwtRules }} + jwtRules: +{{ toYaml $.Values.istio.requestAuthentication.jwtRules | indent 2 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-virtualservice.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-virtualservice.yaml new file mode 100644 index 0000000000..e08206b162 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/istio-virtualservice.yaml @@ -0,0 +1,75 @@ +{{- with .Values.istio }} +{{- if and .enable .virtualService.enabled }} +{{ if semverCompare ">=1.22-0" $.Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.istio.io/v1 +{{ else }} +apiVersion: networking.istio.io/v1beta1 +{{- end }} +kind: VirtualService +metadata: + {{- if .virtualService.name }} + name: {{ .virtualService.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-virtualservice + {{- end }} + {{- if .virtualService.namespace }} + namespace: {{ .virtualService.namespace }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .virtualService.labels }} +{{ toYaml .virtualService.labels | indent 4 }} + {{- end }} +{{- if .virtualService.annotations }} + annotations: +{{ toYaml .virtualService.annotations | indent 4 }} +{{- end }} +spec: +{{- if or .gateway.enabled .virtualService.gateways }} +{{- if .virtualService.virtualServiceExtraSpec }} +{{ toYaml .virtualService.virtualServiceExtraSpec | indent 2}} +{{- else }} + gateways: + {{- if .gateway.enabled }} + - {{ template 
".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- range .virtualService.gateways }} + - {{ . | quote }} + {{- end }} +{{- end }} +{{- if or .gateway.enabled .virtualService.hosts }} + hosts: + {{- if .gateway.enabled }} + {{- if .gateway.host }} + - {{ .gateway.host | quote }} + {{- else if .gateway.hosts }} +{{- range .gateway.hosts }} + - {{ . | quote }} +{{- end }} + {{- end }} + {{- end }} + {{- range .virtualService.hosts }} + - {{ . | quote }} + {{- end }} +{{- else }} + hosts: + - "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- end }} +{{- if $.Values.istio.virtualService.http }} + http: +{{ toYaml $.Values.istio.virtualService.http | indent 4 }} +{{- end }} +{{- if $.Values.istio.virtualService.tcp }} + tcp: +{{ toYaml $.Values.istio.virtualService.tcp | indent 4 }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/keda-autoscaling.yaml new file mode 100644 index 0000000000..850312e16d --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/keda-autoscaling.yaml @@ -0,0 +1,78 @@ +{{- if $.Values.kedaAutoscaling.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + {{- if $.Values.kedaAutoscaling.name }} + name: {{ $.Values.kedaAutoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-keda + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ .Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.labels }} +{{ toYaml .Values.kedaAutoscaling.labels | indent 4 }} + {{- end }} + {{- if 
.Values.kedaAutoscaling.annotations }} + annotations: +{{ toYaml .Values.kedaAutoscaling.annotations | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + name: {{ include ".Chart.Name .fullname" $ }} +{{- if $.Values.kedaAutoscaling.envSourceContainerName }} + envSourceContainerName: {{ $.Values.kedaAutoscaling.envSourceContainerName }} +{{- end }} +{{- if $.Values.kedaAutoscaling.pollingInterval }} + pollingInterval: {{ $.Values.kedaAutoscaling.pollingInterval }} +{{- end }} +{{- if $.Values.kedaAutoscaling.cooldownPeriod }} + cooldownPeriod: {{ $.Values.kedaAutoscaling.cooldownPeriod }} +{{- end }} +{{- if $.Values.kedaAutoscaling.idleReplicaCount }} + idleReplicaCount: {{ $.Values.kedaAutoscaling.idleReplicaCount }} +{{- end }} + minReplicaCount: {{ $.Values.kedaAutoscaling.minReplicaCount }} + maxReplicaCount: {{ $.Values.kedaAutoscaling.maxReplicaCount }} +{{- if $.Values.kedaAutoscaling.fallback }} + fallback: +{{ toYaml $.Values.kedaAutoscaling.fallback | indent 4 }} +{{- end }} +{{- if $.Values.kedaAutoscaling.advanced }} + advanced: +{{ toYaml $.Values.kedaAutoscaling.advanced | indent 4 }} +{{- end }} + triggers: +{{ toYaml .Values.kedaAutoscaling.triggers | indent 2}} +{{- if $.Values.kedaAutoscaling.authenticationRef }} + authenticationRef: +{{ toYaml $.Values.kedaAutoscaling.authenticationRef | indent 6 }} +{{- end }} +--- +{{- if $.Values.kedaAutoscaling.triggerAuthentication.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{ $.Values.kedaAutoscaling.triggerAuthentication.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} +spec: +{{ toYaml $.Values.kedaAutoscaling.triggerAuthentication.spec | indent 2 }} +{{- end }} 
+{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/metrics-service-monitor.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/metrics-service-monitor.yaml new file mode 100644 index 0000000000..4e9e544f50 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/metrics-service-monitor.yaml @@ -0,0 +1,35 @@ +{{- if $.Values.appMetrics -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: + jobLabel: {{ template ".Chart.Name .name" $ }} + endpoints: + - port: envoy-admin + interval: 30s + path: /stats/prometheus + relabelings: + - action: replace + sourceLabels: + - __meta_kubernetes_pod_label_rollouts_pod_template_hash + targetLabel: devtron_app_hash + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} + podTargetLabels: + - appId + - envId + - devtron_app_hash +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/networkpolicy.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/networkpolicy.yaml new file mode 100644 index 0000000000..ee8bdaf8be --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- if .Values.networkPolicy.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + {{- if .Values.networkPolicy.name }} + name: {{ .Values.networkPolicy.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-networkpolicy + {{- 
end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.networkPolicy.labels }} +{{ toYaml $.Values.networkPolicy.labels | indent 4 }} + {{- end }} +{{- if $.Values.networkPolicy.annotations }} + annotations: +{{ toYaml $.Values.networkPolicy.annotations | indent 4 }} +{{- end }} +spec: + podSelector: +{{- if .podSelector.matchExpressions }} + matchExpressions: +{{ toYaml $.Values.networkPolicy.podSelector.matchExpressions | indent 6 }} +{{- end }} +{{- if .podSelector.matchLabels }} + matchLabels: +{{ toYaml $.Values.networkPolicy.podSelector.matchLabels | indent 6 }} +{{- else }} + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} +{{- end }} +{{- if .policyTypes }} + policyTypes: +{{ toYaml $.Values.networkPolicy.policyTypes | indent 4 }} +{{- end }} +{{- if .ingress }} + ingress: +{{ toYaml $.Values.networkPolicy.ingress | indent 4 }} +{{- end }} +{{- if .egress }} + egress: +{{ toYaml $.Values.networkPolicy.egress | indent 4}} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/persistent-volume-claim.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/persistent-volume-claim.yaml new file mode 100644 index 0000000000..cee0fb2fde --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/persistent-volume-claim.yaml @@ -0,0 +1,27 @@ +{{- if .Values.persistentVolumeClaim.name }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{.Values.persistentVolumeClaim.name }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template 
".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- with .Values.persistentVolumeClaim }} +spec: + accessModes: +{{- range .accessMode }} + - {{ . }} +{{- end }} + resources: + requests: + storage: {{ .storage | default "5Gi" }} + storageClassName: {{ .storageClassName | default "default" }} + volumeMode: {{ .volumeMode | default "Filesystem" }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/poddisruptionbudget.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000000..869d380d40 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/poddisruptionbudget.yaml @@ -0,0 +1,38 @@ +{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + {{- if .Values.podDisruptionBudget.name }} + name: {{ .Values.podDisruptionBudget.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 6 }} + {{- else }} + appId: {{ $.Values.app | quote 
}} + envId: {{ $.Values.env | quote }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/pre-sync-job.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/pre-sync-job.yaml new file mode 100644 index 0000000000..cd733d4857 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/pre-sync-job.yaml @@ -0,0 +1,23 @@ +{{- if $.Values.dbMigrationConfig.enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-migrator + annotations: + argocd.argoproj.io/hook: PreSync +# argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + template: + spec: + containers: + - name: migrator + image: 686244538589.dkr.ecr.us-east-2.amazonaws.com/migrator:0.0.1-rc14 + env: + {{- range $.Values.dbMigrationConfig.envValues }} + - name: {{ .key}} + value: {{ .value | quote }} + {{- end}} + restartPolicy: Never + backoffLimit: 0 +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/prometheusrules.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/prometheusrules.yaml new file mode 100644 index 0000000000..90f398bff4 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/prometheusrules.yaml @@ -0,0 +1,22 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template ".Chart.Name .fullname" . }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: + kind: Prometheus + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Values.prometheus.release }} + {{- if .Values.prometheusRule.additionalLabels }} +{{ toYaml .Values.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.prometheusRule.rules }} + groups: + - name: {{ template ".Chart.Name .fullname" $ }} + rules: {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/secret.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/secret.yaml new file mode 100644 index 0000000000..26a17b968c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/secret.yaml @@ -0,0 +1,69 @@ +{{- if $.Values.secret.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secret +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml $.Values.secret.data | indent 2 }} +{{- end }} + + +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name}}-{{ $.Values.app }} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml .data | trim | indent 2 }} +{{- end}} + {{if eq .external true }} + {{if (or (eq .externalType "AWSSecretsManager") (eq .externalType "AWSSystemManager") (eq .externalType "HashiCorpVault"))}} +--- +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: {{ .name}} +{{- if $.Values.appLabels }} + labels: +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .roleARN }} + roleArn: {{ .roleARN }} + {{- end}} + {{- if eq .externalType "AWSSecretsManager"}} + backendType: secretsManager + {{- end}} + {{- if eq .externalType "AWSSystemManager"}} + backendType: systemManager + {{- end}} + {{- if eq .externalType "HashiCorpVault"}} + backendType: vault + {{- 
end}} + data: + {{- range .secretData }} + - key: {{.key}} + name: {{.name}} + {{- if .property }} + property: {{.property}} + {{- end}} + isBinary: {{.isBinary}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/service.yaml new file mode 100644 index 0000000000..6dfea45c80 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/service.yaml @@ -0,0 +1,114 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".servicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end}} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} +{{- with .Values.service.extraSpec }} + {{- toYaml . | nindent 2 }} + {{- end }} +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . 
}} + {{- end }} + {{- end }} +{{- end }} + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else if $.Values.appMetrics }} + targetPort: envoy-{{ .name }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + {{- if (and (eq $.Values.service.type "NodePort") .nodePort )}} + nodePort: {{ .nodePort }} + {{- end }} + protocol: {{ .protocol | default "TCP"}} + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- if .Values.service.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.service.sessionAffinityConfig | indent 4 }} +{{- end }} +{{- if or (eq .Values.deploymentType "BLUE-GREEN") (eq .Values.deploymentType "CANARY") }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template ".previewservicename" . }} + labels: + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . 
}} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else if $.Values.appMetrics }} + targetPort: envoy-{{ .name }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + protocol: {{ .protocol | default "TCP"}} + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/serviceaccount.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/serviceaccount.yaml new file mode 100644 index 0000000000..ac258610fa --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if $.Values.serviceAccount }} +{{- if $.Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "serviceAccountName" . }} + {{- if .Values.podLabels }} + labels: +{{ toYaml .Values.podLabels | indent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/servicemonitor.yaml new file mode 100644 index 0000000000..8600f9d65b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/servicemonitor.yaml @@ -0,0 +1,117 @@ +{{ $serviceMonitorEnabled := include "serviceMonitorEnabled" . 
}} +{{- if eq "true" $serviceMonitorEnabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + {{- if .Values.servicemonitor.name }} + name: {{ .Values.servicemonitor.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-sm + {{- end }} + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} + {{- if .Values.servicemonitor.additionalLabels }} +{{ toYaml .Values.servicemonitor.additionalLabels | indent 4 }} + {{- end }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout | quote }} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range .Values.containers }} + {{- range .ports }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} 
+ {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.servicemonitor.namespaceSelector }} + namespaceSelector: + matchNames: + {{- toYaml .Values.servicemonitor.namespaceSelector | nindent 6 }} + {{- end }} + selector: + matchLabels: + {{- if .Values.servicemonitor.matchLabels }} + {{- toYaml .Values.servicemonitor.matchLabels | nindent 6 }} + {{- else }} + app: {{ template ".Chart.Name .name" $ }} + {{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/sidecar-configmap.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/sidecar-configmap.yaml new file mode 100644 index 0000000000..cf32679409 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/sidecar-configmap.yaml @@ -0,0 +1,169 @@ +{{- if .Values.appMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: sidecar-config-{{ template ".Chart.Name .name" $ }} +data: + 
envoy-config.json: |
+ {
+ "stats_config": {
+ "use_all_default_tags": false,
+ "stats_tags": [
+ {
+ "tag_name": "cluster_name",
+ "regex": "^cluster\\.((.+?(\\..+?\\.svc\\.cluster\\.local)?)\\.)"
+ },
+ {
+ "tag_name": "tcp_prefix",
+ "regex": "^tcp\\.((.*?)\\.)\\w+?$"
+ },
+ {
+ "tag_name": "response_code",
+ "regex": "_rq(_(\\d{3}))$"
+ },
+ {
+ "tag_name": "response_code_class",
+ "regex": ".*_rq(_(\\dxx))$"
+ },
+ {
+ "tag_name": "http_conn_manager_listener_prefix",
+ "regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)"
+ },
+ {
+ "tag_name": "http_conn_manager_prefix",
+ "regex": "^http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)"
+ },
+ {
+ "tag_name": "listener_address",
+ "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)"
+ },
+ {
+ "tag_name": "mongo_prefix",
+ "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$"
+ }
+ ],
+ "stats_matcher": {
+ "inclusion_list": {
+ "patterns": [
+ {
+ "regex": ".*_rq_\\dxx$"
+ },
+ {
+ "regex": ".*_rq_time$"
+ },
+ {
+ "regex": "cluster.*"
+ }
+ ]
+ }
+ }
+ },
+ "admin": {
+ "access_log_path": "/dev/null",
+ "address": {
+ "socket_address": {
+ "address": "0.0.0.0",
+ "port_value": 9901
+ }
+ }
+ },
+ "static_resources": {
+ "clusters": [
+ {{- range $index, $element := .Values.ContainerPort }}
+ {
+ "name": "{{ $.Values.app }}-{{ $index }}",
+ "type": "STATIC",
+ "connect_timeout": "0.250s",
+ "lb_policy": "ROUND_ROBIN",
+{{- if $element.idleTimeout }}
+ "common_http_protocol_options": {
+ "idle_timeout": {{ $element.idleTimeout | quote }}
+ },
+{{- end }}
+{{- if or $element.useHTTP2 $element.useGRPC }}
+ "http2_protocol_options": {},
+{{- end }}
+{{- if and (not $element.useGRPC) (not $element.supportStreaming) }}
+ "max_requests_per_connection": "1",
+{{- end }}
+ "load_assignment": {
+ "cluster_name": "9",
+ "endpoints": {
+ "lb_endpoints": [
+ {
+ "endpoint": {
+ "address": {
+ "socket_address": {
+ 
"protocol": "TCP",
+ "address": "127.0.0.1",
+ "port_value": {{ $element.port }}
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ {{- end }}
+ ],
+ "listeners":[
+ {{- range $index, $element := .Values.ContainerPort }}
+ {
+ "address": {
+ "socket_address": {
+ "protocol": "TCP",
+ "address": "0.0.0.0",
+ "port_value": {{ $element.envoyPort | default (add 8790 $index) }}
+ }
+ },
+ "filter_chains": [
+ {
+ "filters": [
+ {
+ "name": "envoy.filters.network.http_connection_manager",
+ "config": {
+ "codec_type": "AUTO",
+ "stat_prefix": "stats",
+ "route_config": {
+ "virtual_hosts": [
+ {
+ "name": "backend",
+ "domains": [
+ "*"
+ ],
+ "routes": [
+ {
+ "match": {
+ "prefix": "/"
+ },
+ "route": {
+{{- if $element.supportStreaming }}
+ "timeout": "0s",
+{{- end }}
+{{- if and ($element.envoyTimeout) (not $element.supportStreaming) }}
+ "timeout": "{{ $element.envoyTimeout }}",
+{{- end }}
+ "cluster": "{{ $.Values.app }}-{{ $index }}"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "http_filters": [{
+ "name": "envoy.filters.http.router"
+ }]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {{- end }}
+ ]
+ }
+ }
+---
+{{- end }}
diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/vertical-pod-autoscaler.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/vertical-pod-autoscaler.yaml
new file mode 100644
index 0000000000..7d1d1db475
--- /dev/null
+++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/vertical-pod-autoscaler.yaml
+{{ $VerticalPodAutoScalingEnabled := include "VerticalPodAutoScalingEnabled" . }}
+{{- if eq "true" $VerticalPodAutoScalingEnabled -}}
+apiVersion: autoscaling.k8s.io/v1
+kind: VerticalPodAutoscaler
+metadata:
+ {{- if .Values.verticalPodScaling.name }}
+ name: {{ .Values.verticalPodScaling.name }}
+ {{- else }}
+ name: {{ template ".Chart.Name .fullname" . }}-vpa
+ {{- end }}
+ labels:
+ kind: Prometheus
+ app: {{ template ".Chart.Name .name" . 
}}
+ appId: {{ $.Values.app | quote }}
+ envId: {{ $.Values.env | quote }}
+ chart: {{ template ".Chart.Name .chart" . }}
+ release: {{ .Values.prometheus.release }}
+ {{- if .Values.appLabels }}
+{{ toYaml .Values.appLabels | indent 4 }}
+ {{- end }}
+ {{- if $.Values.verticalPodScaling.labels }}
+{{ toYaml $.Values.verticalPodScaling.labels | indent 4 }}
+ {{- end }}
+ {{- if $.Values.verticalPodScaling.annotations }}
+ annotations:
+{{ toYaml $.Values.verticalPodScaling.annotations | indent 4 }}
+ {{- end }}
+spec:
+{{- if .Values.verticalPodScaling.resourcePolicy }}
+ resourcePolicy:
+{{ toYaml .Values.verticalPodScaling.resourcePolicy | indent 4 }}
+{{- end }}
+{{- if .Values.verticalPodScaling.updatePolicy }}
+ updatePolicy:
+{{ toYaml .Values.verticalPodScaling.updatePolicy | indent 4 }}
+{{- end }}
+ targetRef:
+ apiVersion: argoproj.io/v1alpha1
+ kind: Rollout
+ name: {{ include ".Chart.Name .fullname" $ }}
+{{- end }}
\ No newline at end of file
diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/winter-soldier.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/winter-soldier.yaml
new file mode 100644
index 0000000000..5ac2fd8443
--- /dev/null
+++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/templates/winter-soldier.yaml
+{{- if .Values.winterSoldier.enabled }}
+apiVersion: {{ $.Values.winterSoldier.apiVersion }}
+kind: Hibernator
+metadata:
+ {{- if .Values.winterSoldier.name }}
+ name: {{ .Values.winterSoldier.name }}
+ {{- else }}
+ name: {{ template ".Chart.Name .fullname" $ }}-hibernator
+ {{- end }}
+ labels:
+ app: {{ template ".Chart.Name .name" $ }}
+ appId: {{ $.Values.app | quote }}
+ envId: {{ $.Values.env | quote }}
+ chart: {{ template ".Chart.Name .chart" $ }}
+ release: {{ $.Release.Name }}
+{{- if .Values.appLabels }}
+{{ toYaml .Values.appLabels | indent 4 }}
+{{- end }}
+ {{- if .Values.winterSoldier.labels }}
+{{ toYaml .Values.winterSoldier.labels | indent 4 }}
+ {{- 
end }} +{{- if .Values.winterSoldier.annotations }} + annotations: +{{ toYaml .Values.winterSoldier.annotations | indent 4 }} +{{- end }} +spec: + timeRangesWithZone: +{{ toYaml $.Values.winterSoldier.timeRangesWithZone | indent 4}} + selectors: + - inclusions: + - objectSelector: + name: {{ include ".Chart.Name .fullname" $ }} + type: {{ .Values.winterSoldier.type | quote }} + fieldSelector: +{{toYaml $.Values.winterSoldier.fieldSelector | indent 14}} + namespaceSelector: + name: {{ $.Release.Namespace }} + exclusions: [] + action: {{ $.Values.winterSoldier.action }} + {{- if eq .Values.winterSoldier.action "scale" }} + {{- if .Values.winterSoldier.targetReplicas }} + targetReplicas: {{ $.Values.winterSoldier.targetReplicas }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/test_values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/test_values.yaml new file mode 100644 index 0000000000..6fb7624482 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/test_values.yaml @@ -0,0 +1,783 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+
+containerName: "akshat"
+
+rolloutLabels:
+ name: devops-team
+ Company: Devtron
+ Job: DevOps
+
+rolloutAnnotations:
+ name: devops-team
+ Company: Devtron
+ Job: DevOps
+
+containerSpec:
+ lifecycle:
+ enabled: true
+ preStop:
+ exec:
+ command: ["sleep","10"]
+ postStart:
+ httpGet:
+ host: example.com
+ path: /example
+ port: 90
+
+imagePullSecrets:
+ - test1
+ - test2
+replicaCount: 1
+MinReadySeconds: 5
+MaxSurge: 1
+MaxUnavailable: 0
+GracePeriod: 30
+ContainerPort:
+ - name: app
+ port: 8080
+ servicePort: 8000
+ envoyTimeout: 15
+ targetPort: 8080
+ envoyPort: 8799
+ useHTTP2: false
+ protocol: UDP
+ supportStreaming: false
+ idleTimeout: 1800s
+ servicemonitor:
+ enabled: true
+ path: /abc
+ scheme: 'http'
+ interval: 30s
+ scrapeTimeout: 20s
+ metricRelabelings:
+ - sourceLabels: [namespace]
+ regex: '(.*)'
+ replacement: myapp
+ targetLabel: target_namespace
+
+ - name: app1
+ port: 8090
+ targetPort: 1234
+ servicePort: 8080
+ useGRPC: true
+ servicemonitor:
+ enabled: true
+ - name: app2
+ port: 8091
+ servicePort: 8081
+ useGRPC: true
+
+pauseForSecondsBeforeSwitchActive: 30
+waitForSecondsBeforeScalingDown: 30
+autoPromotionSeconds: 30
+
+Spec:
+ Affinity:
+ Key:
+ # Key: kops.k8s.io/instancegroup
+ Values:
+
+
+image:
+ pullPolicy: IfNotPresent
+
+autoscaling:
+ enabled: true
+ MinReplicas: 1
+ MaxReplicas: 2
+ TargetCPUUtilizationPercentage: 90
+ TargetMemoryUtilizationPercentage: 80
+ behavior: {}
+ containerResource:
+ enabled: false
+ TargetCPUUtilizationPercentage: 90
+ TargetMemoryUtilizationPercentage: 80
+ # scaleDown:
+ # stabilizationWindowSeconds: 300
+ # policies:
+ # - type: Percent
+ # value: 100
+ # periodSeconds: 15
+ # scaleUp:
+ # stabilizationWindowSeconds: 0
+ # policies:
+ # - type: Percent
+ # value: 100
+ # periodSeconds: 15
+ # - type: Pods
+ # value: 4
+ # periodSeconds: 15
+ # selectPolicy: Max
+
+ extraMetrics: []
+# - external:
+# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages
+# 
metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +secret: + enabled: false + +service: + type: ClusterIP + # name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + sessionAffinity: + enabled: false + sessionAffinityConfig: {} + +server: + deployment: + image_tag: 1-95af053 + image: "" +deploymentType: "CANARY" + +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: + foo: bar + +EnvVariables: + - name: FLASK_ENV + value: qa + +deployment: + strategy: + blueGreen: + autoPromotionSeconds: 30 + scaleDownDelaySeconds: 30 + previewReplicaCount: 1 + autoPromotionEnabled: false + rolling: + maxSurge: "25%" + maxUnavailable: 1 + canary: + maxSurge: 30% + maxUnavailable: 0 + stableService: api-svc + canaryService: canary-api-svc + steps: + - setCanaryScale: + weight: 25 + - setHeaderRoute: + name: api-canary + match: + - headerName: X-Version + headerValue: + prefix: v2 + - pause: + duration: 2m + - analysis: + templates: + - templateName: success-rate + args: + - name: service-name + value: canary-api-svc.api.svc.cluster.local + - setWeight: 50 + - pause: + duration: 2m + - analysis: + templates: + - templateName: success-rate + args: + - name: service-name + value: canary-api-svc.api.svc.cluster.local + - setWeight: 100 + - pause: {} # final manual approval (optional) + + trafficRouting: + # managedRoutes: + # - name: api-canary + istio: + virtualService: + name: api-vs + routes: + - api-svc + recreate: {} + +pipelineName: ci-axhbc + +analysis: + successfulRunHistoryLimit: 4 + unsuccessfulRunHistoryLimit: 3 + +appLabels: + hello: hii + hey: hello + +analysisTemplate: + enabled: true + templates: + - name: success-rate + annotations: {} + labels: {} + args: + - name: service-name + value: example-svc.default.svc.cluster.local + 
measurementRetention: + - limit: 34 + metricName: test + metrics: + - name: success-rate + interval: 5m + # NOTE: prometheus queries return results in the form of a vector. + # So it is common to access the index 0 of the returned array to obtain the value + successCondition: result[0] >= 0.95 + failureLimit: 3 + provider: + prometheus: + address: http://prometheus.example.com:9090 + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) + +LivenessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + - name: Custom-Header2 + value: xyz + +ReadinessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: true + additionalLabels: {} + namespace: "" + rules: + # These are just examples rules, please adapt them to your needs + - alert: TooMany500s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 5XXs + summary: More than 5% of the all requests did return 5XX, this require your attention + - alert: TooMany400s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 4XXs + summary: More than 5% of the all requests did return 4XX, this require your attention + + +ingress: + enabled: true + className: nginx + 
annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" +# Old Ingress Format +# host: "ingress-example.com" +# path: "/app" + +# New Ingress Format + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /ingress + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-ingress + port: + number: 80 + tls: [] +### Legacy Ingress Format ## +# host: abc.com +# path: "/" +# pathType: "ImplementationSpecific" + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: nginx-internal + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /ingress-internal + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-internal + port: + number: 80 + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +dbMigrationConfig: + enabled: false + +command: + workingDir: /app + enabled: false + value: ["ls"] + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: + - name: config-secret-1 + type: environment + external: false + externalType: AWSSecretsManager + esoSecretData: + secretStore: + aws: + service: SecretsManager + region: us-east-1 + auth: + secretRef: + accessKeyIDSecretRef: + name: awssm-secret + key: access-key + secretAccessKeySecretRef: + name: awssm-secret + key: secret-access-key + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + - name: config-secret-2 + type: environment + external: 
false + externalType: ESO_HashiCorpVault + esoSecretData: + secretStore: + vault: + server: "http://my.vault.server:8200" + path: "secret" + version: "v2" + auth: + tokenSecretRef: + name: vault-token + key: token + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + date: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + - command: ["sh", "-c", "chown -R 1000:1000 logs"] + reuseContainerImage: true + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + privileged: true + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + - name: init-migrate + image: busybox:latest + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + capabilities: + drop: + - ALL + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs +# name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +envoyproxy: + image: envoyproxy/envoy:v1.14.1 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + readinessProbe: + path: / + livenessProbe: + path: / + +podDisruptionBudget: {} + # minAvailable: 1 + # maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +## + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +appMetrics: true +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "test1" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: + kubernetes.io/service-account.name: build-robot +containerSecurityContext: + allowPrivilegeEscalation: false +privileged: true +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" + + + +verticalPodScaling: + enabled: false + +rollbackWindow: {} +schedulingGates: {} +deploymentType: CANARY + + +topologySpreadConstraints: {} + +# istio: +# enable: true +# gateway: +# enabled: true +# labels: {} +# annotations: {} +# selector: +# istiokj: sdkj +# host: "istio.example.com" +# tls: +# enabled: true +# secretName: "asjkj" +# gatewayExtraSpec: {} +# # selector: +# # istio: "istio-1" +# # servers: +# # - port: +# # number: 8080 +# # name: http +# # protocol: HTTP +# # hosts: +# # - "istio.example.com" +# virtualService: +# enabled: true +# labels: {} +# annotations: {} +# gateways: +# - sdkk +# hosts: +# - istio-2.exmap.com +# - ksdj.sdkj.ckj +# http: +# - match: +# - uri: +# prefix: /v1 +# - uri: +# prefix: /v2 +# timeout: 12 +# headers: +# request: +# add: +# x-some-header: "value" +# retries: +# attempts: 2 +# perTryTimeout: 3s +# virtualServiceExtraSpec: +# tcp: +# skldjlkjds: kdjj + +istio: + enable: true + gateway: + enabled: true + labels: {} + annotations: {} + # host: example.com + hosts: + - "example4.com" + tls: + enabled: true + secretName: example-tls-secret + virtualService: + namespace: test + enabled: true + labels: {} + annotations: {} + gateways: [] + hosts: + - example1.local + tcp: + - match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + rewriteUri: / + timeout: 12 + headers: + request: + add: + x-some-header: "value" + retries: + attempts: 2 + perTryTimeout: 3s + route: + - destination: + host: service1 + port: 80 + - route: + - destination: + host: service2 + http: 
+ - match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + rewriteUri: / + timeout: 12 + headers: + request: + add: + x-some-header: "value" + retries: + attempts: 2 + perTryTimeout: 3s + route: + - destination: + host: service1 + port: 80 + - route: + - destination: + host: service2 \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/values.yaml b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/values.yaml new file mode 100644 index 0000000000..ffbd71fd9b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/reference-chart_5-2-0/values.yaml @@ -0,0 +1,671 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + envoyTimeout: 15s + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s +# servicemonitor: +# enabled: true +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace + + - name: app1 + port: 8090 + servicePort: 8080 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +# Optional override for the main container name. If provided, this name will be used +# for the primary container instead of the default chart-derived name. 
+containerName: "" + +Spec: + Affinity: + Key: +# Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +restartPolicy: Always + +analysisTemplate: + enabled: false + templates: [] + # - name: success-rate + # annotations: {} + # labels: {} + # args: + # - name: service-name + # value: example-svc.default.svc.cluster.local + # measurementRetention: + # - limit: 34 + # metricName: test + # metrics: + # - name: success-rate + # interval: 5m + # # NOTE: prometheus queries return results in the form of a vector. + # # So it is common to access the index 0 of the returned array to obtain the value + # successCondition: result[0] >= 0.95 + # failureLimit: 3 + # provider: + # prometheus: + # address: http://prometheus.example.com:9090 + # query: | + # sum(irate( + # istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] + # )) / + # sum(irate( + # istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + # )) + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + # TargetCPUUtilizationPercentage: 90 + # TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enable: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. 
Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +secret: + enabled: false + +service: + enabled: true + type: ClusterIP +# name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: test4 + sessionAffinity: + enabled: false + sessionAffinityConfig: {} + + +server: + deployment: + image_tag: 1-95af053 + image: "" + +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name + +EnvVariables: [] + # - name: FLASK_ENV + # value: qa + +EnvVariablesFromSecretKeys: [] + # - name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromConfigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY + +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + 
+StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" +# rules: +# # These are just examples rules, please adapt them to your needs +# - alert: TooMany500s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 5XXs +# summary: More than 5% of the all requests did return 5XX, this require your attention +# - alert: TooMany400s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 4XXs +# summary: More than 5% of the all requests did return 4XX, this require your attention +# + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: 
chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + selector: {} + host: "" + tls: + enabled: false + secretName: "" + gatewayExtraSpec: {} + virtualService: + enabled: false + labels: {} + virtualServiceExtraSpec: {} + annotations: {} + gateways: [] + hosts: [] + http: [] + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + destinationRule: + enabled: false + labels: {} + annotations: {} + subsets: [] + trafficPolicy: {} + peerAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + mtls: + mode: "" + portLevelMtls: {} + requestAuthentication: + enabled: false + labels: {} + annotations: {} + selector: + enabled: false + jwtRules: [] + authorizationPolicy: + enabled: false + labels: {} + annotations: {} + action: + provider: {} + rules: [] + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] + +hibernator: + enable: false + +dbMigrationConfig: + enabled: false + +command: + enabled: false + value: [] + +args: + enabled: false + value: [] + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: [] +# - name: config-secret-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + # - name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + # # Uncomment below line ONLY IF you want to reuse the container image. + # # This will assign your application's docker image to init container. 
+ # reuseContainerImage: true + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + labels: {} + annotations: {} + timeRangesWithZone: {} + # timeZone: "Asia/Kolkata" + # timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: [] + type: Rollout + # - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +topologySpreadConstraints: [] + +schedulerName: "" + +envoyproxy: + image: docker.io/envoyproxy/envoy:v1.16.0 + lifecycle: {} + configMapName: "" + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +ambassadorMapping: + enabled: false + # labels: + # key1: value1 + # prefix: / + # ambassadorId: 1234 + # hostname: devtron.example.com + # rewrite: /foo/ + # retryPolicy: + # retry_on: "5xx" + # num_retries: 10 + # cors: + # origins: http://foo.example,http://bar.example + # methods: POST, GET, OPTIONS + # headers: Content-Type + # credentials: true + # exposed_headers: 
X-Custom-Header + # max_age: "86400" + # weight: 10 + # method: GET + # extraSpec: + # method_regex: true + # headers: + # x-quote-mode: backend + # x-random-header: devtron + # tls: + # context: httpd-context + # create: true + # secretName: httpd-secret + # hosts: + # - anything.example.info + # - devtron.example.com + # extraSpec: + # min_tls_version: v1.2 + +containerSpec: + lifecycle: + enabled: false + preStop: {} +# exec: +# command: ["sleep","10"] + postStart: {} +# httpGet: +# host: example.com +# path: /example +# port: 90 + +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + +podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 3000 + # fsGroup: 2000 + +containerSecurityContext: {} + # allowPrivilegeEscalation: false +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. 
+ ## + annotations: {} + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +imagePullSecrets: [] + # - test1 + # - test2 + +persistentVolumeClaim: {} + + +affinity: + enabled: false + values: {} + +verticalPodScaling: + enabled: false + +rollbackWindow: {} +schedulingGates: {} + +customPodLabels: {} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/.image_descriptor_template.json b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/.image_descriptor_template.json new file mode 100644 index 0000000000..8a99a95664 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/.image_descriptor_template.json @@ -0,0 +1 @@ +{"server":{"deployment":{"image_tag":"{{.Tag}}","image":"{{.Name}}"}},"pipelineName": "{{.PipelineName}}","releaseVersion":"{{.ReleaseVersion}}","deploymentType": "{{.DeploymentType}}", "app": "{{.App}}", "env": "{{.Env}}", "appMetrics": {{.AppMetrics}}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/Chart.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/Chart.yaml new file mode 100644 index 0000000000..522b880e4c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes (StatefulSet) +name: statefulset-chart_5-2-0 +version: 5.2.0 diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/README.md b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/README.md new file mode 100644 index 0000000000..b05d5d51c7 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/README.md @@ -0,0 +1,945 @@ + +# StatefulSet Chart v5.2.0 + +## 1.
Yaml File - + +### Container Ports + +This defines ports on which application services will be exposed to other services + +```yaml +ContainerPort: + - envoyPort: 8799 + idleTimeout: + name: app + port: 8080 + servicePort: 80 + nodePort: 32056 + supportStreaming: true + useHTTP2: true +``` + +| Key | Description | +| :--- | :--- | +| `envoyPort` | envoy port for the container. | +| `idleTimeout` | the duration of time that a connection is idle before the connection is terminated. | +| `name` | name of the port. | +| `port` | port for the container. | +| `servicePort` | port of the corresponding kubernetes service. | +| `nodePort` | nodeport of the corresponding kubernetes service. | +| `supportStreaming` | Used for high performance protocols like grpc where timeout needs to be disabled. | +| `useHTTP2` | Envoy container can accept HTTP2 requests. | + +### EnvVariables +```yaml +EnvVariables: [] +``` + +### EnvVariablesFromSecretKeys +```yaml +EnvVariablesFromSecretKeys: + - name: ENV_NAME + secretName: SECRET_NAME + keyName: SECRET_KEY + +``` + It is used to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable. + + ### EnvVariablesFromConfigMapKeys +```yaml +EnvVariablesFromConfigMapKeys: + - name: ENV_NAME + configMapName: CONFIG_MAP_NAME + keyName: CONFIG_MAP_KEY + +``` + It is used to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable. + +To set environment variables for the containers that run in the Pod. +### StatefulSetConfig + +| Key | Description | +| :--- | :--- | +| `labels` | set of key-value pairs used to identify the StatefulSet . | +| `annotations` | A map of key-value pairs that are attached to the stateful set as metadata. | +| `serviceName` | The name of the Kubernetes Service that the StatefulSet should create.
| +| `podManagementPolicy` | A policy that determines how Pods are created and deleted by the StatefulSet. In this case, the policy is set to "Parallel", which means that all Pods are created at once. | +| `revisionHistoryLimit` | The number of revisions that should be stored for each replica of the StatefulSet. | +| `updateStrategy` | The update strategy used by the StatefulSet when rolling out changes. | +| `mountPath` | The path where the volume should be mounted in the container. | + +volumeClaimTemplates: An array of volume claim templates that are used to create persistent volumes for the StatefulSet. Each volume claim template specifies the storage class, access mode, storage size, and other details of the persistent volume. + + +| Key | Description | +| :--- | :--- | +| `apiVersion` | The API version of the PVC . | +| `kind` | The type of object that the PVC is. | +| `metadata` | Metadata that is attached to the resource being created. | +| `labels` | A set of key-value pairs used to label the object for identification and selection. | +| `spec` | The specification of the object, which defines its desired state and behavior.| +| `accessModes` | A list of access modes for the PersistentVolumeClaim, such as "ReadWriteOnce" or "ReadWriteMany". | +| `dataSource` | A data source used to populate the PersistentVolumeClaim, such as a Snapshot or a StorageClass. | +| `kind`| specifies the kind of the snapshot, in this case Snapshot.| +| `apiGroup`| specifies the API group of the snapshot API, in this case snapshot.storage.k8s.io.| +| `name`| specifies the name of the snapshot, in this case my-snapshot.| +| `dataSourceRef` | A reference to a data source used to create the persistent volume. In this case, it's a secret. | +| `updateStrategy` | The update strategy used by the StatefulSet when rolling out changes. | +| `resources` | The resource requests and limits for the PersistentVolumeClaim, which define the minimum and maximum amount of storage it can use. 
| +| `requests` | The amount of storage requested by the PersistentVolumeClaim. | +| `limits` | The maximum amount of storage that the PersistentVolumeClaim can use. | +| `storageClassName` | The name of the storage class to use for the persistent volume. | +| `selector` | The selector used to match a persistent volume to a persistent volume claim. | +| `matchLabels` | a map of key-value pairs to match the labels of the corresponding PersistentVolume.| +| `matchExpressions` |A set of requirements that the selected object must meet to be considered a match. | +| `key` | The key of the label or annotation to match.| +| `operator` | The operator used to compare the key-value pairs (in this case, "In" specifies a set membership test).| +| `values` | A list of values that the selected object's label or annotation must match.| +| `volumeMode` | The mode of the volume, either "Filesystem" or "Block". | +| `volumeName` | The name of the PersistentVolume that is created for the PersistentVolumeClaim. | +These are all the configuration settings for the StatefulSet. 
+```yaml +statefulSetConfig: + labels: + app: my-statefulset + environment: production + annotations: + example.com/version: "1.0" + serviceName: "my-statefulset-service" + podManagementPolicy: "Parallel" + revisionHistoryLimit: 5 + mountPath: "/data" + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + labels: + app: my-statefulset + spec: + accessModes: + - ReadWriteOnce + dataSource: + kind: Snapshot + apiGroup: snapshot.storage.k8s.io + name: my-snapshot + resources: + requests: + storage: 5Gi + limits: + storage: 10Gi + storageClassName: my-storage-class + selector: + matchLabels: + app: my-statefulset + volumeMode: Filesystem + volumeName: my-pv + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: pvc-logs + labels: + app: myapp + spec: + accessModes: + - ReadWriteMany + dataSourceRef: + kind: Secret + apiGroup: v1 + name: my-secret + resources: + requests: + storage: 5Gi + storageClassName: my-storage-class + selector: + matchExpressions: + - {key: environment, operator: In, values: [production]} + volumeMode: Block + volumeName: my-pv + +``` + + +### Liveness Probe + +If this check fails, kubernetes restarts the pod. This should return error code in case of non-recoverable error. + +```yaml +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the liveness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for liveliness. | +| `periodSeconds` | It defines the time to check a given container for liveness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfil the liveness probe. 
| +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as live. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + + +### MaxUnavailable + +```yaml + MaxUnavailable: 0 +``` +The maximum number of pods that can be unavailable during the update process. The value of "MaxUnavailable: " can be an absolute number or percentage of the replicas count. The default value of "MaxUnavailable: " is 25%. + +### MaxSurge + +```yaml +MaxSurge: 1 +``` +The maximum number of pods that can be created over the desired number of pods. For "MaxSurge: " also, the value can be an absolute number or percentage of the replicas count. +The default value of "MaxSurge: " is 25%. + +### Min Ready Seconds + +```yaml +MinReadySeconds: 60 +``` +This specifies the minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available. This defaults to 0 (the Pod will be considered available as soon as it is ready). + +### Readiness Probe + +If this check fails, kubernetes stops sending traffic to the application. This should return error code in case of errors which can be recovered from if traffic is stopped. 
+ +```yaml +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + scheme: "" + tcp: true +``` + +| Key | Description | +| :--- | :--- | +| `Path` | It define the path where the readiness needs to be checked. | +| `initialDelaySeconds` | It defines the time to wait before a given container is checked for readiness. | +| `periodSeconds` | It defines the time to check a given container for readiness. | +| `successThreshold` | It defines the number of successes required before a given container is said to fulfill the readiness probe. | +| `timeoutSeconds` | It defines the time for checking timeout. | +| `failureThreshold` | It defines the maximum number of failures that are acceptable before a given container is not considered as ready. | +| `httpHeaders` | Custom headers to set in the request. HTTP allows repeated headers,You can override the default headers by defining .httpHeaders for the probe. | +| `scheme` | Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP. +| `tcp` | The kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy. | + +### Ambassador Mappings + +You can create ambassador mappings to access your applications from outside the cluster. At its core a Mapping resource maps a resource to a service. + +```yaml +ambassadorMapping: + ambassadorId: "prod-emissary" + cors: {} + enabled: true + hostname: devtron.example.com + labels: {} + prefix: / + retryPolicy: {} + rewrite: "" + tls: + context: "devtron-tls-context" + create: false + hosts: [] + secretName: "" +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable ambassador mapping else set false.| +| `ambassadorId` | used to specify id for specific ambassador mappings controller. 
| +| `cors` | used to specify cors policy to access host for this mapping. | +| `weight` | used to specify weight for canary ambassador mappings. | +| `hostname` | used to specify hostname for ambassador mapping. | +| `prefix` | used to specify path for ambassador mapping. | +| `labels` | used to provide custom labels for ambassador mapping. | +| `retryPolicy` | used to specify retry policy for ambassador mapping. | +| `corsPolicy` | Provide cors headers on flagger resource. | +| `rewrite` | used to specify whether to redirect the path of this mapping and where. | +| `tls` | used to create or define ambassador TLSContext resource. | +| `extraSpec` | used to provide extra spec values which not present in deployment template for ambassador resource. | + +### Autoscaling + +This is connected to HPA and controls scaling up and down in response to request load. + +```yaml +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + extraMetrics: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Set true to enable autoscaling else set false.| +| `MinReplicas` | Minimum number of replicas allowed for scaling. | +| `MaxReplicas` | Maximum number of replicas allowed for scaling. | +| `TargetCPUUtilizationPercentage` | The target CPU utilization that is expected for a container. | +| `TargetMemoryUtilizationPercentage` | The target memory utilization that is expected for a container. | +| `extraMetrics` | Used to give external metrics for autoscaling. | + +### Fullname Override + +```yaml +fullnameOverride: app-name +``` +`fullnameOverride` replaces the release fullname created by default by devtron, which is used to construct Kubernetes object names. By default, devtron uses {app-name}-{environment-name} as release fullname. 
+ +### Image + +```yaml +image: + pullPolicy: IfNotPresent +``` + +Image is used to access images in kubernetes, pullpolicy is used to define the instances calling the image, here the image is pulled when the image is not present,it can also be set as "Always". + +### imagePullSecrets + +`imagePullSecrets` contains the docker credentials that are used for accessing a registry. + +```yaml +imagePullSecrets: + - regcred +``` +regcred is the secret that contains the docker credentials that are used for accessing a registry. Devtron will not create this secret automatically, you'll have to create this secret using dt-secrets helm chart in the App store or create one using kubectl. You can follow this documentation Pull an Image from a Private Registry [https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) . + +### Ingress + +This allows public access to the url, please ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + className: nginx + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` +Legacy deployment-template ingress format + +```yaml +ingress: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + path: "" + host: "" + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + +### Ingress Internal + +This allows private access to the url, please 
ensure you are using right nginx annotation for nginx class, its default value is nginx + +```yaml +ingressInternal: + enabled: false + # For K8s 1.19 and above use ingressClassName instead of annotation kubernetes.io/ingress.class: + ingressClassName: nginx-internal + annotations: {} + hosts: + - host: example1.com + paths: + - /example + - host: example2.com + paths: + - /example2 + - /example2/healthz + tls: [] +``` + +| Key | Description | +| :--- | :--- | +| `enabled` | Enable or disable ingress | +| `annotations` | To configure some options depending on the Ingress controller | +| `path` | Path name | +| `host` | Host name | +| `tls` | It contains security details | + + +### additionalBackends + +This defines additional backend path in the ingress . + +```yaml + hosts: + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 +``` + + +### Init Containers +```yaml +initContainers: + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate + + - name: nginx + image: nginx:1.14.2 + securityContext: + privileged: true + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] +``` +Specialized containers that run before app containers in a Pod. Init containers can contain utilities or setup scripts not present in an app image. One can use base image inside initContainer by setting the reuseContainerImage flag to `true`. + +### Istio + +Istio is a service mesh which simplifies observability, traffic management, security and much more with it's virtual services and gateways. 
+ +```yaml +istio: + enable: true + gateway: + annotations: {} + enabled: false + host: example.com + labels: {} + tls: + enabled: false + secretName: example-tls-secret + virtualService: + annotations: {} + enabled: false + gateways: [] + hosts: [] + http: + - corsPolicy: + allowCredentials: false + allowHeaders: + - x-some-header + allowMethods: + - GET + allowOrigin: + - example.com + maxAge: 24h + headers: + request: + add: + x-some-header: value + match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + retries: + attempts: 2 + perTryTimeout: 3s + rewriteUri: / + route: + - destination: + host: service1 + port: 80 + timeout: 12s + - route: + - destination: + host: service2 + labels: {} +``` + +### Pause For Seconds Before Switch Active +```yaml +pauseForSecondsBeforeSwitchActive: 30 +``` +To wait for given period of time before switch active the container. + +### Resources + +These define minimum and maximum RAM and CPU available to the application. + +```yaml +resources: + limits: + cpu: "1" + memory: "200Mi" + requests: + cpu: "0.10" + memory: "100Mi" +``` + +Resources are required to set CPU and memory usage. + +#### Limits + +Limits make sure a container never goes above a certain value. The container is only allowed to go up to the limit, and then it is restricted. + +#### Requests + +Requests are what the container is guaranteed to get. + +### Service + +This defines annotations and the type of service, optionally can define name also. + +```yaml + service: + type: ClusterIP + annotations: {} +``` + +### Volumes + +```yaml +volumes: + - name: log-volume + emptyDir: {} + - name: logpv + persistentVolumeClaim: + claimName: logpvc +``` + +It is required when some values need to be read from or written to an external disk. + +### Volume Mounts + +```yaml +volumeMounts: + - mountPath: /var/log/nginx/ + name: log-volume + - mountPath: /mnt/logs + name: logpvc + subPath: employee +``` + +It is used to provide mounts to the volume. 
+ +### Affinity and anti-affinity + +```yaml +Spec: + Affinity: + Key: + Values: +``` + +Spec is used to define the desire state of the given container. + +Node Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node. + +Inter-pod affinity allow you to constrain which nodes your pod is eligible to be scheduled based on labels on pods. + +#### Key + +Key part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +#### Values + +Value part of the label for node selection, this should be same as that on node. Please confirm with devops team. + +### Tolerations + +```yaml +tolerations: + - key: "key" + operator: "Equal" + value: "value" + effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +``` + +Taints are the opposite, they allow a node to repel a set of pods. + +A given pod can access the given node and avoid the given taint only if the given pod satisfies a given taint. + +Taints and tolerations are a mechanism which work together that allows you to ensure that pods are not placed on inappropriate nodes. Taints are added to nodes, while tolerations are defined in the pod specification. When you taint a node, it will repel all the pods except those that have a toleration for that taint. A node can have one or many taints associated with it. + +### Arguments + +```yaml +args: + enabled: false + value: [] +``` + +This is used to give arguments to command. + +### Command + +```yaml +command: + enabled: false + value: [] +``` + +It contains the commands for the server. + +| Key | Description | +| :--- | :--- | +| `enabled` | To enable or disable the command. | +| `value` | It contains the commands. | + + +### Containers +Containers section can be used to run side-car containers along with your main container within same pod. Containers running within same pod can share volumes and IP Address and can address each other @localhost. 
We can use base image inside container by setting the reuseContainerImage flag to `true`. + +```yaml + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + command: ["/usr/local/bin/nginx"] + args: ["-g", "daemon off;"] + - reuseContainerImage: true + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + volumeMounts: + - mountPath: /etc/ls-oms + name: ls-oms-cm-vol + command: + - flyway + - -configFiles=/etc/ls-oms/flyway.conf + - migrate +``` + +### Prometheus + +```yaml + prometheus: + release: monitoring +``` + +It is a kubernetes monitoring tool and the name of the file to be monitored as monitoring in the given case.It describes the state of the prometheus. + +### rawYaml + +```yaml +rawYaml: + - apiVersion: v1 + kind: Service + metadata: + name: my-service + spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + type: ClusterIP +``` +Accepts an array of Kubernetes objects. You can specify any kubernetes yaml here and it will be applied when your app gets deployed. + +### Grace Period + +```yaml +GracePeriod: 30 +``` +Kubernetes waits for the specified time called the termination grace period before terminating the pods. By default, this is 30 seconds. If your pod usually takes longer than 30 seconds to shut down gracefully, make sure you increase the `GracePeriod`. + +A Graceful termination in practice means that your application needs to handle the SIGTERM message and begin shutting down when it receives it. This means saving all data that needs to be saved, closing down network connections, finishing any work that is left, and other similar tasks. + +There are many reasons why Kubernetes might terminate a perfectly healthy container. If you update your deployment with a rolling update, Kubernetes slowly terminates old pods while spinning up new ones. If you drain a node, Kubernetes terminates all pods on that node. 
If a node runs out of resources, Kubernetes terminates pods to free those resources. It’s important that your application handle termination gracefully so that there is minimal impact on the end user and the time-to-recovery is as fast as possible. + + +### Server + +```yaml +server: + deployment: + image_tag: 1-95a53 + image: "" +``` + +It is used for providing server configurations. + +#### Deployment + +It gives the details for deployment. + +| Key | Description | +| :--- | :--- | +| `image_tag` | It is the image tag | +| `image` | It is the URL of the image | + +### Service Monitor + +```yaml +servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace +``` + +It gives the set of targets to be monitored. + +### Db Migration Config + +```yaml +dbMigrationConfig: + enabled: false +``` + +It is used to configure database migration. + + +### KEDA Autoscaling +[KEDA](https://keda.sh) is a Kubernetes-based Event Driven Autoscaler. With KEDA, you can drive the scaling of any container in Kubernetes based on the number of events needing to be processed. KEDA can be installed into any Kubernetes cluster and can work alongside standard Kubernetes components like the Horizontal Pod Autoscaler(HPA). 
+ +Example for autosccaling with KEDA using Prometheus metrics is given below: +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: + restoreToOriginalReplicaCount: true + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: prometheus + metadata: + serverAddress: http://:9090 + metricName: http_request_total + query: envoy_cluster_upstream_rq{appId="300", cluster_name="300-0", container="envoy",} + threshold: "50" + triggerAuthentication: + enabled: false + name: + spec: {} + authenticationRef: {} +``` +Example for autosccaling with KEDA based on kafka is given below : +```yaml +kedaAutoscaling: + enabled: true + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 + pollingInterval: 30 + advanced: {} + triggers: + - type: kafka + metadata: + bootstrapServers: b-2.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-3.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092,b-1.kafka-msk-dev.example.c2.kafka.ap-southeast-1.amazonaws.com:9092 + topic: Orders-Service-ESP.info + lagThreshold: "100" + consumerGroup: oders-remove-delivered-packages + allowIdleConsumers: "true" + triggerAuthentication: + enabled: true + name: keda-trigger-auth-kafka-credential + spec: + secretTargetRef: + - parameter: sasl + name: keda-kafka-secrets + key: sasl + - parameter: username + name: keda-kafka-secrets + key: username + authenticationRef: + name: keda-trigger-auth-kafka-credential +``` +### Winter-Soldier +Winter Soldier can be used to +- cleans up (delete) Kubernetes resources +- reduce workload pods to 0 + +**_NOTE:_** After deploying this we can create the Hibernator object and provide the custom configuration by which workloads going to delete, sleep and many more. 
For more information, check [the main repo](https://github.com/devtron-labs/winter-soldier)
+
+Given below are the template values you can give in winter-soldier:
+```yaml
+winterSoldier:
+  enabled: false
+  apiVersion: pincher.devtron.ai/v1alpha1
+  action: sleep
+  timeRangesWithZone:
+    timeZone: "Asia/Kolkata"
+    timeRanges: []
+  targetReplicas: []
+  fieldSelector: []
+```
+Here,
+| Key | values | Description |
+| :--- | :--- | :--- |
+| `enabled` | `false`,`true` | decide the enabling factor |
+| `apiVersion` | `pincher.devtron.ai/v1beta1`, `pincher.devtron.ai/v1alpha1` | specific api version |
+| `action` | `sleep`,`delete`, `scale` | This specifies the action to perform. |
+| `timeRangesWithZone`:`timeZone` | eg:- `"Asia/Kolkata"`,`"US/Pacific"` | It is used to specify the timeZone. (It uses the standard format. please refer [this](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)) |
+| `timeRangesWithZone`:`timeRanges` | array of [ `timeFrom`, `timeTo`, `weekdayFrom`, `weekdayTo`] | It is used to define the time period/range in which to perform the specified action. You can have multiple timeRanges.
These settings will take `action` on Sat and Sun from 00:00 to 23:59:59, |
+| `targetReplicas` | `[n]` : n - number of replicas to scale. | This is a mandatory field when the `action` is `scale`
Default value is `[]`. |
+| `fieldSelector` | `- AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) ` | This value takes a list of methods to select the resources on which we perform the specified `action`. |
+
+
+here is an example,
+```yaml
+winterSoldier:
+  apiVersion: pincher.devtron.ai/v1alpha1
+  enabled: true
+  annotations: {}
+  labels: {}
+  timeRangesWithZone:
+    timeZone: "Asia/Kolkata"
+    timeRanges:
+      - timeFrom: 00:00
+        timeTo: 23:59:59
+        weekdayFrom: Sat
+        weekdayTo: Sun
+      - timeFrom: 00:00
+        timeTo: 08:00
+        weekdayFrom: Mon
+        weekdayTo: Fri
+      - timeFrom: 20:00
+        timeTo: 23:59:59
+        weekdayFrom: Mon
+        weekdayTo: Fri
+  action: scale
+  targetReplicas: [1,1,1]
+  fieldSelector:
+    - AfterTime(AddTime( ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '10h'), Now())
+```
+Above settings will take action on `Sat` and `Sun` from 00:00 to 23:59:59, and on `Mon`-`Fri` from 00:00 to 08:00 and 20:00 to 23:59:59. If `action:sleep` then runs hibernate at timeFrom and unhibernate at `timeTo`. If `action: delete` then it will delete workloads at `timeFrom` and `timeTo`. Here the `action:scale` thus it scales the number of resource replicas to `targetReplicas: [1,1,1]`. Here each element of the `targetReplicas` array is mapped with the corresponding elements of the array `timeRangesWithZone/timeRanges`. Thus make sure the length of both arrays is equal, otherwise the changes cannot be observed.
+
+The above example will select the application objects which have been created 10 hours ago across all namespaces excluding application's namespace. Winter soldier exposes the following functions to handle time, cpu and memory.
+
+- ParseTime - This function can be used to parse time. For eg to parse creationTimestamp use ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z')
+- AddTime - This can be used to add time.
For eg AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '-10h') ll add 10h to the time. Use d for day, h for hour, m for minutes and s for seconds. Use negative number to get earlier time. +- Now - This can be used to get current time. +- CpuToNumber - This can be used to compare CPU. For eg any({{spec.containers.#.resources.requests}}, { MemoryToNumber(.memory) < MemoryToNumber('60Mi')}) will check if any resource.requests is less than 60Mi. + + + +### Security Context +A security context defines privilege and access control settings for a Pod or Container. + +To add a security context for main container: +```yaml +containerSecurityContext: + allowPrivilegeEscalation: false +``` + +To add a security context on pod level: +```yaml +podSecurityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 +``` + +### Topology Spread Constraints +You can use topology spread constraints to control how Pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. + +```yaml +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: {} +``` + +### Deployment Metrics + +It gives the realtime metrics of the deployed applications + +| Key | Description | +| :--- | :--- | +| `Deployment Frequency` | It shows how often this app is deployed to production | +| `Change Failure Rate` | It shows how often the respective pipeline fails. | +| `Mean Lead Time` | It shows the average time taken to deliver a change to production. | +| `Mean Time to Recovery` | It shows the average time taken to fix a failed pipeline. | + +## 2. Show application metrics + +If you want to see application metrics like different HTTP status codes metrics, application throughput, latency, response time. 
Enable the Application metrics from below the deployment template Save button. After enabling it, you should be able to see all metrics on App detail page. By default it remains disabled. +![](../../../.gitbook/assets/deployment_application_metrics%20%282%29.png) + +Once all the Deployment template configurations are done, click on `Save` to save your deployment configuration. Now you are ready to create [Workflow](workflow/) to do CI/CD. + +### Helm Chart Json Schema + +Helm Chart [json schema](../../../scripts/devtron-reference-helm-charts/reference-chart_4-11-0/schema.json) is used to validate the deployment template values. + +### Other Validations in Json Schema + +The values of CPU and Memory in limits must be greater than or equal to in requests respectively. Similarly, In case of envoyproxy, the values of limits are greater than or equal to requests as mentioned below. +``` +resources.limits.cpu >= resources.requests.cpu +resources.limits.memory >= resources.requests.memory +envoyproxy.resources.limits.cpu >= envoyproxy.resources.requests.cpu +envoyproxy.resources.limits.memory >= envoyproxy.resources.requests.memory +``` diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/app-values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/app-values.yaml new file mode 100644 index 0000000000..ffbe895ffd --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/app-values.yaml @@ -0,0 +1,398 @@ +# Mandatory configs +containerSpec: + lifecycle: + enabled: false + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 +appLabels: {} +EnvVariablesFromSecretKeys: [] + # - name: ENV_NAME + # secretName: SECRET_NAME + # keyName: SECRET_KEY + +EnvVariablesFromCongigMapKeys: [] + # - name: ENV_NAME + # configMapName: CONFIG_MAP_NAME + # keyName: CONFIG_MAP_KEY +winterSoldier: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + annotation: 
{} + labels: {} + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: [] + # - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + + +statefulSetConfig: + labels: {} + annotations: {} + volumeClaimTemplates: [] + +service: + type: ClusterIP + enabled: false + #name: "service-1234567890" + loadBalancerSourceRanges: [] + # loadBalancerSourceRanges: + # - 1.2.3.4/32 + # - 1.2.5.6/23 + annotations: {} + # test1: test2 + # test3: test4 +replicaCount: 1 +MinReadySeconds: 60 +GracePeriod: 30 +image: + pullPolicy: IfNotPresent + +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + # servicemonitor: + # enabled: false + # path: /abc + # scheme: 'http' + # interval: 30s + # scrapeTimeout: 20s + # metricRelabelings: + # - sourceLabels: [namespace] + # regex: '(.*)' + # replacement: myapp + # targetLabel: target_namespace +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +# Optional configs +LivenessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +ReadinessProbe: + Path: "" + port: 8080 + scheme: "" + httpHeaders: [] +# - name: Custom-Header +# value: abc + tcp: false + command: [] + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + +ingress: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/force-ssl-redirect: 'false' +# nginx.ingress.kubernetes.io/ssl-redirect: 'false' +# kubernetes.io/ingress.class: nginx +# nginx.ingress.kubernetes.io/rewrite-target: /$2 +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +command: + workingDir: {} + enabled: false + value: [] + +args: + enabled: false + value: + - /bin/sh + - -c + - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 + +#For 
adding custom labels to pods + +podLabels: {} +# customKey: customValue +podAnnotations: {} +# customKey: customValue + +rawYaml: [] + +topologySpreadConstraints: [] + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +containers: [] + ## Additional containers to run along with application pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + +volumeMounts: [] +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + +dbMigrationConfig: + enabled: false + +tolerations: [] + +podSecurityContext: {} + +containerSecurityContext: {} + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: +affinity: + enabled: false + values: {} + +ambassadorMapping: + enabled: false + labels: {} + prefix: / + ambassadorId: "" + hostname: devtron.example.com + rewrite: "" + retryPolicy: {} + cors: {} + tls: + context: "" + create: false + secretName: "" + hosts: [] + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# 
selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + minReplicaCount: 1 + maxReplicaCount: 2 + advanced: {} + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +prometheus: + release: monitoring + +server: + deployment: + image_tag: 1-95af053 + image: "" + +servicemonitor: + additionalLabels: {} + +envoyproxy: + image: quay.io/devtron/envoy:v1.16.0 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +istio: + enable: false + gateway: + enabled: false + labels: {} + annotations: {} + host: "example.com" + tls: + enabled: false + secretName: secret-name + virtualService: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: + - match: + - uri: + prefix: /v1 + - uri: + prefix: /v2 + rewriteUri: / + timeout: 12s + headers: {} + corsPolicy: {} + retries: + attempts: 2 + perTryTimeout: 3s + route: + - destination: + host: service1 + port: 80 + - route: + - destination: + host: service2 + +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. 
+ ## + annotations: {} + +imagePullSecrets: [] + # - test1 + # - test2 +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +# - ip: "10.1.2.3" +# hostnames: +# - "foo.remote" +# - "bar.remote" + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/env-values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/env-values.yaml new file mode 100644 index 0000000000..5cd07c0269 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/env-values.yaml @@ -0,0 +1,66 @@ +replicaCount: 1 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 + +Spec: + Affinity: + key: "" + Values: nodes + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# +secret: + enabled: false + data: {} +# my_own_secret: S3ViZXJuZXRlcyBXb3Jrcw== + +EnvVariables: [] +# - name: FLASK_ENV +# value: qa + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: "0.05" + memory: 50Mi + requests: + cpu: "0.01" + memory: 10Mi + + diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/pipeline-values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/pipeline-values.yaml new file mode 100644 index 0000000000..ec557c8c8c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/pipeline-values.yaml @@ -0,0 +1,5 @@ +deployment: + strategy: + rollingUpdate: + partition: 0 + onDelete: {} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/release-values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/release-values.yaml new file mode 100644 index 0000000000..48eb3f482c --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/release-values.yaml @@ -0,0 +1,14 @@ +server: + deployment: + image_tag: IMAGE_TAG + image: IMAGE_REPO + enabled: false +dbMigrationConfig: + enabled: false + +pauseForSecondsBeforeSwitchActive: 0 +waitForSecondsBeforeScalingDown: 0 +autoPromotionSeconds: 30 + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/schema.json b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/schema.json new file mode 100644 index 0000000000..672df0a42e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/schema.json @@ -0,0 +1,1530 @@ + +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "containerExtraSpecs":{ + "type": "object", + "title": "containerExtraSpecs", + "description": "Define container extra specs here" + }, + "ContainerPort": { + "type": "array", + "description": "defines ports on which application services will be exposed to other services", + 
"title": "Container Port", + "items": { + "type": "object", + "properties": { + "envoyPort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "envoy port for the container", + "title": "Envoy Port" + }, + "idleTimeout": { + "type": "string", + "description": "duration of time for which a connection is idle before the connection is terminated", + "title": "Idle Timeout" + }, + "name": { + "type": "string", + "description": "name of the port", + "title": "Name" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Port", + "title": "port for the container" + }, + "servicePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port of the corresponding kubernetes service", + "title": "Service Port" + }, + "nodePort": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "nodeport of the corresponding kubernetes service", + "title": "Node Port" + }, + "supportStreaming": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "field to enable/disable timeout for high performance protocols like grpc", + "title": "Support Streaming" + }, + "useHTTP2": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": " field for setting if envoy container can accept(or not) HTTP2 requests", + "title": "Use HTTP2" + } + } + } + }, + "statefulSetConfig":{ + "type": "object", + "description": "used by the statefulset reference chart", + "title": "StatefulSetConfig", + "properties": { + "labels": { + "type": "object", + "description": "used to provide custom labels for statefulset", + "title": "Labels" + }, + "annotations": { + "type": "object", + "description": "used to provide custom annotation for statefulset", + "title": "Annotations" + }, + "serviceName":{ + 
"type": "string", + "title": "ServiceName", + "description": "name of the service-headless you want to connect with your statefulset " + }, + "mountPath":{ + "type":"string", + "title": "MountPath", + "description": "used to provide mounts to the volume" + }, + "revisionHistoryLimit":{ + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "title": "RevisionHistoryLimit", + "description": "revisionHistoryLimit is the maximum number of revisions that will bemaintained in the StatefulSet's revision history." + }, + "volumeClaimTemplates":{ + "title": "VolumeClaimTemplates", + "type":"array", + "description": "The volumeClaimTemplates will provide stable storage using PersistentVolumes provisioned by a PersistentVolume Provisioner", + "items": [ + { "type": "object", + "properties": { + "apiVersion":{ + "type": "string", + "description": "(optional): The API version to use for the volume claim template. If not specified, the default is v1.", + "title": "ApiVersion" + }, + "kind":{ + "type": "string", + "title": "Kind", + "description": "(optional): Kind is a string value representing the REST resource this object represents." + }, + "metadata":{ + "type":"object", + "title": "MetaData", + "description": " Standard object's metadata.", + "items": [ + { + "type":"object", + "properties": { + "labels": { + "type": "object", + "description": "used to provide custom labels for statefulset volumes", + "title": "Labels" + }, + "annotations": { + "type": "object", + "description": "used to provide custom annotation for statefulset volumes", + "title": "Annotations" + }, + "name": { + "type": "string", + "description": "name of volume", + "title": "Name" + }, + "namespace":{ + "type":"string", + "description": "namespaces provides a mechanism for isolating groups of resources within a single cluster." 
+ ,"title": "NameSpace" + + } + } + } + ] + }, + "spec": { + "type": "object", + "description": "used to define the desire state of the given volume", + "title": "Spec", + "items": [{ + "type":"object", + "properties": { + "accessModes":{ + "type":"array", + "description": "accessModes contains the desired access modes the volume should have such as ReadWriteOnce or ReadWriteMany", + "title": "AccessModes" + }, + "dataSource":{ + "type":"object", + "description": " (optional): A reference to the data source for the volume claim template.", + "title": "DataSource", + "items": { + "type":"object", + "properties": { + "apiGroup":{ + "type":"string", + "description": "The API group of the data source", + "title": "ApiGroup" + }, + "kind":{ + "type":"string", + "description": "The kind of the data source", + "title": "Kind" + }, + "name":{ + "type":"string", + "description": "The name of the data source", + "title": "Name" + } + } + } + + }, + "dataSourceRef":{ + "type":"object", + "title": "DataSourceRef", + "description": "(optional): A reference to the data source for the volume claim template.", + "items": { + "type":"object", + "properties": { + "apiGroup":{ + "type":"string", + "description": "The API group of the data source reference", + "title": "ApiGroup" + }, + "kind":{ + "type":"string", + "description": "The kind of the data source reference", + "title": "Kind" + }, + "name":{ + "type":"string", + "description": "The name of the data source reference", + "title": "Name" + } + } + } + }, + "resources":{ + "type":"object", + "title":"Resources", + "description": "The resource requirements for the volume claim", + "items": [ + { + "type":"object", + "properties": { + "claims":{ + "title": "Claims", + "type":"object","description": "(optional): The name of the claim resource for the volume claim.", + "items": { + "properties": { + "name":{ + "type":"string", + "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this 
field is used. It makes that resource available inside a container." + } + } + } + + }, + "requests":{ + "type":"object", + "title": "Requests", + "description": "The resource requests for the volume claim.", + "items": { + "type":"object", + "properties": { + "storage":{ + "title":"Storage", + "type":"string", + "description": "The amount of storage requested for the volume claim" + } + } + } + }, + "limits":{ + "type":"object", + "title": "Limits", + "description": " (optional): The resource limits for the volume claim.", + "items": { + "type":"object", + "properties": { + "storage":{ + "title":"Storage", + "type":"string", + "description": "The amount of storage requested for the volume claim" + } + } + } + } + } + } + ] + }, + "storageClassName":{ + "type":"string", + "title": "StorageClassName", + "description": " (optional): The name of the storage class to use for the volume claim." + }, + "selector":{ + "title": "Selector", + "type":"object", + "description": "(optional): A selector to match a PersistentVolume to the PersistentVolumeClaim.", + "items": { + "properties": { + "matchExpressions":{ + "type":"array", + "title": "MatchExpression", + "description": "to define more complex label selectors that match labels based on certain conditions." + }, + "matchLabels":{ + "type":"object", + "title": "Match Labels" + ,"description": "selector to specify the labels that will be used to select which Pods the StatefulSet manages." + } + } + + } + }, + "volumeMode":{ + "title": "VolumeModes", + "type":"string", + "description": " (optional): The mode for the volume claim, such as Filesystem or Block." + }, + "volumeName":{ + "title": "VolumeName", + "type":"string", + "description": "(optional): The name of the PersistentVolume to use for the volume claim." 
+ } + } + }] + } + }} + ] + } + } + }, + "EnvVariables": { + "type": "array", + "items": {}, + "description": "contains environment variables needed by the containers", + "title": "Environment Variables" + }, + "EnvVariablesFromFieldPath":{ + "type": "array", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs", + "title": "EnvVariablesFromFieldPath", + "items": [ + { + "type": "object", + "properties": { + "name":{ + "type": "string", + "title": "name", + "description": "Env variable name to be" + }, + "fieldPath":{ + "type": "string", + "title": "fieldPath", + "description": "Path of the field to select in the specified API version" + } + } + } + ] + }, "EnvVariablesFromSecretKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Secret name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromSecretKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "secretName": { + "type": "string", + "title": "secretName", + "description": "Name of Secret from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." 
+ } + } + } + ] + }, + "EnvVariablesFromCongigMapKeys": { + "type": "array", + "description": "Selects a field of the deployment: It is use to get the name of Environment Variable name, Config Map name and the Key name from which we are using the value in that corresponding Environment Variable.", + "title": "EnvVariablesFromCongigMapKeys", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name", + "description": "Env variable name to be used." + }, + "configMapName": { + "type": "string", + "title": "configMapName", + "description": "Name of configMap from which we are taking the value." + }, + "keyName": { + "type": "string", + "title": "keyName", + "description": "Name of The Key Where the value is mapped with." + } + } + } + ] + }, + "GracePeriod": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "time for which Kubernetes waits before terminating the pods", + "title": "Grace Period" + }, + "LivenessProbe": { + "type": "object", + "description": "used by the kubelet to know when to restart a container", + "title": "Liveness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the liveness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as live", + "title": "Failure Threshold" + }, + "httpHeaders": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + }, + "initialDelaySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": 
"^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for liveness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for liveness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the liveness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "MaxSurge": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be created over the desired number of pods", + "title": "Maximum Surge" + }, + "MaxUnavailable": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "maximum number of pods that can be unavailable during the update process", + "title": "Maximum Unavailable" + }, + "MinReadySeconds": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "minimum number of seconds for which a newly created Pod should be ready without any of its containers crashing, for it to be considered available", + "title": "Minimum Ready Seconds" + }, + "ReadinessProbe": { + "type": "object", + "description": "kubelet uses readiness probes to know when a container is ready to start accepting traffic", + "title": "Readiness Probe", + "properties": { + "Path": { + "type": "string", + "description": "defines the path where the readiness needs to be checked", + "title": "Path" + }, + "command": { + "type": "array", + "items": {}, + "description": "commands executed to perform a probe", + "title": "Command" + }, + "failureThreshold": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the maximum number of failures that are acceptable before a given container is not considered as ready", + "title": "Failure Threshold" + }, + "httpHeader": { + "type": "array", + "items": {}, + "description": "used to override the default headers by defining .httpHeaders for the probe", + "title": "HTTP headers" + 
}, + "initialDelaySeconds": { + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to wait before a given container is checked for readiness", + "title": "Initial Delay Seconds" + }, + "periodSeconds": { + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time to check a given container for readiness", + "title": "Period Seconds" + }, + "port": { + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "port to access on the container", + "title": "Port" + }, + "scheme": { + "type": "string", + "description": "Scheme to use for connecting to the host (HTTP or HTTPS). Defaults to HTTP.", + "title": "Scheme" + }, + "successThreshold": { + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the number of successes required before a given container is said to fulfil the readiness probe", + "title": "Success Threshold" + }, + "tcp": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "If enabled, the kubelet will attempt to open a socket to container. 
If connection is established, the container is considered healthy", + "title": "TCP" + }, + "timeoutSeconds": { + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "defines the time for checking timeout", + "title": "Timeout Seconds" + } + } + }, + "Spec": { + "type": "object", + "description": "used to define the desire state of the given container", + "title": "Spec", + "properties": { + "Affinity": { + "type": "object", + "description": "Node/Inter-pod Affinity allows you to constrain which nodes your pod is eligible to schedule on, based on labels of the node/pods", + "title": "Affinity", + "properties": { + "Key": { + "anyOf": [ + { + "type": "null" + }, + { + "type": "string", + "description": "Key part of the label for node/pod selection", + "title": "Key" + } + ] + }, + "Values": { + "type": "string", + "description": "Value part of the label for node/pod selection", + "title": "Values" + }, + "key": { + "type": "string" + } + } + } + } + }, + "ambassadorMapping": { + "type": "object", + "description": "used to create ambassador mapping resource", + "title": "Mapping", + "properties": { + "ambassadorId": { + "type": "string", + "description": "used to specify id for specific ambassador mappings controller", + "title": "Ambassador ID" + }, + "cors": { + "type": "object", + "description": "used to specify cors policy to access host for this mapping", + "title": "CORS" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify whether to create an ambassador mapping or not", + "title": "Enabled" + }, + "weight": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to specify weight for canary ambassador mappings" + }, + "hostname": { + "type": "string", + "description": "used to specify hostname for ambassador mapping", + "title": "Hostname" + }, + "labels": { + "type": 
"object", + "description": "used to provide custom labels for ambassador mapping", + "title": "Labels" + }, + "prefix": { + "type": "string", + "description": "used to specify path for ambassador mapping", + "title": "Prefix" + }, + "retryPolicy": { + "type": "object", + "description": "used to specify retry policy for ambassador mapping", + "title": "Retry Policy" + }, + "rewrite": { + "type": "string", + "description": "used to specify whether to redirect the path of this mapping and where", + "title": "Rewrite" + }, + "tls": { + "type": "object", + "description": "used to create or define ambassador TLSContext resource", + "title": "TLS Context" + }, + "extraSpec": { + "type": "object", + "description": "used to provide extra spec values which not present in deployment template for ambassador resource", + "title": "Extra Spec" + } + } + }, + "args": { + "type": "object", + "description": " used to give arguments to command", + "title": "Arguments", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling aruguments", + "title": "Enabled" + }, + "value": { + "type": "array", + "description": "values of the arguments", + "title": "Value", + "items": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ] + } + } + }, + "autoscaling": { + "type": "object", + "description": "connected to HPA and controls scaling up and down in response to request load", + "title": "Autoscaling", + "properties": { + "MaxReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Maximum number of replicas allowed for scaling", + "title": "Maximum Replicas" + }, + "MinReplicas": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Minimum number of replicas allowed for scaling", + "title": "Minimum Replicas" + }, + 
"TargetCPUUtilizationPercentage": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target CPU utilization that is expected for a container", + "title": "TargetCPUUtilizationPercentage" + }, + "TargetMemoryUtilizationPercentage": { + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "The target memory utilization that is expected for a container", + "title": "TargetMemoryUtilizationPercentage" + }, + "behavior": { + "type": "object", + "description": "describes behavior and scaling policies for that behavior", + "title": "Behavior" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling autoscaling", + "title": "Enabled" + }, + "labels": { + "type": "object", + "description": "labels for HPA", + "title": "labels" + }, + "annotations": { + "type": "object", + "description": "used to configure some options for HPA", + "title": "annotations" + }, + "extraMetrics": { + "type": "array", + "items": {}, + "description": "used to give external metrics for autoscaling", + "title": "Extra Metrics" + } + } + }, + "command": { + "type": "object", + "description": "contains the commands for the server", + "title": "Command", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling commands" + }, + "value": { + "type": "array", + "items": {}, + "description": "contains the commands", + "title": "Value" + }, + "workingDir": { + "type": "object", + "items": {}, + "description": "contains the working directory", + "title": "Working directory" + } + } + }, + "containerSecurityContext": { + "type": "object", + "description": " defines privilege and access control settings for a Container", + "title": "Container Security Context" + }, + "containers": { + "type": "array", + 
"items": {}, + "description": " used to run side-car containers along with the main container within same pod" + }, + "dbMigrationConfig": { + "type": "object", + "description": "used to configure database migration", + "title": "Db Migration Config", + "properties": { + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used for enabling/disabling the config", + "title": "Enabled" + } + } + }, + "envoyproxy": { + "type": "object", + "description": "envoy is attached as a sidecar to the application container to collect metrics like 4XX, 5XX, throughput and latency", + "title": "Envoy Proxy", + "properties": { + "configMapName": { + "type": "string", + "description": "configMap containing configuration for Envoy", + "title": "ConfigMap" + }, + "lifecycle":{ + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled":{ + "type": "boolean" + }, + "postStart":{ + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created" + }, + "preStop":{ + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + }, + "image": { + "type": "string", + "description": "image of envoy to be used" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": 
"request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + } + } + }, + "hostAliases":{ + "type": "array", + "title": "hostAliases", + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file", + "items": [ + { + "type": "object", + "properties": { + "ip":{ + "type": "string", + "title": "IP", + "description": "IP address of the host file entry" + }, + "hostnames":{ + "type": "array", + "description": "Hostnames for the above IP address", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "image": { + "type": "object", + "description": "used to access images in kubernetes", + "title": "Image", + "properties": { + "pullPolicy": { + "type": "string", + "description": "used to define the instances calling the image", + "title": "Pull Policy", + "enum": ["IfNotPresent", "Always"] + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": {}, + "description": "contains the docker credentials that are used for accessing a registry", + "title": "Image PullSecrets" + }, + "winterSoldier": { + "type": "object", + "description": "allows to scale, sleep or delete the resource based on time.", + "title": "winterSoldier", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the winterSoldier controller", + "title": "Annotations" + }, + "labels": { + "type": "object", + "description": "labels for winterSoldier", + "title": "winterSoldier labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" 
+ }, + "apiVersion": { + "type": "string", + "description": "Api version for winterSoldier", + "title": "winterSoldier apiVersion", + "default": "pincher.devtron.ai/v1alpha1" + }, + "timeRangesWithZone": { + "type": "object", + "description": "describe time zone and time ranges to input in the winterSoldier", + "title": "Time Ranges With Zone", + "timeZone": { + "type": "string", + "description": "describe time zone, and follow standard format", + "title": "Time Zone" + }, + "timeRanges": { + "type": "array", + "items": {}, + "description": "used to take array of time ranges in which each element contains timeFrom, timeTo, weekdayFrom and weekdayTo.", + "title": "Time Ranges" + } + }, + "type": { + "type": "string", + "description": "describe the type of application Rollout/deployment.", + "title": "Type" + }, + "action": { + "type": "string", + "description": "describe the action to be performed by winterSoldier.", + "title": "Action" + }, + "targetReplicas": { + "type": "array", + "description": "describe the number of replicas to which the resource should scale up or down.", + "title": "Target Replicas" + }, + "fieldSelector": { + "type": "array", + "description": "it takes arrays of methods to select specific fields.", + "title": "Field Selector" + } + } + }, + "ingress": { + "type": "object", + "description": "allows public access to URLs", + "title": "Ingress", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx" + }, + "labels": { + "type": "object", + "description": "labels for ingress", + "title": "Ingress labels", + "default": "" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + 
"pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": { + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "ingressInternal": { + "type": "object", + "description": "allows private access to the URLs", + "properties": { + "annotations": { + "type": "object", + "description": "used to configure some options depending on the Ingress controller", + "title": "Annotations" + }, + "className": { + "type": "string", + "description": "name of ingress class, a reference to an IngressClass resource that contains additional configuration including the name of the controller", + "title": "Ingress class name", + "default": "nginx-internal" + }, + "enabled": { + "type": [ + "boolean", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "used to enable or disable ingress", + "title": "Enabled" + }, + "hosts": { + "type": "array", + "description": "list of hosts in ingress", + "title": "Hosts", + "items": [ + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "host URL", + "title": "Host" + }, + "pathType": { + "type": "string", + "description": "type of path", + "title": "PathType" + }, + "paths": { + "type": "array", + "description": "list of paths for a given host", + "title": "Paths", + "items": [ + { + "type": "string" + } + ] + } + } + } + ] + }, + "tls": 
{ + "type": "array", + "items": {}, + "description": "contains security details - private key and certificate", + "title": "TLS" + } + } + }, + "initContainers": { + "type": "array", + "items": {}, + "description": "specialized containers that run before app containers in a Pod, can contain utilities or setup scripts not present in an app image", + "title": "Init Containers" + }, + "kedaAutoscaling": { + "type": "object", + "description": "Kubernetes-based event driven autoscaler. With KEDA, one can drive the scaling of any container in Kubernetes based on the no. of events needing to be processed", + "title": "KEDA Autoscaling", + "properties": { + "advanced": { + "type": "object" + }, + "authenticationRef": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "envSourceContainerName": { + "type": "string" + }, + "maxReplicaCount": { + "type": "integer" + }, + "minReplicaCount": { + "type": "integer" + }, + "triggerAuthentication": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "spec": { + "type": "object" + } + } + }, + "triggers": { + "type": "array", + "items": {} + } + } + }, + "containerSpec": { + "type":"object", + "description": "define the container specic configuration", + "title": "containerSpec", + "properties": { + "lifecycle": { + "type": "object", + "description": "Actions that the management system should take in response to container lifecycle events", + "title": "lifecycle", + "properties": { + "enabled":{ + "type": "boolean" + }, + "postStart":{ + "type": "object", + "title": "postStart", + "description": "PostStart is called immediately after a container is created.You could use this event to check that a required API is available before the container’s main work begins" + }, + "preStop":{ + "type": "object", + "title": "preStop", + "description": "PreStop is called immediately before a container is terminated" + } + } + } + } + }, + 
"pauseForSecondsBeforeSwitchActive": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "tell how much to wait for given period of time before switch active the container", + "title": "Pause For Seconds Before SwitchActive" + }, + "podAnnotations": { + "type":"object", + "description": "used to attach metadata and configs in Kubernetes", + "title": "Pod Annotations" + }, + "podDisruptionBudget": { + "type": "object", + "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "properties": { + "minAvailable":{ + "type": "string", + "title": "minAvailable", + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod" + }, + "maxUnavailable":{ + "type": "string", + "title": "maxUnavailable", + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod." 
+ } + } + }, + "podExtraSpecs":{ + "type": "object", + "description": "ExtraSpec for the pods to be configured", + "title": "podExtraSpecs" + }, + "podLabels": { + "type":"object", + "description": "key/value pairs that are attached to pods, are intended to be used to specify identifying attributes of objects that are meaningful and relevant to users, but do not directly imply semantics to the core system", + "title": "Pod Labels" + }, + "podSecurityContext": { + "type":"object", + "description": "defines privilege and access control settings for a Pod or Container", + "title": "Pod Security Context" + }, + "prometheus": { + "type": "object", + "description": "a kubernetes monitoring tool", + "title": "Prometheus", + "properties": { + "release": { + "type": "string", + "description": "name of the file to be monitored, describes the state of prometheus" + } + } + }, + "rawYaml": { + "type": "array", + "items": {}, + "description": "Accepts an array of Kubernetes objects. One can specify any kubernetes yaml here & it will be applied when an app gets deployed.", + "title": "Raw YAML" + }, + "replicaCount": { + "type": [ + "integer", + "string" + ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "count of Replicas of pod", + "title": "Replica Count" + }, + "resources": { + "type": "object", + "description": "minimum and maximum RAM and CPU available to the application", + "title": "Resources", + "properties": { + "limits": { + "type": "object", + "description": "the maximum values a container can reach", + "title": "Limits", + "properties": { + "cpu": { + "type": "string", + "format": "cpu", + "description": "limit of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "limit of memory", + "title": "Memory" + } + } + }, + "requests": { + "type": "object", + "description": "request is what the container is guaranteed to get", + "title": "Requests", + "properties": { + "cpu": { + "type": "string", + "format": 
"cpu", + "description": "request value of CPU", + "title": "CPU" + }, + "memory": { + "type": "string", + "format": "memory", + "description": "request value of memory", + "title": "Memory" + } + } + } + } + }, + "secret": { + "type": "object", + "properties": { + "data": { + "type": "object" + }, + "enabled": { + "type": "boolean" + } + } + }, + "server": { + "type": "object", + "description": "used for providing server configurations.", + "title": "Server", + "properties": { + "deployment": { + "type": "object", + "description": "gives the details for deployment", + "title": "Deployment", + "properties": { + "image": { + "type": "string", + "description": "URL of the image", + "title": "Image" + }, + "image_tag": { + "type": "string", + "description": "tag of the image", + "title": "Image Tag" + } + } + } + } + }, + "service": { + "type": "object", + "description": "defines annotations and the type of service", + "title": "Service", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service" + }, + "type": { + "type": "string", + "description": "type of service", + "title": "Type", + "enum": [ + "ClusterIP", + "LoadBalancer", + "NodePort", + "ExternalName" + ] + } + } + }, + "serviceAccount": { + "type": "object", + "description": "defines service account for pods", + "title": "Service Account", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations", + "description": "annotations of service account" + }, + "name": { + "type": "string", + "description": "name of service account", + "title": "Name" + }, + "create": { + "type": "boolean" + } + } + }, + "servicemonitor": { + "type": "object", + "description": "gives the set of targets to be monitored", + "title": "Service Monitor", + "properties": { + "additionalLabels": { + "type": "object" + } + } + }, + "tolerations": { + "type": "array", + "items": {}, + "description": "a mechanism which work together with Taints which 
ensures that pods are not placed on inappropriate nodes", + "title": "Tolerations" + }, + "topologySpreadConstraints": { + "type": "array", + "items": {}, + "description": "used to control how Pods are spread across a cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains", + "title": "Topology Spread Constraints" + }, + "volumeMounts": { + "type": "array", + "items": {}, + "description": "used to provide mounts to the volume", + "title": "Volume Mounts" + }, + "volumes": { + "type": "array", + "items": {}, + "description": "required when some values need to be read from or written to an external disk", + "title": "Volumes" + }, + "waitForSecondsBeforeScalingDown": { + "type": [ + "integer", + "string" ], + "pattern": "^@{{[a-zA-Z0-9-+/*%_\\s]+}}$", + "description": "Wait for given period of time before scaling down the container", + "title": "Wait For Seconds Before Scaling Down" + } + } + } + diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/secrets-test-values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/secrets-test-values.yaml new file mode 100644 index 0000000000..4a20404db8 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/secrets-test-values.yaml @@ -0,0 +1 @@ +{"ConfigSecrets":{"enabled":true,"secrets":[{"data":{"standard_key":"c3RhbmRhcmQtdmFsdWU="},"external":false,"externalType":"","mountPath":"/test","name":"normal-secret","type":"volume"},{"data":{"secret_key":"U0VDUkVUIERBVEE="},"external":true,"externalType":"AWSSecretsManager","mountPath":"","name":"external-secret-3","type":"environment"}]}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/NOTES.txt b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/NOTES.txt new file mode 100644 index 0000000000..2b14478168 --- /dev/null +++ 
b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range $.Values.ingress.paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include ".Chart.Name .fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ include ".Chart.Name .fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include ".Chart.Name .fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include ".Chart.Name .name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/_helpers.tpl b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/_helpers.tpl new file mode 100644 index 0000000000..75ceac27e9 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/_helpers.tpl @@ -0,0 +1,150 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define ".Chart.Name .name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create service name +*/}} +{{- define ".servicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 63 | trimSuffix "-" -}} +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 55 | trimSuffix "-" -}}-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 55 | trimSuffix "-" -}}-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create preview service name +*/}} +{{- define ".previewservicename" -}} +{{- if .Values.service.name -}} +{{- .Values.service.name | trunc 55 | trimSuffix "-" -}}-preview +{{- else if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 47 | trimSuffix "-" -}}-preview-service +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define ".Chart.Name .fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "VerticalPodAutoScalingEnabled" -}} + {{- $SMenabled := false -}} + {{- if and .Values.verticalPodScaling.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define ".Chart.Name .chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define ".Chart.Name .color" -}} +{{- $active0 := (index .Values.server.deployment 0).enabled -}} +{{/* +{{- $active1 := (index .Values.server.deployment 1).enabled -}} +*/}} +{{- $active1 := include "safeenabledcheck" . 
-}} +{{- $active := and $active0 $active1 -}} +{{- $active -}} +{{- end -}} + +{{- define "safeenabledcheck" -}} +{{- if (eq (len .Values.server.deployment) 2) -}} + {{- if (index .Values.server.deployment 1).enabled -}} + {{- $active := true -}} + {{- $active -}} + {{- else -}} + {{- $active := false -}} + {{- $active -}} + {{- end -}} +{{- else -}} + {{- $active := false -}} + {{- $active -}} +{{- end -}} +{{- end -}} + + +{{- define "isCMVolumeExists" -}} + {{- $isCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $isCMVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isCMVolumeExists -}} +{{- end -}} + +{{- define "isSecretVolumeExists" -}} + {{- $isSecretVolumeExists := false -}} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $isSecretVolumeExists = true}} + {{- end }} + {{- end }} + {{- end }} + {{- $isSecretVolumeExists -}} +{{- end -}} + + + + +{{- define "serviceMonitorEnabled" -}} + {{- $SMenabled := false -}} + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if and .servicemonitor.enabled }} + {{- $SMenabled = true -}} + {{- end }} + {{- end }} + {{- end }} + {{- $SMenabled -}} +{{- end -}} + +{{/* Create the name of the service account to use */}} +{{- define "serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include ".Chart.Name .fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/ambassador.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/ambassador.yaml new file mode 100644 index 0000000000..9ca3a2c708 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/ambassador.yaml @@ -0,0 +1,92 @@ +{{ $svcName := include ".servicename" . }} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ambassadorMapping.enabled }} +{{- with $.Values.ambassadorMapping }} +apiVersion: getambassador.io/v3alpha1 +kind: Mapping +metadata: + {{- if .name }} + name: {{ .name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }}-mapping + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if .labels }} +{{ toYaml .labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .ambassadorId }} + ambassador_id: {{ .ambassadorId }} + {{- end }} + {{- if .hostname }} + hostname: {{ .hostname | quote }} + {{- end }} + prefix: {{ .prefix }} + {{- if .rewrite }} + rewrite: {{ .rewrite }} + {{- end }} + service: {{ $svcName }}.{{ $.Release.Namespace }}:{{ $svcPort }} + {{- if .retryPolicy }} + retry_policy: +{{ toYaml .retryPolicy | indent 4 }} + {{- end }} + {{- if .cors }} + cors: +{{ toYaml .cors | indent 4 }} + {{- end }} + {{- if .weight }} + weight: {{ .weight }} + {{- end }} + {{- if .method }} + method: {{ .method }} + {{- end }} + {{- if .extraSpec }} +{{ toYaml .extraSpec | indent 2 }} + {{- end }} + {{- if .tls }} + {{- if .tls.context }} + tls: {{ .tls.context }} +{{- if .tls.create }} 
+--- +apiVersion: getambassador.io/v3alpha1 +kind: TLSContext +metadata: + name: {{ .tls.context }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .tls.labels }} +{{ toYaml .tls.labels | nindent 4 }} + {{- end }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .tls.secretName }} + secret: {{ .tls.secretName }} + {{- end }} + {{- if .tls.hosts }} + hosts: +{{ toYaml .tls.hosts | nindent 4 }} + {{- end }} + {{- if .tls.extraSpec }} +{{ toYaml .tls.extraSpec | indent 2 }} + {{- end }} +{{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/configmap.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/configmap.yaml new file mode 100644 index 0000000000..4e7879665e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/configmap.yaml @@ -0,0 +1,22 @@ +{{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +data: +{{ toYaml .data | trim | indent 2 }} + {{- end}} + {{- end}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/externalsecrets.yaml 
b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/externalsecrets.yaml new file mode 100644 index 0000000000..6b6682c0a6 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/externalsecrets.yaml @@ -0,0 +1,84 @@ +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external true }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} +{{- if .esoSecretData.secretStore }} +--- +{{- if $.Capabilities.APIVersions.Has "external-secrets.io/v1" }} +apiVersion: external-secrets.io/v1 +{{- else }} +apiVersion: external-secrets.io/v1beta1 +{{- end }} +kind: SecretStore +metadata: + name: {{ .name}} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + provider: + {{- toYaml .esoSecretData.secretStore | nindent 4 }} +{{- end }} +--- +{{- if $.Capabilities.APIVersions.Has "external-secrets.io/v1" }} +apiVersion: external-secrets.io/v1 +{{- else }} +apiVersion: external-secrets.io/v1beta1 +{{- end }} +kind: ExternalSecret +metadata: + name: {{ .name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +spec: + {{- if .esoSecretData.refreshInterval }} + refreshInterval: {{ .esoSecretData.refreshInterval }} + {{- else }} + refreshInterval: 1h + {{- end}} + {{- if and .esoSecretData.secretStoreRef (not .esoSecretData.secretStore) }} + secretStoreRef: +{{ toYaml 
.esoSecretData.secretStoreRef | indent 4 }} + {{- else }} + secretStoreRef: + name: {{ .name}} + kind: SecretStore + {{- end }} + target: + name: {{ .name}} + {{- if .esoSecretData.template }} + template: + {{- toYaml .esoSecretData.template | nindent 6 }} + {{- end }} + creationPolicy: Owner + {{- if .esoSecretData.esoDataFrom }} + dataFrom: + {{- toYaml .esoSecretData.esoDataFrom | nindent 4 }} + {{- else }} + data: + {{- range .esoSecretData.esoData }} + - secretKey: {{ .secretKey }} + remoteRef: + key: {{ .key }} + {{- if .property }} + property: {{ .property }} + {{- end }} + {{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} +{{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/generic.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/generic.yaml new file mode 100644 index 0000000000..db95e84267 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/generic.yaml @@ -0,0 +1,4 @@ +{{- range .Values.rawYaml }} +--- +{{ toYaml . 
}} + {{- end -}} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/hpa.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/hpa.yaml new file mode 100644 index 0000000000..5153695725 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/hpa.yaml @@ -0,0 +1,94 @@ +{{- if $.Values.autoscaling.enabled }} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2beta2 +{{- else }} +apiVersion: autoscaling/v2beta1 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + {{- if $.Values.autoscaling.name }} + name: {{ $.Values.autoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-hpa + {{- end }} + {{- if .Values.autoscaling.annotations }} + annotations: +{{ toYaml .Values.autoscaling.annotations | indent 4 }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.autoscaling.labels }} +{{ toYaml .Values.autoscaling.labels | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + {{- if .Values.statefulSetConfig.name }} + name: {{ .Values.statefulSetConfig.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + minReplicas: {{ $.Values.autoscaling.MinReplicas }} + maxReplicas: {{ $.Values.autoscaling.MaxReplicas }} + metrics: + {{- if $.Values.autoscaling.containerResource.enabled }} + {{- with $.Values.autoscaling.containerResource }} + {{- if .TargetCPUUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: cpu + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + 
type: Utilization + averageUtilization: {{ .TargetCPUUtilizationPercentage }} + {{- end}} + {{- if .TargetMemoryUtilizationPercentage }} + - type: ContainerResource + containerResource: + name: memory + container: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + target: + type: Utilization + averageUtilization: {{ .TargetMemoryUtilizationPercentage }} + {{- end}} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if $.Values.autoscaling.TargetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + {{- if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }} + target: + type: Utilization + averageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ $.Values.autoscaling.TargetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if and $.Values.autoscaling.extraMetrics (semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion) }} + {{- toYaml $.Values.autoscaling.extraMetrics | nindent 2 }} + {{- end}} + {{- if and $.Values.autoscaling.behavior (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + behavior: + {{- toYaml $.Values.autoscaling.behavior | nindent 4 }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/ingress.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/ingress.yaml new file mode 100644 index 0000000000..7a5fa2374b --- /dev/null +++ 
b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/ingress.yaml @@ -0,0 +1,187 @@ +{{ $svcName := include ".servicename" . }} +{{ $svcPort := (index .Values.ContainerPort 0).servicePort }} +{{- if $.Values.ingress.enabled -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if and .Values.ingressInternal.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingressInternal.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingressInternal.annotations "kubernetes.io/ingress.class" .Values.ingressInternal.className}} + {{- end }} +{{- end }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingress.name }} + name: {{ $.Values.ingress.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . 
}}-ingress + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} + {{- end }} +{{- if .Values.ingress.annotations }} + annotations: +{{ toYaml .Values.ingress.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + {{- if or .Values.ingress.host .Values.ingress.path }} + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ .Values.ingress.path }} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingress.pathType | default "ImplementationSpecific" }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingress.hosts) (not ($.Values.ingress.host )) }} + {{- range .Values.ingress.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end -}} +{{- end }} +{{- if $.Values.ingressInternal.enabled }} +--- +{{ if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{ else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{ else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + {{- if $.Values.ingressInternal.name }} + name: {{ $.Values.ingressInternal.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . 
}}-ingress-internal + {{- end }} + namespace: {{ $.Values.NameSpace }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.ingressInternal.annotations }} + annotations: +{{ toYaml .Values.ingressInternal.annotations | indent 4 }} +{{- end }} +spec: + {{- if and .Values.ingressInternal.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingressInternal.className }} + {{- end }} + rules: + {{- if or .Values.ingressInternal.host .Values.ingressInternal.path }} + - host: {{ .Values.ingressInternal.host }} + http: + paths: + - path: {{ .Values.ingressInternal.path }} + {{- if and .Values.ingressInternal.pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $.Values.ingressInternal.pathType | default "Prefix" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if and ($.Values.ingressInternal.hosts) (not ($.Values.ingressInternal.host )) }} + {{- range .Values.ingressInternal.hosts }} + {{ $outer := . -}} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
}} + {{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ $outer.pathType | default "ImplementationSpecific" | quote }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $svcName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $svcName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- if .additionalBackends }} +{{ toYaml .additionalBackends | indent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ingressInternal.tls }} + tls: +{{ toYaml .Values.ingressInternal.tls | indent 4 }} + {{- end -}} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/istio-gateway.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/istio-gateway.yaml new file mode 100644 index 0000000000..693a27f58e --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/istio-gateway.yaml @@ -0,0 +1,66 @@ +{{- if and .Values.istio.enable .Values.istio.gateway.enabled -}} +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + {{- if .Values.istio.gateway.name }} + name: {{ .Values.istio.gateway.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if $.Values.istio.gateway.labels }} +{{ toYaml $.Values.istio.gateway.labels | indent 4 }} + {{- end }} +{{- if $.Values.istio.gateway.annotations }} + annotations: +{{ toYaml $.Values.istio.gateway.annotations | indent 4 }} +{{- end }} +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: +{{- 
if .Values.istio.gateway.host }} + - {{ .Values.istio.gateway.host | quote -}} +{{- else if .Values.istio.gateway.hosts }} +{{- range .Values.istio.gateway.hosts }} + - {{ . | quote }} +{{- end }} +{{- end }} +{{ with .Values.istio.gateway }} +{{- if .tls.enabled }} + tls: + httpsRedirect: true + - port: + number: 443 + name: https + protocol: HTTPS + hosts: +{{- if .host }} + - {{ .host | quote }} +{{- else if .hosts }} +{{- range .hosts }} + - {{ . | quote }} +{{- end }} +{{- end }} + tls: + mode: SIMPLE + credentialName: {{ .tls.secretName }} +{{ end }} +{{ end }} +{{ end }} + + + diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/istio-virtualservice.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/istio-virtualservice.yaml new file mode 100644 index 0000000000..06932258fd --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/istio-virtualservice.yaml @@ -0,0 +1,68 @@ +{{- with .Values.istio }} +{{- if and .enable .virtualService.enabled }} +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + {{- if .virtualService.name }} + name: {{ .virtualService.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-virtualservice + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} + {{- if .virtualService.labels }} +{{ toYaml .virtualService.labels | indent 4 }} + {{- end }} +{{- if .virtualService.annotations }} + annotations: +{{ toYaml .virtualService.annotations | indent 4 }} +{{- end }} +spec: +{{- if or .gateway.enabled .virtualService.gateways }} + gateways: + {{- if .gateway.enabled }} + {{- if .gateway.name }} + - name: {{ .gateway.name }} + {{- else }} + - name: {{ template 
".Chart.Name .fullname" $ }}-istio-gateway + {{- end }} + {{- end }} + {{- range .virtualService.gateways }} + - {{ . | quote }} + {{- end }} +{{- end }} +{{- if or .gateway.enabled .virtualService.hosts }} + hosts: + {{- if .gateway.enabled }} + {{- if .gateway.host }} + - {{ .gateway.host | quote }} + {{- else if .gateway.hosts }} +{{- range .gateway.hosts }} + - {{ . | quote }} +{{- end }} + {{- end }} + {{- end }} + {{- range .virtualService.hosts }} + - {{ . | quote }} + {{- end }} +{{- else }} + hosts: + - "{{ include ".servicename" $ }}.{{ $.Release.Namespace }}.svc.cluster.local" +{{- end }} +{{- if $.Values.istio.virtualService.http }} + http: +{{ toYaml $.Values.istio.virtualService.http | indent 4 }} +{{- end }} +{{- if $.Values.istio.virtualService.tcp }} + tcp: +{{ toYaml $.Values.istio.virtualService.tcp | indent 4 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/keda-autoscaling.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/keda-autoscaling.yaml new file mode 100644 index 0000000000..db5b1cf81b --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/keda-autoscaling.yaml @@ -0,0 +1,80 @@ +{{- if $.Values.kedaAutoscaling.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + {{- if $.Values.kedaAutoscaling.name }} + name: {{ $.Values.kedaAutoscaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-keda + {{- end }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ .Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} + {{- end }} + {{- if .Values.kedaAutoscaling.labels }} +{{ toYaml .Values.kedaAutoscaling.labels | indent 4 }} + {{- end }} + 
{{- if .Values.kedaAutoscaling.annotations }} + annotations: +{{ toYaml .Values.kedaAutoscaling.annotations | indent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + {{- if .Values.statefulSetConfig.name }} + name: {{ .Values.statefulSetConfig.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} +{{- if $.Values.kedaAutoscaling.envSourceContainerName }} + envSourceContainerName: {{ $.Values.kedaAutoscaling.envSourceContainerName }} +{{- end }} +{{- if $.Values.kedaAutoscaling.pollingInterval }} + pollingInterval: {{ $.Values.kedaAutoscaling.pollingInterval }} +{{- end }} +{{- if $.Values.kedaAutoscaling.cooldownPeriod }} + cooldownPeriod: {{ $.Values.kedaAutoscaling.cooldownPeriod }} +{{- end }} +{{- if $.Values.kedaAutoscaling.idleReplicaCount }} + idleReplicaCount: {{ $.Values.kedaAutoscaling.idleReplicaCount }} +{{- end }} + minReplicaCount: {{ $.Values.kedaAutoscaling.minReplicaCount }} + maxReplicaCount: {{ $.Values.kedaAutoscaling.maxReplicaCount }} +{{- if $.Values.kedaAutoscaling.fallback }} + fallback: +{{ toYaml $.Values.kedaAutoscaling.fallback | indent 4 }} +{{- end }} +{{- if $.Values.kedaAutoscaling.advanced }} + advanced: +{{ toYaml $.Values.kedaAutoscaling.advanced | indent 4 }} +{{- end }} + triggers: +{{ toYaml .Values.kedaAutoscaling.triggers | indent 2}} +{{- if $.Values.kedaAutoscaling.authenticationRef }} + authenticationRef: +{{ toYaml $.Values.kedaAutoscaling.authenticationRef | indent 6 }} +{{- end }} +--- +{{- if $.Values.kedaAutoscaling.triggerAuthentication.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{ $.Values.kedaAutoscaling.triggerAuthentication.name }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + +spec: +{{ toYaml 
$.Values.kedaAutoscaling.triggerAuthentication.spec | indent 2 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/metrics-service-monitor.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/metrics-service-monitor.yaml new file mode 100644 index 0000000000..1f2de25c7a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/metrics-service-monitor.yaml @@ -0,0 +1,43 @@ +{{- if $.Values.appMetrics -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template ".Chart.Name .fullname" $ }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ .Values.prometheus.release }} +spec: + jobLabel: {{ template ".Chart.Name .name" $ }} + endpoints: + - port: envoy-admin + interval: 30s + path: /stats/prometheus + relabelings: + - action: replace + regex: (.*)(\S{10}) + replacement: ${2} + sourceLabels: + - __meta_kubernetes_pod_label_controller_revision_hash + targetLabel: statefulset_hash + - action: replace + sourceLabels: + - statefulset_hash + targetLabel: devtron_app_hash + selector: + matchLabels: + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + namespaceSelector: + matchNames: + - {{.Release.Namespace}} + podTargetLabels: + - appId + - envId + - controller-revision-hash + - devtron_app_hash + +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/poddisruptionbudget.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000000..84913e5464 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/poddisruptionbudget.yaml @@ -0,0 +1,35 @@ 
+{{- if .Values.podDisruptionBudget }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + {{- if .Values.podDisruptionBudget.name }} + name: {{ .Values.podDisruptionBudget.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 6 }} + {{- else }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + {{- end }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/pre-sync-job.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/pre-sync-job.yaml new file mode 100644 index 0000000000..54c9f636ee --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/pre-sync-job.yaml @@ -0,0 +1,29 @@ +{{- if $.Values.dbMigrationConfig.enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template ".Chart.Name .fullname" $ }}-migrator + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + annotations: + argocd.argoproj.io/hook: PreSync +# argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + template: + spec: + containers: + - name: migrator + image: 
686244538589.dkr.ecr.us-east-2.amazonaws.com/migrator:0.0.1-rc14 + env: + {{- range $.Values.dbMigrationConfig.envValues }} + - name: {{ .key}} + value: {{ .value | quote }} + {{- end}} + restartPolicy: Never + backoffLimit: 0 +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/prometheusrules.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/prometheusrules.yaml new file mode 100644 index 0000000000..c9bcc12968 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/prometheusrules.yaml @@ -0,0 +1,33 @@ +{{- if .Values.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + {{- if .Values.prometheusRule.name }} + name: {{ .Values.prometheusRule.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }} + {{- end }} + {{- if .Values.prometheusRule.namespace }} + namespace: {{ .Values.prometheusRule.namespace }} + {{- end }} + labels: + kind: Prometheus + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ .Values.prometheus.release }} +{{- if .Values.prometheusRule.additionalLabels }} +{{ toYaml .Values.prometheusRule.additionalLabels | indent 4 }} + {{- end }} +spec: + {{- with .Values.prometheusRule.rules }} + groups: + {{- if $.Values.prometheusRule.name }} + - name: {{ $.Values.prometheusRule.name }} + {{- else }} + - name: {{ template ".Chart.Name .fullname" $ }} + {{- end }} + rules: {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/secret.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/secret.yaml new file mode 100644 index 0000000000..5ac3ae1410 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/secret.yaml @@ -0,0 +1,84 @@ +{{- if $.Values.secret.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: app-secret + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml $.Values.secret.data | indent 2 }} +{{- end }} + + +{{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{if eq .external false}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name}}-{{ $.Values.app }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + chart: {{ template ".Chart.Name .chart" $ }} +{{- if $.Values.appLabels }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} +type: Opaque +data: +{{ toYaml .data | trim | indent 2 }} +{{- end}} + {{if eq .external true }} + {{if (or (eq .externalType "AWSSecretsManager") (eq .externalType "AWSSystemManager") (eq .externalType "HashiCorpVault"))}} +--- +apiVersion: kubernetes-client.io/v1 +kind: ExternalSecret +metadata: + name: {{ .name}} +{{- if $.Values.appLabels }} + labels: + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} +{{ toYaml $.Values.appLabels | indent 4 }} +{{- end }} 
+spec: + {{- if .roleARN }} + roleArn: .roleARN + {{- end}} + {{- if eq .externalType "AWSSecretsManager"}} + backendType: secretsManager + {{- end}} + {{- if eq .externalType "AWSSystemManager"}} + backendType: systemManager + {{- end}} + {{- if eq .externalType "HashiCorpVault"}} + backendType: vault + {{- end}} + data: + {{- range .secretData }} + - key: {{.key}} + name: {{.name}} + {{- if .property }} + property: {{.property}} + {{- end}} + isBinary: {{.isBinary}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} + {{- end}} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/service.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/service.yaml new file mode 100644 index 0000000000..ccfba4af62 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/service.yaml @@ -0,0 +1,146 @@ +{{- if .Values.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + {{- if .Values.service.name }} + name: {{ .Values.service.name}} + {{- else }} + name: {{ template ".servicename" . }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type | default "ClusterIP" }} +{{- with .Values.service.extraSpec }} + {{- toYaml . | nindent 2 }} + {{- end }} +{{- if (eq .Values.service.type "LoadBalancer") }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . 
}} + {{- end }} + {{- end }} +{{- end }} + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + {{- if (and (eq $.Values.service.type "NodePort") .nodePort )}} + nodePort: {{ .nodePort }} + {{- end }} + protocol: {{ .protocol | default "TCP" }} + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- if .Values.service.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.service.sessionAffinityConfig | indent 4 }} +{{- end }} +{{- end }} +--- +{{- if or .Values.service.enabled .Values.serviceheadless.enabled }} +apiVersion: v1 +kind: Service +metadata: + {{- if .Values.serviceheadless.enabled }} + {{- if .Values.serviceheadless.name }} + name: {{ .Values.serviceheadless.name }} + {{- else }} + name: {{ template ".servicename" . }}-headless + {{- end }} + {{- else }} + name: {{ template ".servicename" . 
}}-headless + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +spec: + clusterIP: None + ports: + {{- range .Values.ContainerPort }} + {{- if .servicePort }} + - port: {{ .servicePort }} + {{- else }} + - port: {{ .port }} + {{- end }} + {{- if .targetPort }} + targetPort: {{ .targetPort }} + {{- else }} + targetPort: {{ .name }} + {{- end }} + {{- if (and (eq $.Values.service.type "NodePort") .nodePort )}} + nodePort: {{ .nodePort }} + {{- end }} + protocol: TCP + name: {{ .name }} + {{- end }} + {{- if $.Values.appMetrics }} + - port: 9901 + name: envoy-admin + {{- end }} + selector: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 4 }} + {{- else }} + app: {{ template ".Chart.Name .name" . }} + {{- end }} +{{- if .Values.serviceheadless.sessionAffinity.enabled }} + sessionAffinity: ClientIP +{{- end }} +{{- if .Values.serviceheadless.sessionAffinityConfig }} + sessionAffinityConfig: +{{ toYaml .Values.serviceheadless.sessionAffinityConfig | indent 4 }} +{{- end }} + type: ClusterIP +{{- if (and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges )}} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . 
}} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/serviceaccount.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/serviceaccount.yaml new file mode 100644 index 0000000000..f337548e94 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if $.Values.serviceAccount }} +{{- if $.Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "serviceAccountName" . }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/servicemonitor.yaml new file mode 100644 index 0000000000..ba714bf9a2 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/servicemonitor.yaml @@ -0,0 +1,122 @@ +{{ $serviceMonitorEnabled := include "serviceMonitorEnabled" . }} +{{- if eq "true" $serviceMonitorEnabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + {{- if .Values.servicemonitor.name }} + name: {{ .Values.servicemonitor.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . 
}}-sm + {{- end }} + labels: + kind: Prometheus + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ .Values.prometheus.release }} +{{- if .Values.servicemonitor.additionalLabels }} +{{ toYaml .Values.servicemonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + {{- range .Values.ContainerPort }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout | quote}} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- if .servicemonitor.relabelings }} + relabelings: +{{ toYaml .servicemonitor.relabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range .Values.containers }} + {{- range .ports }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} 
+ {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- if .servicemonitor.relabelings }} + relabelings: +{{ toYaml .servicemonitor.relabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.servicemonitor.namespaceSelector }} + namespaceSelector: + matchNames: + {{- toYaml .Values.servicemonitor.namespaceSelector | nindent 6 }} + {{- end }} + selector: + matchLabels: + {{- if .Values.servicemonitor.matchLabels }} + {{- toYaml .Values.servicemonitor.matchLabels | nindent 6 }} + {{- else }} + app: {{ template ".Chart.Name .name" $ }} + {{- end }} +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/sidecar-configmap.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/sidecar-configmap.yaml new file mode 100644 index 0000000000..03fed908db --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/sidecar-configmap.yaml @@ -0,0 +1,175 @@ +{{- if .Values.appMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2019-08-12T18:38:34Z + name: sidecar-config-{{ template ".Chart.Name .name" $ }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + 
chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +data: + envoy-config.json: | + { + "stats_config": { + "use_all_default_tags": false, + "stats_tags": [ + { + "tag_name": "cluster_name", + "regex": "^cluster\\.((.+?(\\..+?\\.svc\\.cluster\\.local)?)\\.)" + }, + { + "tag_name": "tcp_prefix", + "regex": "^tcp\\.((.*?)\\.)\\w+?$" + }, + { + "tag_name": "response_code", + "regex": "_rq(_(\\d{3}))$" + }, + { + "tag_name": "response_code_class", + "regex": ".*_rq(_(\\dxx))$" + }, + { + "tag_name": "http_conn_manager_listener_prefix", + "regex": "^listener(?=\\.).*?\\.http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "http_conn_manager_prefix", + "regex": "^http\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "listener_address", + "regex": "^listener\\.(((?:[_.[:digit:]]*|[_\\[\\]aAbBcCdDeEfF[:digit:]]*))\\.)" + }, + { + "tag_name": "mongo_prefix", + "regex": "^mongo\\.(.+?)\\.(collection|cmd|cx_|op_|delays_|decoding_)(.*?)$" + } + ], + "stats_matcher": { + "inclusion_list": { + "patterns": [ + { + "regex": ".*_rq_\\dxx$" + }, + { + "regex": ".*_rq_time$" + }, + { + "regex": "cluster.*" + }, + ] + } + } + }, + "admin": { + "access_log_path": "/dev/null", + "address": { + "socket_address": { + "address": "0.0.0.0", + "port_value": 9901 + } + } + }, + "static_resources": { + "clusters": [ + {{- range $index, $element := .Values.ContainerPort }} + { + "name": "{{ $.Values.app }}-{{ $index }}", + "type": "STATIC", + "connect_timeout": "0.250s", + "lb_policy": "ROUND_ROBIN", +{{- if $element.idleTimeout }} + "common_http_protocol_options": { + "idle_timeout": {{ $element.idleTimeout | quote }} + }, +{{- end }} +{{- if or $element.useHTTP2 $element.useGRPC }} + "http2_protocol_options": {}, +{{- end }} +{{- if and (not $element.useGRPC) (not $element.supportStreaming) }} + "max_requests_per_connection": "1", +{{- end }} + "load_assignment": { + "cluster_name": "9", + 
"endpoints": { + "lb_endpoints": [ + { + "endpoint": { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "127.0.0.1", + "port_value": {{ $element.port }} + } + } + } + } + ] + } + } + }, + {{- end }} + ], + "listeners":[ + {{- range $index, $element := .Values.ContainerPort }} + { + "address": { + "socket_address": { + "protocol": "TCP", + "address": "0.0.0.0", + "port_value": {{ $element.envoyPort | default (add 8790 $index) }} + } + }, + "filter_chains": [ + { + "filters": [ + { + "name": "envoy.filters.network.http_connection_manager", + "config": { + "codec_type": "AUTO", + "stat_prefix": "stats", + "route_config": { + "virtual_hosts": [ + { + "name": "backend", + "domains": [ + "*" + ], + "routes": [ + { + "match": { + "prefix": "/" + }, + "route": { +{{- if $element.supportStreaming }} + "timeout": "0s", +{{- end }} +{{- if and ($element.envoyTimeout) (not $element.supportStreaming) }} + "timeout": "{{ $element.envoyTimeout }}", +{{- end }} + "cluster": "{{ $.Values.app }}-{{ $index }}" + } + } + ] + } + ] + }, + "http_filters": { + "name": "envoy.filters.http.router" + } + } + } + ] + } + ] + }, + {{- end }} + ] + } + } +--- +{{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/statefulset.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/statefulset.yaml new file mode 100644 index 0000000000..5155e36e02 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/statefulset.yaml @@ -0,0 +1,714 @@ + {{- $hasCMEnvExists := false -}} + {{- $hasCMVolumeExists := false -}} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $hasCMVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasCMEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} + + {{- $hasSecretEnvExists := false -}} + {{- $hasSecretVolumeExists := false -}} + {{- if 
.Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $hasSecretVolumeExists = true}} + {{- end }} + {{- if eq .type "environment"}} + {{- $hasSecretEnvExists = true}} + {{- end }} + {{- end }} + {{- end }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + {{- if .Values.statefulSetConfig.name }} + name: {{ .Values.statefulSetConfig.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} + releaseVersion: {{ $.Values.releaseVersion | quote }} + pipelineName: {{ $.Values.pipelineName }} + {{- if .Values.statefulSetConfig.labels }} +{{ toYaml .Values.statefulSetConfig.labels | indent 4 }} + {{- end }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4}} + {{- end }} +{{- if .Values.statefulSetConfig.annotations }} + annotations: +{{ toYaml .Values.statefulSetConfig.annotations | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + {{- if .Values.statefulSetConfig.matchLabels }} +{{ toYaml .Values.statefulSetConfig.matchLabels | indent 6 }} + {{- else }} + app: {{ template ".Chart.Name .name" $ }} + release: {{ $.Release.Name }} + {{- end }} + replicas: {{ $.Values.replicaCount }} + minReadySeconds: {{ $.Values.MinReadySeconds }} + {{- if $.Values.statefulSetConfig.serviceName }} + serviceName: {{ $.Values.statefulSetConfig.serviceName }} + {{- else }} + {{- if or .Values.service.enabled .Values.serviceheadless.enabled }} + {{- if .Values.serviceheadless.name }} + serviceName: {{ .Values.serviceheadless.name }} + {{- else }} + serviceName: {{ template ".servicename" . 
}}-headless + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.statefulSetConfig.podManagementPolicy }} + podManagementPolicy: {{ $.Values.statefulSetConfig.podManagementPolicy }} + {{- end }} + template: + metadata: + {{- if .Values.podAnnotations }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + labels: + {{- if .Values.customPodLabels }} +{{ toYaml .Values.customPodLabels | indent 8 }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} + {{- end }} + {{- else }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 8 }} + {{- end }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + {{- end }} + spec: +{{- if $.Values.podExtraSpecs }} +{{ toYaml .Values.podExtraSpecs | indent 6 }} +{{- end }} + terminationGracePeriodSeconds: {{ $.Values.GracePeriod }} + {{- if $.Values.restartPolicy }} + restartPolicy: {{ $.Values.restartPolicy }} + {{- end }} + {{- if $.Values.hostAliases }} + hostAliases: + {{ toYaml .Values.hostAliases | indent 8 }} + {{- end }} +{{- if and $.Values.Spec.Affinity.Key $.Values.Spec.Affinity.Values }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ $.Values.Spec.Affinity.Key | indent 14 }} + operator: In + values: + - {{ $.Values.Spec.Affinity.Values | default "nodes" }} +{{- else if $.Values.affinity.enabled }} + affinity: +{{ toYaml .Values.affinity.values | indent 8 }} +{{- end }} +{{- if $.Values.serviceAccountName }} + serviceAccountName: {{ $.Values.serviceAccountName }} +{{- else }} + serviceAccountName: {{ template "serviceAccountName" . 
}} +{{- end }} +{{- if $.Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} +{{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml $.Values.nodeSelector | indent 10 }} + {{- end }} + {{- if .Values.nodeName }} + nodeName: {{ $.Values.nodeName }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} +{{- if $.Values.imagePullSecrets}} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} +{{- end}} +{{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{- range $.Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + {{- if semverCompare "<=1.30-0" $.Capabilities.KubeVersion.GitVersion }} + {{- if .minDomains }} + minDomains: {{ .minDomains }} + {{- end }} + {{- end }} + {{- if .nodeAffinityPolicy }} + nodeAffinityPolicy: {{ .nodeAffinityPolicy }} + {{- end }} + {{- if .nodeTaintsPolicy }} + nodeTaintsPolicy: {{ .nodeTaintsPolicy }} + {{- end }} + labelSelector: + matchLabels: + {{- if and .autoLabelSelector .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- else if .autoLabelSelector }} + app: {{ template ".Chart.Name .name" $ }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + release: {{ $.Release.Name }} + {{- else if .customLabelSelector }} +{{ toYaml .customLabelSelector | indent 12 }} + {{- end }} +{{- end }} +{{- end }} +{{- if $.Values.podSecurityContext }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} +{{- end }} +{{- if $.Values.initContainers}} + initContainers: +{{- range $i, $c := .Values.initContainers }} +{{- if .reuseContainerImage}} + - name: {{ $.Chart.Name }}-init-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if 
.securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .args}} + args: +{{ toYaml .args | indent 12 -}} +{{- end}} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} + - +{{ toYaml . | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + containers: + - name: "{{ if $.Values.containerName }}{{ $.Values.containerName }}{{ else }}{{ $.Chart.Name }}{{ end }}" + image: "{{ .Values.server.deployment.image }}:{{ .Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- if $.Values.containerSpec.lifecycle.postStart }} + postStart: +{{ toYaml $.Values.containerSpec.lifecycle.postStart | indent 12 -}} + {{- end }} + {{- end }} +{{- if and $.Values.containerSecurityContext $.Values.privileged }} + securityContext: + privileged: true +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- else if $.Values.privileged }} + securityContext: + privileged: true +{{- else if $.Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 12 }} +{{- end }} +{{- if $.Values.containerExtraSpecs }} +{{ toYaml .Values.containerExtraSpecs | indent 10 }} +{{- end }} + ports: + {{- range $.Values.ContainerPort }} + - name: {{ .name}} + containerPort: {{ .port }} + protocol: {{ .protocol | default "TCP" }} + {{- end}} +{{- if and $.Values.command.enabled $.Values.command.workingDir }} + workingDir: {{ $.Values.command.workingDir }} +{{- end}} +{{- if and $.Values.command.value $.Values.command.enabled}} + command: +{{ toYaml 
$.Values.command.value | indent 12 -}} +{{- end}} +{{- if and $.Values.args.value $.Values.args.enabled}} + args: +{{ toYaml $.Values.args.value | indent 12 -}} +{{- end }} + env: + - name: CONFIG_HASH + value: {{ include (print $.Chart.Name "/templates/configmap.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.ConfigHash) }}{{ .Values.devtronInternal.containerSpecs.ConfigHash }}{{ end }} + - name: SECRET_HASH + value: {{ include (print $.Chart.Name "/templates/secret.yaml") . | sha256sum }}{{ if and (.Values.devtronInternal) (.Values.devtronInternal.containerSpecs.SecretHash) }}{{ .Values.devtronInternal.containerSpecs.SecretHash }}{{ end }} + - name: DEVTRON_APP_NAME + value: {{ template ".Chart.Name .name" $ }} + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEVTRON_CONTAINER_REPO + value: "{{ .Values.server.deployment.image }}" + - name: DEVTRON_CONTAINER_TAG + value: "{{ .Values.server.deployment.image_tag }}" + {{- range $.Values.EnvVariablesFromFieldPath }} + - name: {{ .name }} + valueFrom: + fieldRef: + fieldPath: {{ .fieldPath }} + {{- end}} + {{- range $.Values.EnvVariables }} + - name: {{ .name}} + value: {{ .value | quote }} + {{- end}} + {{- range $.Values.EnvVariablesFromSecretKeys }} + {{- if and .name .secretName .keyName }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + name: {{ .secretName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- range $.Values.EnvVariablesFromCongigMapKeys }} + {{- if and .name .configMapName .keyName }} + - name: {{ .name }} + valueFrom: + configMapKeyRef: + name: {{ .configMapName }} + key: {{ .keyName }} + {{- end }} + {{- end }} + {{- if or (and ($hasCMEnvExists) (.Values.ConfigMaps.enabled)) (and ($hasSecretEnvExists) (.Values.ConfigSecrets.enabled)) }} + envFrom: + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "environment" }} + - configMapRef: + {{- if eq .external true 
}} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "environment" }} + - secretRef: + {{if eq .external true}} + name: {{ .name }} + {{else if eq .external false}} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + +{{- if or $.Values.LivenessProbe.Path $.Values.LivenessProbe.command $.Values.LivenessProbe.tcp }} + livenessProbe: +{{- if $.Values.LivenessProbe.Path }} + httpGet: + path: {{ $.Values.LivenessProbe.Path }} + port: {{ $.Values.LivenessProbe.port }} + scheme: {{ $.Values.LivenessProbe.scheme }} + {{- if $.Values.LivenessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.LivenessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.LivenessProbe.command }} + exec: + command: +{{ toYaml .Values.LivenessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.LivenessProbe.tcp }} + tcpSocket: + port: {{ $.Values.LivenessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.LivenessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.LivenessProbe.periodSeconds }} + successThreshold: {{ $.Values.LivenessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.LivenessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.LivenessProbe.failureThreshold }} +{{- end }} +{{- if or $.Values.ReadinessProbe.Path $.Values.ReadinessProbe.command $.Values.ReadinessProbe.tcp }} + readinessProbe: +{{- if $.Values.ReadinessProbe.Path }} + httpGet: + path: {{ $.Values.ReadinessProbe.Path }} + port: {{ $.Values.ReadinessProbe.port }} + {{- if $.Values.ReadinessProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.ReadinessProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if 
$.Values.ReadinessProbe.command }} + exec: + command: +{{ toYaml .Values.ReadinessProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.ReadinessProbe.tcp }} + tcpSocket: + port: {{ $.Values.ReadinessProbe.port }} +{{- end}} + initialDelaySeconds: {{ $.Values.ReadinessProbe.initialDelaySeconds }} + periodSeconds: {{ $.Values.ReadinessProbe.periodSeconds }} + successThreshold: {{ $.Values.ReadinessProbe.successThreshold }} + timeoutSeconds: {{ $.Values.ReadinessProbe.timeoutSeconds }} + failureThreshold: {{ $.Values.ReadinessProbe.failureThreshold }} +{{- end }} + resources: +{{ toYaml $.Values.resources | trim | indent 12 }} +{{- if or $.Values.StartupProbe.Path $.Values.StartupProbe.command $.Values.StartupProbe.tcp }} + startupProbe: +{{- if $.Values.StartupProbe.Path }} + httpGet: + path: {{ $.Values.StartupProbe.Path }} + port: {{ $.Values.StartupProbe.port }} + {{- if $.Values.StartupProbe.httpHeaders }} + httpHeaders: + {{- range $.Values.StartupProbe.httpHeaders}} + - name: {{.name}} + value: {{.value}} + {{- end}} + {{- end }} +{{- end }} +{{- if $.Values.StartupProbe.command }} + exec: + command: +{{ toYaml .Values.StartupProbe.command | indent 16 }} +{{- end}} +{{- if and $.Values.StartupProbe.tcp }} + tcpSocket: + port: {{ $.Values.StartupProbe.port }} +{{- end}} +{{- end}} + volumeMounts: + {{- if $.Values.statefulSetConfig.volumeClaimTemplates }} + {{- with (index $.Values.statefulSetConfig.volumeClaimTemplates 0) }} + {{- if and .metadata .metadata.name }} + - name: {{ .metadata.name }} + {{- else }} + - name: {{ template ".Chart.Name .name" $ }} + {{- end}} + {{- end}} + {{- if .Values.statefulSetConfig.mountPath }} + mountPath: {{ $.Values.statefulSetConfig.mountPath }} + {{- else}} + mountPath: "/tmp" + {{- end}} + {{- end}} +{{- with .Values.volumeMounts }} +{{ toYaml . 
| trim | indent 12 }} +{{- end }} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{- range $k, $v := .data }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + {{- $cmName := .name -}} + {{- $cmMountPath := .mountPath -}} + {{- if eq .subPath false }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath }} + + {{- else }} + {{if (or (eq .externalType "ESO_GoogleSecretsManager") (eq .externalType "ESO_AWSSecretsManager") (eq .externalType "ESO_HashiCorpVault") (eq .externalType "ESO_AzureSecretsManager"))}} + {{- if and (.esoSubPath) (ne (len .esoSubPath) 0) }} + {{- range .esoSubPath }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ . }} + subPath: {{ . }} + {{- end }} + {{- else }} + {{- range .esoSecretData.esoData }} + - name: {{ $cmName | replace "." "-"}}-vol + mountPath: {{ $cmMountPath}}/{{ .secretKey }} + subPath: {{ .secretKey }} + {{- end }} + {{- end }} + {{- else }} + {{- range $k, $v := .data }} # for others secrets the mount path will be .data[i].secretKey + - name: {{ $cmName | replace "." 
"-"}}-vol + mountPath: {{ $cmMountPath}}/{{ $k}} + subPath: {{ $k}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if and (eq (len .Values.volumes) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) }} []{{- end }} + {{- if and (eq (len .Values.volumeMounts) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) }} [] {{- end }} +{{- if $.Values.appMetrics }} + - name: envoy + image: {{ $.Values.envoyproxy.image | default "quay.io/devtron/envoy:v1.16.0"}} + {{- if $.Values.envoyproxy.lifecycle }} + lifecycle: +{{ toYaml .Values.envoyproxy.lifecycle | indent 12 -}} + {{- else if $.Values.containerSpec.lifecycle.enabled }} + lifecycle: + {{- if $.Values.containerSpec.lifecycle.preStop }} + preStop: +{{ toYaml $.Values.containerSpec.lifecycle.preStop | indent 12 -}} + {{- end }} + {{- end }} + resources: +{{ toYaml $.Values.envoyproxy.resources | trim | indent 12 }} + ports: + - containerPort: 9901 + protocol: TCP + name: envoy-admin + {{- range $index, $element := .Values.ContainerPort }} + - name: {{ $element.name}} + containerPort: {{ $element.envoyPort | default (add 8790 $index) }} + protocol: TCP + {{- end }} + command: ["/usr/local/bin/envoy"] + args: ["-c", "/etc/envoy-config/envoy-config.json", "-l", "info", "--log-format", "[METADATA][%Y-%m-%d %T.%e][%t][%l][%n] %v"] + volumeMounts: + - name: {{ $.Values.envoyproxy.configMapName | default "envoy-config-volume" }} + mountPath: /etc/envoy-config/ +{{- if $.Values.envoyproxy.readinessProbe}} + readinessProbe: +{{ toYaml $.Values.envoyproxy.readinessProbe | indent 12}} +{{- end }} +{{- if $.Values.envoyproxy.livenessProbe}} + livenessProbe: +{{ toYaml $.Values.envoyproxy.livenessProbe | indent 12}} +{{- end }} +{{- end}} +{{- if $.Values.containers }} +{{- range $i, $c := .Values.containers }} +{{- if .reuseContainerImage}} + - name: {{ 
$.Chart.Name }}-sidecontainer-{{ add1 $i }} + image: "{{ $.Values.server.deployment.image }}:{{ $.Values.server.deployment.image_tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} +{{- if .env }} + env: +{{ toYaml .env | indent 12 }} +{{- end }} + {{- if .envFrom }} + envFrom: +{{ toYaml .env | indent 12 }} +{{- end }} +{{- if .securityContext }} + securityContext: +{{ toYaml .securityContext | indent 12 }} +{{- end }} +{{- if .command}} + command: +{{ toYaml .command | indent 12 -}} +{{- end}} +{{- if .resizePolicy }} + resizePolicy: +{{ toYaml .resziePolicy | indent 12}} +{{- end }} +{{- if .resources}} + resources: +{{ toYaml .resources | indent 12 -}} +{{- end}} +{{- if .volumeMounts}} + volumeMounts: +{{ toYaml .volumeMounts | indent 12 -}} +{{- end}} +{{- else}} +{{- $cCopy := deepCopy . }} +{{- if hasKey $cCopy "ports" }} +{{- $newPorts := list }} +{{- range $port := $cCopy.ports }} + {{- $newPorts = append $newPorts (dict + "name" (get $port "name") + "containerPort" (get $port "containerPort") + "protocol" (get $port "protocol") + ) }} +{{- end }} +{{- $_ := set $cCopy "ports" $newPorts }} +{{- end }} + - +{{ toYaml $cCopy | indent 10 }} +{{- end}} +{{- end}} +{{- end}} + + + volumes: + {{- if $.Values.appMetrics }} + - name: envoy-config-volume + configMap: + name: sidecar-config-{{ template ".Chart.Name .name" $ }} + {{- end }} +{{- with .Values.volumes }} +{{ toYaml . | trim | indent 8 }} +{{- end }} + {{- if .Values.ConfigMaps.enabled }} + {{- range .Values.ConfigMaps.maps }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." 
"-"}}-vol + configMap: + {{- if eq .external true }} + name: {{ .name }} + {{- else if eq .external false }} + name: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + defaultMode: {{ .filePermission}} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.ConfigSecrets.enabled }} + {{- range .Values.ConfigSecrets.secrets }} + {{- if eq .type "volume"}} + - name: {{ .name | replace "." "-"}}-vol + secret: + {{- if eq .external true }} + secretName: {{ .name }} + {{- else if eq .external false }} + secretName: {{ .name}}-{{ $.Values.app }} + {{- end }} + {{- if eq (len .filePermission) 0 }} + {{- else }} + defaultMode: {{ .filePermission}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- if and (eq (len .Values.volumes) 0) (or (eq (.Values.ConfigSecrets.enabled) true) (eq (.Values.ConfigMaps.enabled) true)) (eq ($hasCMVolumeExists) false) (eq ($hasSecretVolumeExists) false) (eq (.Values.appMetrics) false) }} []{{- end }} + {{- if and (eq (len .Values.volumes) 0) (eq (.Values.ConfigSecrets.enabled) false) (eq (.Values.ConfigMaps.enabled) false) (eq (.Values.appMetrics) false) }} []{{- end }} + + {{- if $.Values.statefulSetConfig.volumeClaimTemplates }} + volumeClaimTemplates: + {{- range $.Values.statefulSetConfig.volumeClaimTemplates }} + - apiVersion: {{ .apiVersion | default "v1" }} + kind: {{.kind | default "PersistentVolumeClaim"}} + metadata: + {{- if and .metadata .metadata.annotations }} + annotations: {{ toYaml .metadata.annotations | nindent 14 }} + {{- end}} + {{- if and .metadata .metadata.labels -}} + labels: + {{ toYaml .metadata.labels | nindent 14 }} + {{- end}} + {{- if and .metadata .metadata.name }} + name: {{ .metadata.name }} + {{- else }} + name: {{ template ".Chart.Name .name" $ }} + {{- end}} + {{- if and .metadata .metadata.namespace }} + namespace: {{ .metadata.namespace }} + {{- end}} + spec: + accessModes: + {{- range 
.spec.accessModes }} + - {{ . }} + {{- end }} + {{- if .spec.dataSource }} + dataSource: + apiGroup: {{ .spec.dataSource.apiGroup}} + kind: {{ .spec.dataSource.kind}} + name: {{ .spec.dataSource.name}} + {{- end }} + {{- if .spec.dataSourceRef }} + dataSourceRef: + apiGroup: {{ .spec.dataSourceRef.apiGroup}} + kind: {{ .spec.dataSourceRef.kind}} + name: {{ .spec.dataSourceRef.name}} + {{- end }} + {{- if .spec.selector }} + selector: {{ toYaml .spec.selector | nindent 10 }} + {{- end}} + {{- if .spec.storageClassName }} + storageClassName: {{ .spec.storageClassName }} + {{- end}} + resources: + {{- if .spec.resources.claim }} + claims: + name: {{ .spec.resources.claim.name}} + {{- end}} + {{- if .spec.resources.limits }} + limits: {{ toYaml .spec.resources.limits | nindent 14 }} + {{- end}} + requests: + storage: {{ .spec.resources.requests.storage |default "2Gi" }} + {{- if .volumeMode }} + volumeMode: {{ .volumeMode}} + {{- end}} + {{- if .volumeName }} + volumeName: {{ .volumeName}} + {{- end}} + {{- end }} + {{- end }} + {{- if eq .Values.deploymentType "ROLLINGUPDATE" }} + updateStrategy: + type: RollingUpdate + {{- if $.Values.deployment.strategy.rollingUpdate.partition }} + rollingUpdate: + partition: {{ .Values.deployment.strategy.rollingUpdate.partition }} + {{- end }} + {{- else if eq .Values.deploymentType "ONDELETE" }} + updateStrategy: + type: OnDelete + {{- end }} + {{- if.Values.statefulSetConfig.revisionHistoryLimit }} + revisionHistoryLimit: {{ .Values.statefulSetConfig.revisionHistoryLimit }} + {{- end }} diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/vertical-pod-autoscaler.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/vertical-pod-autoscaler.yaml new file mode 100644 index 0000000000..d92030cbd7 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/vertical-pod-autoscaler.yaml @@ -0,0 +1,31 @@ +{{ $VerticalPodAutoScalingEnabled := include 
"VerticalPodAutoScalingEnabled" . }} +{{- if eq "true" $VerticalPodAutoScalingEnabled -}} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + {{- if .Values.verticalPodScaling.name }} + name: {{ .Values.verticalPodScaling.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" . }}-vpa + {{- end }} + labels: + kind: Prometheus + app: {{ template ".Chart.Name .name" . }} + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + chart: {{ template ".Chart.Name .chart" . }} + release: {{ .Values.prometheus.release }} +spec: +{{- if .Values.verticalPodScaling.resourcePolicy }} + resourcePolicy: +{{ toYaml .Values.verticalPodScaling.resourcePolicy}} +{{- end }} +{{- if .Values.verticalPodScaling.updatePolicy }} + updatePolicy: +{{ toYaml .Values.verticalPodScaling.updatePolicy}} +{{- end }} + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include ".Chart.Name .fullname" $ }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/winter-soldier.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/winter-soldier.yaml new file mode 100644 index 0000000000..53f31c9c3a --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/templates/winter-soldier.yaml @@ -0,0 +1,49 @@ +{{- if .Values.winterSoldier.enabled }} +apiVersion: {{ $.Values.winterSoldier.apiVersion }} +kind: Hibernator +metadata: + {{- if .Values.winterSoldier.name }} + name: {{ .Values.winterSoldier.name }} + {{- else }} + name: {{ template ".Chart.Name .fullname" $ }}-hibernator + {{- end }} + labels: + appId: {{ $.Values.app | quote }} + envId: {{ $.Values.env | quote }} + app: {{ template ".Chart.Name .name" $ }} + chart: {{ template ".Chart.Name .chart" $ }} + release: {{ $.Release.Name }} +{{- if .Values.appLabels }} +{{ toYaml .Values.appLabels | indent 4 }} +{{- end }} + {{- if .Values.winterSoldier.labels }} +{{ toYaml 
.Values.winterSoldier.labels | indent 4 }} + {{- end }} +{{- if .Values.winterSoldier.annotations }} + annotations: +{{ toYaml .Values.winterSoldier.annotations | indent 4 }} +{{- end }} +spec: + timeRangesWithZone: +{{ toYaml $.Values.winterSoldier.timeRangesWithZone | indent 4}} + selectors: + - inclusions: + - objectSelector: + {{- if .Values.statefulSetConfig.name }} + name: {{ .Values.statefulSetConfig.name }} + {{- else }} + name: {{ include ".Chart.Name .fullname" $ }} + {{- end }} + type: StatefulSet + fieldSelector: +{{toYaml $.Values.winterSoldier.fieldSelector | indent 14}} + namespaceSelector: + name: {{ $.Release.Namespace }} + exclusions: [] + action: {{ $.Values.winterSoldier.action }} + {{- if eq .Values.winterSoldier.action "scale" }} + {{- if .Values.winterSoldier.targetReplicas }} + targetReplicas: {{ $.Values.winterSoldier.targetReplicas }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/test_values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/test_values.yaml new file mode 100644 index 0000000000..4e16da2d69 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/test_values.yaml @@ -0,0 +1,687 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+kedaAutoscaling: + enabled: false +ambassadorMapping: + enabled: true +istio: + enabled: true + gateway: + enabled: true + host: example.com +appLabels: + test: thyjhgfv +containerSpec: + lifecycle: + enabled: true + preStop: + exec: + command: ["sleep","10"] + postStart: + httpGet: + host: example.com + path: /example + port: 90 + +StartupProbe: + Path: "/" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false +imagePullSecrets: + - test1 + - test2 +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyTimeout: 15 + targetPort: 8080 + envoyPort: 8799 + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s + servicemonitor: + enabled: true + path: /abc + scheme: 'http' + interval: 30s + scrapeTimeout: 20s + metricRelabelings: + - sourceLabels: [namespace] + regex: '(.*)' + replacement: myapp + targetLabel: target_namespace + + - name: app1 + port: 8090 + targetPort: 1234 + servicePort: 8080 + useGRPC: true + servicemonitor: + enabled: true + - name: app2 + port: 8091 + servicePort: 8081 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +Spec: + Affinity: + Key: + # Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +autoscaling: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + TargetCPUUtilizationPercentage: 70 + TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} + containerResource: + enabled: false + TargetCPUUtilizationPercentage: 90 + TargetMemoryUtilizationPercentage: 80 +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - 
type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# +EnvVariablesFromSecretKeys: + - name: ENV_NAME + secretName: SECRET_NAME + keyName: SECRET_KEY + +EnvVariablesFromCongigMapKeys: + - name: ENV_NAME + configMapName: CONFIG_MAP_NAME + keyName: CONFIG_MAP_KEY + +nodeName: "testiing" + +statefulSetConfig: + matchLabels: + app: devtron + + serviceheadless: + enabled: false + name: test-service_headless + labels: + app: example + annotations: + example.com/version: "1.0" + serviceName: "test-service-headless-statefulset" + podManagementPolicy: "OrderedReady" + revisionHistoryLimit: 3 + updateStrategy: + type: RollingUpdate + partition: 2 + mountPath: "/tmp" + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + labels: + app: myapp + spec: + accessModes: + - ReadWriteOnce + - ReadWriteOnce + - ReadWriteOnce + dataSource: + kind: Snapshot + apiGroup: snapshot.storage.k8s.io + name: my-snapshot + resources: + requests: + storage: 10Gi + limits: + storage: 20Gi + storageClassName: my-storage-class + selector: + matchLabels: + app: myapp + volumeMode: Filesystem + volumeName: my-pv + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: pvc-logs + labels: + app: myapp + spec: + accessModes: + - ReadWriteMany + dataSourceRef: + kind: Secret + apiGroup: v1 + name: my-secret + resources: + requests: + storage: 5Gi + storageClassName: my-storage-class + selector: + matchExpressions: + - {key: environment, operator: In, values: [production]} + volumeMode: Block + volumeName: my-pv + + +secret: + enabled: true + +service: + type: ClusterIP + enabled: true + # name: "1234567890123456789012345678901234567890123456789012345678901234567890" + annotations: {} + # test1: test2 + # test3: 
test4 + +server: + deployment: + image_tag: 1-95af053 + image: "" +deploymentType: "RECREATE" + +topologySpreadConstraints: + - maxSkew: 1 + topologyKey: zone + whenUnsatisfiable: DoNotSchedule + autoLabelSelector: true + customLabelSelector: + foo: bar + +EnvVariables: + - name: FLASK_ENV + value: qa + +LivenessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + - name: Custom-Header2 + value: xyz + +ReadinessProbe: + Path: / + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: + - name: Custom-Header + value: abc + +prometheus: + release: monitoring + +servicemonitor: + additionalLabels: {} + + +prometheusRule: + enabled: true + additionalLabels: {} + namespace: "" + rules: + # These are just examples rules, please adapt them to your needs + - alert: TooMany500s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 5XXs + summary: More than 5% of the all requests did return 5XX, this require your attention + - alert: TooMany400s + expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + for: 1m + labels: + severity: critical + annotations: + description: Too many 4XXs + summary: More than 5% of the all requests did return 4XX, this require your attention + +winterSoldier: + apiVersion: pincher.devtron.ai/v1alpha1 + enabled: true + annotations: {} + labels: {} + timeRangesWithZone: + timeZone: "Asia/Kolkata" + timeRanges: + - timeFrom: 00:00 + timeTo: 23:59:59 + weekdayFrom: Sat + weekdayTo: Sun + - timeFrom: 00:00 + timeTo: 08:00 + weekdayFrom: Mon + weekdayTo: Fri + - timeFrom: 20:00 + timeTo: 23:59:59 + weekdayFrom: Mon + weekdayTo: 
Fri + action: scale + targetReplicas: [1,1,1] + type: StatefulSet + fieldSelector: + - AfterTime(AddTime(ParseTime({{metadata.creationTimestamp}}, '2006-01-02T15:04:05Z'), '5m'), Now()) + +ingress: + enabled: true + className: nginx + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + nginx.ingress.kubernetes.io/ssl-redirect: "false" + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + nginx.ingress.kubernetes.io/canary: "true" + nginx.ingress.kubernetes.io/canary-weight: "10" + # Old Ingress Format + host: "ingress-example.com" + path: "/app" + +# New Ingress Format + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /ingress + pathType: "ImplementationSpecific" + backend: + service: + name: test-service-ingress + port: + number: 80 + tls: [] +### Legacy Ingress Format ## +# host: abc.com +# path: "/" +# pathType: "ImplementationSpecific" + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: nginx-internal + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + additionalBackends: + - path: /example1 + pathType: "ImplementationSpecific" + backend: + service: + name: test-service + port: + number: 80 + - path: /example2 + pathType: "ImplementationSpecific" + backend: + service: + name: 
test-service + port: + number: 80 + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +dbMigrationConfig: + enabled: true + +command: + workingDir: /app + enabled: false + value: ["ls"] + +args: + enabled: false + value: [] + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 0.10 + memory: 100Mi + +volumeMounts: + - name: log-volume + mountPath: /var/log + - name: pvc-logs + mountPath: /test +volumes: + - name: log-volume + emptyDir: {} + + +nodeSelector: + microk8s.io/cluster: true + + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: true + maps: + - name: config-map-1 + type: environment + external: false + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + +ConfigSecrets: + enabled: true + secrets: + - name: config-secret-1 + type: environment + external: false + externalType: AWSSecretsManager + esoSecretData: + secretStore: + aws: + service: SecretsManager + region: us-east-1 + auth: + secretRef: + accessKeyIDSecretRef: + name: awssm-secret + key: access-key + secretAccessKeySecretRef: + name: awssm-secret + key: secret-access-key + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + 
property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + data: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + - name: config-secret-2 + type: environment + external: false + externalType: ESO_HashiCorpVault + esoSecretData: + secretStore: + vault: + server: "http://my.vault.server:8200" + path: "secret" + version: "v2" + auth: + tokenSecretRef: + name: vault-token + key: token + esoData: + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + - secretKey: prod-mysql-password + key: secrets/prod-mysql-secrets + property: prodPassword + date: + key1: key1value-1 + key2: key2value-1 + key3: key3value-1 + +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + - command: ["sh", "-c", "chown -R 1000:1000 logs"] + reuseContainerImage: true + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + privileged: true + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + - name: init-migrate + image: busybox:latest + command: ["sh", "-c", "chown -R 1000:1000 logs"] + volumeMounts: + - mountPath: /usr/local/airflow/logs + name: logs-data + securityContext: + capabilities: + drop: + - ALL + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . 
+ #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs +# name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +envoyproxy: + image: envoyproxy/envoy:v1.14.1 + configMapName: "" + lifecycle: {} + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +podDisruptionBudget: + minAvailable: 1 + maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +## + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" +# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +appMetrics: true +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "test1" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. + ## + annotations: + kubernetes.io/service-account.name: build-robot +containerSecurityContext: + allowPrivilegeEscalation: false +privileged: true +hostAliases: [] +# - ip: "127.0.0.1" +# hostnames: +# - "foo.local" \ No newline at end of file diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/values.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/values.yaml new file mode 100644 index 0000000000..ce96ad32e1 --- /dev/null +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-2-0/values.yaml @@ -0,0 +1,617 @@ +# Default values for myapp. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +replicaCount: 1 +MinReadySeconds: 5 +MaxSurge: 1 +MaxUnavailable: 0 +GracePeriod: 30 +ContainerPort: + - name: app + port: 8080 + servicePort: 80 + envoyPort: 8799 + envoyTimeout: 15s + useHTTP2: false + supportStreaming: false + idleTimeout: 1800s +# servicemonitor: +# enabled: false +# path: /abc +# scheme: 'http' +# interval: 30s +# scrapeTimeout: 20s +# metricRelabelings: +# - sourceLabels: [namespace] +# regex: '(.*)' +# replacement: myapp +# targetLabel: target_namespace + + - name: app1 + port: 8090 + servicePort: 8080 + useGRPC: true + +pauseForSecondsBeforeSwitchActive: 30 +waitForSecondsBeforeScalingDown: 30 +autoPromotionSeconds: 30 + +# Optional override for the main container name. If provided, this name will be used +# for the primary container instead of the default chart-derived name. 
+containerName: "" + +Spec: + Affinity: + Key: +# Key: kops.k8s.io/instancegroup + Values: + + +image: + pullPolicy: IfNotPresent + +autoscaling: + name: + enabled: false + MinReplicas: 1 + MaxReplicas: 2 + # TargetCPUUtilizationPercentage: 90 + # TargetMemoryUtilizationPercentage: 80 + annotations: {} + labels: {} + behavior: {} +# scaleDown: +# stabilizationWindowSeconds: 300 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# scaleUp: +# stabilizationWindowSeconds: 0 +# policies: +# - type: Percent +# value: 100 +# periodSeconds: 15 +# - type: Pods +# value: 4 +# periodSeconds: 15 +# selectPolicy: Max + extraMetrics: [] +# - external: +# metricName: pubsub.googleapis.com|subscription|num_undelivered_messages +# metricSelector: +# matchLabels: +# resource.labels.subscription_id: echo-read +# targetAverageValue: "2" +# type: External +# + +kedaAutoscaling: + name: + enabled: false + envSourceContainerName: "" # Optional. Default: .spec.template.spec.containers[0] + cooldownPeriod: 300 # Optional. Default: 300 seconds + minReplicaCount: 1 + maxReplicaCount: 2 + idleReplicaCount: 0 # Optional. Must be less than minReplicaCount + pollingInterval: 30 # Optional. Default: 30 seconds + # The fallback section is optional. It defines a number of replicas to fallback to if a scaler is in an error state. + fallback: {} # Optional. Section to specify fallback options + # failureThreshold: 3 # Mandatory if fallback section is included + # replicas: 6 + advanced: {} + # horizontalPodAutoscalerConfig: # Optional. Section to specify HPA related options + # behavior: # Optional. 
Use to modify HPA's scaling behavior + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + triggers: [] + triggerAuthentication: + enabled: false + name: "" + spec: {} + authenticationRef: {} + +secret: + enabled: false +serviceheadless: + enabled: false + sessionAffinity: + enabled: false +service: + type: ClusterIP + enabled: false + #name: "test-service" + annotations: {} + sessionAffinity: + enabled: false + # test1: test2 + # test3: test4 +statefulSetConfig: + mountPath: "/tmp" + serviceheadless: + enabled: false + + + volumeClaimTemplates: [] + # - spec: + # accessModes: + # - ReadWriteOnce + # resources: + # requests: + # storage: "2Gi" +# nodeName: "" + + +server: + deployment: + image_tag: 1-95af053 + image: "" + +EnvVariablesFromFieldPath: [] +# - name: POD_NAME +# fieldPath: metadata.name + +EnvVariables: + - name: FLASK_ENV + value: qa + +LivenessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] +# - name: Custom-Header +# value: abc + +ReadinessProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] +# - name: Custom-Header +# value: abc + +prometheus: + name: + release: monitoring + +servicemonitor: + name: + additionalLabels: {} + + +StartupProbe: + Path: "" + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + httpHeaders: [] + command: [] + tcp: false +prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" +# rules: +# # These are just examples rules, please adapt them to your needs +# - alert: TooMany500s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# 
description: Too many 5XXs +# summary: More than 5% of the all requests did return 5XX, this require your attention +# - alert: TooMany400s +# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 +# for: 1m +# labels: +# severity: critical +# annotations: +# description: Too many 4XXs +# summary: More than 5% of the all requests did return 4XX, this require your attention +# +winterSoldier: + name: + enabled: false + apiVersion: pincher.devtron.ai/v1alpha1 + labels: {} + annotations: {} + timeRangesWithZone: {} + # timeZone: "Asia/Kolkata" + # timeRanges: [] + action: sleep + targetReplicas: [] + fieldSelector: [] + type: StatefulSet +ingress: + name: + enabled: false + className: "" + labels: {} + annotations: {} +# nginx.ingress.kubernetes.io/rewrite-target: / +# nginx.ingress.kubernetes.io/ssl-redirect: "false" +# kubernetes.io/ingress.class: nginx +# kubernetes.io/tls-acme: "true" +# nginx.ingress.kubernetes.io/canary: "true" +# nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.local + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.local + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +ingressInternal: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # nginx.ingress.kubernetes.io/canary: "true" + # nginx.ingress.kubernetes.io/canary-weight: "10" + + hosts: + - host: chart-example1.internal + pathType: "ImplementationSpecific" + paths: + - /example1 + - host: chart-example2.internal + pathType: "ImplementationSpecific" + paths: + - /example2 + - /example2/healthz + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +istio: + enable: false + gateway: + name: + enabled: false + labels: {} + annotations: {} + 
host: "" + tls: + enabled: false + secretName: "" + virtualService: + name: + enabled: false + labels: {} + annotations: {} + gateways: [] + hosts: [] + http: + # - match: + # - uri: + # prefix: /v1 + # - uri: + # prefix: /v2 + # rewriteUri: / + # timeout: 12 + # headers: + # request: + # add: + # x-some-header: "value" + # retries: + # attempts: 2 + # perTryTimeout: 3s + # route: + # - destination: + # host: service1 + # port: 80 + # - route: + # - destination: + # host: service2 + +dbMigrationConfig: + enabled: false + +command: + enabled: false + value: [] + +args: + enabled: false + value: [] + + + +networkPolicy: + enabled: false + annotations: {} + labels: {} + podSelector: + matchExpressions: [] + matchLabels: {} + policyTypes: [] + ingress: [] + egress: [] +hibernator: + enable: false + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ +volumeMounts: [] + # - name: pvc-logs + # mountPath: /test-tmp +# - name: log-volume +# mountPath: /var/log + +volumes: [] +# - name: log-volume +# emptyDir: {} + + +nodeSelector: {} + +# If you need to provide some extra specs for pod which are not included by default in deployment template +# then provide them here +podExtraSpecs: {} + +# If you need to provide some extra specs for main container which are not included by default in deployment template +# then provide them here +containerExtraSpecs: {} + +#used for deployment algo selection +orchestrator.deploymant.algo: 1 + +ConfigMaps: + enabled: false + maps: [] +# - name: config-map-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-map-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 +# key3: abc-2 +# - name: config-map-3 +# type: environment +# external: true +# mountPath: /etc/config/3 +# data: [] +# - name: config-map-4 +# type: volume +# external: true +# mountPath: /etc/config/4 +# data: [] + + +ConfigSecrets: + enabled: false + secrets: [] +# - name: config-secret-1 +# type: environment +# external: false +# data: +# key1: key1value-1 +# key2: key2value-1 +# key3: key3value-1 +# - name: config-secret-2 +# type: volume +# external: false +# mountPath: /etc/config/2 +# data: +# key1: | +# club : manchester utd +# nation : england +# key2: abc-2 + + +initContainers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + # - name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + # # Uncomment below line ONLY IF you want to reuse the container image. + # # This will assign your application's docker image to init container. 
+ # reuseContainerImage: true + +containers: [] + ## Additional init containers to run before the Scheduler pods. + ## for example, be used to run a sidecar that chown Logs storage . + #- name: volume-mount-hack + # image: busybox + # command: ["sh", "-c", "chown -R 1000:1000 logs"] + # volumeMounts: + # - mountPath: /usr/local/airflow/logs + # name: logs-data + + +rawYaml: [] +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP +# - apiVersion: v1 +# kind: Service +# metadata: +# annotations: +# labels: +# app: sample-metrics-app +# name: sample-metrics-app +# namespace: default +# spec: +# ports: +# - name: web +# port: 80 +# protocol: TCP +# targetPort: 8080 +# selector: +# app: sample-metrics-app +# sessionAffinity: None +# type: ClusterIP + +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: zone + # whenUnsatisfiable: DoNotSchedule + # autoLabelSelector: true + # customLabelSelector: {} + +envoyproxy: + image: quay.io/devtron/envoy:v1.14.1 + lifecycle: {} + configMapName: "" + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi + +ambassadorMapping: + # name: "" + enabled: false + # labels: + # key1: value1 + # prefix: / + # ambassadorId: 1234 + # hostname: devtron.example.com + # rewrite: /foo/ + # retryPolicy: + # retry_on: "5xx" + # num_retries: 10 + # cors: + # origins: http://foo.example,http://bar.example + # methods: POST, GET, OPTIONS + # headers: Content-Type + # credentials: true + # exposed_headers: X-Custom-Header + # max_age: "86400" + # weight: 10 + # method: GET + # extraSpec: + # method_regex: true + # headers: + # x-quote-mode: backend + # x-random-header: devtron + # tls: + # context: httpd-context + # create: true + # secretName: 
httpd-secret + # hosts: + # - anything.example.info + # - devtron.example.com + # extraSpec: + # min_tls_version: v1.2 + +containerSpec: + lifecycle: + enabled: false + preStop: {} +# exec: +# command: ["sleep","10"] + postStart: {} +# httpGet: +# host: example.com +# path: /example +# port: 90 + +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + +podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 3000 + # fsGroup: 2000 + +containerSecurityContext: {} + # allowPrivilegeEscalation: false +## Pods Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for pods + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the `.Chart.Name .fullname` template + name: "" + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. + ## Only used if `create` is `true`. 
+ ## + annotations: {} + +tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + +imagePullSecrets: [] + # - test1 + # - test2 + +affinity: + enabled: false + +verticalPodScaling: + enabled: false \ No newline at end of file diff --git a/scripts/sql/34704402_helm_ownership_flag.down.sql b/scripts/sql/34704402_helm_ownership_flag.down.sql new file mode 100644 index 0000000000..3fa405721d --- /dev/null +++ b/scripts/sql/34704402_helm_ownership_flag.down.sql @@ -0,0 +1,4 @@ +--- Drop +ALTER TABLE "public"."user_deployment_request" + DROP COLUMN IF EXISTS "helm_redeployment_request", + DROP COLUMN IF EXISTS "helm_take_release_ownership"; \ No newline at end of file diff --git a/scripts/sql/34704402_helm_ownership_flag.up.sql b/scripts/sql/34704402_helm_ownership_flag.up.sql new file mode 100644 index 0000000000..d32927bd13 --- /dev/null +++ b/scripts/sql/34704402_helm_ownership_flag.up.sql @@ -0,0 +1,4 @@ +--- Adding helm_take_ownership column to user_deployment_request table +ALTER TABLE "public"."user_deployment_request" + ADD COLUMN IF NOT EXISTS "helm_redeployment_request" boolean DEFAULT false, + ADD COLUMN IF NOT EXISTS "helm_take_release_ownership" boolean DEFAULT false; \ No newline at end of file diff --git a/scripts/sql/34704403_deployment-chart-ref_4-22-0.down.sql b/scripts/sql/34704403_deployment-chart-ref_4-22-0.down.sql new file mode 100644 index 0000000000..c7b08abfd7 --- /dev/null +++ b/scripts/sql/34704403_deployment-chart-ref_4-22-0.down.sql @@ -0,0 +1,5 @@ +DELETE FROM global_strategy_metadata_chart_ref_mapping WHERE chart_ref_id=(select id from chart_ref where version='4.22.0' and name='Deployment'); + +DELETE FROM "public"."chart_ref" WHERE ("location" = 'deployment-chart_4-22-0' AND "version" = '4.22.0'); + +UPDATE "public"."chart_ref" SET "is_default" = 't' WHERE "location" = 'deployment-chart_4-21-0' AND "version" = '4.21.0'; \ No newline at end of file 
diff --git a/scripts/sql/34704403_deployment-chart-ref_4-22-0.up.sql b/scripts/sql/34704403_deployment-chart-ref_4-22-0.up.sql new file mode 100644 index 0000000000..1213a543ce --- /dev/null +++ b/scripts/sql/34704403_deployment-chart-ref_4-22-0.up.sql @@ -0,0 +1,30 @@ +UPDATE chart_ref SET is_default=false; +INSERT INTO "public"."chart_ref" ("location", "version", "is_default", "active", "created_on", "created_by", "updated_on", "updated_by", "name", "deployment_strategy_path") +SELECT 'deployment-chart_4-22-0', '4.22.0', 't', 't', 'now()', 1, 'now()', 1, 'Deployment', 'pipeline-values.yaml' +WHERE NOT EXISTS ( + SELECT 1 FROM "public"."chart_ref" WHERE "location" = 'deployment-chart_4-22-0' +); + +INSERT INTO "public"."global_strategy_metadata_chart_ref_mapping" ("global_strategy_metadata_id", "chart_ref_id", "active", "default", "created_on", "created_by", "updated_on", "updated_by") +SELECT + (SELECT "id" FROM "public"."global_strategy_metadata" WHERE "name" = 'ROLLING'), + (SELECT "id" FROM "public"."chart_ref" WHERE "location" = 'deployment-chart_4-22-0'), + true, true, 'now()', 1, 'now()', 1 +WHERE NOT EXISTS ( + SELECT 1 + FROM "public"."global_strategy_metadata_chart_ref_mapping" + WHERE "global_strategy_metadata_id" = (SELECT "id" FROM "public"."global_strategy_metadata" WHERE "name" = 'ROLLING') + AND "chart_ref_id" = (SELECT "id" FROM "public"."chart_ref" WHERE "location" = 'deployment-chart_4-22-0') +); + +INSERT INTO "public"."global_strategy_metadata_chart_ref_mapping" ("global_strategy_metadata_id", "chart_ref_id", "active", "default", "created_on", "created_by", "updated_on", "updated_by") +SELECT + (SELECT "id" FROM "public"."global_strategy_metadata" WHERE "name" = 'RECREATE'), + (SELECT "id" FROM "public"."chart_ref" WHERE "location" = 'deployment-chart_4-22-0'), + true, false, 'now()', 1, 'now()', 1 +WHERE NOT EXISTS ( + SELECT 1 + FROM "public"."global_strategy_metadata_chart_ref_mapping" + WHERE "global_strategy_metadata_id" = (SELECT "id" 
FROM "public"."global_strategy_metadata" WHERE "name" = 'RECREATE') + AND "chart_ref_id" = (SELECT "id" FROM "public"."chart_ref" WHERE "location" = 'deployment-chart_4-22-0') +); diff --git a/scripts/sql/34704404_statefulset_5_2_0.down.sql b/scripts/sql/34704404_statefulset_5_2_0.down.sql new file mode 100644 index 0000000000..d85ee60c98 --- /dev/null +++ b/scripts/sql/34704404_statefulset_5_2_0.down.sql @@ -0,0 +1,5 @@ +DELETE FROM global_strategy_metadata_chart_ref_mapping WHERE chart_ref_id=(select id from chart_ref where version='5.2.0' and location='statefulset-chart_5-2-0'); + +DELETE FROM "public"."chart_ref" WHERE ("location" = 'statefulset-chart_5-2-0' AND "version" = '5.2.0'); + +UPDATE "public"."chart_ref" SET "is_default" = 't' WHERE "location" = 'statefulset-chart_5-1-0' AND "version" = '5.1.0'; diff --git a/scripts/sql/34704404_statefulset_5_2_0.up.sql b/scripts/sql/34704404_statefulset_5_2_0.up.sql new file mode 100644 index 0000000000..ff5e58ba03 --- /dev/null +++ b/scripts/sql/34704404_statefulset_5_2_0.up.sql @@ -0,0 +1,95 @@ +-- 1. Idempotent Insert for chart_ref +INSERT INTO "public"."chart_ref" ( + "location", + "version", + "deployment_strategy_path", + "is_default", + "active", + "created_on", + "created_by", + "updated_on", + "updated_by", + "name" +) +SELECT + 'statefulset-chart_5-2-0', + '5.2.0', + 'pipeline-values.yaml', + FALSE, + FALSE, + now(), + 1, + now(), + 1, + 'StatefulSet' +WHERE NOT EXISTS ( + SELECT 1 + FROM "public"."chart_ref" + WHERE "location" = 'statefulset-chart_5-2-0' +); + +-- 2. 
Idempotent Insert for Mapping (ROLLINGUPDATE) +INSERT INTO global_strategy_metadata_chart_ref_mapping ( + "global_strategy_metadata_id", + "chart_ref_id", + "active", + "default", + "created_on", + "created_by", + "updated_on", + "updated_by" +) +SELECT + gsm.id, + cr.id, + TRUE, + TRUE, + now(), + 1, + now(), + 1 +FROM + global_strategy_metadata gsm, + chart_ref cr +WHERE + gsm.name = 'ROLLINGUPDATE' + AND cr.location = 'statefulset-chart_5-2-0' + AND NOT EXISTS ( + SELECT 1 + FROM global_strategy_metadata_chart_ref_mapping existing + WHERE existing.global_strategy_metadata_id = gsm.id + AND existing.chart_ref_id = cr.id + ); + +-- 3. Idempotent Insert for Mapping (ONDELETE) +INSERT INTO global_strategy_metadata_chart_ref_mapping ( + "global_strategy_metadata_id", + "chart_ref_id", + "active", + "default", + "created_on", + "created_by", + "updated_on", + "updated_by" +) +SELECT + gsm.id, + cr.id, + TRUE, + FALSE, + now(), + 1, + now(), + 1 +FROM + global_strategy_metadata gsm, + chart_ref cr +WHERE + gsm.name = 'ONDELETE' + AND cr.location = 'statefulset-chart_5-2-0' + AND NOT EXISTS ( + SELECT 1 + FROM global_strategy_metadata_chart_ref_mapping existing + WHERE existing.global_strategy_metadata_id = gsm.id + AND existing.chart_ref_id = cr.id + ); diff --git a/scripts/sql/34704405_reference-5-2-0.down.sql b/scripts/sql/34704405_reference-5-2-0.down.sql new file mode 100644 index 0000000000..d83bb7d946 --- /dev/null +++ b/scripts/sql/34704405_reference-5-2-0.down.sql @@ -0,0 +1,15 @@ +DELETE FROM global_strategy_metadata_chart_ref_mapping +WHERE chart_ref_id IN ( + SELECT id + FROM "public"."chart_ref" + WHERE "version" = '5.2.0' + AND "location" = 'reference-chart_5-2-0' + AND "name" = 'Rollout Deployment' +) +AND global_strategy_metadata_id IN (1, 2, 3, 4); + +-- 2. 
Remove the chart reference (Parent) +DELETE FROM "public"."chart_ref" +WHERE "version" = '5.2.0' +AND "location" = 'reference-chart_5-2-0' +AND "name" = 'Rollout Deployment'; diff --git a/scripts/sql/34704405_reference-5-2-0.up.sql b/scripts/sql/34704405_reference-5-2-0.up.sql new file mode 100644 index 0000000000..8676d086a0 --- /dev/null +++ b/scripts/sql/34704405_reference-5-2-0.up.sql @@ -0,0 +1,39 @@ +-- 1. Insert chart_ref if not exists +INSERT INTO "public"."chart_ref" ( + "location", "version", "deployment_strategy_path", + "is_default", "active", "created_on", "created_by", + "updated_on", "updated_by", "name" +) +SELECT + 'reference-chart_5-2-0', '5.2.0', 'pipeline-values.yaml', + 'f', 't', now(), 1, now(), 1, + 'Rollout Deployment' +WHERE NOT EXISTS ( + SELECT 1 + FROM "public"."chart_ref" + WHERE "version" = '5.2.0' + AND "location" = 'reference-chart_5-2-0' + AND "name" = 'Rollout Deployment' +); + +-- 2. Insert mappings based on the chart_ref above +INSERT INTO global_strategy_metadata_chart_ref_mapping ( + "global_strategy_metadata_id", "chart_ref_id", "active", + "created_on", "created_by", "updated_on", "updated_by", "default" +) +SELECT + m_ids.id, + cr.id, + true, now(), 1, now(), 1, + (m_ids.id = 1) +FROM + "public"."chart_ref" cr, + (VALUES (1), (2), (3), (4)) AS m_ids(id) +WHERE + cr.version = '5.2.0' AND cr.name = 'Rollout Deployment' + AND NOT EXISTS ( + SELECT 1 + FROM global_strategy_metadata_chart_ref_mapping existing + WHERE existing.global_strategy_metadata_id = m_ids.id + AND existing.chart_ref_id = cr.id + ); \ No newline at end of file diff --git a/scripts/sql/34704400_finops_tables.down.sql b/scripts/sql/34804400_finops_tables.down.sql similarity index 100% rename from scripts/sql/34704400_finops_tables.down.sql rename to scripts/sql/34804400_finops_tables.down.sql diff --git a/scripts/sql/34704400_finops_tables.up.sql b/scripts/sql/34804400_finops_tables.up.sql similarity index 100% rename from 
scripts/sql/34704400_finops_tables.up.sql rename to scripts/sql/34804400_finops_tables.up.sql diff --git a/scripts/sql/34804400_cost_module_installation.down.sql b/scripts/sql/34904400_cost_module_installation.down.sql similarity index 100% rename from scripts/sql/34804400_cost_module_installation.down.sql rename to scripts/sql/34904400_cost_module_installation.down.sql diff --git a/scripts/sql/34804400_cost_module_installation.up.sql b/scripts/sql/34904400_cost_module_installation.up.sql similarity index 100% rename from scripts/sql/34804400_cost_module_installation.up.sql rename to scripts/sql/34904400_cost_module_installation.up.sql diff --git a/scripts/sql/34904400_velero_backup_restore_tables.down.sql b/scripts/sql/35004400_velero_backup_restore_tables.down.sql similarity index 100% rename from scripts/sql/34904400_velero_backup_restore_tables.down.sql rename to scripts/sql/35004400_velero_backup_restore_tables.down.sql diff --git a/scripts/sql/34904400_velero_backup_restore_tables.up.sql b/scripts/sql/35004400_velero_backup_restore_tables.up.sql similarity index 100% rename from scripts/sql/34904400_velero_backup_restore_tables.up.sql rename to scripts/sql/35004400_velero_backup_restore_tables.up.sql diff --git a/scripts/sql/35004400_velero_installation_config.down.sql b/scripts/sql/35104400_velero_installation_config.down.sql similarity index 100% rename from scripts/sql/35004400_velero_installation_config.down.sql rename to scripts/sql/35104400_velero_installation_config.down.sql diff --git a/scripts/sql/35004400_velero_installation_config.up.sql b/scripts/sql/35104400_velero_installation_config.up.sql similarity index 100% rename from scripts/sql/35004400_velero_installation_config.up.sql rename to scripts/sql/35104400_velero_installation_config.up.sql diff --git a/vendor/github.com/devtron-labs/common-lib/utils/reflectUtils/ReflectUtil.go b/vendor/github.com/devtron-labs/common-lib/utils/reflectUtils/ReflectUtil.go new file mode 100644 index 
0000000000..60de9d97f9 --- /dev/null +++ b/vendor/github.com/devtron-labs/common-lib/utils/reflectUtils/ReflectUtil.go @@ -0,0 +1,14 @@ +package reflectUtils + +import "reflect" + +func IsNullableValue(field reflect.Value) bool { + kind := field.Kind() + switch kind { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.UnsafePointer, + reflect.Interface, reflect.Slice: + return true + default: //other types can not be nil + return false + } +} diff --git a/vendor/github.com/devtron-labs/common-lib/workerPool/workerpool.go b/vendor/github.com/devtron-labs/common-lib/workerPool/workerpool.go new file mode 100644 index 0000000000..5750d6e1d9 --- /dev/null +++ b/vendor/github.com/devtron-labs/common-lib/workerPool/workerpool.go @@ -0,0 +1,116 @@ +package workerPool + +import ( + "fmt" + "github.com/devtron-labs/common-lib/constants" + "github.com/devtron-labs/common-lib/pubsub-lib/metrics" + "github.com/devtron-labs/common-lib/utils/reflectUtils" + "github.com/devtron-labs/common-lib/utils/runTime" + "github.com/gammazero/workerpool" + "go.uber.org/zap" + "reflect" + "runtime/debug" + "sync" +) + +type WorkerPool[T any] struct { + logger *zap.SugaredLogger + service constants.ServiceName + wp *workerpool.WorkerPool + mu *sync.Mutex + err chan error + response []T + includeZeroValue bool +} + +func NewWorkerPool[T any](maxWorkers int, serviceName constants.ServiceName, logger *zap.SugaredLogger) *WorkerPool[T] { + wp := &WorkerPool[T]{ + logger: logger, + service: serviceName, + wp: workerpool.New(maxWorkers), + mu: &sync.Mutex{}, + err: make(chan error, 1), + } + return wp +} + +func (wp *WorkerPool[T]) InitializeResponse() *WorkerPool[T] { + wp.response = []T{} + return wp +} + +func (wp *WorkerPool[T]) IncludeZeroValue() *WorkerPool[T] { + wp.includeZeroValue = true + return wp +} + +func (wp *WorkerPool[T]) Submit(task func() (T, error)) { + if task == nil { + return + } + wp.wp.Submit(func() { + defer func() { + if r := recover(); r != nil { + 
metrics.IncPanicRecoveryCount("go-routine", wp.service.ToString(), runTime.GetCallerFunctionName(), fmt.Sprintf("%s:%d", runTime.GetCallerFileName(), runTime.GetCallerLineNumber())) + wp.logger.Errorw(fmt.Sprintf("%s %s", constants.GoRoutinePanicMsgLogPrefix, "go-routine recovered from panic"), "err", r, "stack", string(debug.Stack())) + } + }() + if wp.Error() != nil { + return + } + res, err := task() + if err != nil { + wp.logger.Errorw("error in worker pool task", "err", err) + wp.setError(err) + return + } + wp.updateResponse(res) + }) +} + +func (wp *WorkerPool[T]) updateResponse(res T) { + wp.lock() + defer wp.unlock() + val := reflect.ValueOf(res) + if reflectUtils.IsNullableValue(val) && val.IsNil() { + return + } else if !wp.includeZeroValue && val.IsZero() { + return + } else { + wp.response = append(wp.response, res) + return + } +} + +func (wp *WorkerPool[_]) StopWait() error { + wp.wp.StopWait() + // return error from workerPool error channel + return wp.Error() +} + +func (wp *WorkerPool[_]) lock() { + wp.mu.Lock() +} + +func (wp *WorkerPool[_]) unlock() { + wp.mu.Unlock() +} + +func (wp *WorkerPool[_]) Error() error { + select { + case err := <-wp.err: + return err + default: + return nil + } +} + +func (wp *WorkerPool[_]) setError(err error) { + if err != nil && wp.Error() == nil { + wp.err <- err + } +} + +func (wp *WorkerPool[T]) GetResponse() []T { + return wp.response +} diff --git a/vendor/modules.txt b/vendor/modules.txt index feeff4bc3d..1e282737fd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -523,7 +523,7 @@ github.com/davecgh/go-spew/spew # github.com/deckarep/golang-set v1.8.0 ## explicit; go 1.17 github.com/deckarep/golang-set -# github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251208113217-e733437afcfe +# github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 => 
github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251217072923-a2f0562a9b35 ## explicit; go 1.24.0 github.com/devtron-labs/authenticator/apiToken github.com/devtron-labs/authenticator/client @@ -531,7 +531,7 @@ github.com/devtron-labs/authenticator/jwt github.com/devtron-labs/authenticator/middleware github.com/devtron-labs/authenticator/oidc github.com/devtron-labs/authenticator/password -# github.com/devtron-labs/common-lib v0.18.1-0.20241001061923-eda545dc839e => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251208113217-e733437afcfe +# github.com/devtron-labs/common-lib v0.18.1-0.20241001061923-eda545dc839e => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251217072923-a2f0562a9b35 ## explicit; go 1.24.0 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/blob-storage @@ -559,6 +559,7 @@ github.com/devtron-labs/common-lib/utils/k8s/commonBean github.com/devtron-labs/common-lib/utils/k8s/configMap github.com/devtron-labs/common-lib/utils/k8s/health github.com/devtron-labs/common-lib/utils/k8sObjectsUtil +github.com/devtron-labs/common-lib/utils/reflectUtils github.com/devtron-labs/common-lib/utils/registry github.com/devtron-labs/common-lib/utils/remoteConnection/bean github.com/devtron-labs/common-lib/utils/retryFunc @@ -566,6 +567,7 @@ github.com/devtron-labs/common-lib/utils/runTime github.com/devtron-labs/common-lib/utils/sql github.com/devtron-labs/common-lib/utils/workFlow github.com/devtron-labs/common-lib/utils/yaml +github.com/devtron-labs/common-lib/workerPool github.com/devtron-labs/common-lib/workflow # github.com/devtron-labs/go-bitbucket v0.9.60-beta ## explicit; go 1.14 @@ -2673,5 +2675,5 @@ xorm.io/xorm/log xorm.io/xorm/names xorm.io/xorm/schemas xorm.io/xorm/tags -# github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251208113217-e733437afcfe -# github.com/devtron-labs/common-lib => 
github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251208113217-e733437afcfe +# github.com/devtron-labs/authenticator => github.com/devtron-labs/devtron-services/authenticator v0.0.0-20251217072923-a2f0562a9b35 +# github.com/devtron-labs/common-lib => github.com/devtron-labs/devtron-services/common-lib v0.0.0-20251217072923-a2f0562a9b35 diff --git a/wire_gen.go b/wire_gen.go index c099099491..9b6a1b2e7e 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -233,6 +233,9 @@ import ( "github.com/devtron-labs/devtron/pkg/module/repo" "github.com/devtron-labs/devtron/pkg/module/store" "github.com/devtron-labs/devtron/pkg/notifier" + "github.com/devtron-labs/devtron/pkg/overview" + "github.com/devtron-labs/devtron/pkg/overview/cache" + config5 "github.com/devtron-labs/devtron/pkg/overview/config" "github.com/devtron-labs/devtron/pkg/pipeline" "github.com/devtron-labs/devtron/pkg/pipeline/draftAwareConfigService" "github.com/devtron-labs/devtron/pkg/pipeline/executors" @@ -1035,7 +1038,8 @@ func InitializeApp() (*App, error) { apiTokenRestHandlerImpl := apiToken2.NewApiTokenRestHandlerImpl(sugaredLogger, apiTokenServiceImpl, userServiceImpl, enforcerImpl, validate) apiTokenRouterImpl := apiToken2.NewApiTokenRouterImpl(apiTokenRestHandlerImpl) k8sCapacityServiceImpl := capacity.NewK8sCapacityServiceImpl(sugaredLogger, k8sApplicationServiceImpl, k8sServiceImpl, k8sCommonServiceImpl) - k8sCapacityRestHandlerImpl := capacity2.NewK8sCapacityRestHandlerImpl(sugaredLogger, k8sCapacityServiceImpl, userServiceImpl, enforcerImpl, clusterServiceImplExtended, environmentServiceImpl, clusterRbacServiceImpl, clusterReadServiceImpl, validate) + clusterCacheServiceImpl := cache.NewClusterCacheServiceImpl(sugaredLogger) + k8sCapacityRestHandlerImpl := capacity2.NewK8sCapacityRestHandlerImpl(sugaredLogger, k8sCapacityServiceImpl, userServiceImpl, enforcerImpl, clusterServiceImplExtended, environmentServiceImpl, clusterRbacServiceImpl, clusterReadServiceImpl, validate, 
clusterCacheServiceImpl) k8sCapacityRouterImpl := capacity2.NewK8sCapacityRouterImpl(k8sCapacityRestHandlerImpl) webhookHelmServiceImpl := webhookHelm.NewWebhookHelmServiceImpl(sugaredLogger, helmAppServiceImpl, clusterServiceImplExtended, chartRepositoryServiceImpl, attributesServiceImpl) webhookHelmRestHandlerImpl := webhookHelm2.NewWebhookHelmRestHandlerImpl(sugaredLogger, webhookHelmServiceImpl, userServiceImpl, enforcerImpl, validate) @@ -1101,7 +1105,21 @@ func InitializeApp() (*App, error) { userResourceExtendedServiceImpl := userResource.NewUserResourceExtendedServiceImpl(sugaredLogger, teamServiceImpl, environmentServiceImpl, appCrudOperationServiceImpl, chartGroupServiceImpl, appListingServiceImpl, appWorkflowServiceImpl, k8sApplicationServiceImpl, clusterServiceImplExtended, commonEnforcementUtilImpl, enforcerUtilImpl, enforcerImpl) restHandlerImpl := userResource2.NewUserResourceRestHandler(sugaredLogger, userServiceImpl, userResourceExtendedServiceImpl) routerImpl := userResource2.NewUserResourceRouterImpl(restHandlerImpl) - muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, 
globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, deploymentConfigurationRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl, devtronResourceRouterImpl, fluxApplicationRouterImpl, scanningResultRouterImpl, routerImpl) + appManagementServiceImpl := overview.NewAppManagementServiceImpl(sugaredLogger, appRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowRepositoryImpl, environmentRepositoryImpl, teamRepositoryImpl, workflowStageRepositoryImpl, repositoryImpl) + doraMetricsServiceImpl := overview.NewDoraMetricsServiceImpl(sugaredLogger, lensClientImpl, appRepositoryImpl, pipelineRepositoryImpl, environmentRepositoryImpl, cdWorkflowRepositoryImpl) + insightsServiceImpl := overview.NewInsightsServiceImpl(sugaredLogger, appRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowRepositoryImpl, environmentRepositoryImpl) + clusterOverviewConfig, err := config5.GetClusterOverviewConfig() + if err != nil { + return nil, err + } + clusterOverviewServiceImpl := overview.NewClusterOverviewServiceImpl(sugaredLogger, clusterServiceImplExtended, k8sCapacityServiceImpl, clusterCacheServiceImpl, k8sCommonServiceImpl, enforcerImpl, clusterOverviewConfig) + securityOverviewServiceImpl := overview.NewSecurityOverviewServiceImpl(sugaredLogger, imageScanResultRepositoryImpl, imageScanDeployInfoRepositoryImpl, cveStoreRepositoryImpl, ciPipelineRepositoryImpl, cdWorkflowRepositoryImpl) + overviewServiceImpl := overview.NewOverviewServiceImpl(appManagementServiceImpl, doraMetricsServiceImpl, insightsServiceImpl, clusterOverviewServiceImpl, clusterCacheServiceImpl, 
securityOverviewServiceImpl) + overviewRestHandlerImpl := restHandler.NewOverviewRestHandlerImpl(sugaredLogger, overviewServiceImpl, userServiceImpl, validate, enforcerImpl) + infraOverviewRestHandlerImpl := restHandler.NewInfraOverviewRestHandlerImpl(sugaredLogger, clusterOverviewServiceImpl, clusterCacheServiceImpl, userServiceImpl, validate, enforcerImpl) + infraOverviewRouterImpl := router.NewInfraOverviewRouterImpl(infraOverviewRestHandlerImpl) + overviewRouterImpl := router.NewOverviewRouterImpl(overviewRestHandlerImpl, infraOverviewRouterImpl) + muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, deploymentConfigurationRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl, devtronResourceRouterImpl, fluxApplicationRouterImpl, 
scanningResultRouterImpl, routerImpl, overviewRouterImpl) loggingMiddlewareImpl := util4.NewLoggingMiddlewareImpl(userServiceImpl) cdWorkflowServiceImpl := cd.NewCdWorkflowServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) webhookServiceImpl := pipeline.NewWebhookServiceImpl(ciArtifactRepositoryImpl, sugaredLogger, ciPipelineRepositoryImpl, ciWorkflowRepositoryImpl, cdWorkflowCommonServiceImpl, workFlowStageStatusServiceImpl, ciServiceImpl)