From 57cb35c2256ca6e11084890440c4c4a18a9d55bf Mon Sep 17 00:00:00 2001 From: Min Min Date: Wed, 18 Mar 2026 16:00:14 +0800 Subject: [PATCH 01/21] implement 3-way merge logic for deploying kubernetes resources Signed-off-by: Min Min --- .../aslan/core/common/service/kube/apply.go | 9 +- .../aslan/core/common/service/product.go | 2 +- .../jobcontroller/job_blue_green_deploy.go | 2 +- .../jobcontroller/job_blue_green_release.go | 4 +- .../job_blue_green_release_v2.go | 4 +- .../jobcontroller/job_canary_deploy.go | 2 +- .../jobcontroller/job_canary_release.go | 12 +- .../jobcontroller/job_custom_deploy.go | 2 +- .../jobcontroller/job_deploy.go | 4 +- .../jobcontroller/job_gray_release.go | 37 ++++- .../jobcontroller/job_gray_rollback.go | 26 ++-- .../jobcontroller/job_istio_release.go | 40 ++++- .../jobcontroller/job_istio_rollback.go | 26 ++-- .../jobcontroller/job_restart.go | 140 ++++++++---------- .../core/environment/service/environment.go | 8 +- .../aslan/core/environment/service/image.go | 2 +- .../aslan/core/environment/service/service.go | 8 +- pkg/tool/kube/updater/base.go | 19 +++ 18 files changed, 197 insertions(+), 150 deletions(-) diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 7bac2b12ea..7fd34030a0 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -41,6 +41,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/helm/pkg/releaseutil" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" commonmodels "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/repository/models" commonrepo "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/repository/mongodb" @@ -840,7 +841,13 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) 
jobLogManager.SaveJobLog(logContent) - err = updater.CreateOrPatchDeployment(res, kubeClient) + resYAML, marshalErr := yaml.Marshal(res) + if marshalErr != nil { + log.Errorf("Failed to marshal deployment %s to YAML: %v", res.Name, marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + err = updater.CreateOrPatchDeploymentV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), res, err) errList = multierror.Append(errList, err) diff --git a/pkg/microservice/aslan/core/common/service/product.go b/pkg/microservice/aslan/core/common/service/product.go index 808e1edca6..57bb37f943 100644 --- a/pkg/microservice/aslan/core/common/service/product.go +++ b/pkg/microservice/aslan/core/common/service/product.go @@ -81,7 +81,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors := new(multierror.Error) - if err := updater.DeleteDeployments(namespace, selector, clientset); err != nil { + if err := updater.DeleteDeploymentV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteDeployments error: %v", err)) } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go index 16766b58b8..07de1c8e86 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go @@ -189,7 +189,7 @@ func (c *BlueGreenDeployJobCtl) run(ctx context.Context) error { blueDeployment.Spec.Selector.MatchLabels[config.BlueGreenVersionLabelName] = c.jobTaskSpec.Version 
blueDeployment.Spec.Template.Labels[config.BlueGreenVersionLabelName] = c.jobTaskSpec.Version blueDeployment.ObjectMeta.ResourceVersion = "" - if err := updater.CreateOrPatchDeployment(blueDeployment, c.kubeClient); err != nil { + if err := updater.CreateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, blueDeployment); err != nil { msg := fmt.Sprintf("create blue deployment: %s error: %v", c.jobTaskSpec.BlueWorkloadName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go index 56e965e24b..52b99fd246 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go @@ -80,7 +80,7 @@ func (c *BlueGreenReleaseJobCtl) Clean(ctx context.Context) { return } // clear intermediate state resources - if err := updater.DeleteDeploymentAndWaitWithTimeout(c.jobTaskSpec.Namespace, c.jobTaskSpec.BlueWorkloadName, config.DefaultDeleteDeploymentTimeout, kubeClient); err != nil { + if err := updater.DeleteDeploymentAndWaitV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, config.DefaultDeleteDeploymentTimeout, updater.WithName(c.jobTaskSpec.BlueWorkloadName)); err != nil { c.logger.Errorf("delete old deployment error: %v", err) } // if it was the first time blue-green deployment, clean the origin labels. 
@@ -143,7 +143,7 @@ func (c *BlueGreenReleaseJobCtl) Run(ctx context.Context) { c.jobTaskSpec.Events.Error(msg) c.ack() } - if err := updater.DeleteDeploymentAndWaitWithTimeout(c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, config.DefaultDeleteDeploymentTimeout, c.kubeClient); err != nil { + if err := updater.DeleteDeploymentAndWaitV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, config.DefaultDeleteDeploymentTimeout, updater.WithName(c.jobTaskSpec.WorkloadName)); err != nil { msg := fmt.Sprintf("delete old deployment: %s failed: %v", c.jobTaskSpec.WorkloadName, err) logError(c.job, msg, c.logger) return diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go index f72eb8e4cc..7b58e6406d 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go @@ -84,7 +84,7 @@ func (c *BlueGreenReleaseV2JobCtl) Clean(ctx context.Context) { } // ensure delete blue deployment and service - err = updater.DeleteDeploymentAndWait(c.namespace, c.jobTaskSpec.Service.BlueDeploymentName, c.kubeClient) + err = updater.DeleteDeploymentAndWaitV2(context.Background(), clusterID, c.namespace, config.DefaultDeleteDeploymentTimeout, updater.WithName(c.jobTaskSpec.Service.BlueDeploymentName)) if err != nil { c.logger.Warnf("can't delete blue deployment %s, err: %v", c.jobTaskSpec.Service.BlueDeploymentName, err) } @@ -172,7 +172,7 @@ func (c *BlueGreenReleaseV2JobCtl) run(ctx context.Context) error { // update green deployment image to new version for _, v := range c.jobTaskSpec.Service.ServiceAndImage { - err := updater.UpdateDeploymentImage(c.namespace, c.jobTaskSpec.Service.GreenDeploymentName, v.ServiceModule, v.Image, c.kubeClient) + err := 
updater.UpdateDeploymentImageV2(ctx, clusterID, c.namespace, c.jobTaskSpec.Service.GreenDeploymentName, v.ServiceModule, v.Image) if err != nil { msg := fmt.Sprintf("can't update deployment %s container %s image %s, err: %v", c.jobTaskSpec.Service.GreenDeploymentName, v.ServiceModule, v.Image, err) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_deploy.go index a7816cbbf2..c9342d5162 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_deploy.go @@ -129,7 +129,7 @@ func (c *CanaryDeployJobCtl) run(ctx context.Context) error { break } } - if err := updater.CreateOrPatchDeployment(deployment, c.kubeClient); err != nil { + if err := updater.CreateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, deployment); err != nil { msg := fmt.Sprintf("create canary deployment: %s failed: %v", c.jobTaskSpec.CanaryWorkloadName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_release.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_release.go index a1a509d1d2..c300fff920 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_release.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_canary_release.go @@ -63,14 +63,8 @@ func NewCanaryReleaseJobCtl(job *commonmodels.JobTask, workflowCtx *commonmodels } func (c *CanaryReleaseJobCtl) Clean(ctx context.Context) { - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(c.jobTaskSpec.ClusterID) - if err != nil { - c.logger.Errorf("can't init k8s client: 
%v", err) - return - } - canarydeploymentName := c.jobTaskSpec.WorkloadName + CanaryDeploymentSuffix - if err := updater.DeleteDeploymentAndWaitWithTimeout(c.jobTaskSpec.Namespace, canarydeploymentName, time.Duration(c.timeout())*time.Second, kubeClient); err != nil { + if err := updater.DeleteDeploymentAndWaitV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, time.Duration(c.timeout())*time.Second, updater.WithName(canarydeploymentName)); err != nil { c.logger.Errorf("delete canary deployment %s error: %v", canarydeploymentName, err) } } @@ -95,7 +89,7 @@ func (c *CanaryReleaseJobCtl) run(ctx context.Context) error { } canarydeploymentName := c.jobTaskSpec.WorkloadName + CanaryDeploymentSuffix - if err := updater.DeleteDeploymentAndWaitWithTimeout(c.jobTaskSpec.Namespace, canarydeploymentName, time.Duration(c.timeout())*time.Second, c.kubeClient); err != nil { + if err := updater.DeleteDeploymentAndWaitV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, time.Duration(c.timeout())*time.Second, updater.WithName(canarydeploymentName)); err != nil { msg := fmt.Sprintf("delete canary deployment %s error: %v", canarydeploymentName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) @@ -104,7 +98,7 @@ func (c *CanaryReleaseJobCtl) run(ctx context.Context) error { msg := fmt.Sprintf("canary deployment: %s deleted", canarydeploymentName) c.jobTaskSpec.Events.Info(msg) c.ack() - if err := updater.UpdateDeploymentImage(c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, c.jobTaskSpec.ContainerName, c.jobTaskSpec.Image, c.kubeClient); err != nil { + if err := updater.UpdateDeploymentImageV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, c.jobTaskSpec.ContainerName, c.jobTaskSpec.Image); err != nil { msg := fmt.Sprintf("update deployment: %s image error: %v", c.jobTaskSpec.WorkloadName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) diff --git 
a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go index 3e5ee05c84..9ddbc3a940 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go @@ -133,7 +133,7 @@ func (c *CustomDeployJobCtl) run(ctx context.Context) error { } for _, container := range deployment.Spec.Template.Spec.Containers { if container.Name == c.jobTaskSpec.ContainerName { - err = updater.UpdateDeploymentImage(deployment.Namespace, deployment.Name, container.Name, c.jobTaskSpec.Image, c.kubeClient) + err = updater.UpdateDeploymentImageV2(ctx, c.jobTaskSpec.ClusterID, deployment.Namespace, deployment.Name, container.Name, c.jobTaskSpec.Image) if err != nil { err = errors.WithMessagef( err, diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go index 4d4cbb610b..87268e87a2 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go @@ -422,7 +422,7 @@ L: // Check if Deployment is stuck before updating isStuck := kube.IsDeploymentStuckInUpdate(deploy, logger) - err = updater.UpdateDeploymentImage(deploy.Namespace, deploy.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient) + err = updater.UpdateDeploymentImageV2(ctx, env.ClusterID, deploy.Namespace, deploy.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/deployments/%s/%s: %v", env.Namespace, deploy.Name, container.Name, err) } @@ -455,7 +455,7 @@ L: for _, container := range 
deploy.Spec.Template.Spec.InitContainers { if container.Name == serviceModule.ServiceModule { - err = updater.UpdateDeploymentInitImage(deploy.Namespace, deploy.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient) + err = updater.UpdateDeploymentInitImageV2(ctx, env.ClusterID, deploy.Namespace, deploy.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/deployments/%s/%s: %v", env.Namespace, deploy.Name, container.Name, err) } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_release.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_release.go index 399c16c079..7d0aaf436c 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_release.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_release.go @@ -21,6 +21,7 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" crClient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/koderover/zadig/v2/pkg/microservice/aslan/config" @@ -104,12 +105,25 @@ func (c *GrayReleaseJobCtl) Run(ctx context.Context) { } } - if err := updater.CreateOrPatchDeployment(deployment, c.kubeClient); err != nil { + if err := updater.UpdateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, func(d *appsv1.Deployment) error { + if d.Annotations == nil { + d.Annotations = make(map[string]string) + } + for _, container := range d.Spec.Template.Spec.Containers { + if container.Name == c.jobTaskSpec.ContainerName { + d.Annotations[config.GrayImageAnnotationKey] = container.Image + break + } + } + d.Annotations[config.GrayContainerAnnotationKey] = c.jobTaskSpec.ContainerName + d.Annotations[config.GrayReplicaAnnotationKey] = strconv.Itoa(c.jobTaskSpec.TotalReplica) + return 
nil + }); err != nil { c.Errorf("add annotations to origin deployment: %s failed: %v", c.jobTaskSpec.WorkloadName, err) return } c.jobTaskSpec.Events.Info(fmt.Sprintf("add annotations to origin deployment: %s", c.jobTaskSpec.WorkloadName)) - if err := updater.CreateOrPatchDeployment(grayDeployment, c.kubeClient); err != nil { + if err := updater.CreateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, grayDeployment); err != nil { c.Errorf("create gray release deployment: %s failed: %v", c.jobTaskSpec.GrayWorkloadName, err) return } @@ -124,7 +138,7 @@ func (c *GrayReleaseJobCtl) Run(ctx context.Context) { } c.jobTaskSpec.Events.Info(fmt.Sprintf("gray release deployment: %s ready", c.jobTaskSpec.GrayWorkloadName)) c.ack() - if err := updater.ScaleDeployment(c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, leftReplica, c.kubeClient); err != nil { + if err := updater.ScaleDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, leftReplica); err != nil { c.Errorf("update origin deployment: %s failed: %v", c.jobTaskSpec.WorkloadName, err) return } @@ -155,7 +169,16 @@ func (c *GrayReleaseJobCtl) Run(ctx context.Context) { break } } - if err := updater.CreateOrPatchDeployment(deployment, c.kubeClient); err != nil { + if err := updater.UpdateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, func(d *appsv1.Deployment) error { + d.Spec.Replicas = int32Ptr(int32(c.jobTaskSpec.TotalReplica)) + for i, container := range d.Spec.Template.Spec.Containers { + if container.Name == c.jobTaskSpec.ContainerName { + d.Spec.Template.Spec.Containers[i].Image = c.jobTaskSpec.Image + break + } + } + return nil + }); err != nil { c.Errorf("update origin deployment: %s failed: %v", c.jobTaskSpec.WorkloadName, err) return } @@ -170,7 +193,7 @@ func (c *GrayReleaseJobCtl) Run(ctx context.Context) { c.jobTaskSpec.Events.Info(fmt.Sprintf("deployment: %s image set to %s", 
c.jobTaskSpec.WorkloadName, c.jobTaskSpec.Image)) c.ack() - if err := updater.DeleteDeploymentAndWaitWithTimeout(c.jobTaskSpec.Namespace, c.jobTaskSpec.GrayWorkloadName, time.Duration(c.timeout())*time.Second, c.kubeClient); err != nil { + if err := updater.DeleteDeploymentAndWaitV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, time.Duration(c.timeout())*time.Second, updater.WithName(c.jobTaskSpec.GrayWorkloadName)); err != nil { msg := fmt.Sprintf("delete gray deployment %s error: %v", c.jobTaskSpec.GrayWorkloadName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) @@ -180,7 +203,7 @@ func (c *GrayReleaseJobCtl) Run(ctx context.Context) { c.job.Status = config.StatusPassed return } - if err := updater.ScaleDeployment(c.jobTaskSpec.Namespace, c.jobTaskSpec.GrayWorkloadName, c.jobTaskSpec.GrayReplica, c.kubeClient); err != nil { + if err := updater.ScaleDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.GrayWorkloadName, c.jobTaskSpec.GrayReplica); err != nil { c.Errorf("update gray release deployment: %s failed: %v", c.jobTaskSpec.WorkloadName, err) return } @@ -193,7 +216,7 @@ func (c *GrayReleaseJobCtl) Run(ctx context.Context) { } c.jobTaskSpec.Events.Info(fmt.Sprintf("gray release deployment: %s replica set to %d", c.jobTaskSpec.GrayWorkloadName, c.jobTaskSpec.GrayReplica)) c.ack() - if err := updater.ScaleDeployment(c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, leftReplica, c.kubeClient); err != nil { + if err := updater.ScaleDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, leftReplica); err != nil { c.Errorf("update origin deployment: %s failed: %v", c.jobTaskSpec.WorkloadName, err) return } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_rollback.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_rollback.go index 2b70e03021..774b6c7298 100644 --- 
a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_rollback.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_gray_rollback.go @@ -20,6 +20,7 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "go.uber.org/zap" + appsv1 "k8s.io/api/apps/v1" crClient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/koderover/zadig/v2/pkg/microservice/aslan/config" @@ -70,19 +71,16 @@ func (c *GrayRollbackJobCtl) Run(ctx context.Context) { c.Errorf("can't init k8s client: %v", err) return } - deployment, found, err := getter.GetDeployment(c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, c.kubeClient) - if err != nil || !found { - c.Errorf("deployment: %s not found: %v", c.jobTaskSpec.WorkloadName, err) - return - } - deployment.Spec.Replicas = int32Ptr(int32(c.jobTaskSpec.TotalReplica)) - for i := range deployment.Spec.Template.Spec.Containers { - if deployment.Spec.Template.Spec.Containers[i].Name == c.jobTaskSpec.ContainerName { - deployment.Spec.Template.Spec.Containers[i].Image = c.jobTaskSpec.Image - break + if err := updater.UpdateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.WorkloadName, func(d *appsv1.Deployment) error { + d.Spec.Replicas = int32Ptr(int32(c.jobTaskSpec.TotalReplica)) + for i, container := range d.Spec.Template.Spec.Containers { + if container.Name == c.jobTaskSpec.ContainerName { + d.Spec.Template.Spec.Containers[i].Image = c.jobTaskSpec.Image + break + } } - } - if err := updater.CreateOrPatchDeployment(deployment, c.kubeClient); err != nil { + return nil + }); err != nil { c.Errorf("update origin deployment: %s failed: %v", c.jobTaskSpec.WorkloadName, err) return } @@ -97,13 +95,13 @@ func (c *GrayRollbackJobCtl) Run(ctx context.Context) { c.jobTaskSpec.Events.Info(fmt.Sprintf("deployment: %s image set to %s", c.jobTaskSpec.WorkloadName, c.jobTaskSpec.Image)) c.ack() - _, found, err = 
getter.GetDeployment(c.jobTaskSpec.Namespace, c.jobTaskSpec.GrayWorkloadName, c.kubeClient) + _, found, err := getter.GetDeployment(c.jobTaskSpec.Namespace, c.jobTaskSpec.GrayWorkloadName, c.kubeClient) if err != nil { c.Errorf("get gray release deployment: %s error: %v", c.jobTaskSpec.GrayWorkloadName, err) return } if found { - if err := updater.DeleteDeploymentAndWaitWithTimeout(c.jobTaskSpec.Namespace, c.jobTaskSpec.GrayWorkloadName, time.Duration(c.timeout())*time.Second, c.kubeClient); err != nil { + if err := updater.DeleteDeploymentAndWaitV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, time.Duration(c.timeout())*time.Second, updater.WithName(c.jobTaskSpec.GrayWorkloadName)); err != nil { msg := fmt.Sprintf("delete gray deployment %s error: %v", c.jobTaskSpec.GrayWorkloadName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_release.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_release.go index b369eca74d..c1b7c8582e 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_release.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_release.go @@ -144,7 +144,17 @@ func (c *IstioReleaseJobCtl) Run(ctx context.Context) { if c.jobTaskSpec.FirstJob { c.Infof("Adding annotation to original deployment: %s", c.jobTaskSpec.Targets.WorkloadName) c.ack() - if err := updater.CreateOrPatchDeployment(deployment, c.kubeClient); err != nil { + if err := updater.UpdateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.Targets.WorkloadName, func(d *appsv1.Deployment) error { + if d.Annotations == nil { + d.Annotations = make(map[string]string) + } + if c.jobTaskSpec.Targets.VirtualServiceName != "" { + d.Annotations[ZadigIstioOriginalVSLabel] = c.jobTaskSpec.Targets.VirtualServiceName + } 
else { + d.Annotations[ZadigIstioOriginalVSLabel] = "none" + } + return nil + }); err != nil { c.Errorf("add annotations to origin deployment: %s failed: %v", c.jobTaskSpec.Targets.WorkloadName, err) return } @@ -200,7 +210,7 @@ func (c *IstioReleaseJobCtl) Run(ctx context.Context) { c.Infof("Creating deployment copy for deployment: %s", c.jobTaskSpec.Targets.WorkloadName) c.ack() - if err := updater.CreateOrPatchDeployment(newDeployment, c.kubeClient); err != nil { + if err := updater.CreateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, newDeployment); err != nil { c.Errorf("creating deployment copy: %s failed: %v", fmt.Sprintf("%s-%s", deployment.Name, config.ZadigIstioCopySuffix), err) return } @@ -482,13 +492,29 @@ func (c *IstioReleaseJobCtl) Run(ctx context.Context) { targetReplica := int32(c.jobTaskSpec.Replicas) deployment.Spec.Replicas = &targetReplica - c.Infof("updating the original workload %s with the new image: %s", deployment.Name, c.jobTaskSpec.Targets.Image) - c.ack() + c.Infof("updating the original workload %s with the new image: %s", deployment.Name, c.jobTaskSpec.Targets.Image) + c.ack() - if err := updater.CreateOrPatchDeployment(deployment, c.kubeClient); err != nil { - c.Errorf("update origin deployment: %s failed: %v", deployment.Name, err) - return + if err := updater.UpdateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, deployment.Name, func(d *appsv1.Deployment) error { + var oldImg string + for i, container := range d.Spec.Template.Spec.Containers { + if container.Name == c.jobTaskSpec.Targets.ContainerName { + oldImg = container.Image + d.Spec.Template.Spec.Containers[i].Image = c.jobTaskSpec.Targets.Image + } + } + if d.Annotations == nil { + d.Annotations = make(map[string]string) } + d.Annotations[config.ZadigLastAppliedReplicas] = strconv.Itoa(int(*d.Spec.Replicas)) + d.Annotations[config.ZadigLastAppliedImage] = oldImg + targetReplica := int32(c.jobTaskSpec.Replicas) + d.Spec.Replicas = 
&targetReplica + return nil + }); err != nil { + c.Errorf("update origin deployment: %s failed: %v", deployment.Name, err) + return + } // waiting for original deployment to run c.Infof("Waiting for deployment: %s to start", c.jobTaskSpec.Targets.WorkloadName) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_rollback.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_rollback.go index 1907f77baf..9658867eb8 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_rollback.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_istio_rollback.go @@ -22,7 +22,7 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "go.uber.org/zap" "istio.io/api/networking/v1alpha3" - corev1 "k8s.io/api/core/v1" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" crClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -154,20 +154,18 @@ func (c *IstioRollbackJobCtl) Run(ctx context.Context) { return } replicas := int32(replica) - delete(deployment.Annotations, config.ZadigLastAppliedReplicas) - delete(deployment.Annotations, config.ZadigLastAppliedImage) - deployment.Spec.Replicas = &replicas - containerList := make([]corev1.Container, 0) - for _, container := range deployment.Spec.Template.Spec.Containers { - newContainer := container.DeepCopy() - if container.Name == c.jobTaskSpec.Targets.ContainerName { - newContainer.Image = image - } - containerList = append(containerList, *newContainer) - } - deployment.Spec.Template.Spec.Containers = containerList c.logger.Infof("reverting deployment: %s", deployment.Name) - if err := updater.CreateOrPatchDeployment(deployment, c.kubeClient); err != nil { + if err := updater.UpdateDeploymentV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.Targets.WorkloadName, func(d *appsv1.Deployment) error { + delete(d.Annotations, 
config.ZadigLastAppliedReplicas) + delete(d.Annotations, config.ZadigLastAppliedImage) + d.Spec.Replicas = &replicas + for i, container := range d.Spec.Template.Spec.Containers { + if container.Name == c.jobTaskSpec.Targets.ContainerName { + d.Spec.Template.Spec.Containers[i].Image = image + } + } + return nil + }); err != nil { logError(c.job, fmt.Sprintf("creating deployment copy: %s failed: %v", fmt.Sprintf("%s-%s", deployment.Name, config.ZadigIstioCopySuffix), err), c.logger) return } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go index aca0fdb488..bf05190b54 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go @@ -19,7 +19,6 @@ package jobcontroller import ( "context" "fmt" - "sort" "time" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -28,11 +27,8 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" versionedclient "istio.io/client-go/pkg/clientset/versioned" - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" crClient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/koderover/zadig/v2/pkg/microservice/aslan/config" @@ -41,8 +37,6 @@ import ( "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/service/kube" "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/service/repository" "github.com/koderover/zadig/v2/pkg/setting" - "github.com/koderover/zadig/v2/pkg/tool/kube/getter" - "github.com/koderover/zadig/v2/pkg/tool/kube/updater" ) type RestartJobCtl struct { @@ -127,20 +121,6 @@ func (c *RestartJobCtl) run(ctx context.Context) error { return errors.New(msg) } - c.informer, err = 
clientmanager.NewKubeClientManager().GetInformer(c.jobTaskSpec.ClusterID, c.namespace) - if err != nil { - msg := fmt.Sprintf("can't init k8s informer: %v", err) - logError(c.job, msg, c.logger) - return errors.New(msg) - } - - c.istioClient, err = clientmanager.NewKubeClientManager().GetIstioClientSet(c.jobTaskSpec.ClusterID) - if err != nil { - msg := fmt.Sprintf("can't init k8s istio client: %v", err) - logError(c.job, msg, c.logger) - return errors.New(msg) - } - if c.jobTaskSpec.DeployType == setting.K8SDeployType { err = c.restartK8sService(ctx, env, c.jobTaskSpec.ServiceName) if err != nil { @@ -188,7 +168,7 @@ func (c *RestartJobCtl) restartK8sService(ctx context.Context, env *commonmodels return fmt.Errorf("failed to fetch imported manifests: %v", err) } - replaceResources, relatedPodLabels, err := restartWorkloadResources(ctx, c.kubeClient, c.clientSet, resources, env) + replaceResources, relatedPodLabels, err := restartWorkloadResources(ctx, c.jobTaskSpec.ClusterID, resources, env) if err != nil { return fmt.Errorf("failed to restart workload resources: %v", err) } @@ -245,7 +225,7 @@ func (c *RestartJobCtl) restartHelmService(ctx context.Context, env *commonmodel } } - replaceResources, relatedPodLabels, err := restartWorkloadResources(ctx, c.kubeClient, c.clientSet, resources, env) + replaceResources, relatedPodLabels, err := restartWorkloadResources(ctx, c.jobTaskSpec.ClusterID, resources, env) if err != nil { return fmt.Errorf("failed to restart workload resources: %v", err) } @@ -256,64 +236,64 @@ func (c *RestartJobCtl) restartHelmService(ctx context.Context, env *commonmodel return nil } -func restartWorkloadResources(ctx context.Context, kubeClient client.Client, clientSet *kubernetes.Clientset, resources []*kube.WorkloadResource, env *commonmodels.Product) (replaceResources []commonmodels.Resource, relatedPodLabels []map[string]string, err error) { - deployments, statefulSets, _, _, _, err := kube.FetchSelectedWorkloads(env.Namespace, resources, 
kubeClient, clientSet) - if err != nil { - return nil, nil, err - } - - for _, deployment := range deployments { - err = updater.RestartDeployment(deployment.Namespace, deployment.Name, kubeClient) - if err != nil { - return nil, nil, fmt.Errorf("failed to restart deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) - } - - selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, nil, fmt.Errorf("failed to get selector for deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) - } - - // ensure latest replicaset to be created - replicaSets, err := getter.ListReplicaSets(deployment.Namespace, selector, kubeClient) - if err != nil { - return nil, nil, fmt.Errorf("failed to list replica sets for deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) - } - - // Only include those whose ControllerRef matches the Deployment. - owned := make([]*appsv1.ReplicaSet, 0, len(replicaSets)) - for _, rs := range replicaSets { - if metav1.IsControlledBy(rs, deployment) { - owned = append(owned, rs) - } - } - if len(owned) <= 0 { - return nil, nil, fmt.Errorf("no replicaset found for deployment: %s", deployment.Name) - } - sort.Slice(owned, func(i, j int) bool { - return owned[i].CreationTimestamp.After(owned[j].CreationTimestamp.Time) - }) - - replaceResources = append(replaceResources, commonmodels.Resource{ - Kind: setting.Deployment, - Name: deployment.Name, - PodOwnerUID: string(owned[0].ObjectMeta.UID), - }) - relatedPodLabels = append(relatedPodLabels, deployment.Spec.Template.Labels) - } - - for _, sts := range statefulSets { - err = updater.RestartStatefulSet(sts.Namespace, sts.Name, kubeClient) - if err != nil { - return nil, nil, fmt.Errorf("failed to restart statefulset %s/%s: %v", sts.Namespace, sts.Name, err) - } - - replaceResources = append(replaceResources, commonmodels.Resource{ - Kind: setting.StatefulSet, - Name: sts.Name, - PodOwnerUID: string(sts.ObjectMeta.UID), - }) - 
relatedPodLabels = append(relatedPodLabels, sts.Spec.Template.Labels) - } +func restartWorkloadResources(ctx context.Context, clusterID string, resources []*kube.WorkloadResource, env *commonmodels.Product) (replaceResources []commonmodels.Resource, relatedPodLabels []map[string]string, err error) { + // deployments, statefulSets, _, _, _, err := kube.FetchSelectedWorkloads(env.Namespace, resources, kubeClient, clientSet) + // if err != nil { + // return nil, nil, err + // } + + // for _, deployment := range deployments { + // err = updater.RestartDeploymentV2(ctx, clusterID, deployment.Namespace, deployment.Name) + // if err != nil { + // return nil, nil, fmt.Errorf("failed to restart deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) + // } + + // selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector) + // if err != nil { + // return nil, nil, fmt.Errorf("failed to get selector for deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) + // } + + // // ensure latest replicaset to be created + // replicaSets, err := getter.ListReplicaSets(deployment.Namespace, selector, kubeClient) + // if err != nil { + // return nil, nil, fmt.Errorf("failed to list replica sets for deployment %s/%s: %v", deployment.Namespace, deployment.Name, err) + // } + + // // Only include those whose ControllerRef matches the Deployment. 
+ // owned := make([]*appsv1.ReplicaSet, 0, len(replicaSets)) + // for _, rs := range replicaSets { + // if metav1.IsControlledBy(rs, deployment) { + // owned = append(owned, rs) + // } + // } + // if len(owned) <= 0 { + // return nil, nil, fmt.Errorf("no replicaset found for deployment: %s", deployment.Name) + // } + // sort.Slice(owned, func(i, j int) bool { + // return owned[i].CreationTimestamp.After(owned[j].CreationTimestamp.Time) + // }) + + // replaceResources = append(replaceResources, commonmodels.Resource{ + // Kind: setting.Deployment, + // Name: deployment.Name, + // PodOwnerUID: string(owned[0].ObjectMeta.UID), + // }) + // relatedPodLabels = append(relatedPodLabels, deployment.Spec.Template.Labels) + // } + + // for _, sts := range statefulSets { + // err = updater.RestartStatefulSet(sts.Namespace, sts.Name, kubeClient) + // if err != nil { + // return nil, nil, fmt.Errorf("failed to restart statefulset %s/%s: %v", sts.Namespace, sts.Name, err) + // } + + // replaceResources = append(replaceResources, commonmodels.Resource{ + // Kind: setting.StatefulSet, + // Name: sts.Name, + // PodOwnerUID: string(sts.ObjectMeta.UID), + // }) + // relatedPodLabels = append(relatedPodLabels, sts.Spec.Template.Labels) + // } return replaceResources, relatedPodLabels, nil } diff --git a/pkg/microservice/aslan/core/environment/service/environment.go b/pkg/microservice/aslan/core/environment/service/environment.go index 1487a1efb8..0984ff7dfa 100644 --- a/pkg/microservice/aslan/core/environment/service/environment.go +++ b/pkg/microservice/aslan/core/environment/service/environment.go @@ -2868,7 +2868,7 @@ func getProjectType(productName string) string { } func restartRelatedWorkloads(env *commonmodels.Product, service *commonmodels.ProductService, - kubeClient client.Client, log *zap.SugaredLogger) error { + clusterID string, log *zap.SugaredLogger) error { parsedYaml, err := kube.RenderEnvService(env, service.GetServiceRender(), service) if err != nil { return 
fmt.Errorf("service template %s error: %v", service.ServiceName, err)
@@ -2888,10 +2888,10 @@ func restartRelatedWorkloads(env *commonmodels.Pr
 	for _, u := range resources {
 		switch u.GetKind() {
 		case setting.Deployment:
-			err = updater.RestartDeployment(env.Namespace, u.GetName(), kubeClient)
+			err = updater.RestartDeploymentV2(context.Background(), clusterID, env.Namespace, u.GetName())
 			return errors.Wrapf(err, "failed to restart deployment %s", u.GetName())
 		case setting.StatefulSet:
-			err = updater.RestartStatefulSet(env.Namespace, u.GetName(), kubeClient)
+			err = updater.RestartStatefulSetV2(context.Background(), clusterID, env.Namespace, u.GetName())
 			return errors.Wrapf(err, "failed to restart statefulset %s", u.GetName())
 		}
 	}
@@ -4450,7 +4450,7 @@ func EnvSleep(productName, envName string, isEnable, isProduction bool, log *zap
 	switch workload.Type {
 	case setting.Deployment:
 		log.Infof("scale workload %s(%s) to %d", workload.Name, workload.Type, scaleNum)
-		err := updater.ScaleDeployment(prod.Namespace, workload.Name, scaleNum, kubeClient)
+		err := updater.ScaleDeploymentV2(context.TODO(), prod.ClusterID, prod.Namespace, workload.Name, scaleNum)
 		if err != nil {
 			log.Errorf("failed to scale %s/deploy/%s to %d", prod.Namespace, workload.Name, scaleNum)
 		}
diff --git a/pkg/microservice/aslan/core/environment/service/image.go b/pkg/microservice/aslan/core/environment/service/image.go
index 287be0b42f..f3388f134b 100644
--- a/pkg/microservice/aslan/core/environment/service/image.go
+++ b/pkg/microservice/aslan/core/environment/service/image.go
@@ -142,7 +142,7 @@ func UpdateContainerImage(requestID, username string, args *UpdateContainerImage
 	} else {
 		switch args.Type {
 		case setting.Deployment:
-			if err := updater.UpdateDeploymentImage(namespace, args.Name, args.ContainerName, args.Image, kubeClient); err != nil {
+			if err := updater.UpdateDeploymentImageV2(context.TODO(), product.ClusterID, namespace, args.Name, args.ContainerName, args.Image); err != nil {
log.Errorf("[%s] UpdateDeploymentImageByName error: %s", namespace, err.Error()) return e.ErrUpdateConainterImage.AddDesc("更新 Deployment 容器镜像失败") } diff --git a/pkg/microservice/aslan/core/environment/service/service.go b/pkg/microservice/aslan/core/environment/service/service.go index b342cdb3e7..a75c8940f8 100644 --- a/pkg/microservice/aslan/core/environment/service/service.go +++ b/pkg/microservice/aslan/core/environment/service/service.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "fmt" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -48,6 +49,7 @@ import ( "github.com/koderover/zadig/v2/pkg/util" ) + func RestartScale(args *RestartScaleArgs, production bool, _ *zap.SugaredLogger) error { opt := &commonrepo.ProductFindOptions{ Name: args.ProductName, @@ -85,7 +87,7 @@ func RestartScale(args *RestartScaleArgs, production bool, _ *zap.SugaredLogger) switch args.Type { case setting.Deployment: - err = updater.RestartDeployment(prod.Namespace, args.Name, kubeClient) + err = updater.RestartDeploymentV2(context.Background(), prod.ClusterID, prod.Namespace, args.Name) case setting.StatefulSet: err = updater.RestartStatefulSet(prod.Namespace, args.Name, kubeClient) } @@ -428,7 +430,7 @@ func RestartService(envName string, args *SvcOptArgs, production bool, log *zap. return fmt.Errorf("failed to find resource %s, type %s, err %s", args.ServiceName, setting.Deployment, err.Error()) } if found { - return updater.RestartDeployment(productObj.Namespace, deploy.Name, kubeClient) + return updater.RestartDeploymentV2(context.Background(), productObj.ClusterID, productObj.Namespace, deploy.Name) } sts, found, err := getter.GetStatefulSet(productObj.Namespace, args.ServiceName, kubeClient) @@ -446,7 +448,7 @@ func RestartService(envName string, args *SvcOptArgs, production bool, log *zap. 
} productService = serviceObj - err = restartRelatedWorkloads(productObj, productService, kubeClient, log) + err = restartRelatedWorkloads(productObj, productService, productObj.ClusterID, log) log.Infof("restart resource from namespace:%s/serviceName:%s ", productObj.Namespace, args.ServiceName) if err != nil { diff --git a/pkg/tool/kube/updater/base.go b/pkg/tool/kube/updater/base.go index 59651e7e11..4c74e7f93d 100644 --- a/pkg/tool/kube/updater/base.go +++ b/pkg/tool/kube/updater/base.go @@ -234,3 +234,22 @@ func deleteObjectsAndWait(ns string, selector labels.Selector, obj client.Object return len(us) == 0, nil }) } + +/* +V2 related base definitions +*/ + +type deleteConfig struct { + name string + selector string +} + +type DeleteOption func(*deleteConfig) + +func WithName(name string) DeleteOption { + return func(c *deleteConfig) { c.name = name } +} + +func WithSelector(selector string) DeleteOption { + return func(c *deleteConfig) { c.selector = selector } +} \ No newline at end of file From b363c75faf7c55bd3ca8426d33eeb73457737a0b Mon Sep 17 00:00:00 2001 From: Min Min Date: Wed, 18 Mar 2026 16:08:07 +0800 Subject: [PATCH 02/21] add missing file Signed-off-by: Min Min --- pkg/tool/kube/updater/deployment_v2.go | 450 +++++++++++++++++++++++++ 1 file changed, 450 insertions(+) create mode 100644 pkg/tool/kube/updater/deployment_v2.go diff --git a/pkg/tool/kube/updater/deployment_v2.go b/pkg/tool/kube/updater/deployment_v2.go new file mode 100644 index 0000000000..b43246cf8e --- /dev/null +++ b/pkg/tool/kube/updater/deployment_v2.go @@ -0,0 +1,450 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "encoding/json" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func RestartDeploymentV2(ctx context.Context, clusterID, namespace, name string) error { + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + deploy := &appsv1.Deployment{} + deployKey := client.ObjectKey{Namespace: namespace, Name: name} + if err := c.Get(ctx, deployKey, deploy); err != nil { + return fmt.Errorf("failed to get deployment %s/%s: %w", namespace, name, err) + } + + selector, err := metav1.LabelSelectorAsSelector(deploy.Spec.Selector) + if err != nil { + return fmt.Errorf("failed to parse deployment selector: %w", err) + } + + deleteOpts := []client.DeleteAllOfOption{ + client.InNamespace(namespace), + client.MatchingLabelsSelector{Selector: selector}, + } + + pod := &corev1.Pod{} + if err := c.DeleteAllOf(ctx, pod, deleteOpts...); err != nil { + return fmt.Errorf("failed to delete pods for deployment %s/%s: %w", namespace, name, err) + } + + 
return nil +} + +func DeleteDeploymentV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + if config.name != "" { + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: config.name, + }, + } + + propagationPolicy := metav1.DeletePropagationBackground + deleteOpts := &client.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + + err = c.Delete(ctx, deploy, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + if config.selector != "" { + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + deploy := &appsv1.Deployment{} + + propagationPolicy := metav1.DeletePropagationBackground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, deploy, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + return nil +} + +func DeleteDeploymentAndWaitV2(ctx context.Context, clusterID, namespace string, timeout time.Duration, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental 
namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + cli, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + + if config.name != "" { + err = cli.AppsV1().Deployments(namespace).Delete(ctx, config.name, deleteOpts) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to initiate deletion for deployment %s/%s: %w", namespace, config.name, err) + } + + err = wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true, func(c context.Context) (done bool, err error) { + _, errGet := cli.AppsV1().Deployments(namespace).Get(c, config.name, metav1.GetOptions{}) + if apierrors.IsNotFound(errGet) { + return true, nil + } + if errGet != nil { + return false, nil + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("timeout (%v) waiting for deployment %s/%s to be completely deleted: %w", timeout, namespace, config.name, err) + } + + return nil + } + + if config.selector != "" { + err = cli.AppsV1().Deployments(namespace).DeleteCollection(ctx, deleteOpts, metav1.ListOptions{LabelSelector: config.selector}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to initiate deletion for deployments matching %q in %s: %w", config.selector, namespace, err) + } + + err = wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true, func(c context.Context) (done bool, err error) { + list, errList := cli.AppsV1().Deployments(namespace).List(c, metav1.ListOptions{LabelSelector: config.selector}) + if errList != nil { + return false, nil + } + return len(list.Items) == 0, nil + }) + + if err != nil { + return fmt.Errorf("timeout 
(%v) waiting for deployments matching %q in %s to be completely deleted: %w", timeout, config.selector, namespace, err) + } + + return nil + } + + return nil +} + +// CreateOrPatchDeploymentV2 is used when the YAML is fully controlled by this system, it implements a 3-way merge patch for the deployment. +// If we are simply editing the deployment, use UpdateDeploymentV2 instead. +func CreateOrPatchDeploymentV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj appsv1.Deployment + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to Deployment: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("deployment name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj appsv1.Deployment + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + // since there might be 409 conflict on when the object is being updated frequently, we use a retry on conflict to handle it + err = retry.RetryOnConflict(retry.DefaultRetry, func() 
error { + // getting the live object from the cluster + liveObj, err := c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + + // if the object wasn't there, just deploy it + if apierrors.IsNotFound(err) { + _, createErr := c.AppsV1().Deployments(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + // otherwise, calculate the 3-way merge based on + // 1. the original yaml this system saved + // 2. the target yaml this system wants to create + // 3. the live state in the cluster + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&appsv1.Deployment{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.AppsV1().Deployments(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("deployment operation failed after retries: %w", err) + } + + return nil +} + +// UpdateDeploymentV2 takes the cluster and resource info to identify a resource, and use the mutation function to update the object then tries to patch the resource. 
+func UpdateDeploymentV2(ctx context.Context, clusterID, namespace, deploymentName string, mutationFunc func(deployment *appsv1.Deployment) error) error {
+	c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID)
+	if err != nil {
+		return fmt.Errorf("failed to get kube client: %w", err)
+	}
+
+	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		deploy, err := c.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{})
+		if err != nil {
+			return fmt.Errorf("failed to get live deployment: %w", err)
+		}
+
+		if err := mutationFunc(deploy); err != nil {
+			return fmt.Errorf("mutation failed or aborted: %w", err)
+		}
+
+		_, err = c.AppsV1().Deployments(namespace).Update(ctx, deploy, metav1.UpdateOptions{})
+		return err
+	})
+
+	return err
+}
+
+func UpdateDeploymentImageV2(ctx context.Context, clusterID, namespace, deploymentName, containerName, newImage string) error {
+	c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID)
+	if err != nil {
+		return fmt.Errorf("failed to get kube client: %w", err)
+	}
+
+	patchPayload := map[string]interface{}{
+		"spec": map[string]interface{}{
+			"template": map[string]interface{}{
+				"spec": map[string]interface{}{
+					"containers": []map[string]interface{}{
+						{
+							"name":  containerName,
+							"image": newImage,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	patchBytes, err := json.Marshal(patchPayload)
+	if err != nil {
+		return fmt.Errorf("failed to marshal image update patch payload: %w", err)
+	}
+
+	_, err = c.AppsV1().Deployments(namespace).Patch(
+		ctx,
+		deploymentName,
+		types.StrategicMergePatchType,
+		patchBytes,
+		metav1.PatchOptions{},
+	)
+
+	if err != nil {
+		return fmt.Errorf("failed to patch image for deployment %s/%s: %w", namespace, deploymentName, err)
+	}
+
+	return nil
+}
+
+func UpdateDeploymentInitImageV2(ctx context.Context, clusterID, namespace, deploymentName, containerName, newImage string) error {
+	c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID)
+	if err != nil {
+		return fmt.Errorf("failed to get kube client: %w", err)
+	}
+
+	patchPayload := map[string]interface{}{
+		"spec": map[string]interface{}{
+			"template": map[string]interface{}{
+				"spec": map[string]interface{}{
+					"initContainers": []map[string]interface{}{
+						{
+							"name":  containerName,
+							"image": newImage,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	patchBytes, err := json.Marshal(patchPayload)
+	if err != nil {
+		return fmt.Errorf("failed to marshal image update patch payload: %w", err)
+	}
+
+	_, err = c.AppsV1().Deployments(namespace).Patch(
+		ctx,
+		deploymentName,
+		types.StrategicMergePatchType,
+		patchBytes,
+		metav1.PatchOptions{},
+	)
+
+	if err != nil {
+		return fmt.Errorf("failed to patch init image for deployment %s/%s: %w", namespace, deploymentName, err)
+	}
+
+	return nil
+}
+
+func ScaleDeploymentV2(ctx context.Context, clusterID, namespace, name string, replicas int) error {
+	c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID)
+	if err != nil {
+		return fmt.Errorf("failed to get kube client: %w", err)
+	}
+
+	patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas))
+
+	_, err = c.AppsV1().Deployments(namespace).Patch(
+		ctx,
+		name,
+		types.StrategicMergePatchType,
+		patchBytes,
+		metav1.PatchOptions{},
+	)
+
+	if err != nil {
+		return fmt.Errorf("failed to scale deployment %s/%s: %w", namespace, name, err)
+	}
+
+	return nil
+}
+
+func CreateDeploymentV2(ctx context.Context, clusterID, namespace string, deployment *appsv1.Deployment) error {
+	c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID)
+	if err != nil {
+		return fmt.Errorf("failed to get kube client: %w", err)
+	}
+
+	deployment.SetNamespace(namespace)
+	_, err = c.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to create deployment %s/%s: %w", namespace,
deployment.Name, err) + } + + return nil +} \ No newline at end of file From 40593f89cc51171b4f67fa084f9a7cabd23d1e96 Mon Sep 17 00:00:00 2001 From: Min Min Date: Wed, 18 Mar 2026 16:34:16 +0800 Subject: [PATCH 03/21] add statefulset support Signed-off-by: Min Min --- .../aslan/core/common/service/kube/apply.go | 8 +- .../aslan/core/common/service/product.go | 2 +- .../jobcontroller/job_deploy.go | 4 +- .../jobcontroller/job_restart.go | 2 +- .../core/environment/service/environment.go | 2 +- .../aslan/core/environment/service/image.go | 2 +- .../aslan/core/environment/service/service.go | 5 +- pkg/tool/kube/updater/statefulset_v2.go | 443 ++++++++++++++++++ 8 files changed, 458 insertions(+), 10 deletions(-) create mode 100644 pkg/tool/kube/updater/statefulset_v2.go diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 7fd34030a0..5b455f3d5e 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -881,7 +881,13 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - err = updater.CreateOrPatchStatefulSet(res, kubeClient) + resYAML, marshalErr := yaml.Marshal(res) + if marshalErr != nil { + log.Errorf("Failed to marshal statefulset %s to YAML: %v", res.Name, marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + err = updater.CreateOrPatchStatefulSetV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), res, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) diff --git a/pkg/microservice/aslan/core/common/service/product.go 
b/pkg/microservice/aslan/core/common/service/product.go index 57bb37f943..b963dd7885 100644 --- a/pkg/microservice/aslan/core/common/service/product.go +++ b/pkg/microservice/aslan/core/common/service/product.go @@ -92,7 +92,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteReplicaSets error: %v", err)) } - if err := updater.DeleteStatefulSets(namespace, selector, clientset); err != nil { + if err := updater.DeleteStatefulSetV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteStatefulSets error: %v", err)) } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go index 87268e87a2..48a13d1cc7 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go @@ -482,7 +482,7 @@ Loop: // Check if StatefulSet is stuck before updating isStuck := kube.IsStatefulSetStuckInUpdate(sts, logger) - err = updater.UpdateStatefulSetImage(sts.Namespace, sts.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient) + err = updater.UpdateStatefulSetImageV2(ctx, env.ClusterID, sts.Namespace, sts.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/statefulsets/%s/%s: %v", env.Namespace, sts.Name, container.Name, err) } @@ -514,7 +514,7 @@ Loop: } for _, container := range sts.Spec.Template.Spec.InitContainers { if container.Name == serviceModule.ServiceModule { - err = updater.UpdateStatefulSetInitImage(sts.Namespace, sts.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient) + err = 
updater.UpdateStatefulSetInitImageV2(ctx, env.ClusterID, sts.Namespace, sts.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/statefulsets/%s/%s: %v", env.Namespace, sts.Name, container.Name, err) } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go index bf05190b54..1d04f385fc 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_restart.go @@ -282,7 +282,7 @@ func restartWorkloadResources(ctx context.Context, clusterID string, resources [ // } // for _, sts := range statefulSets { - // err = updater.RestartStatefulSet(sts.Namespace, sts.Name, kubeClient) + // err = updater.RestartStatefulSetV2(ctx, clusterID, sts.Namespace, sts.Name) // if err != nil { // return nil, nil, fmt.Errorf("failed to restart statefulset %s/%s: %v", sts.Namespace, sts.Name, err) // } diff --git a/pkg/microservice/aslan/core/environment/service/environment.go b/pkg/microservice/aslan/core/environment/service/environment.go index 0984ff7dfa..414d4ff6e9 100644 --- a/pkg/microservice/aslan/core/environment/service/environment.go +++ b/pkg/microservice/aslan/core/environment/service/environment.go @@ -4456,7 +4456,7 @@ func EnvSleep(productName, envName string, isEnable, isProduction bool, log *zap } case setting.StatefulSet: log.Infof("scale workload %s(%s) to %d", workload.Name, workload.Type, scaleNum) - err := updater.ScaleStatefulSet(prod.Namespace, workload.Name, scaleNum, kubeClient) + err := updater.ScaleStatefulSetV2(context.TODO(), prod.ClusterID, prod.Namespace, workload.Name, scaleNum) if err != nil { log.Errorf("failed to scale %s/sts/%s to %d", prod.Namespace, workload.Name, scaleNum) } diff --git 
a/pkg/microservice/aslan/core/environment/service/image.go b/pkg/microservice/aslan/core/environment/service/image.go index f3388f134b..707d9e9b4d 100644 --- a/pkg/microservice/aslan/core/environment/service/image.go +++ b/pkg/microservice/aslan/core/environment/service/image.go @@ -147,7 +147,7 @@ func UpdateContainerImage(requestID, username string, args *UpdateContainerImage return e.ErrUpdateConainterImage.AddDesc("更新 Deployment 容器镜像失败") } case setting.StatefulSet: - if err := updater.UpdateStatefulSetImage(namespace, args.Name, args.ContainerName, args.Image, kubeClient); err != nil { + if err := updater.UpdateStatefulSetImageV2(context.TODO(), product.ClusterID, namespace, args.Name, args.ContainerName, args.Image); err != nil { log.Errorf("[%s] UpdateStatefulsetImageByName error: %s", namespace, err.Error()) return e.ErrUpdateConainterImage.AddDesc("更新 StatefulSet 容器镜像失败") } diff --git a/pkg/microservice/aslan/core/environment/service/service.go b/pkg/microservice/aslan/core/environment/service/service.go index a75c8940f8..e32f73d4ec 100644 --- a/pkg/microservice/aslan/core/environment/service/service.go +++ b/pkg/microservice/aslan/core/environment/service/service.go @@ -49,7 +49,6 @@ import ( "github.com/koderover/zadig/v2/pkg/util" ) - func RestartScale(args *RestartScaleArgs, production bool, _ *zap.SugaredLogger) error { opt := &commonrepo.ProductFindOptions{ Name: args.ProductName, @@ -89,7 +88,7 @@ func RestartScale(args *RestartScaleArgs, production bool, _ *zap.SugaredLogger) case setting.Deployment: err = updater.RestartDeploymentV2(context.Background(), prod.ClusterID, prod.Namespace, args.Name) case setting.StatefulSet: - err = updater.RestartStatefulSet(prod.Namespace, args.Name, kubeClient) + err = updater.RestartStatefulSetV2(context.Background(), prod.ClusterID, prod.Namespace, args.Name) } if err != nil { @@ -438,7 +437,7 @@ func RestartService(envName string, args *SvcOptArgs, production bool, log *zap. 
return fmt.Errorf("failed to find resource %s, type %s, err %s", args.ServiceName, setting.StatefulSet, err.Error()) } if found { - return updater.RestartStatefulSet(productObj.Namespace, sts.Name, kubeClient) + return updater.RestartStatefulSetV2(context.Background(), productObj.ClusterID, productObj.Namespace, sts.Name) } default: var productService *commonmodels.ProductService diff --git a/pkg/tool/kube/updater/statefulset_v2.go b/pkg/tool/kube/updater/statefulset_v2.go new file mode 100644 index 0000000000..525e798efe --- /dev/null +++ b/pkg/tool/kube/updater/statefulset_v2.go @@ -0,0 +1,443 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package updater + +import ( + "context" + "encoding/json" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func RestartStatefulSetV2(ctx context.Context, clusterID, namespace, name string) error { + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + sts := &appsv1.StatefulSet{} + stsKey := client.ObjectKey{Namespace: namespace, Name: name} + if err := c.Get(ctx, stsKey, sts); err != nil { + return fmt.Errorf("failed to get statefulset %s/%s: %w", namespace, name, err) + } + + selector, err := metav1.LabelSelectorAsSelector(sts.Spec.Selector) + if err != nil { + return fmt.Errorf("failed to parse statefulset selector: %w", err) + } + + deleteOpts := []client.DeleteAllOfOption{ + client.InNamespace(namespace), + client.MatchingLabelsSelector{Selector: selector}, + } + + pod := &corev1.Pod{} + if err := c.DeleteAllOf(ctx, pod, deleteOpts...); err != nil { + return fmt.Errorf("failed to delete pods for statefulset %s/%s: %w", namespace, name, err) + } + + return nil +} + +func DeleteStatefulSetV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector 
!= "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + if config.name != "" { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: config.name, + }, + } + + propagationPolicy := metav1.DeletePropagationBackground + deleteOpts := &client.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + + err = c.Delete(ctx, sts, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + if config.selector != "" { + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + sts := &appsv1.StatefulSet{} + + propagationPolicy := metav1.DeletePropagationBackground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, sts, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + return nil +} + +func DeleteStatefulSetAndWaitV2(ctx context.Context, clusterID, namespace string, timeout time.Duration, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + cli, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + 
PropagationPolicy: &propagationPolicy, + } + + if config.name != "" { + err = cli.AppsV1().StatefulSets(namespace).Delete(ctx, config.name, deleteOpts) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to initiate deletion for statefulset %s/%s: %w", namespace, config.name, err) + } + + err = wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true, func(c context.Context) (done bool, err error) { + _, errGet := cli.AppsV1().StatefulSets(namespace).Get(c, config.name, metav1.GetOptions{}) + if apierrors.IsNotFound(errGet) { + return true, nil + } + if errGet != nil { + return false, nil + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("timeout (%v) waiting for statefulset %s/%s to be completely deleted: %w", timeout, namespace, config.name, err) + } + + return nil + } + + if config.selector != "" { + err = cli.AppsV1().StatefulSets(namespace).DeleteCollection(ctx, deleteOpts, metav1.ListOptions{LabelSelector: config.selector}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil + } + return fmt.Errorf("failed to initiate deletion for statefulsets matching %q in %s: %w", config.selector, namespace, err) + } + + err = wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, true, func(c context.Context) (done bool, err error) { + list, errList := cli.AppsV1().StatefulSets(namespace).List(c, metav1.ListOptions{LabelSelector: config.selector}) + if errList != nil { + return false, nil + } + return len(list.Items) == 0, nil + }) + + if err != nil { + return fmt.Errorf("timeout (%v) waiting for statefulsets matching %q in %s to be completely deleted: %w", timeout, config.selector, namespace, err) + } + + return nil + } + + return nil +} + +// CreateOrPatchStatefulSetV2 is used when the YAML is fully controlled by this system, it implements a 3-way merge patch for the statefulset. +// If we are simply editing the statefulset, use UpdateStatefulSetV2 instead. 
+func CreateOrPatchStatefulSetV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj appsv1.StatefulSet + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to StatefulSet: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("statefulset name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj appsv1.StatefulSet + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.AppsV1().StatefulSets(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := 
strategicpatch.NewPatchMetaFromStruct(&appsv1.StatefulSet{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.AppsV1().StatefulSets(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("statefulset operation failed after retries: %w", err) + } + + return nil +} + +// UpdateStatefulSetV2 takes the cluster and resource info to identify a resource, and uses the mutation function to update the object. +func UpdateStatefulSetV2(ctx context.Context, clusterID, namespace, statefulSetName string, mutationFunc func(sts *appsv1.StatefulSet) error) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + sts, err := c.AppsV1().StatefulSets(namespace).Get(ctx, statefulSetName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get live statefulset: %w", err) + } + + if err := mutationFunc(sts); err != nil { + return fmt.Errorf("mutation failed or aborted: %w", err) + } + + _, err = c.AppsV1().StatefulSets(namespace).Update(ctx, sts, metav1.UpdateOptions{}) + return err + }) + + return err +} + +func UpdateStatefulSetImageV2(ctx context.Context, clusterID, namespace, statefulSetName, containerName, newImage string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + patchPayload := 
map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": containerName, + "image": newImage, + }, + }, + }, + }, + }, + } + + patchBytes, err := json.Marshal(patchPayload) + if err != nil { + return fmt.Errorf("failed to marshal image update patch payload: %w", err) + } + + _, err = c.AppsV1().StatefulSets(namespace).Patch( + ctx, + statefulSetName, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + + if err != nil { + return fmt.Errorf("failed to patch image for statefulset %s/%s: %w", namespace, statefulSetName, err) + } + + return nil +} + +func UpdateStatefulSetInitImageV2(ctx context.Context, clusterID, namespace, statefulSetName, containerName, newImage string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + patchPayload := map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "initContainers": []map[string]interface{}{ + { + "name": containerName, + "image": newImage, + }, + }, + }, + }, + }, + } + + patchBytes, err := json.Marshal(patchPayload) + if err != nil { + return fmt.Errorf("failed to marshal image update patch payload: %w", err) + } + + _, err = c.AppsV1().StatefulSets(namespace).Patch( + ctx, + statefulSetName, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + + if err != nil { + return fmt.Errorf("failed to patch init image for statefulset %s/%s: %w", namespace, statefulSetName, err) + } + + return nil +} + +func ScaleStatefulSetV2(ctx context.Context, clusterID, namespace, name string, replicas int) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) 
+ } + + patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas)) + + _, err = c.AppsV1().StatefulSets(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + + if err != nil { + return fmt.Errorf("failed to scale statefulset %s/%s: %w", namespace, name, err) + } + + return nil +} + +func CreateStatefulSetV2(ctx context.Context, clusterID, namespace string, sts *appsv1.StatefulSet) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + sts.SetNamespace(namespace) + _, err = c.AppsV1().StatefulSets(namespace).Create(ctx, sts, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create statefulset %s/%s: %w", namespace, sts.Name, err) + } + + return nil +} From ba5b01ea5322bb4871804c8e29b111854c7936b2 Mon Sep 17 00:00:00 2001 From: Min Min Date: Wed, 18 Mar 2026 20:24:14 +0800 Subject: [PATCH 04/21] add multiple resource support Signed-off-by: Min Min --- .../aslan/core/common/service/kube/actions.go | 66 +++++----- .../aslan/core/common/service/kube/apply.go | 20 ++- .../aslan/core/common/service/product.go | 67 ++++------ .../jobcontroller/job_blue_green_deploy.go | 24 +++- .../jobcontroller/job_blue_green_deploy_v2.go | 15 ++- .../jobcontroller/job_blue_green_release.go | 15 ++- .../job_blue_green_release_v2.go | 22 +++- .../jobcontroller/job_custom_deploy.go | 6 +- .../jobcontroller/job_deploy.go | 8 +- .../jobcontroller/job_freestyle.go | 12 +- .../jobcontroller/job_plugin.go | 6 +- .../jobcontroller/kubernetes.go | 12 +- .../service/workflowcontroller/workflow.go | 10 +- .../aslan/core/cron/service/cron.go | 45 +++---- .../environment/service/common_env_cfg.go | 37 ++---- .../core/environment/service/configmap.go | 33 ++--- .../core/environment/service/environment.go | 23 ++-- .../service/environment_creator.go | 6 +- 
.../environment/service/environment_update.go | 2 +- .../aslan/core/environment/service/image.go | 2 +- .../aslan/core/environment/service/kube.go | 7 +- .../aslan/core/environment/service/pvc.go | 16 +-- .../aslan/core/environment/service/secret.go | 8 +- pkg/tool/kube/updater/configmap.go | 76 ----------- pkg/tool/kube/updater/cronjob.go | 88 ------------- pkg/tool/kube/updater/deployment.go | 124 ------------------ pkg/tool/kube/updater/ingress.go | 91 ------------- pkg/tool/kube/updater/job.go | 91 ------------- pkg/tool/kube/updater/namespace.go | 52 -------- pkg/tool/kube/updater/pod.go | 62 --------- pkg/tool/kube/updater/pv.go | 42 ------ pkg/tool/kube/updater/pvc.go | 65 --------- pkg/tool/kube/updater/replicaset.go | 42 ------ pkg/tool/kube/updater/role.go | 59 --------- pkg/tool/kube/updater/rolebinding.go | 42 ------ 35 files changed, 220 insertions(+), 1076 deletions(-) delete mode 100644 pkg/tool/kube/updater/configmap.go delete mode 100644 pkg/tool/kube/updater/cronjob.go delete mode 100644 pkg/tool/kube/updater/deployment.go delete mode 100644 pkg/tool/kube/updater/ingress.go delete mode 100644 pkg/tool/kube/updater/job.go delete mode 100644 pkg/tool/kube/updater/namespace.go delete mode 100644 pkg/tool/kube/updater/pod.go delete mode 100644 pkg/tool/kube/updater/pv.go delete mode 100644 pkg/tool/kube/updater/pvc.go delete mode 100644 pkg/tool/kube/updater/replicaset.go delete mode 100644 pkg/tool/kube/updater/role.go delete mode 100644 pkg/tool/kube/updater/rolebinding.go diff --git a/pkg/microservice/aslan/core/common/service/kube/actions.go b/pkg/microservice/aslan/core/common/service/kube/actions.go index 4f122ecd49..dcbe347a61 100644 --- a/pkg/microservice/aslan/core/common/service/kube/actions.go +++ b/pkg/microservice/aslan/core/common/service/kube/actions.go @@ -21,7 +21,6 @@ import ( "fmt" "regexp" "strings" - "time" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -31,6 +30,8 @@ import ( 
"k8s.io/apimachinery/pkg/selection" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/microservice/aslan/config" commonmodels "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/repository/models" "github.com/koderover/zadig/v2/pkg/setting" @@ -42,7 +43,7 @@ import ( var registrySecretSuffix = "-registry-secret" -func CreateNamespace(namespace string, customLabels map[string]string, enableIstioInjection bool, kubeClient client.Client) error { +func CreateNamespace(namespace, clusterID string, customLabels map[string]string, enableIstioInjection bool) error { nsLabels := map[string]string{ setting.EnvCreatedBy: setting.EnvCreator, } @@ -54,56 +55,49 @@ func CreateNamespace(namespace string, customLabels map[string]string, enableIst customLabels = map[string]string{} } mergedLabels := labels.Merge(customLabels, nsLabels) - createErr := updater.CreateNamespaceByName(namespace, mergedLabels, kubeClient) + createErr := updater.CreateNamespaceByNameV2(context.TODO(), clusterID, namespace, mergedLabels) if createErr != nil && !apierrors.IsAlreadyExists(createErr) { return createErr } - var err error - nsObj := &corev1.Namespace{} - // It may fail to obtain the namespace immediately after it is created due to synchronization delay. - // Try twice. 
- for i := 0; i < 2; i++ { - err = kubeClient.Get(context.TODO(), client.ObjectKey{ - Name: namespace, - }, nsObj) - if err == nil { - break - } - - time.Sleep(time.Second) - } - if err != nil { - return err - } if enableIstioInjection && createErr != nil && apierrors.IsAlreadyExists(createErr) { - nsObj.Labels[zadigtypes.IstioLabelKeyInjection] = zadigtypes.IstioLabelValueInjection - err = updater.UpdateNamespace(nsObj, kubeClient) + err := updater.UpdateNamespaceV2(context.TODO(), clusterID, namespace, func(ns *corev1.Namespace) error { + if ns.Labels == nil { + ns.Labels = make(map[string]string) + } + ns.Labels[zadigtypes.IstioLabelKeyInjection] = zadigtypes.IstioLabelValueInjection + return nil + }) if err != nil { return fmt.Errorf("failed to add istio-injection label and update namespace %s: %s", namespace, err) } } - if nsObj.Status.Phase == corev1.NamespaceTerminating { - return fmt.Errorf("namespace `%s` is in terminating state, please wait for a whilie and try again", namespace) + if createErr != nil && apierrors.IsAlreadyExists(createErr) { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + nsObj, err := c.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get namespace %s: %w", namespace, err) + } + if nsObj.Status.Phase == corev1.NamespaceTerminating { + return fmt.Errorf("namespace `%s` is in terminating state, please wait for a while and try again", namespace) + } } return nil } -func EnsureNamespaceLabels(namespace string, customLabels map[string]string, kubeClient client.Client) error { - nsObj := &corev1.Namespace{} - err := kubeClient.Get(context.TODO(), client.ObjectKey{ - Name: namespace, - }, nsObj) - if err != nil { - return err - } - if labels.SelectorFromValidatedSet(customLabels).Matches(labels.Set(nsObj.Labels)) { +func EnsureNamespaceLabels(namespace, 
clusterID string, customLabels map[string]string) error { + return updater.UpdateNamespaceV2(context.TODO(), clusterID, namespace, func(ns *corev1.Namespace) error { + if labels.SelectorFromValidatedSet(customLabels).Matches(labels.Set(ns.Labels)) { + return nil + } + ns.Labels = labels.Merge(ns.Labels, customLabels) return nil - } - nsObj.Labels = labels.Merge(nsObj.Labels, customLabels) - return updater.UpdateNamespace(nsObj, kubeClient) + }) } func CreateOrUpdateRSASecret(publicKey, privateKey []byte, kubeClient client.Client) error { diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 5b455f3d5e..56fb97685b 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -935,7 +935,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent := fmt.Sprintf("Deleting old %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - if err := updater.DeleteJobAndWait(namespace, obj.Name, kubeClient); err != nil { + if err := updater.DeleteJobAndWaitV2(context.TODO(), productInfo.ClusterID, namespace, obj.Name); err != nil { log.Errorf("Failed to delete Job, error: %v", err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) continue @@ -944,7 +944,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent = fmt.Sprintf("Applying new %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - if err := updater.CreateJob(obj, kubeClient); err != nil { + if err := updater.CreateJobV2(context.TODO(), productInfo.ClusterID, obj); err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed 
to create or update %s/%s", u.GetKind(), u.GetName())) continue @@ -977,7 +977,13 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - err = updater.CreateOrPatchCronJob(obj, kubeClient) + resYAML, marshalErr := yaml.Marshal(obj) + if marshalErr != nil { + log.Errorf("Failed to marshal cronjob %s to YAML: %v", obj.Name, marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1004,7 +1010,13 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - err = updater.CreateOrPatchCronJob(obj, kubeClient) + resYAML, marshalErr := yaml.Marshal(obj) + if marshalErr != nil { + log.Errorf("Failed to marshal cronjob-beta %s to YAML: %v", obj.Name, marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) diff --git a/pkg/microservice/aslan/core/common/service/product.go b/pkg/microservice/aslan/core/common/service/product.go index b963dd7885..23bbd24361 100644 --- a/pkg/microservice/aslan/core/common/service/product.go +++ 
b/pkg/microservice/aslan/core/common/service/product.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -61,7 +62,7 @@ func DeleteClusterResource(selector labels.Selector, clusterID string, log *zap. errors = multierror.Append(errors, err) } - if err := updater.DeletePersistentVolumes(selector, clientset); err != nil { + if err := updater.DeletePersistentVolumesV2(context.Background(), clusterID, updater.WithSelector(selector.String())); err != nil { log.Errorf("failed to delete PV for clusterID: %s, the error is: %s", clusterID, err) errors = multierror.Append(errors, err) } @@ -87,7 +88,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste } // could have replicas created by deployment - if err := updater.DeleteReplicaSets(namespace, selector, clientset); err != nil { + if err := updater.DeleteReplicaSetsV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteReplicaSets error: %v", err)) } @@ -97,7 +98,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteStatefulSets error: %v", err)) } - if err := updater.DeleteJobs(namespace, selector, clientset); err != nil { + if err := updater.DeleteJobsV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteJobs error: %v", err)) } @@ -108,7 +109,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste } // TODO: Questionable delete logic, needs further attention - if err := updater.DeleteIngresses(namespace, selector, clientset); err 
!= nil { + if err := updater.DeleteIngressesV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteIngresses error: %v", err)) } @@ -118,12 +119,12 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteSecrets error: %v", err)) } - if err := updater.DeleteConfigMaps(namespace, selector, clientset); err != nil { + if err := updater.DeleteConfigMapsV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteConfigMaps error: %v", err)) } - if err := updater.DeletePersistentVolumeClaims(namespace, selector, clientset); err != nil { + if err := updater.DeletePVCV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeletePersistentVolumeClaim error: %v", err)) } @@ -133,17 +134,17 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteServiceAccounts error: %v", err)) } - if err := updater.DeleteCronJobs(namespace, selector, clientset); err != nil { + if err := updater.DeleteCronJobsV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteCronJobs error: %v", err)) } - if err := updater.DeleteRoleBindings(namespace, selector, clientset); err != nil { + if err := updater.DeleteRoleBindingsV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteRoleBinding error: %v", err)) } - if err := 
updater.DeleteRoles(namespace, selector, clientset); err != nil { + if err := updater.DeleteRolesV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteRole error: %v", err)) } @@ -167,7 +168,7 @@ func DeleteNamespaceIfMatch(namespace string, selector labels.Selector, clusterI } if selector.Matches(labels.Set(ns.Labels)) { - return updater.DeleteNamespace(namespace, clientset) + return updater.DeleteNamespaceV2(context.TODO(), clusterID, namespace) } return nil @@ -176,40 +177,20 @@ func DeleteNamespaceIfMatch(namespace string, selector labels.Selector, clusterI func DeleteZadigLabelFromNamespace(namespace string, clusterID string, log *zap.SugaredLogger) error { log.Infof("removing zadig label from namespace [%s]", namespace) - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", clusterID, err) - return err - } - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) - if err != nil { - return err - } - - ns, err := clientset.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}) - if err != nil { - log.Errorf("failed to list namespace to delete matching namespace in cluster ID: %s, the error is: %s", clusterID, err) - return err - } - - curLabels := ns.Labels - filteredLabels := make(map[string]string) - for name, value := range curLabels { - if name == setting.EnvCreatedBy && value == setting.EnvCreator { - continue - } - if name == setting.ProductLabel { - continue + return updater.UpdateNamespaceV2(context.TODO(), clusterID, namespace, func(ns *corev1.Namespace) error { + filteredLabels := make(map[string]string) + for name, value := range ns.Labels { + if name == setting.EnvCreatedBy && value == setting.EnvCreator { + continue + } + if 
name == setting.ProductLabel { + continue + } + filteredLabels[name] = value } - filteredLabels[name] = value - } - ns.Labels = filteredLabels - - err = updater.UpdateNamespace(ns, kubeClient) - if err != nil { - return err - } - return nil + ns.Labels = filteredLabels + return nil + }) } func GetProductEnvNamespace(envName, productName, namespace string) string { diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go index 07de1c8e86..2e7417bbb4 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy.go @@ -24,6 +24,7 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" crClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -122,8 +123,13 @@ func (c *BlueGreenDeployJobCtl) run(ctx context.Context) error { return errors.New(msg) } for _, pod := range pods { - addlabelPatch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, config.BlueGreenVersionLabelName, config.OriginVersion) - if err := updater.PatchPod(c.jobTaskSpec.Namespace, pod.Name, []byte(addlabelPatch), c.kubeClient); err != nil { + if err := updater.UpdatePodV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, pod.Name, func(p *corev1.Pod) error { + if p.Labels == nil { + p.Labels = make(map[string]string) + } + p.Labels[config.BlueGreenVersionLabelName] = config.OriginVersion + return nil + }); err != nil { msg := fmt.Sprintf("add origin label to pod error: %v", err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) @@ -132,8 +138,10 @@ func (c *BlueGreenDeployJobCtl) run(ctx context.Context) error { } c.jobTaskSpec.Events.Info("add origin label to pods") c.ack() - 
service.Spec.Selector[config.BlueGreenVersionLabelName] = config.OriginVersion - if err := updater.CreateOrPatchService(service, c.kubeClient); err != nil { + if err := updater.UpdateServiceV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.K8sServiceName, func(svc *corev1.Service) error { + svc.Spec.Selector[config.BlueGreenVersionLabelName] = config.OriginVersion + return nil + }); err != nil { msg := fmt.Sprintf("add origin label selector to service: %s error: %v", c.jobTaskSpec.K8sServiceName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) @@ -144,8 +152,10 @@ func (c *BlueGreenDeployJobCtl) run(ctx context.Context) error { } else { // ensure service have the label selector match deployments. if _, ok := service.Spec.Selector[config.BlueGreenVersionLabelName]; !ok { - service.Spec.Selector[config.BlueGreenVersionLabelName] = previousLabel - if err := updater.CreateOrPatchService(service, c.kubeClient); err != nil { + if err := updater.UpdateServiceV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.K8sServiceName, func(svc *corev1.Service) error { + svc.Spec.Selector[config.BlueGreenVersionLabelName] = previousLabel + return nil + }); err != nil { msg := fmt.Sprintf("add label selector to service: %s error: %v", c.jobTaskSpec.K8sServiceName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) @@ -169,7 +179,7 @@ func (c *BlueGreenDeployJobCtl) run(ctx context.Context) error { } blueService.ObjectMeta.ResourceVersion = "" - if err := updater.CreateOrPatchService(blueService, c.kubeClient); err != nil { + if err := updater.CreateServiceV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, blueService); err != nil { msg := fmt.Sprintf("create blue service: %s error: %v", c.jobTaskSpec.BlueK8sServiceName, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) diff --git 
a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy_v2.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy_v2.go index 6461dff694..a412bef312 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy_v2.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_deploy_v2.go @@ -134,8 +134,13 @@ func (c *BlueGreenDeployV2JobCtl) run(ctx context.Context) error { return errors.New(msg) } for _, pod := range pods { - addlabelPatch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, config.BlueGreenVersionLabelName, config.OriginVersion) - if err := updater.PatchPod(c.namespace, pod.Name, []byte(addlabelPatch), c.kubeClient); err != nil { + if err := updater.UpdatePodV2(ctx, clusterID, c.namespace, pod.Name, func(p *corev1.Pod) error { + if p.Labels == nil { + p.Labels = make(map[string]string) + } + p.Labels[config.BlueGreenVersionLabelName] = config.OriginVersion + return nil + }); err != nil { msg := fmt.Sprintf("add origin label to pod error: %v", err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) @@ -146,8 +151,10 @@ func (c *BlueGreenDeployV2JobCtl) run(ctx context.Context) error { c.ack() // green service selector add original version label - greenService.Spec.Selector[config.BlueGreenVersionLabelName] = config.OriginVersion - if err := updater.CreateOrPatchService(greenService, c.kubeClient); err != nil { + if err := updater.UpdateServiceV2(ctx, clusterID, c.namespace, greenService.Name, func(svc *corev1.Service) error { + svc.Spec.Selector[config.BlueGreenVersionLabelName] = config.OriginVersion + return nil + }); err != nil { msg := fmt.Sprintf("add origin label selector to green service: %s error: %v", greenService.Name, err) logError(c.job, msg, c.logger) c.jobTaskSpec.Events.Error(msg) diff --git 
a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go index 52b99fd246..77ef565190 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go @@ -22,6 +22,7 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" crClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -101,8 +102,10 @@ func (c *BlueGreenReleaseJobCtl) Clean(ctx context.Context) { if pod.ObjectMeta.Labels[config.BlueGreenVersionLabelName] != config.OriginVersion { continue } - deleteLabelPatch := fmt.Sprintf(`{"metadata":{"labels":{"%s":null}}}`, config.BlueGreenVersionLabelName) - if err := updater.PatchPod(c.jobTaskSpec.Namespace, pod.Name, []byte(deleteLabelPatch), kubeClient); err != nil { + if err := updater.UpdatePodV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, pod.Name, func(p *corev1.Pod) error { + delete(p.Labels, config.BlueGreenVersionLabelName) + return nil + }); err != nil { c.logger.Errorf("patch pod error: %v", err) } } @@ -121,14 +124,16 @@ func (c *BlueGreenReleaseJobCtl) Run(ctx context.Context) { return } - service, exist, err := getter.GetService(c.jobTaskSpec.Namespace, c.jobTaskSpec.K8sServiceName, c.kubeClient) + _, exist, err := getter.GetService(c.jobTaskSpec.Namespace, c.jobTaskSpec.K8sServiceName, c.kubeClient) if err != nil || !exist { msg := fmt.Sprintf("get service %s failed, err: %v", c.jobTaskSpec.K8sServiceName, err) logError(c.job, msg, c.logger) return } - service.Spec.Selector[config.BlueGreenVersionLabelName] = c.jobTaskSpec.Version - if err := updater.CreateOrPatchService(service, c.kubeClient); err != nil { + if err := updater.UpdateServiceV2(ctx, 
c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.K8sServiceName, func(svc *corev1.Service) error { + svc.Spec.Selector[config.BlueGreenVersionLabelName] = c.jobTaskSpec.Version + return nil + }); err != nil { msg := fmt.Sprintf("point service: %s to deployment: %s failed: %v", c.jobTaskSpec.K8sServiceName, c.jobTaskSpec.BlueWorkloadName, err) logError(c.job, msg, c.logger) return diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go index 7b58e6406d..de0d6142bc 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go @@ -24,6 +24,7 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/pkg/errors" "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" crClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -110,8 +111,10 @@ func (c *BlueGreenReleaseV2JobCtl) Clean(ctx context.Context) { } // must remove service selector before remove pods labels if _, ok := greenService.Spec.Selector[config.BlueGreenVersionLabelName]; ok { - delete(greenService.Spec.Selector, config.BlueGreenVersionLabelName) - if err := updater.CreateOrPatchService(greenService, c.kubeClient); err != nil { + if err := updater.UpdateServiceV2(ctx, clusterID, c.namespace, greenService.Name, func(svc *corev1.Service) error { + delete(svc.Spec.Selector, config.BlueGreenVersionLabelName) + return nil + }); err != nil { c.logger.Errorf("delete origin label for service error: %v", err) return } @@ -126,8 +129,10 @@ func (c *BlueGreenReleaseV2JobCtl) Clean(ctx context.Context) { continue } if _, ok := pod.Labels[config.BlueGreenVersionLabelName]; ok { - removeLabelPatch := 
fmt.Sprintf(`{"metadata":{"labels":{"%s":null}}}`, config.BlueGreenVersionLabelName) - if err := updater.PatchPod(c.namespace, pod.Name, []byte(removeLabelPatch), c.kubeClient); err != nil { + if err := updater.UpdatePodV2(ctx, clusterID, c.namespace, pod.Name, func(p *corev1.Pod) error { + delete(p.Labels, config.BlueGreenVersionLabelName) + return nil + }); err != nil { c.logger.Errorf("remove origin label to pod error: %v", err) continue } @@ -224,8 +229,13 @@ func (c *BlueGreenReleaseV2JobCtl) run(ctx context.Context) error { continue } if _, ok := pod.Labels[config.BlueGreenVersionLabelName]; !ok { - addLabelPatch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, config.BlueGreenVersionLabelName, config.OriginVersion) - if err := updater.PatchPod(c.namespace, pod.Name, []byte(addLabelPatch), c.kubeClient); err != nil { + if err := updater.UpdatePodV2(ctx, clusterID, c.namespace, pod.Name, func(p *corev1.Pod) error { + if p.Labels == nil { + p.Labels = make(map[string]string) + } + p.Labels[config.BlueGreenVersionLabelName] = config.OriginVersion + return nil + }); err != nil { c.logger.Errorf("remove origin label to pod error: %v", err) continue } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go index 9ddbc3a940..8fadb5be15 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_custom_deploy.go @@ -162,7 +162,7 @@ func (c *CustomDeployJobCtl) run(ctx context.Context) error { } for _, container := range statefulSet.Spec.Template.Spec.Containers { if container.Name == c.jobTaskSpec.ContainerName { - err = updater.UpdateDeploymentImage(statefulSet.Namespace, statefulSet.Name, container.Name, c.jobTaskSpec.Image, c.kubeClient) + err = 
updater.UpdateStatefulSetImageV2(ctx, c.jobTaskSpec.ClusterID, statefulSet.Namespace, statefulSet.Name, container.Name, c.jobTaskSpec.Image) if err != nil { err = errors.WithMessagef( err, @@ -190,7 +190,7 @@ func (c *CustomDeployJobCtl) run(ctx context.Context) error { if cronJob != nil { for _, container := range cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers { if container.Name == c.jobTaskSpec.ContainerName { - err = updater.UpdateCronJobImage(cronJob.Namespace, cronJob.Name, container.Name, c.jobTaskSpec.Image, c.kubeClient, kubeclient.VersionLessThan121(c.version)) + err = updater.UpdateCronJobImageV2(ctx, c.jobTaskSpec.ClusterID, cronJob.Namespace, cronJob.Name, container.Name, c.jobTaskSpec.Image) if err != nil { err = errors.WithMessagef( err, @@ -213,7 +213,7 @@ func (c *CustomDeployJobCtl) run(ctx context.Context) error { if cronJobBeta != nil { for _, container := range cronJobBeta.Spec.JobTemplate.Spec.Template.Spec.Containers { if container.Name == c.jobTaskSpec.ContainerName { - err = updater.UpdateCronJobImage(cronJobBeta.Namespace, cronJobBeta.Name, container.Name, c.jobTaskSpec.Image, c.kubeClient, kubeclient.VersionLessThan121(c.version)) + err = updater.UpdateCronJobImageV2(ctx, c.jobTaskSpec.ClusterID, cronJobBeta.Namespace, cronJobBeta.Name, container.Name, c.jobTaskSpec.Image) if err != nil { err = errors.WithMessagef( err, diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go index 48a13d1cc7..07b8b1e7ad 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go @@ -538,7 +538,7 @@ CronLoop: for _, cron := range cronJobs { for _, container := range cron.Spec.JobTemplate.Spec.Template.Spec.Containers { if container.Name == serviceModule.ServiceModule { - err 
= updater.UpdateCronJobImage(cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient, false) + err = updater.UpdateCronJobImageV2(ctx, env.ClusterID, cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/cronJob/%s/%s: %v", env.Namespace, cron.Name, container.Name, err) } @@ -559,7 +559,7 @@ CronLoop: } for _, container := range cron.Spec.JobTemplate.Spec.Template.Spec.InitContainers { if container.Name == serviceModule.ServiceModule { - err = updater.UpdateCronJobInitImage(cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient, false) + err = updater.UpdateCronJobInitImageV2(ctx, env.ClusterID, cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/cronJob/%s/%s: %v", env.Namespace, cron.Name, container.Name, err) } @@ -583,7 +583,7 @@ BetaCronLoop: for _, cron := range betaCronJobs { for _, container := range cron.Spec.JobTemplate.Spec.Template.Spec.Containers { if container.Name == serviceModule.ServiceModule { - err = updater.UpdateCronJobImage(cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient, true) + err = updater.UpdateCronJobImageV2(ctx, env.ClusterID, cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/cronJobBeta/%s/%s: %v", env.Namespace, cron.Name, container.Name, err) } @@ -605,7 +605,7 @@ BetaCronLoop: } for _, container := range cron.Spec.JobTemplate.Spec.Template.Spec.InitContainers { if container.Name == serviceModule.ServiceModule { - err = updater.UpdateCronJobInitImage(cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image, kubeClient, true) + err = updater.UpdateCronJobInitImageV2(ctx, env.ClusterID, 
cron.Namespace, cron.Name, serviceModule.ServiceModule, serviceModule.Image) if err != nil { return nil, nil, fmt.Errorf("failed to update container image in %s/cronJobBeta/%s/%s: %v", env.Namespace, cron.Name, container.Name, err) } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go index f143fb5b56..297dd04cbd 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go @@ -227,13 +227,13 @@ func (c *FreestyleJobCtl) run(ctx context.Context) error { JobType: string(c.job.JobType), JobName: c.job.K8sJobName, } - if err := ensureDeleteConfigMap(c.jobTaskSpec.Properties.Namespace, jobLabel, c.kubeclient); err != nil { + if err := ensureDeleteConfigMap(c.jobTaskSpec.Properties.Namespace, jobLabel, c.jobTaskSpec.Properties.ClusterID); err != nil { logError(c.job, err.Error(), c.logger) return err } if err := createJobConfigMap( - c.jobTaskSpec.Properties.Namespace, c.job.K8sJobName, jobLabel, string(jobCtxBytes), c.kubeclient); err != nil { + c.jobTaskSpec.Properties.Namespace, c.job.K8sJobName, jobLabel, string(jobCtxBytes), c.jobTaskSpec.Properties.ClusterID); err != nil { msg := fmt.Sprintf("createJobConfigMap error: %v", err) logError(c.job, msg, c.logger) return errors.New(msg) @@ -280,7 +280,7 @@ func (c *FreestyleJobCtl) run(ctx context.Context) error { job.Namespace = c.jobTaskSpec.Properties.Namespace - if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.kubeclient); err != nil { + if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.jobTaskSpec.Properties.ClusterID); err != nil { msg := fmt.Sprintf("delete job error: %v", err) logError(c.job, msg, c.logger) return errors.New(msg) @@ -292,7 +292,7 @@ func (c *FreestyleJobCtl) 
run(ctx context.Context) error { return errors.New(msg) } - if err := updater.CreateJob(job, c.kubeclient); err != nil { + if err := updater.CreateJobV2(ctx, c.jobTaskSpec.Properties.ClusterID, job); err != nil { msg := fmt.Sprintf("create job error: %v", err) logError(c.job, msg, c.logger) return errors.New(msg) @@ -1233,10 +1233,10 @@ func (c *FreestyleJobCtl) complete(ctx context.Context) { c.logger.Errorf("Failed to cleanup files PVCs: %v", err) } } - if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.kubeclient); err != nil { + if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.jobTaskSpec.Properties.ClusterID); err != nil { c.logger.Error(err) } - if err := ensureDeleteConfigMap(c.jobTaskSpec.Properties.Namespace, jobLabel, c.kubeclient); err != nil { + if err := ensureDeleteConfigMap(c.jobTaskSpec.Properties.Namespace, jobLabel, c.jobTaskSpec.Properties.ClusterID); err != nil { c.logger.Error(err) } }() diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go index af3e5d1ca7..582b2a9e3c 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go @@ -126,7 +126,7 @@ func (c *PluginJobCtl) run(ctx context.Context) error { job.Namespace = c.jobTaskSpec.Properties.Namespace - if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.kubeclient); err != nil { + if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.jobTaskSpec.Properties.ClusterID); err != nil { msg := fmt.Sprintf("delete job error: %v", err) logError(c.job, msg, c.logger) return err @@ -138,7 +138,7 @@ func (c *PluginJobCtl) run(ctx context.Context) error { return errors.New(msg) } - if err := updater.CreateJob(job, c.kubeclient); err != nil { 
+ if err := updater.CreateJobV2(ctx, c.jobTaskSpec.Properties.ClusterID, job); err != nil { msg := fmt.Sprintf("create job error: %v", err) logError(c.job, msg, c.logger) return err @@ -172,7 +172,7 @@ func (c *PluginJobCtl) complete(ctx context.Context) { // 清理用户取消和超时的任务 defer func() { go func() { - if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.kubeclient); err != nil { + if err := ensureDeleteJob(c.jobTaskSpec.Properties.Namespace, jobLabel, c.jobTaskSpec.Properties.ClusterID); err != nil { c.logger.Error(err) } }() diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go index c9f215b92b..fec243dad8 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go @@ -125,14 +125,14 @@ func ensureDeletePVC(pvcName, namespace string, storage *types.NFSProperties, ku }) } -func ensureDeleteConfigMap(namespace string, jobLabel *JobLabel, kubeClient crClient.Client) error { +func ensureDeleteConfigMap(namespace string, jobLabel *JobLabel, clusterID string) error { ls := getJobLabels(jobLabel) - return updater.DeleteConfigMapsAndWait(namespace, labels.Set(ls).AsSelector(), kubeClient) + return updater.DeleteConfigMapsAndWaitV2(context.TODO(), clusterID, namespace, updater.WithSelector(labels.Set(ls).AsSelector().String())) } -func ensureDeleteJob(namespace string, jobLabel *JobLabel, kubeClient crClient.Client) error { +func ensureDeleteJob(namespace string, jobLabel *JobLabel, clusterID string) error { ls := getJobLabels(jobLabel) - return updater.DeleteJobsAndWait(namespace, labels.Set(ls).AsSelector(), kubeClient) + return updater.DeleteJobsAndWaitV2(context.TODO(), clusterID, namespace, updater.WithSelector(labels.Set(ls).AsSelector().String())) } func 
getJobLabelsWithCustomizeData(jobLabel *JobLabel, customizedData map[string]string) map[string]string { @@ -187,7 +187,7 @@ func GetJobContainerName(name string) string { return resp } -func createJobConfigMap(namespace, jobName string, jobLabel *JobLabel, jobCtx string, kubeClient crClient.Client) error { +func createJobConfigMap(namespace, jobName string, jobLabel *JobLabel, jobCtx, clusterID string) error { cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -199,7 +199,7 @@ func createJobConfigMap(namespace, jobName string, jobLabel *JobLabel, jobCtx st }, } - return updater.CreateConfigMap(cm, kubeClient) + return updater.CreateConfigMapV2(context.TODO(), clusterID, cm) } func getBaseImage(buildOS, imageFrom string) string { diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/workflow.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/workflow.go index 5031e29f92..27bc169d66 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/workflow.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/workflow.go @@ -30,8 +30,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/rand" - "sigs.k8s.io/controller-runtime/pkg/client" - config2 "github.com/koderover/zadig/v2/pkg/config" "github.com/koderover/zadig/v2/pkg/microservice/aslan/config" commonmodels "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/repository/models" @@ -611,15 +609,15 @@ func (c *workflowCtl) CleanShareStorage() { continue } job.Namespace = namespace - if err := updater.CreateJob(job, kubeClient); err != nil { + if err := updater.CreateJobV2(context.Background(), clusterID, job); err != nil { c.logger.Errorf("create job error: %v", err) continue } - defer func(client client.Client, name, namespace string) { - if err := updater.DeleteJobAndWait(namespace, name, client); err != nil { + defer func(clusterID, name, namespace string) { + if 
err := updater.DeleteJobAndWaitV2(context.Background(), clusterID, namespace, name); err != nil { c.logger.Errorf("delete job error: %v", err) } - }(kubeClient, cleanJobName, namespace) + }(clusterID, cleanJobName, namespace) status := jobcontroller.WaitPlainJobEnd(context.Background(), 10, namespace, cleanJobName, kubeClient, kubeApiServer, c.logger) c.logger.Infof("clean job %s finished, status: %s", cleanJobName, status) } diff --git a/pkg/microservice/aslan/core/cron/service/cron.go b/pkg/microservice/aslan/core/cron/service/cron.go index 387c2e84e1..fb3689dc2e 100644 --- a/pkg/microservice/aslan/core/cron/service/cron.go +++ b/pkg/microservice/aslan/core/cron/service/cron.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "errors" "sort" "time" @@ -44,11 +45,11 @@ func CleanJobCronJob(log *zap.SugaredLogger) { singleSelector := labels.Set{"p-type": "single"}.AsSelector() testSelector := labels.Set{"p-type": "test"}.AsSelector() artifactSelector := labels.Set{"p-type": "artifact"}.AsSelector() - cleanJob(config.Namespace(), workflowSelector, kubeClient, log) - cleanJob(config.Namespace(), singleSelector, kubeClient, log) - cleanJob(config.Namespace(), testSelector, kubeClient, log) - cleanJob(config.Namespace(), artifactSelector, kubeClient, log) - cleanServiceJob(kubeClient, log) + cleanJob(config.Namespace(), workflowSelector, "", kubeClient, log) + cleanJob(config.Namespace(), singleSelector, "", kubeClient, log) + cleanJob(config.Namespace(), testSelector, "", kubeClient, log) + cleanJob(config.Namespace(), artifactSelector, "", kubeClient, log) + cleanServiceJob("", kubeClient, log) log.Infof("finnish clean job...") } @@ -63,15 +64,15 @@ func CleanConfigmapCronJob(log *zap.SugaredLogger) { singleSelector := labels.Set{"p-type": "single"}.AsSelector() testSelector := labels.Set{"p-type": "test"}.AsSelector() artifactSelector := labels.Set{"p-type": "artifact"}.AsSelector() - cleanConfigmap(config.Namespace(), 
workflowSelector, kubeClient, log) - cleanConfigmap(config.Namespace(), singleSelector, kubeClient, log) - cleanConfigmap(config.Namespace(), testSelector, kubeClient, log) - cleanConfigmap(config.Namespace(), artifactSelector, kubeClient, log) - cleanServiceConfigmap(kubeClient, log) + cleanConfigmap(config.Namespace(), "", workflowSelector, kubeClient, log) + cleanConfigmap(config.Namespace(), "", singleSelector, kubeClient, log) + cleanConfigmap(config.Namespace(), "", testSelector, kubeClient, log) + cleanConfigmap(config.Namespace(), "", artifactSelector, kubeClient, log) + cleanServiceConfigmap("", kubeClient, log) log.Infof("finnish clean configmap...") } -func cleanJob(namespace string, selector labels.Selector, client client.Client, log *zap.SugaredLogger) { +func cleanJob(namespace string, selector labels.Selector, clusterID string, client client.Client, log *zap.SugaredLogger) { jobList, err := getter.ListJobs(namespace, selector, client) if err != nil { log.Infof("[%s]list jobs error: %v", namespace, err) @@ -81,7 +82,7 @@ func cleanJob(namespace string, selector labels.Selector, client client.Client, if job.CreationTimestamp.Unix() < time.Now().Unix()-60*60*24 { name := job.Name log.Infof("[%s][%s]deleting job", namespace, name) - err := ensureDeleteJob(namespace, name, client, log) + err := ensureDeleteJob(namespace, name, clusterID, log) if err != nil { log.Infof("[%s][%s]delete job error: %v", namespace, name, err) } else { @@ -91,7 +92,7 @@ func cleanJob(namespace string, selector labels.Selector, client client.Client, } } -func cleanConfigmap(namespace string, selector labels.Selector, client client.Client, log *zap.SugaredLogger) { +func cleanConfigmap(namespace, clusterID string, selector labels.Selector, client client.Client, log *zap.SugaredLogger) { configmaps, err := getter.ListConfigMaps(namespace, selector, client) if err != nil { log.Infof("[%s]list configmaps error: %v", namespace, err) @@ -103,7 +104,7 @@ func cleanConfigmap(namespace 
string, selector labels.Selector, client client.Cl if configmap.CreationTimestamp.Unix() < time.Now().Unix()-60*60*24*7 { name := configmap.Name log.Infof("[%s][%s]deleting configmap", namespace, name) - err := ensureDeleteConfigmap(namespace, name, client, log) + err := ensureDeleteConfigmap(namespace, name, clusterID, client, log) if err != nil { log.Infof("[%s][%s]delete configmap error: %v", namespace, name, err) } else { @@ -113,7 +114,7 @@ func cleanConfigmap(namespace string, selector labels.Selector, client client.Cl } } -func cleanServiceJob(client client.Client, log *zap.SugaredLogger) { +func cleanServiceJob(clusterID string, client client.Client, log *zap.SugaredLogger) { taskServiceMap, err := commonservice.GetServiceTasks(log) if err != nil { log.Infof("cleanServiceJob getServiceTasks error: %v", err) @@ -136,7 +137,7 @@ func cleanServiceJob(client client.Client, log *zap.SugaredLogger) { if index > 0 && job.CreationTimestamp.Unix() < time.Now().Unix()-60*60*24 { name := job.Name log.Infof("[%s][%s]deleting job", namespace, name) - err := ensureDeleteJob(namespace, name, client, log) + err := ensureDeleteJob(namespace, name, clusterID, log) if err != nil { log.Infof("[%s][%s]delete job error: %v", namespace, name, err) } else { @@ -148,7 +149,7 @@ func cleanServiceJob(client client.Client, log *zap.SugaredLogger) { } } -func cleanServiceConfigmap(client client.Client, log *zap.SugaredLogger) { +func cleanServiceConfigmap(clusterID string, client client.Client, log *zap.SugaredLogger) { taskServiceMap, err := commonservice.GetServiceTasks(log) if err != nil { log.Infof("cleanServiceConfigmap getServiceTasks error: %v", err) @@ -172,7 +173,7 @@ func cleanServiceConfigmap(client client.Client, log *zap.SugaredLogger) { if index > 0 && configmap.CreationTimestamp.Unix() < time.Now().Unix()-60*60*24*7 { name := configmap.Name log.Infof("[%s][%s]deleting configmap", namespace, name) - err := ensureDeleteConfigmap(namespace, name, client, log) + err := 
ensureDeleteConfigmap(namespace, name, clusterID, client, log) if err != nil { log.Infof("[%s][%s]delete configmap error: %v", namespace, name, err) } else { @@ -184,15 +185,15 @@ func cleanServiceConfigmap(client client.Client, log *zap.SugaredLogger) { } } -func ensureDeleteJob(namespace, jobName string, client client.Client, log *zap.SugaredLogger) error { - return updater.DeleteJobAndWait(namespace, jobName, client) +func ensureDeleteJob(namespace, jobName, clusterID string, log *zap.SugaredLogger) error { + return updater.DeleteJobAndWaitV2(context.TODO(), clusterID, namespace, jobName) } -func ensureDeleteConfigmap(namespace, configmapName string, client client.Client, log *zap.SugaredLogger) error { +func ensureDeleteConfigmap(namespace, configmapName, clusterID string, client client.Client, log *zap.SugaredLogger) error { _, found, err := getter.GetConfigMap(namespace, configmapName, client) if err == nil && found { - updater.DeleteConfigMap(namespace, configmapName, client) + updater.DeleteConfigMapV2(context.TODO(), clusterID, namespace, configmapName) } timeout := false diff --git a/pkg/microservice/aslan/core/environment/service/common_env_cfg.go b/pkg/microservice/aslan/core/environment/service/common_env_cfg.go index 7e843c046e..5b2f6afbaf 100644 --- a/pkg/microservice/aslan/core/environment/service/common_env_cfg.go +++ b/pkg/microservice/aslan/core/environment/service/common_env_cfg.go @@ -17,6 +17,7 @@ limitations under the License. 
package service import ( + "context" "encoding/json" "fmt" "time" @@ -98,21 +99,16 @@ func DeleteCommonEnvCfg(envName, productName, objectName string, commonEnvCfgTyp if err != nil { return e.ErrDeleteResource.AddErr(err) } - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(product.ClusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", product.ClusterID, err) - return e.ErrDeleteResource.AddErr(err) - } switch commonEnvCfgType { case config.CommonEnvCfgTypeConfigMap: - err = updater.DeleteConfigMap(product.Namespace, objectName, kubeClient) + err = updater.DeleteConfigMapV2(context.TODO(), product.ClusterID, product.Namespace, objectName) case config.CommonEnvCfgTypeSecret: err = updater.DeleteSecretWithName(product.Namespace, objectName, kubeClient) case config.CommonEnvCfgTypeIngress: - err = updater.DeleteIngresseWithName(product.Namespace, objectName, clientset) + err = updater.DeleteIngressesV2(context.TODO(), product.ClusterID, product.Namespace, updater.WithName(objectName)) case config.CommonEnvCfgTypePvc: - err = updater.DeletePvcWithName(product.Namespace, objectName, clientset) + err = updater.DeletePVCV2(context.TODO(), product.ClusterID, product.Namespace, updater.WithName(objectName)) default: return e.ErrDeleteResource.AddDesc(fmt.Sprintf("%s is not support delete", commonEnvCfgType)) } @@ -247,11 +243,6 @@ func CreateCommonEnvCfg(args *models.CreateUpdateCommonEnvCfgArgs, userName stri if err != nil { return e.ErrUpdateResource.AddErr(err) } - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(product.ClusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", product.ClusterID, err) - return e.ErrUpdateResource.AddErr(err) - } u, err := serializer.NewDecoder().YamlToUnstructured(js) if err != nil { @@ -287,7 +278,7 @@ func CreateCommonEnvCfg(args 
*models.CreateUpdateCommonEnvCfgArgs, userName stri return e.ErrUpdateResource.AddErr(err) } - if err := updater.CreateConfigMap(cm, kubeClient); err != nil { + if err := updater.CreateConfigMapV2(context.TODO(), product.ClusterID, cm); err != nil { log.Error(err) return e.ErrUpdateResource.AddErr(err) } @@ -344,7 +335,7 @@ func CreateCommonEnvCfg(args *models.CreateUpdateCommonEnvCfgArgs, userName stri return e.ErrUpdateResource.AddErr(err) } - if err := updater.CreatePvc(product.Namespace, pvc, clientset); err != nil { + if err := updater.CreatePVCV2(context.TODO(), product.ClusterID, product.Namespace, pvc); err != nil { log.Error(err) return e.ErrUpdateResource.AddErr(err) } @@ -634,7 +625,7 @@ func SyncEnvResource(args *SyncEnvResourceArg, log *zap.SugaredLogger) error { return nil } -func restartPod(name, productName, envName, namespace string, commonEnvCfgType config.CommonEnvCfgType, clientset *kubernetes.Clientset, kubcli client.Client) error { +func restartPod(name, productName, envName, namespace, clusterID string, commonEnvCfgType config.CommonEnvCfgType, kubcli client.Client) error { opts := &commonrepo.ListEnvSvcDependOption{ ProductName: productName, EnvName: envName, @@ -672,14 +663,14 @@ func restartPod(name, productName, envName, namespace string, commonEnvCfgType c ServiceName: esp.ServiceName, } if tplProduct.ProductFeature.DeployType == "k8s" { - if err := restartK8sPod(restartArgs, namespace, clientset); err != nil { + if err := restartK8sPod(restartArgs, namespace, clusterID); err != nil { log.Error(err) return err } continue } - if err := restartPodHelmByWorkload(restartArgs, namespace, clientset, kubcli); err != nil { + if err := restartPodHelmByWorkload(restartArgs, namespace, clusterID, kubcli); err != nil { log.Error(err) return err } @@ -687,13 +678,13 @@ func restartPod(name, productName, envName, namespace string, commonEnvCfgType c return nil } -func restartK8sPod(args *SvcOptArgs, ns string, clientset *kubernetes.Clientset) error { 
+func restartK8sPod(args *SvcOptArgs, ns, clusterID string) error { selector := labels.Set{setting.ProductLabel: args.ProductName, setting.ServiceLabel: args.ServiceName}.AsSelector() log.Infof("deleting pod from %s where %s", ns, selector) - return updater.DeletePods(ns, selector, clientset) + return updater.DeletePodsV2(context.TODO(), clusterID, ns, updater.WithSelector(selector.String())) } -func restartPodHelmByWorkload(args *SvcOptArgs, ns string, clientset *kubernetes.Clientset, kucli client.Client) error { +func restartPodHelmByWorkload(args *SvcOptArgs, ns, clusterID string, kucli client.Client) error { deployment, found, err := getter.GetDeployment(ns, args.ServiceName, kucli) if err != nil { return err @@ -701,7 +692,7 @@ func restartPodHelmByWorkload(args *SvcOptArgs, ns string, clientset *kubernetes if found { selector := labels.Set(deployment.GetLabels()).AsSelector() log.Infof("deleting deploy %s pod from %s where %s", args.ServiceName, ns, selector) - return updater.DeletePods(ns, selector, clientset) + return updater.DeletePodsV2(context.TODO(), clusterID, ns, updater.WithSelector(selector.String())) } sts, found, err := getter.GetStatefulSet(ns, args.ServiceName, kucli) @@ -711,7 +702,7 @@ func restartPodHelmByWorkload(args *SvcOptArgs, ns string, clientset *kubernetes if found { selector := labels.Set(sts.GetLabels()).AsSelector() log.Infof("deleting sts %s pod from %s where %s", args.ServiceName, ns, selector) - return updater.DeletePods(ns, selector, clientset) + return updater.DeletePodsV2(context.TODO(), clusterID, ns, updater.WithSelector(selector.String())) } return nil diff --git a/pkg/microservice/aslan/core/environment/service/configmap.go b/pkg/microservice/aslan/core/environment/service/configmap.go index 6c52d76118..2db490da57 100644 --- a/pkg/microservice/aslan/core/environment/service/configmap.go +++ b/pkg/microservice/aslan/core/environment/service/configmap.go @@ -17,6 +17,7 @@ limitations under the License. 
package service import ( + "context" "fmt" "sort" "strings" @@ -214,18 +215,12 @@ func UpdateConfigMap(args *models.CreateUpdateCommonEnvCfgArgs, userName string, cm.Data[key] = value } - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(product.ClusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", product.ClusterID, err) - return e.ErrUpdateConfigMap.AddErr(err) - } - yamlData, err := ensureLabelAndNs(cm, product.Namespace, args.ProductName) if err != nil { return e.ErrUpdateResource.AddErr(err) } - if err := updater.UpdateConfigMap(namespace, cm, clientset); err != nil { + if err := updater.UpdateConfigMapV2(context.TODO(), product.ClusterID, namespace, cm); err != nil { log.Error(err) return e.ErrUpdateConfigMap.AddDesc(err.Error()) } @@ -255,7 +250,7 @@ func UpdateConfigMap(args *models.CreateUpdateCommonEnvCfgArgs, userName string, if err != nil { return e.ErrUpdateConfigMap.AddErr(err) } - if err := restartPod(cm.Name, args.ProductName, args.EnvName, namespace, config.CommonEnvCfgTypeConfigMap, clientset, kubeClient); err != nil { + if err := restartPod(cm.Name, args.ProductName, args.EnvName, namespace, product.ClusterID, config.CommonEnvCfgTypeConfigMap, kubeClient); err != nil { return e.ErrRestartService.AddDesc(err.Error()) } return nil @@ -275,12 +270,6 @@ func RollBackConfigMap(envName string, args *RollBackConfigMapArgs, userName, us return e.ErrUpdateConfigMap.AddErr(err) } - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(product.ClusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", product.ClusterID, err) - return e.ErrUpdateConfigMap.AddErr(err) - } - namespace := product.Namespace srcCfg, found, err := getter.GetConfigMap(namespace, args.SrcConfigName, kubeClient) if err != nil { @@ -298,7 +287,7 @@ func RollBackConfigMap(envName string, args 
*RollBackConfigMapArgs, userName, us return e.ErrGetConfigMap.AddDesc("target configMap not found") } - if err := archiveConfigMap(namespace, destinSrc, kubeClient, log); err != nil { + if err := archiveConfigMap(namespace, product.ClusterID, destinSrc, kubeClient, log); err != nil { log.Error(err) return err } @@ -311,7 +300,7 @@ func RollBackConfigMap(envName string, args *RollBackConfigMapArgs, userName, us if updateTime, ok := srcCfg.Labels[setting.UpdateTime]; ok { destinSrc.Labels[setting.UpdateTime] = updateTime } - if err := updater.UpdateConfigMap(namespace, destinSrc, clientset); err != nil { + if err := updater.UpdateConfigMapV2(context.TODO(), product.ClusterID, namespace, destinSrc); err != nil { log.Error(err) return e.ErrUpdateConfigMap.AddDesc(err.Error()) } @@ -322,7 +311,7 @@ func RollBackConfigMap(envName string, args *RollBackConfigMapArgs, userName, us ServiceName: args.ServiceName, } - if err := restartK8sPod(restartArgs, namespace, clientset); err != nil { + if err := restartK8sPod(restartArgs, namespace, product.ClusterID); err != nil { log.Error(err) return e.ErrRestartService.AddDesc(err.Error()) } @@ -331,7 +320,7 @@ func RollBackConfigMap(envName string, args *RollBackConfigMapArgs, userName, us } // archiveConfigMap 备份当前configmap,时间戳最小间隔为秒,需要控制每秒只能更新一次configmap, 只保留最近10次配置 -func archiveConfigMap(namespace string, cfg *corev1.ConfigMap, kubeClient client.Client, log *zap.SugaredLogger) error { +func archiveConfigMap(namespace, clusterID string, cfg *corev1.ConfigMap, kubeClient client.Client, log *zap.SugaredLogger) error { archiveLabel := make(map[string]string) for k, v := range cfg.Labels { @@ -355,17 +344,17 @@ func archiveConfigMap(namespace string, cfg *corev1.ConfigMap, kubeClient client Data: cfg.Data, } - if err := updater.CreateConfigMap(configMap, kubeClient); err != nil { + if err := updater.CreateConfigMapV2(context.TODO(), clusterID, configMap); err != nil { log.Error(err) return e.ErrCreateConfigMap.AddDesc(err.Error()) } 
- cleanArchiveConfigMap(namespace, configMap.Labels, kubeClient, log) + cleanArchiveConfigMap(namespace, clusterID, configMap.Labels, kubeClient, log) return nil } -func cleanArchiveConfigMap(namespace string, ls map[string]string, kubeClient client.Client, log *zap.SugaredLogger) { +func cleanArchiveConfigMap(namespace, clusterID string, ls map[string]string, kubeClient client.Client, log *zap.SugaredLogger) { selector := labels.Set{ setting.ProductLabel: ls[setting.ProductLabel], setting.ServiceLabel: ls[setting.ServiceLabel], @@ -384,7 +373,7 @@ func cleanArchiveConfigMap(namespace string, ls map[string]string, kubeClient cl continue } - if err := updater.DeleteConfigMap(namespace, v.Name, kubeClient); err != nil { + if err := updater.DeleteConfigMapV2(context.TODO(), clusterID, namespace, v.Name); err != nil { log.Errorf("kubeCli.DeleteConfigMap error: %v", err) } } diff --git a/pkg/microservice/aslan/core/environment/service/environment.go b/pkg/microservice/aslan/core/environment/service/environment.go index 414d4ff6e9..6a29bf1a76 100644 --- a/pkg/microservice/aslan/core/environment/service/environment.go +++ b/pkg/microservice/aslan/core/environment/service/environment.go @@ -63,7 +63,6 @@ import ( commonutil "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/util" "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/workflow/service/workflow" "github.com/koderover/zadig/v2/pkg/setting" - kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" "github.com/koderover/zadig/v2/pkg/tool/analysis" "github.com/koderover/zadig/v2/pkg/tool/cache" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -923,7 +922,7 @@ func UpdateProductRegistry(envName, productName, registryID string, production b if err != nil { return e.ErrUpdateEnv.AddErr(err) } - err = ensureKubeEnv(exitedProd.Namespace, registryID, map[string]string{setting.ProductLabel: productName}, false, kubeClient, log) + err = ensureKubeEnv(exitedProd.Namespace, 
registryID, exitedProd.ClusterID, map[string]string{setting.ProductLabel: productName}, false, kubeClient, log) if err != nil { log.Errorf("UpdateProductRegistry ensureKubeEnv by envName:%s,error: %v", envName, err) @@ -1895,7 +1894,7 @@ func UpdateProductDefaultValues(productName, envName, userName, requestID string log.Errorf("UpdateHelmProductRenderset GetKubeClient error, error msg:%s", err) return err } - return ensureKubeEnv(product.Namespace, product.RegistryID, map[string]string{setting.ProductLabel: product.ProductName}, false, kubeClient, log) + return ensureKubeEnv(product.Namespace, product.RegistryID, product.ClusterID, map[string]string{setting.ProductLabel: product.ProductName}, false, kubeClient, log) } func UpdateProductDefaultValuesWithRender(product *commonmodels.Product, _ *models.RenderSet, userName, requestID string, args *EnvRendersetArg, production bool, log *zap.SugaredLogger) error { @@ -3038,7 +3037,7 @@ func preCreateProduct(envName string, args *commonmodels.Product, kubeClient cli if args.ShareEnv.Enable || args.IstioGrayscale.Enable { enableIstioInjection = true } - return ensureKubeEnv(args.Namespace, args.RegistryID, map[string]string{setting.ProductLabel: args.ProductName}, enableIstioInjection, kubeClient, log) + return ensureKubeEnv(args.Namespace, args.RegistryID, args.ClusterID, map[string]string{setting.ProductLabel: args.ProductName}, enableIstioInjection, kubeClient, log) } return nil } @@ -3053,8 +3052,8 @@ func preCreateNSAndSecret(productFeature *templatemodels.ProductFeature) bool { return false } -func ensureKubeEnv(namespace, registryId string, customLabels map[string]string, enableIstioInjection bool, kubeClient client.Client, log *zap.SugaredLogger) error { - err := kube.CreateNamespace(namespace, customLabels, enableIstioInjection, kubeClient) +func ensureKubeEnv(namespace, registryId, clusterID string, customLabels map[string]string, enableIstioInjection bool, kubeClient client.Client, log *zap.SugaredLogger) error 
{ + err := kube.CreateNamespace(namespace, clusterID, customLabels, enableIstioInjection) if err != nil { log.Errorf("[%s] get or create namespace error: %v", namespace, err) return e.ErrCreateNamspace.AddDesc(err.Error()) @@ -3514,7 +3513,7 @@ func UpdateProductGlobalVariables(productName, envName, userName, requestID stri log.Errorf("UpdateHelmProductRenderset GetKubeClient error, error msg:%s", err) return err } - return ensureKubeEnv(product.Namespace, product.RegistryID, map[string]string{setting.ProductLabel: product.ProductName}, false, kubeClient, log) + return ensureKubeEnv(product.Namespace, product.RegistryID, product.ClusterID, map[string]string{setting.ProductLabel: product.ProductName}, false, kubeClient, log) } func UpdateProductGlobalVariablesWithRender(templateProduct *templatemodels.Product, product *commonmodels.Product, productRenderset *models.RenderSet, userName, requestID string, args []*commontypes.GlobalVariableKV, log *zap.SugaredLogger) error { @@ -4274,12 +4273,6 @@ func EnvSleep(productName, envName string, isEnable, isProduction bool, log *zap return e.ErrAnalysisEnvResource.AddErr(err) } - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(prod.ClusterID) - if err != nil { - err = fmt.Errorf("failed to get kube client, err: %s", err) - log.Error(err) - return e.ErrEnvSleep.AddErr(err) - } clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(prod.ClusterID) if err != nil { wrapErr := fmt.Errorf("Failed to create kubernetes clientset for cluster id: %s, the error is: %s", prod.ClusterID, err) @@ -4463,13 +4456,13 @@ func EnvSleep(productName, envName string, isEnable, isProduction bool, log *zap case setting.CronJob: if isEnable { log.Infof("suspend cronjob %s", workload.Name) - err := updater.SuspendCronJob(prod.Namespace, workload.Name, kubeClient, kubeclient.VersionLessThan121(version)) + err := updater.SuspendCronJobV2(context.TODO(), prod.ClusterID, prod.Namespace, 
workload.Name) if err != nil { log.Errorf("failed to suspend %s/cronjob/%s", prod.Namespace, workload.Name) } } else { log.Infof("resume cronjob %s", workload.Name) - err := updater.ResumeCronJob(prod.Namespace, workload.Name, kubeClient, kubeclient.VersionLessThan121(version)) + err := updater.ResumeCronJobV2(context.TODO(), prod.ClusterID, prod.Namespace, workload.Name) if err != nil { log.Errorf("failed to resume %s/cronjob/%s", prod.Namespace, workload.Name) } diff --git a/pkg/microservice/aslan/core/environment/service/environment_creator.go b/pkg/microservice/aslan/core/environment/service/environment_creator.go index 481b4e9a50..43ee009359 100644 --- a/pkg/microservice/aslan/core/environment/service/environment_creator.go +++ b/pkg/microservice/aslan/core/environment/service/environment_creator.go @@ -216,11 +216,7 @@ func (creator *ExternalProductCreator) Create(user, requestID string, args *Prod args.Status = setting.ProductStatusUnstable args.RecycleDay = config.DefaultRecycleDay() - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(args.ClusterID) - if err != nil { - return e.ErrCreateEnv.AddErr(err) - } - err = kube.EnsureNamespaceLabels(args.Namespace, map[string]string{setting.ProductLabel: args.ProductName}, kubeClient) + err := kube.EnsureNamespaceLabels(args.Namespace, args.ClusterID, map[string]string{setting.ProductLabel: args.ProductName}) if err != nil { log.Errorf("[%s][%s] create add namesapce label error: %v", args.EnvName, args.ProductName, err) return e.ErrCreateEnv.AddDesc(err.Error()) diff --git a/pkg/microservice/aslan/core/environment/service/environment_update.go b/pkg/microservice/aslan/core/environment/service/environment_update.go index b32ad23034..ccd2cc5536 100644 --- a/pkg/microservice/aslan/core/environment/service/environment_update.go +++ b/pkg/microservice/aslan/core/environment/service/environment_update.go @@ -225,7 +225,7 @@ func updateK8sProduct(exitedProd *commonmodels.Product, user, 
requestID string, } } - err = ensureKubeEnv(exitedProd.Namespace, exitedProd.RegistryID, map[string]string{setting.ProductLabel: productName}, exitedProd.ShareEnv.Enable, kubeClient, log) + err = ensureKubeEnv(exitedProd.Namespace, exitedProd.RegistryID, exitedProd.ClusterID, map[string]string{setting.ProductLabel: productName}, exitedProd.ShareEnv.Enable, kubeClient, log) if err != nil { log.Errorf("[%s][P:%s] service.updateK8sProduct create kubeEnv error: %v", envName, productName, err) return err diff --git a/pkg/microservice/aslan/core/environment/service/image.go b/pkg/microservice/aslan/core/environment/service/image.go index 707d9e9b4d..c3dac2839c 100644 --- a/pkg/microservice/aslan/core/environment/service/image.go +++ b/pkg/microservice/aslan/core/environment/service/image.go @@ -152,7 +152,7 @@ func UpdateContainerImage(requestID, username string, args *UpdateContainerImage return e.ErrUpdateConainterImage.AddDesc("更新 StatefulSet 容器镜像失败") } case setting.CronJob: - if err := updater.UpdateCronJobImage(namespace, args.Name, args.ContainerName, args.Image, kubeClient, VersionLessThan121(version)); err != nil { + if err := updater.UpdateCronJobImageV2(context.TODO(), product.ClusterID, namespace, args.Name, args.ContainerName, args.Image); err != nil { log.Errorf("[%s] UpdateCronJobImageByName error: %s", namespace, err.Error()) return e.ErrUpdateConainterImage.AddDesc("更新 CronJob 容器镜像失败") } diff --git a/pkg/microservice/aslan/core/environment/service/kube.go b/pkg/microservice/aslan/core/environment/service/kube.go index b5ec3feac1..8a2219d913 100644 --- a/pkg/microservice/aslan/core/environment/service/kube.go +++ b/pkg/microservice/aslan/core/environment/service/kube.go @@ -18,6 +18,7 @@ package service import ( "archive/tar" + "context" "errors" "fmt" "io" @@ -254,13 +255,9 @@ func DeletePod(envName, productName, podName string, production bool, log *zap.S if err != nil { return e.ErrDeletePod.AddErr(err) } - kubeClient, err := 
clientmanager.NewKubeClientManager().GetControllerRuntimeClient(product.ClusterID) - if err != nil { - return e.ErrDeletePod.AddErr(err) - } namespace := product.Namespace - err = updater.DeletePod(namespace, podName, kubeClient) + err = updater.DeletePodsV2(context.TODO(), product.ClusterID, namespace, updater.WithName(podName)) if err != nil { errMsg := fmt.Sprintf("[%s] delete pod %s error: %v", namespace, podName, err) log.Error(errMsg) diff --git a/pkg/microservice/aslan/core/environment/service/pvc.go b/pkg/microservice/aslan/core/environment/service/pvc.go index 4ff49024fe..ee0821194b 100644 --- a/pkg/microservice/aslan/core/environment/service/pvc.go +++ b/pkg/microservice/aslan/core/environment/service/pvc.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "encoding/json" "sort" "sync" @@ -181,18 +182,17 @@ func UpdatePvc(args *models.CreateUpdateCommonEnvCfgArgs, userName string, log * return e.ErrUpdateResource.AddErr(err) } - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(product.ClusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", product.ClusterID, err) - return e.ErrUpdateResource.AddErr(err) - } - yamlData, err := ensureLabelAndNs(pvc, product.Namespace, args.ProductName) if err != nil { return e.ErrUpdateResource.AddErr(err) } - err = updater.UpdatePvc(product.Namespace, pvc, clientset) + err = updater.UpdatePvcV2(context.TODO(), product.ClusterID, product.Namespace, pvc.Name, func(livePvc *corev1.PersistentVolumeClaim) error { + livePvc.Spec = pvc.Spec + livePvc.Labels = pvc.Labels + livePvc.Annotations = pvc.Annotations + return nil + }) if err != nil { log.Error(err) return e.ErrUpdateResource.AddDesc(err.Error()) @@ -220,7 +220,7 @@ func UpdatePvc(args *models.CreateUpdateCommonEnvCfgArgs, userName string, log * return e.ErrUpdateResource.AddErr(err) } - if err := restartPod(pvc.Name, args.ProductName, 
args.EnvName, product.Namespace, config.CommonEnvCfgTypeSecret, clientset, kubeClient); err != nil { + if err := restartPod(pvc.Name, args.ProductName, args.EnvName, product.Namespace, product.ClusterID, config.CommonEnvCfgTypeSecret, kubeClient); err != nil { return e.ErrRestartService.AddDesc(err.Error()) } return nil diff --git a/pkg/microservice/aslan/core/environment/service/secret.go b/pkg/microservice/aslan/core/environment/service/secret.go index 5bb087e373..b40d9915bb 100644 --- a/pkg/microservice/aslan/core/environment/service/secret.go +++ b/pkg/microservice/aslan/core/environment/service/secret.go @@ -181,13 +181,7 @@ func UpdateSecret(args *models.CreateUpdateCommonEnvCfgArgs, userName string, lo if !args.RestartAssociatedSvc { return nil } - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(product.ClusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", product.ClusterID, err) - return e.ErrUpdateConfigMap.AddErr(err) - } - - if err := restartPod(secret.Name, args.ProductName, args.EnvName, product.Namespace, config.CommonEnvCfgTypeSecret, clientset, kubeClient); err != nil { + if err := restartPod(secret.Name, args.ProductName, args.EnvName, product.Namespace, product.ClusterID, config.CommonEnvCfgTypeSecret, kubeClient); err != nil { return e.ErrRestartService.AddDesc(err.Error()) } return nil diff --git a/pkg/tool/kube/updater/configmap.go b/pkg/tool/kube/updater/configmap.go deleted file mode 100644 index 0a1a169078..0000000000 --- a/pkg/tool/kube/updater/configmap.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package updater - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteConfigMaps(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.CoreV1().ConfigMaps(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func UpdateConfigMap(namespace string, cm *corev1.ConfigMap, clientset *kubernetes.Clientset) error { - _, err := clientset.CoreV1().ConfigMaps(namespace).Update( - context.TODO(), - cm, - metav1.UpdateOptions{}, - ) - return err -} - -func DeleteConfigMap(ns, name string, cl client.Client) error { - return deleteObjectWithDefaultOptions(&corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, cl) -} - -func CreateConfigMap(cm *corev1.ConfigMap, cl client.Client) error { - return createObjectNeverAnnotation(cm, cl) -} - -func DeleteConfigMapsAndWait(ns string, selector labels.Selector, cl client.Client) error { - gvk := schema.GroupVersionKind{ - Group: "", - Kind: "ConfigMap", - Version: "v1", - } - return deleteObjectsAndWait(ns, selector, &corev1.ConfigMap{}, gvk, cl) -} diff 
--git a/pkg/tool/kube/updater/cronjob.go b/pkg/tool/kube/updater/cronjob.go deleted file mode 100644 index 963eb56ad8..0000000000 --- a/pkg/tool/kube/updater/cronjob.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package updater - -import ( - "context" - "fmt" - - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteCronJobs(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.BatchV1().CronJobs(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func CreateOrPatchCronJob(cj client.Object, cl client.Client) error { - return createOrPatchObject(cj, cl) -} - -func PatchCronJob(ns, name string, patchBytes []byte, cl client.Client, versionLessThan121 bool) error { - if versionLessThan121 { - return patchObject(&batchv1beta1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, patchBytes, cl) - } - return patchObject(&batchv1.CronJob{ - ObjectMeta: 
metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, patchBytes, cl) -} - -func UpdateCronJobImage(ns, name, container, image string, cl client.Client, versionLessThan121 bool) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}}}`, container, image)) - return PatchCronJob(ns, name, patchBytes, cl, versionLessThan121) -} - -func UpdateCronJobInitImage(ns, name, container, image string, cl client.Client, versionLessThan121 bool) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"jobTemplate":{"spec":{"template":{"spec":{"initContainers":[{"name":"%s","image":"%s"}]}}}}}}`, container, image)) - return PatchCronJob(ns, name, patchBytes, cl, versionLessThan121) -} - -func SuspendCronJob(ns, name string, cl client.Client, versionLessThan121 bool) error { - patchBytes := []byte(`{"spec":{"suspend":true}}`) - return PatchCronJob(ns, name, patchBytes, cl, versionLessThan121) -} - -func ResumeCronJob(ns, name string, cl client.Client, versionLessThan121 bool) error { - patchBytes := []byte(`{"spec":{"suspend":false}}`) - return PatchCronJob(ns, name, patchBytes, cl, versionLessThan121) -} diff --git a/pkg/tool/kube/updater/deployment.go b/pkg/tool/kube/updater/deployment.go deleted file mode 100644 index 400c449f31..0000000000 --- a/pkg/tool/kube/updater/deployment.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "bytes" - "context" - "fmt" - "text/template" - "time" - - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -var restartPatchTemplate = template.Must(template.New("restart-patch-template").Parse(`{ - "spec": { - "template": { - "metadata": { - "annotations": { - "restart-by-koderover": "{{.Time}}" - } - } - } - } -}`)) - -func PatchDeployment(ns, name string, patchBytes []byte, cl client.Client) error { - return patchObject(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, patchBytes, cl) -} - -// TODO: LOU: it is not the right way to restart a deployment, since it is a hack and it -// will generate a new revision which will pollute the revision history. -func RestartDeployment(ns, name string, cl client.Client) error { - now := time.Now().Format(time.RFC3339Nano) - payload := bytes.NewBufferString("") - _ = restartPatchTemplate.Execute(payload, struct { - Time string - }{now}) - - if err := PatchDeployment(ns, name, payload.Bytes(), cl); err != nil { - return fmt.Errorf("failed to restart %s/deploy/%s: %v", ns, name, err) - } - - return nil -} - -func DeleteDeployments(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.AppsV1().Deployments(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func UpdateDeploymentImage(ns, name, container, image string, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, container, image)) - - 
return PatchDeployment(ns, name, patchBytes, cl) -} - -func UpdateDeploymentInitImage(ns, name, container, image string, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"template":{"spec":{"initContainers":[{"name":"%s","image":"%s"}]}}}}`, container, image)) - - return PatchDeployment(ns, name, patchBytes, cl) -} - -func ScaleDeployment(ns, name string, replicas int, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas": %d}}`, replicas)) - return PatchDeployment(ns, name, patchBytes, cl) -} - -func CreateOrPatchDeployment(d *appsv1.Deployment, cl client.Client) error { - return createOrPatchObject(d, cl) -} - -func DeleteDeploymentAndWait(ns, name string, cl client.Client) error { - return deleteObjectAndWait(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, cl) -} - -func DeleteDeploymentAndWaitWithTimeout(ns, name string, timeout time.Duration, cl client.Client) error { - return deleteObjectAndWaitWithTimeout(&appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, cl, timeout) -} diff --git a/pkg/tool/kube/updater/ingress.go b/pkg/tool/kube/updater/ingress.go deleted file mode 100644 index 1376588405..0000000000 --- a/pkg/tool/kube/updater/ingress.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteIngresses(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - version, err := clientset.Discovery().ServerVersion() - if err != nil { - return err - } - - deletePolicy := metav1.DeletePropagationForeground - - if kubeclient.VersionLessThan122(version) { - err = clientset.ExtensionsV1beta1().Ingresses(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, metav1.ListOptions{ - LabelSelector: selector.String(), - }) - } else { - err = clientset.NetworkingV1().Ingresses(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - } - - return util.IgnoreNotFoundError(err) -} - -func DeleteIngresseWithName(namespace, name string, clientset *kubernetes.Clientset) error { - version, err := clientset.Discovery().ServerVersion() - if err != nil { - return err - } - - deletePolicy := metav1.DeletePropagationForeground - if kubeclient.VersionLessThan122(version) { - return clientset.ExtensionsV1beta1().Ingresses(namespace).Delete( - context.TODO(), name, - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }) - } - - return clientset.NetworkingV1().Ingresses(namespace).Delete( - context.TODO(), name, - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - ) -} - -func CreateIngress(namespace string, ingress *v1.Ingress, clientset *kubernetes.Clientset) error { - _, err := clientset.NetworkingV1().Ingresses(namespace).Create( - context.TODO(), ingress, - metav1.CreateOptions{}, - ) - return err -} diff --git 
a/pkg/tool/kube/updater/job.go b/pkg/tool/kube/updater/job.go deleted file mode 100644 index 45910626f6..0000000000 --- a/pkg/tool/kube/updater/job.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package updater - -import ( - "context" - "fmt" - - batchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteJobs(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.BatchV1().Jobs(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func CreateJob(job *batchv1.Job, cl client.Client) error { - return createObject(job, cl) -} - -func DeleteJob(ns, name string, cl client.Client) error { - return deleteObjectWithDefaultOptions(&batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, cl) -} - -func DeleteJobAndWait(ns, name string, cl client.Client) error { - return deleteObjectAndWait(&batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - 
}, cl) -} - -func DeleteJobsAndWait(ns string, selector labels.Selector, cl client.Client) error { - gvk := schema.GroupVersionKind{ - Group: "batch", - Kind: "Job", - Version: "v1", - } - return deleteObjectsAndWait(ns, selector, &batchv1.Job{}, gvk, cl) -} - -func PatchJob(ns, name string, patchBytes []byte, cl client.Client) error { - return patchObject(&batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, patchBytes, cl) -} - -func UpdateJobImage(ns, name, container, image string, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, container, image)) - return PatchJob(ns, name, patchBytes, cl) -} diff --git a/pkg/tool/kube/updater/namespace.go b/pkg/tool/kube/updater/namespace.go deleted file mode 100644 index 699369d3a1..0000000000 --- a/pkg/tool/kube/updater/namespace.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func DeleteNamespace(name string, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - return clientset.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }) -} - -func CreateNamespace(ns *corev1.Namespace, cl client.Client) error { - return createObject(ns, cl) -} - -func CreateNamespaceByName(ns string, labels map[string]string, cl client.Client) error { - n := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "", - Name: ns, - Labels: labels, - }, - } - return CreateNamespace(n, cl) -} - -func UpdateNamespace(ns *corev1.Namespace, cl client.Client) error { - return updateObject(ns, cl) -} diff --git a/pkg/tool/kube/updater/pod.go b/pkg/tool/kube/updater/pod.go deleted file mode 100644 index 1ab88566dc..0000000000 --- a/pkg/tool/kube/updater/pod.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeletePods(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.CoreV1().Pods(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func DeletePod(ns, name string, cl client.Client) error { - return deleteObjectWithDefaultOptions(&corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, cl) -} - -func PatchPod(ns, name string, patchBytes []byte, cl client.Client) error { - return patchObject(&corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, patchBytes, cl) -} diff --git a/pkg/tool/kube/updater/pv.go b/pkg/tool/kube/updater/pv.go deleted file mode 100644 index ba6377c0a9..0000000000 --- a/pkg/tool/kube/updater/pv.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeletePersistentVolumes(selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.CoreV1().PersistentVolumes().DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} diff --git a/pkg/tool/kube/updater/pvc.go b/pkg/tool/kube/updater/pvc.go deleted file mode 100644 index 79459950cb..0000000000 --- a/pkg/tool/kube/updater/pvc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeletePersistentVolumeClaims(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.CoreV1().PersistentVolumeClaims(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func DeletePvcWithName(namespace, name string, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - return clientset.CoreV1().PersistentVolumeClaims(namespace).Delete( - context.TODO(), name, - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - ) -} - -func CreatePvc(namespace string, pvc *corev1.PersistentVolumeClaim, clientset *kubernetes.Clientset) error { - _, err := clientset.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvc, - metav1.CreateOptions{}) - return err -} - -func UpdatePvc(namespace string, pvc *corev1.PersistentVolumeClaim, clientset *kubernetes.Clientset) error { - _, err := clientset.CoreV1().PersistentVolumeClaims(namespace).Update(context.TODO(), pvc, - metav1.UpdateOptions{}) - return err -} diff --git a/pkg/tool/kube/updater/replicaset.go b/pkg/tool/kube/updater/replicaset.go deleted file mode 100644 index a3c2eb21e9..0000000000 --- a/pkg/tool/kube/updater/replicaset.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package updater - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteReplicaSets(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.AppsV1().ReplicaSets(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} diff --git a/pkg/tool/kube/updater/role.go b/pkg/tool/kube/updater/role.go deleted file mode 100644 index 868bf5bdf0..0000000000 --- a/pkg/tool/kube/updater/role.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteRoles(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - version, err := clientset.Discovery().ServerVersion() - if err != nil { - return err - } - - deletePolicy := metav1.DeletePropagationForeground - if kubeclient.VersionLessThan122(version) { - err = clientset.RbacV1beta1().Roles(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - } else { - err = clientset.RbacV1().Roles(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - } - - return util.IgnoreNotFoundError(err) -} diff --git a/pkg/tool/kube/updater/rolebinding.go b/pkg/tool/kube/updater/rolebinding.go deleted file mode 100644 index 4372d718ce..0000000000 --- a/pkg/tool/kube/updater/rolebinding.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteRoleBindings(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.RbacV1().RoleBindings(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} From 84ee350bfa289c85d701f5aa67ec3c29492ea202 Mon Sep 17 00:00:00 2001 From: Min Min Date: Wed, 18 Mar 2026 20:24:38 +0800 Subject: [PATCH 05/21] add missing files Signed-off-by: Min Min --- pkg/tool/kube/updater/cloneset_v2.go | 47 +++ pkg/tool/kube/updater/clusterrole_v2.go | 59 ++++ pkg/tool/kube/updater/configmap_v2.go | 255 ++++++++++++++ pkg/tool/kube/updater/cronjob_v2.go | 428 ++++++++++++++++++++++++ pkg/tool/kube/updater/ingress_v2.go | 70 ++++ pkg/tool/kube/updater/job_v2.go | 181 ++++++++++ pkg/tool/kube/updater/namespace_v2.go | 84 +++++ 7 files changed, 1124 insertions(+) create mode 100644 pkg/tool/kube/updater/cloneset_v2.go create mode 100644 pkg/tool/kube/updater/clusterrole_v2.go create mode 100644 pkg/tool/kube/updater/configmap_v2.go create mode 100644 pkg/tool/kube/updater/cronjob_v2.go create mode 100644 pkg/tool/kube/updater/ingress_v2.go create mode 100644 pkg/tool/kube/updater/job_v2.go create mode 100644 pkg/tool/kube/updater/namespace_v2.go diff --git a/pkg/tool/kube/updater/cloneset_v2.go b/pkg/tool/kube/updater/cloneset_v2.go new file mode 100644 index 0000000000..9c87180611 --- /dev/null +++ b/pkg/tool/kube/updater/cloneset_v2.go @@ -0,0 +1,47 @@ +/* +Copyright 2026 The KodeRover Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + "github.com/openkruise/kruise-api/apps/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" +) + +func ScaleCloneSetV2(ctx context.Context, clusterID, namespace, name string, replicas int) error { + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas)) + err = c.Patch(ctx, &v1alpha1.CloneSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + }, client.RawPatch(types.MergePatchType, patchBytes)) + if err != nil { + return fmt.Errorf("failed to scale CloneSet %s/%s: %w", namespace, name, err) + } + return nil +} diff --git a/pkg/tool/kube/updater/clusterrole_v2.go b/pkg/tool/kube/updater/clusterrole_v2.go new file mode 100644 index 0000000000..59a68fcefd --- /dev/null +++ b/pkg/tool/kube/updater/clusterrole_v2.go @@ -0,0 +1,59 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteClusterRolesV2(ctx context.Context, clusterID string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for cluster role deletion") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector}, + } + + err = c.DeleteAllOf(ctx, &rbacv1.ClusterRole{}, deleteOpts) + return util.IgnoreNotFoundError(err) +} diff --git a/pkg/tool/kube/updater/configmap_v2.go b/pkg/tool/kube/updater/configmap_v2.go new file mode 100644 index 0000000000..63b8b7726d --- /dev/null +++ b/pkg/tool/kube/updater/configmap_v2.go @@ -0,0 +1,255 @@ +/* +Copyright 2026 The KodeRover Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "encoding/json" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/yaml" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +// TODO: fix the whole function design + +func DeleteConfigMapsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + deletePolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + PropagationPolicy: &deletePolicy, + } + + if config.name != "" { + err = c.CoreV1().ConfigMaps(namespace).Delete(ctx, config.name, deleteOpts) + 
return util.IgnoreNotFoundError(err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + err = c.CoreV1().ConfigMaps(namespace).DeleteCollection(ctx, deleteOpts, metav1.ListOptions{ + LabelSelector: selector.String(), + }) + return util.IgnoreNotFoundError(err) +} + +func DeleteConfigMapV2(ctx context.Context, clusterID, namespace, name string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + err = c.CoreV1().ConfigMaps(namespace).Delete(ctx, name, metav1.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + }) + return util.IgnoreNotFoundError(err) +} + +func CreateConfigMapV2(ctx context.Context, clusterID string, cm *corev1.ConfigMap) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + _, err = c.CoreV1().ConfigMaps(cm.Namespace).Create(ctx, cm, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create configmap %s/%s: %w", cm.Namespace, cm.Name, err) + } + + return nil +} + +func UpdateConfigMapV2(ctx context.Context, clusterID, namespace string, cm *corev1.ConfigMap) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + _, err = c.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update configmap %s/%s: %w", namespace, cm.Name, err) + } + + return nil +} + +// CreateOrPatchConfigMapV2 implements a 3-way merge patch for ConfigMap, similar to CreateOrPatchDeploymentV2. 
+func CreateOrPatchConfigMapV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj corev1.ConfigMap + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to ConfigMap: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("configmap name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj corev1.ConfigMap + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().ConfigMaps(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := 
strategicpatch.NewPatchMetaFromStruct(&corev1.ConfigMap{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.CoreV1().ConfigMaps(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("configmap operation failed after retries: %w", err) + } + + return nil +} + +func DeleteConfigMapsAndWaitV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for deletion to prevent accidental namespace wipeout") + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + cli, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + err = cli.CoreV1().ConfigMaps(namespace).DeleteCollection(ctx, + metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}, + metav1.ListOptions{LabelSelector: selector.String()}, + ) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete configmaps matching %q in %s: %w", config.selector, namespace, err) + } + + err = wait.PollUntilContextTimeout(ctx, time.Second, 60*time.Second, true, func(c context.Context) (done bool, err error) { + list, listErr := cli.CoreV1().ConfigMaps(namespace).List(c, metav1.ListOptions{LabelSelector: 
selector.String()}) + if listErr != nil { + return false, nil + } + return len(list.Items) == 0, nil + }) + + if err != nil { + return fmt.Errorf("timeout waiting for configmaps matching %q in %s to be completely deleted: %w", config.selector, namespace, err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/cronjob_v2.go b/pkg/tool/kube/updater/cronjob_v2.go new file mode 100644 index 0000000000..711afb4af3 --- /dev/null +++ b/pkg/tool/kube/updater/cronjob_v2.go @@ -0,0 +1,428 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package updater + +import ( + "context" + "encoding/json" + "fmt" + + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/yaml" + + kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteCronJobsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + version, err := c.Discovery().ServerVersion() + if err != nil { + return fmt.Errorf("failed to get server version: %w", err) + } + + deletePolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + PropagationPolicy: &deletePolicy, + } + + if config.name != "" { + if kubeclient.VersionLessThan121(version) { + err = c.BatchV1beta1().CronJobs(namespace).Delete(ctx, config.name, deleteOpts) + } else { + err = c.BatchV1().CronJobs(namespace).Delete(ctx, config.name, deleteOpts) + } + return util.IgnoreNotFoundError(err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + listOpts := 
metav1.ListOptions{ + LabelSelector: selector.String(), + } + + if kubeclient.VersionLessThan121(version) { + err = c.BatchV1beta1().CronJobs(namespace).DeleteCollection(ctx, deleteOpts, listOpts) + } else { + err = c.BatchV1().CronJobs(namespace).DeleteCollection(ctx, deleteOpts, listOpts) + } + return util.IgnoreNotFoundError(err) +} + +// CreateOrPatchCronJobV2 implements a 3-way merge patch for CronJob, similar to CreateOrPatchDeploymentV2. +// On clusters < 1.21, it falls back to batch/v1beta1 API. +func CreateOrPatchCronJobV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + version, err := c.Discovery().ServerVersion() + if err != nil { + return fmt.Errorf("failed to get server version: %w", err) + } + + if kubeclient.VersionLessThan121(version) { + return createOrPatchCronJobBeta(ctx, c, namespace, originalYAML, targetYAML) + } + return createOrPatchCronJobV1(ctx, c, namespace, originalYAML, targetYAML) +} + +func createOrPatchCronJobV1(ctx context.Context, c *kubernetes.Clientset, namespace, originalYAML, targetYAML string) error { + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj batchv1.CronJob + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to CronJob: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("cronjob name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := 
yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj batchv1.CronJob + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.BatchV1().CronJobs(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&batchv1.CronJob{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.BatchV1().CronJobs(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("cronjob operation failed after retries: %w", err) + } + + return nil +} + +func createOrPatchCronJobBeta(ctx context.Context, c *kubernetes.Clientset, namespace, originalYAML, targetYAML string) error { + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj batchv1beta1.CronJob + if err := 
json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to CronJob: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("cronjob name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj batchv1beta1.CronJob + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.BatchV1beta1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.BatchV1beta1().CronJobs(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&batchv1beta1.CronJob{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = 
c.BatchV1beta1().CronJobs(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("cronjob operation failed after retries: %w", err) + } + + return nil +} + +func UpdateCronJobImageV2(ctx context.Context, clusterID, namespace, name, containerName, newImage string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + version, err := c.Discovery().ServerVersion() + if err != nil { + return fmt.Errorf("failed to get server version: %w", err) + } + + patchPayload := map[string]interface{}{ + "spec": map[string]interface{}{ + "jobTemplate": map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": containerName, + "image": newImage, + }, + }, + }, + }, + }, + }, + }, + } + + patchBytes, err := json.Marshal(patchPayload) + if err != nil { + return fmt.Errorf("failed to marshal image update patch payload: %w", err) + } + + if kubeclient.VersionLessThan121(version) { + _, err = c.BatchV1beta1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } else { + _, err = c.BatchV1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } + if err != nil { + return fmt.Errorf("failed to patch image for cronjob %s/%s: %w", namespace, name, err) + } + + return nil +} + +func UpdateCronJobInitImageV2(ctx context.Context, clusterID, namespace, name, containerName, newImage string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + version, err := c.Discovery().ServerVersion() + if err != nil { + return 
fmt.Errorf("failed to get server version: %w", err) + } + + patchPayload := map[string]interface{}{ + "spec": map[string]interface{}{ + "jobTemplate": map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "initContainers": []map[string]interface{}{ + { + "name": containerName, + "image": newImage, + }, + }, + }, + }, + }, + }, + }, + } + + patchBytes, err := json.Marshal(patchPayload) + if err != nil { + return fmt.Errorf("failed to marshal init image update patch payload: %w", err) + } + + if kubeclient.VersionLessThan121(version) { + _, err = c.BatchV1beta1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } else { + _, err = c.BatchV1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } + if err != nil { + return fmt.Errorf("failed to patch init image for cronjob %s/%s: %w", namespace, name, err) + } + + return nil +} + +func SuspendCronJobV2(ctx context.Context, clusterID, namespace, name string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + version, err := c.Discovery().ServerVersion() + if err != nil { + return fmt.Errorf("failed to get server version: %w", err) + } + + patchBytes := []byte(`{"spec":{"suspend":true}}`) + + if kubeclient.VersionLessThan121(version) { + _, err = c.BatchV1beta1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } else { + _, err = c.BatchV1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } + if err != nil { + return fmt.Errorf("failed to suspend cronjob %s/%s: %w", namespace, name, err) + } + + return nil +} + +func ResumeCronJobV2(ctx context.Context, clusterID, namespace, name string) error { + c, 
err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + version, err := c.Discovery().ServerVersion() + if err != nil { + return fmt.Errorf("failed to get server version: %w", err) + } + + patchBytes := []byte(`{"spec":{"suspend":false}}`) + + if kubeclient.VersionLessThan121(version) { + _, err = c.BatchV1beta1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } else { + _, err = c.BatchV1().CronJobs(namespace).Patch(ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + } + if err != nil { + return fmt.Errorf("failed to resume cronjob %s/%s: %w", namespace, name, err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/ingress_v2.go b/pkg/tool/kube/updater/ingress_v2.go new file mode 100644 index 0000000000..0ba7e6206d --- /dev/null +++ b/pkg/tool/kube/updater/ingress_v2.go @@ -0,0 +1,70 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package updater + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteIngressesV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + deletePolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + PropagationPolicy: &deletePolicy, + } + + if config.name != "" { + err = c.NetworkingV1().Ingresses(namespace).Delete(ctx, config.name, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + if config.selector != "" { + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + err = c.NetworkingV1().Ingresses(namespace).DeleteCollection(ctx, deleteOpts, metav1.ListOptions{ + LabelSelector: selector.String(), + }) + return util.IgnoreNotFoundError(err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/job_v2.go b/pkg/tool/kube/updater/job_v2.go new file mode 100644 index 0000000000..b92156eb95 --- /dev/null +++ b/pkg/tool/kube/updater/job_v2.go @@ -0,0 +1,181 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + "time" + + batchv1 "k8s.io/api/batch/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteJobsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for deletion to prevent accidental namespace wipeout") + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = cl.DeleteAllOf(ctx, &batchv1.Job{}, deleteOpts) + return util.IgnoreNotFoundError(err) +} + +func CreateJobV2(ctx context.Context, clusterID string, job *batchv1.Job) error { + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) 
+ if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + if err := util.CreateApplyAnnotation(job); err != nil { + return fmt.Errorf("failed to create apply annotation: %w", err) + } + + if err := cl.Create(ctx, job); err != nil { + return fmt.Errorf("failed to create job %s/%s: %w", job.Namespace, job.Name, err) + } + + return nil +} + +func DeleteJobV2(ctx context.Context, clusterID, namespace, name string) error { + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + + propagationPolicy := metav1.DeletePropagationForeground + err = cl.Delete(ctx, job, &client.DeleteOptions{PropagationPolicy: &propagationPolicy}) + return util.IgnoreNotFoundError(err) +} + +func DeleteJobAndWaitV2(ctx context.Context, clusterID, namespace, name string) error { + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + + propagationPolicy := metav1.DeletePropagationForeground + err = cl.Delete(ctx, job, &client.DeleteOptions{PropagationPolicy: &propagationPolicy}) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete job %s/%s: %w", namespace, name, err) + } + + err = wait.PollUntilContextTimeout(ctx, time.Second, 60*time.Second, true, func(c context.Context) (done bool, err error) { + fetched := &batchv1.Job{} + errGet := cl.Get(c, client.ObjectKey{Namespace: namespace, Name: name}, fetched) + if apierrors.IsNotFound(errGet) { + return true, nil + } + if errGet != nil { + return false, nil + } + return false, nil + }) + + if err != nil { + return fmt.Errorf("timeout waiting for job %s/%s to be 
completely deleted: %w", namespace, name, err) + } + + return nil +} + +func DeleteJobsAndWaitV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for deletion to prevent accidental namespace wipeout") + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = cl.DeleteAllOf(ctx, &batchv1.Job{}, deleteOpts) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete jobs matching %q in %s: %w", config.selector, namespace, err) + } + + err = wait.PollUntilContextTimeout(ctx, time.Second, 60*time.Second, true, func(c context.Context) (done bool, err error) { + var list batchv1.JobList + if err := cl.List(c, &list, &client.ListOptions{LabelSelector: selector, Namespace: namespace}); err != nil { + return false, nil + } + return len(list.Items) == 0, nil + }) + + if err != nil { + return fmt.Errorf("timeout waiting for jobs matching %q in %s to be completely deleted: %w", config.selector, namespace, err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/namespace_v2.go b/pkg/tool/kube/updater/namespace_v2.go new file mode 100644 index 0000000000..ba02bc9098 --- /dev/null +++ b/pkg/tool/kube/updater/namespace_v2.go @@ -0,0 +1,84 @@ +/* +Copyright 2026 The KodeRover Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteNamespaceV2(ctx context.Context, clusterID, name string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + deletePolicy := metav1.DeletePropagationForeground + err = c.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{ + PropagationPolicy: &deletePolicy, + }) + return util.IgnoreNotFoundError(err) +} + +// TODO: move the common labels into this function, leaving the additional label to the input. 
+func CreateNamespaceByNameV2(ctx context.Context, clusterID, ns string, nsLabels map[string]string) error { + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + Labels: nsLabels, + }, + } + err = c.Create(ctx, namespace) + if err != nil { + return fmt.Errorf("failed to create namespace %s: %w", ns, err) + } + return nil +} + +func UpdateNamespaceV2(ctx context.Context, clusterID, namespaceName string, mutationFunc func(ns *corev1.Namespace) error) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + ns, err := c.CoreV1().Namespaces().Get(ctx, namespaceName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get live namespace: %w", err) + } + + if err := mutationFunc(ns); err != nil { + return fmt.Errorf("mutation failed or aborted: %w", err) + } + + _, err = c.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{}) + return err + }) + + return err +} From 4425510525134f0767818b1f7aa33489ef2319cb Mon Sep 17 00:00:00 2001 From: Min Min Date: Wed, 18 Mar 2026 20:27:09 +0800 Subject: [PATCH 06/21] add even more missing files Signed-off-by: Min Min --- pkg/tool/kube/updater/pod_v2.go | 106 +++++++++++++++++ pkg/tool/kube/updater/pv_v2.go | 59 ++++++++++ pkg/tool/kube/updater/pvc_v2.go | 127 +++++++++++++++++++++ pkg/tool/kube/updater/replicaset_v2.go | 59 ++++++++++ pkg/tool/kube/updater/role_v2.go | 59 ++++++++++ pkg/tool/kube/updater/rolebinding_v2.go | 59 ++++++++++ pkg/tool/kube/updater/secret_v2.go | 104 +++++++++++++++++ pkg/tool/kube/updater/service_v2.go | 119 +++++++++++++++++++ pkg/tool/kube/updater/serviceaccount_v2.go | 74 ++++++++++++ 9 files changed, 
766 insertions(+) create mode 100644 pkg/tool/kube/updater/pod_v2.go create mode 100644 pkg/tool/kube/updater/pv_v2.go create mode 100644 pkg/tool/kube/updater/pvc_v2.go create mode 100644 pkg/tool/kube/updater/replicaset_v2.go create mode 100644 pkg/tool/kube/updater/role_v2.go create mode 100644 pkg/tool/kube/updater/rolebinding_v2.go create mode 100644 pkg/tool/kube/updater/secret_v2.go create mode 100644 pkg/tool/kube/updater/service_v2.go create mode 100644 pkg/tool/kube/updater/serviceaccount_v2.go diff --git a/pkg/tool/kube/updater/pod_v2.go b/pkg/tool/kube/updater/pod_v2.go new file mode 100644 index 0000000000..4e4369802d --- /dev/null +++ b/pkg/tool/kube/updater/pod_v2.go @@ -0,0 +1,106 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package updater + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeletePodsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + + if config.name != "" { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: config.name, + }, + } + err = c.Delete(ctx, pod, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + if config.selector != "" { + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + pod := &corev1.Pod{} + delAllOfOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, pod, delAllOfOpts) + return util.IgnoreNotFoundError(err) + } + + return nil +} + +func UpdatePodV2(ctx context.Context, clusterID, namespace, name string, mutationFunc func(pod 
*corev1.Pod) error) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + pod, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get live pod: %w", err) + } + + if err := mutationFunc(pod); err != nil { + return fmt.Errorf("mutation failed or aborted: %w", err) + } + + _, err = c.CoreV1().Pods(namespace).Update(ctx, pod, metav1.UpdateOptions{}) + return err + }) + + return err +} diff --git a/pkg/tool/kube/updater/pv_v2.go b/pkg/tool/kube/updater/pv_v2.go new file mode 100644 index 0000000000..30982cfa78 --- /dev/null +++ b/pkg/tool/kube/updater/pv_v2.go @@ -0,0 +1,59 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package updater + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeletePersistentVolumesV2(ctx context.Context, clusterID string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for persistent volume deletion") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector}, + } + + err = c.DeleteAllOf(ctx, &corev1.PersistentVolume{}, deleteOpts) + return util.IgnoreNotFoundError(err) +} diff --git a/pkg/tool/kube/updater/pvc_v2.go b/pkg/tool/kube/updater/pvc_v2.go new file mode 100644 index 0000000000..86193342c0 --- /dev/null +++ b/pkg/tool/kube/updater/pvc_v2.go @@ -0,0 +1,127 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeletePVCV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + propagationPolicy := metav1.DeletePropagationForeground + + if config.name != "" { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: config.name, + }, + } + deleteOpts := &client.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + err = c.Delete(ctx, pvc, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + if config.selector != "" { + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + pvc := &corev1.PersistentVolumeClaim{} + delAllOfOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, pvc, 
delAllOfOpts) + return util.IgnoreNotFoundError(err) + } + + return nil +} + +func CreatePVCV2(ctx context.Context, clusterID, namespace string, pvc *corev1.PersistentVolumeClaim) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + pvc.SetNamespace(namespace) + _, err = c.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create PVC %s/%s: %w", namespace, pvc.Name, err) + } + return nil +} + +func UpdatePvcV2(ctx context.Context, clusterID, namespace, pvcName string, mutationFunc func(pvc *corev1.PersistentVolumeClaim) error) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + pvc, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get live PVC: %w", err) + } + + before := pvc.DeepCopy() + + if err := mutationFunc(pvc); err != nil { + return fmt.Errorf("mutation failed or aborted: %w", err) + } + + if equality.Semantic.DeepEqual(before, pvc) { + return nil + } + + _, err = c.CoreV1().PersistentVolumeClaims(namespace).Update(ctx, pvc, metav1.UpdateOptions{}) + return err + }) + + return err +} diff --git a/pkg/tool/kube/updater/replicaset_v2.go b/pkg/tool/kube/updater/replicaset_v2.go new file mode 100644 index 0000000000..621791442d --- /dev/null +++ b/pkg/tool/kube/updater/replicaset_v2.go @@ -0,0 +1,59 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteReplicaSetsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for replica set deletion") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, &appsv1.ReplicaSet{}, deleteOpts) + return util.IgnoreNotFoundError(err) +} diff --git a/pkg/tool/kube/updater/role_v2.go b/pkg/tool/kube/updater/role_v2.go new file mode 100644 index 0000000000..e464c006a2 --- /dev/null +++ b/pkg/tool/kube/updater/role_v2.go @@ -0,0 +1,59 @@ +/* +Copyright 2026 The KodeRover Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteRolesV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for role deletion") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, &rbacv1.Role{}, deleteOpts) + return util.IgnoreNotFoundError(err) +} diff --git a/pkg/tool/kube/updater/rolebinding_v2.go b/pkg/tool/kube/updater/rolebinding_v2.go new file mode 100644 index 0000000000..61b5fdd675 --- /dev/null +++ 
b/pkg/tool/kube/updater/rolebinding_v2.go @@ -0,0 +1,59 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteRoleBindingsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for role binding deletion") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, deleteOpts) + return util.IgnoreNotFoundError(err) +} diff --git a/pkg/tool/kube/updater/secret_v2.go 
b/pkg/tool/kube/updater/secret_v2.go new file mode 100644 index 0000000000..7693eca9f0 --- /dev/null +++ b/pkg/tool/kube/updater/secret_v2.go @@ -0,0 +1,104 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteSecretsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + if config.name != "" { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: config.name, + }, + } + err = cl.Delete(ctx, svc) + return util.IgnoreNotFoundError(err) + } + + if 
config.selector != "" { + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + deploy := &corev1.Secret{} + + propagationPolicy := metav1.DeletePropagationBackground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = cl.DeleteAllOf(ctx, deploy, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + return fmt.Errorf("must specify either a name or a selector for deletion of the service to prevent accidental namespace wipeout") +} + +func UpdateOrCreateSecretV2(ctx context.Context, clusterID string, s *corev1.Secret) error { + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + if err := util.CreateApplyAnnotation(s); err != nil { + return fmt.Errorf("failed to create apply annotation: %w", err) + } + + err = cl.Update(ctx, s) + if err == nil { + return nil + } + if apierrors.IsNotFound(err) { + if createErr := cl.Create(ctx, s); createErr != nil { + return fmt.Errorf("failed to create secret %s/%s: %w", s.Namespace, s.Name, createErr) + } + return nil + } + return fmt.Errorf("failed to update secret %s/%s: %w", s.Namespace, s.Name, err) +} diff --git a/pkg/tool/kube/updater/service_v2.go b/pkg/tool/kube/updater/service_v2.go new file mode 100644 index 0000000000..f22820c388 --- /dev/null +++ b/pkg/tool/kube/updater/service_v2.go @@ -0,0 +1,119 @@ +/* +Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteServicesV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") + } + + cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + if config.name != "" { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: config.name, + }, + } + err = cl.Delete(ctx, svc) + return util.IgnoreNotFoundError(err) + } + + if config.selector != "" { + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + deploy := &corev1.Service{} + + propagationPolicy := metav1.DeletePropagationBackground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: 
&propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = cl.DeleteAllOf(ctx, deploy, deleteOpts) + return util.IgnoreNotFoundError(err) + } + + return fmt.Errorf("must specify either a name or a selector for deletion of the service to prevent accidental namespace wipeout") +} + +// UpdateServiceV2 takes the cluster and resource info to identify a service, and uses the mutation function to update it with retry on conflict. +func UpdateServiceV2(ctx context.Context, clusterID, namespace, serviceName string, mutationFunc func(svc *corev1.Service) error) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + svc, err := c.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get live service: %w", err) + } + + if err := mutationFunc(svc); err != nil { + return fmt.Errorf("mutation failed or aborted: %w", err) + } + + _, err = c.CoreV1().Services(namespace).Update(ctx, svc, metav1.UpdateOptions{}) + return err + }) + + return err +} + +func CreateServiceV2(ctx context.Context, clusterID, namespace string, svc *corev1.Service) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + svc.SetNamespace(namespace) + _, err = c.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create service %s/%s: %w", namespace, svc.Name, err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/serviceaccount_v2.go b/pkg/tool/kube/updater/serviceaccount_v2.go new file mode 100644 index 0000000000..6ddfab23ec --- /dev/null +++ b/pkg/tool/kube/updater/serviceaccount_v2.go @@ -0,0 +1,74 @@ +/* 
+Copyright 2026 The KodeRover Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package updater + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/koderover/zadig/v2/pkg/tool/clientmanager" + "github.com/koderover/zadig/v2/pkg/tool/kube/util" +) + +func DeleteServiceAccountsV2(ctx context.Context, clusterID, namespace string, opts ...DeleteOption) error { + config := &deleteConfig{} + for _, opt := range opts { + opt(config) + } + + if config.selector == "" { + return fmt.Errorf("must specify a selector for service account deletion") + } + + c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } + + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := &client.DeleteAllOfOptions{ + DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, + ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + } + + err = c.DeleteAllOf(ctx, &corev1.ServiceAccount{}, deleteOpts) + return util.IgnoreNotFoundError(err) +} + +func CreateServiceAccountV2(ctx context.Context, clusterID, namespace string, sa 
*corev1.ServiceAccount) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + sa.SetNamespace(namespace) + _, err = c.CoreV1().ServiceAccounts(namespace).Create(ctx, sa, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create service account %s/%s: %w", namespace, sa.Name, err) + } + + return nil +} From 786a22a8f52ff38334d2dd587a499357d5d7ac27 Mon Sep 17 00:00:00 2001 From: Min Min Date: Thu, 19 Mar 2026 13:07:01 +0800 Subject: [PATCH 07/21] all logic temporarily done Signed-off-by: Min Min --- .../aslan/core/common/service/kube/actions.go | 12 +- .../aslan/core/common/service/kube/apply.go | 64 +++++- .../aslan/core/common/service/product.go | 20 +- .../aslan/core/common/service/registry.go | 5 +- .../jobcontroller/job_blue_green_release.go | 10 +- .../job_blue_green_release_v2.go | 2 +- .../jobcontroller/job_freestyle.go | 2 +- .../jobcontroller/job_plugin.go | 2 +- .../jobcontroller/kubernetes.go | 4 +- .../environment/service/common_env_cfg.go | 16 +- .../core/environment/service/environment.go | 4 +- .../service/environment_creator.go | 26 ++- .../aslan/core/environment/service/image.go | 2 +- .../aslan/core/environment/service/ingress.go | 8 +- .../aslan/core/environment/service/secret.go | 3 +- .../aslan/core/environment/service/service.go | 9 +- pkg/microservice/aslan/core/service.go | 6 +- .../aslan/core/system/service/registry.go | 9 +- pkg/tool/helmclient/helmclient.go | 7 +- pkg/tool/kube/updater/base.go | 6 + pkg/tool/kube/updater/cloneset.go | 24 -- pkg/tool/kube/updater/cloneset_v2.go | 13 +- pkg/tool/kube/updater/clusterrole.go | 52 ----- pkg/tool/kube/updater/clusterrole_v2.go | 196 ++++++++++++++++ pkg/tool/kube/updater/ingress_v2.go | 209 ++++++++++++++++++ pkg/tool/kube/updater/pvc_v2.go | 101 +++++++++ pkg/tool/kube/updater/secret.go | 64 ------ pkg/tool/kube/updater/secret_v2.go | 162 +++++++++++--- 
pkg/tool/kube/updater/service.go | 65 ------ pkg/tool/kube/updater/service_v2.go | 147 +++++++++--- pkg/tool/kube/updater/serviceaccount.go | 48 ---- pkg/tool/kube/updater/serviceaccount_v2.go | 30 ++- pkg/tool/kube/updater/statefulset.go | 90 -------- pkg/tool/kube/updater/unstructured.go | 5 +- 34 files changed, 913 insertions(+), 510 deletions(-) delete mode 100644 pkg/tool/kube/updater/cloneset.go delete mode 100644 pkg/tool/kube/updater/clusterrole.go delete mode 100644 pkg/tool/kube/updater/secret.go delete mode 100644 pkg/tool/kube/updater/service.go delete mode 100644 pkg/tool/kube/updater/serviceaccount.go delete mode 100644 pkg/tool/kube/updater/statefulset.go diff --git a/pkg/microservice/aslan/core/common/service/kube/actions.go b/pkg/microservice/aslan/core/common/service/kube/actions.go index dcbe347a61..48baf979af 100644 --- a/pkg/microservice/aslan/core/common/service/kube/actions.go +++ b/pkg/microservice/aslan/core/common/service/kube/actions.go @@ -100,7 +100,7 @@ func EnsureNamespaceLabels(namespace, clusterID string, customLabels map[string] }) } -func CreateOrUpdateRSASecret(publicKey, privateKey []byte, kubeClient client.Client) error { +func CreateOrUpdateRSASecret(publicKey, privateKey []byte, clusterID string) error { data := make(map[string][]byte) data["publicKey"] = publicKey @@ -114,14 +114,14 @@ func CreateOrUpdateRSASecret(publicKey, privateKey []byte, kubeClient client.Cli Data: data, Type: corev1.SecretTypeOpaque, } - return updater.UpdateOrCreateSecret(secret, kubeClient) + return updater.CreateOrUpdateSecretV2(context.TODO(), clusterID, secret) } -func CreateOrUpdateDefaultRegistrySecret(namespace string, reg *commonmodels.RegistryNamespace, kubeClient client.Client) error { - return CreateOrUpdateRegistrySecret(namespace, reg, true, kubeClient) +func CreateOrUpdateDefaultRegistrySecret(namespace, clusterID string, reg *commonmodels.RegistryNamespace) error { + return CreateOrUpdateRegistrySecret(namespace, clusterID, reg, true) 
} -func CreateOrUpdateRegistrySecret(namespace string, reg *commonmodels.RegistryNamespace, isDefault bool, kubeClient client.Client) error { +func CreateOrUpdateRegistrySecret(namespace, clusterID string, reg *commonmodels.RegistryNamespace, isDefault bool) error { var secretName string var err error if !isDefault { @@ -152,7 +152,7 @@ func CreateOrUpdateRegistrySecret(namespace string, reg *commonmodels.RegistryNa Data: data, Type: corev1.SecretTypeDockercfg, } - return updater.UpdateOrCreateSecret(secret, kubeClient) + return updater.CreateOrUpdateSecretV2(context.TODO(), clusterID, secret) } func GenRegistrySecretName(reg *commonmodels.RegistryNamespace) (string, error) { diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 56fb97685b..2e2b766a8f 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -724,7 +724,18 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - err = updater.CreateOrPatchUnstructured(u, kubeClient) + targetYAML, marshalErr := yaml.Marshal(u.UnstructuredContent()) + if marshalErr != nil { + log.Errorf("Failed to marshal ingress %s to YAML: %v", u.GetName(), marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + err = updater.CreateOrPatchIngressV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to 
create or update %s/%s", u.GetKind(), u.GetName())) @@ -737,7 +748,18 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - err = updater.CreateOrPatchUnstructured(u, kubeClient) + targetYAML, marshalErr := yaml.Marshal(u.UnstructuredContent()) + if marshalErr != nil { + log.Errorf("Failed to marshal service %s to YAML: %v", u.GetName(), marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + err = updater.CreateOrPatchServiceV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1023,13 +1045,47 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge continue } } - case setting.ClusterRole, setting.ClusterRoleBinding: + case setting.ClusterRole: u.SetLabels(MergeLabels(clusterLabels, u.GetLabels())) logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) jobLogManager.SaveJobLog(logContent) - err = updater.CreateOrPatchUnstructured(u, kubeClient) + targetYAML, marshalErr := yaml.Marshal(u.UnstructuredContent()) + if marshalErr != nil { + log.Errorf("Failed to marshal clusterrole %s to YAML: %v", u.GetName(), marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } 
+ err = updater.CreateOrPatchClusterRoleV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML)) + if err != nil { + log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) + errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) + continue + } + case setting.ClusterRoleBinding: + u.SetLabels(MergeLabels(clusterLabels, u.GetLabels())) + + logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) + jobLogManager.SaveJobLog(logContent) + + targetYAML, marshalErr := yaml.Marshal(u.UnstructuredContent()) + if marshalErr != nil { + log.Errorf("Failed to marshal clusterrolebinding %s to YAML: %v", u.GetName(), marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + err = updater.CreateOrPatchClusterRoleBindingV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) diff --git a/pkg/microservice/aslan/core/common/service/product.go b/pkg/microservice/aslan/core/common/service/product.go index 23bbd24361..109401d9aa 100644 --- a/pkg/microservice/aslan/core/common/service/product.go +++ b/pkg/microservice/aslan/core/common/service/product.go @@ -50,14 +50,8 @@ func FilterWorkloadsByEnv(exist []commonmodels.Workload, productName, env string func DeleteClusterResource(selector labels.Selector, clusterID string, log *zap.SugaredLogger) error { log.Infof("Deleting cluster resources with selector: [%s]", selector) - clientset, err := 
clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", clusterID, err) - return err - } - errors := new(multierror.Error) - if err := updater.DeleteClusterRoles(selector, clientset); err != nil { + if err := updater.DeleteClusterRolesV2(context.Background(), clusterID, updater.WithSelector(selector.String())); err != nil { log.Errorf("failed to delete clusterRoles for clusterID: %s, the error is: %s", clusterID, err) errors = multierror.Append(errors, err) } @@ -74,12 +68,6 @@ func DeleteClusterResource(selector labels.Selector, clusterID string, log *zap. func DeleteNamespacedResource(namespace string, selector labels.Selector, clusterID string, log *zap.SugaredLogger) error { log.Infof("Deleting namespaced resources with selector: [%s] in namespace [%s]", selector, namespace) - clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) - if err != nil { - log.Errorf("failed to create kubernetes clientset for clusterID: %s, the error is: %s", clusterID, err) - return err - } - errors := new(multierror.Error) if err := updater.DeleteDeploymentV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { @@ -103,7 +91,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteJobs error: %v", err)) } - if err := updater.DeleteServices(namespace, selector, clientset); err != nil { + if err := updater.DeleteServicesV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteServices error: %v", err)) } @@ -114,7 +102,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteIngresses 
error: %v", err)) } - if err := updater.DeleteSecrets(namespace, selector, clientset); err != nil { + if err := updater.DeleteSecretsV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteSecrets error: %v", err)) } @@ -129,7 +117,7 @@ func DeleteNamespacedResource(namespace string, selector labels.Selector, cluste errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeletePersistentVolumeClaim error: %v", err)) } - if err := updater.DeleteServiceAccounts(namespace, selector, clientset); err != nil { + if err := updater.DeleteServiceAccountsV2(context.Background(), clusterID, namespace, updater.WithSelector(selector.String())); err != nil { log.Error(err) errors = multierror.Append(errors, fmt.Errorf("kubeCli.DeleteServiceAccounts error: %v", err)) } diff --git a/pkg/microservice/aslan/core/common/service/registry.go b/pkg/microservice/aslan/core/common/service/registry.go index 73ba42bc6d..005924c4c9 100644 --- a/pkg/microservice/aslan/core/common/service/registry.go +++ b/pkg/microservice/aslan/core/common/service/registry.go @@ -20,7 +20,6 @@ import ( "fmt" "go.uber.org/zap" - "sigs.k8s.io/controller-runtime/pkg/client" "github.com/koderover/zadig/v2/pkg/microservice/aslan/config" "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/repository/models" @@ -130,7 +129,7 @@ func ListRegistryNamespaces(encryptedKey string, getRealCredential bool, log *za return resp, nil } -func EnsureDefaultRegistrySecret(namespace string, registryId string, kubeClient client.Client, log *zap.SugaredLogger) error { +func EnsureDefaultRegistrySecret(namespace, registryId, clusterID string, log *zap.SugaredLogger) error { var reg *models.RegistryNamespace var err error if len(registryId) > 0 { @@ -153,7 +152,7 @@ func EnsureDefaultRegistrySecret(namespace string, registryId string, kubeClient } } - err = 
kube.CreateOrUpdateDefaultRegistrySecret(namespace, reg, kubeClient) + err = kube.CreateOrUpdateDefaultRegistrySecret(namespace, clusterID, reg) if err != nil { log.Errorf("[%s] CreateDockerSecret error: %s", namespace, err) return e.ErrUpdateSecret.AddDesc(e.CreateDefaultRegistryErrMsg) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go index 77ef565190..f8ff665fae 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release.go @@ -72,7 +72,7 @@ func (c *BlueGreenReleaseJobCtl) Clean(ctx context.Context) { return } // ensure delete blue service. - if err := updater.DeleteService(c.jobTaskSpec.Namespace, c.jobTaskSpec.BlueK8sServiceName, kubeClient); err != nil { + if err := updater.DeleteServicesV2(context.TODO(), c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, updater.WithName(c.jobTaskSpec.BlueK8sServiceName)); err != nil { c.logger.Errorf("delete blue service error: %v", err) } // @@ -86,8 +86,10 @@ func (c *BlueGreenReleaseJobCtl) Clean(ctx context.Context) { } // if it was the first time blue-green deployment, clean the origin labels. 
if service.Spec.Selector[config.BlueGreenVersionLabelName] == config.OriginVersion { - delete(service.Spec.Selector, config.BlueGreenVersionLabelName) - if err := updater.CreateOrPatchService(service, kubeClient); err != nil { + if err := updater.UpdateServiceV2(context.TODO(), c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, c.jobTaskSpec.K8sServiceName, func(svc *corev1.Service) error { + delete(svc.Spec.Selector, config.BlueGreenVersionLabelName) + return nil + }); err != nil { c.logger.Errorf("delete origin label for service error: %v", err) return } @@ -142,7 +144,7 @@ func (c *BlueGreenReleaseJobCtl) Run(ctx context.Context) { c.ack() blueServiceName := c.jobTaskSpec.BlueK8sServiceName - if err := updater.DeleteService(c.jobTaskSpec.Namespace, blueServiceName, c.kubeClient); err != nil { + if err := updater.DeleteServicesV2(ctx, c.jobTaskSpec.ClusterID, c.jobTaskSpec.Namespace, updater.WithName(blueServiceName)); err != nil { // delete failed, but we don't care msg := fmt.Sprintf("delete blue service: %s failed: %v", blueServiceName, err) c.jobTaskSpec.Events.Error(msg) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go index de0d6142bc..692f383084 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_blue_green_release_v2.go @@ -89,7 +89,7 @@ func (c *BlueGreenReleaseV2JobCtl) Clean(ctx context.Context) { if err != nil { c.logger.Warnf("can't delete blue deployment %s, err: %v", c.jobTaskSpec.Service.BlueDeploymentName, err) } - err = updater.DeleteService(c.namespace, c.jobTaskSpec.Service.BlueServiceName, c.kubeClient) + err = updater.DeleteServicesV2(context.TODO(), clusterID, c.namespace, 
updater.WithName(c.jobTaskSpec.Service.BlueServiceName)) if err != nil { c.logger.Warnf("can't delete blue service %s, err: %v", c.jobTaskSpec.Service.BlueServiceName, err) } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go index 297dd04cbd..73c5526221 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_freestyle.go @@ -286,7 +286,7 @@ func (c *FreestyleJobCtl) run(ctx context.Context) error { return errors.New(msg) } - if err := createOrUpdateRegistrySecrets(c.jobTaskSpec.Properties.Namespace, c.jobTaskSpec.Properties.Registries, c.kubeclient); err != nil { + if err := createOrUpdateRegistrySecrets(c.jobTaskSpec.Properties.Namespace, c.jobTaskSpec.Properties.ClusterID, c.jobTaskSpec.Properties.Registries); err != nil { msg := fmt.Sprintf("create secret error: %v", err) logError(c.job, msg, c.logger) return errors.New(msg) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go index 582b2a9e3c..e2e3dad2aa 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_plugin.go @@ -132,7 +132,7 @@ func (c *PluginJobCtl) run(ctx context.Context) error { return err } - if err := createOrUpdateRegistrySecrets(c.jobTaskSpec.Properties.Namespace, c.jobTaskSpec.Properties.Registries, c.kubeclient); err != nil { + if err := createOrUpdateRegistrySecrets(c.jobTaskSpec.Properties.Namespace, c.jobTaskSpec.Properties.ClusterID, c.jobTaskSpec.Properties.Registries); err != nil { msg := fmt.Sprintf("create secret error: %v", err) 
logError(c.job, msg, c.logger) return errors.New(msg) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go index fec243dad8..340cd8509d 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/kubernetes.go @@ -1226,7 +1226,7 @@ func waitDeploymentReady(ctx context.Context, deploymentName, namespace string, } } -func createOrUpdateRegistrySecrets(namespace string, registries []*commonmodels.RegistryNamespace, kubeClient crClient.Client) error { +func createOrUpdateRegistrySecrets(namespace, clusterID string, registries []*commonmodels.RegistryNamespace) error { for _, reg := range registries { if reg.AccessKey == "" { continue @@ -1255,7 +1255,7 @@ func createOrUpdateRegistrySecrets(namespace string, registries []*commonmodels. 
Data: data, Type: corev1.SecretTypeDockercfg, } - if err := updater.UpdateOrCreateSecret(secret, kubeClient); err != nil { + if err := updater.CreateOrUpdateSecretV2(context.TODO(), clusterID, secret); err != nil { return err } } diff --git a/pkg/microservice/aslan/core/environment/service/common_env_cfg.go b/pkg/microservice/aslan/core/environment/service/common_env_cfg.go index 5b2f6afbaf..4ea1499c66 100644 --- a/pkg/microservice/aslan/core/environment/service/common_env_cfg.go +++ b/pkg/microservice/aslan/core/environment/service/common_env_cfg.go @@ -95,16 +95,11 @@ func DeleteCommonEnvCfg(envName, productName, objectName string, commonEnvCfgTyp return e.ErrDeleteResource.AddErr(err) } - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(product.ClusterID) - if err != nil { - return e.ErrDeleteResource.AddErr(err) - } - switch commonEnvCfgType { case config.CommonEnvCfgTypeConfigMap: err = updater.DeleteConfigMapV2(context.TODO(), product.ClusterID, product.Namespace, objectName) case config.CommonEnvCfgTypeSecret: - err = updater.DeleteSecretWithName(product.Namespace, objectName, kubeClient) + err = updater.DeleteSecretWithNameV2(context.TODO(), product.ClusterID, product.Namespace, objectName) case config.CommonEnvCfgTypeIngress: err = updater.DeleteIngressesV2(context.TODO(), product.ClusterID, product.Namespace, updater.WithName(objectName)) case config.CommonEnvCfgTypePvc: @@ -239,11 +234,6 @@ func CreateCommonEnvCfg(args *models.CreateUpdateCommonEnvCfgArgs, userName stri return e.ErrUpdateResource.AddErr(err) } - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(product.ClusterID) - if err != nil { - return e.ErrUpdateResource.AddErr(err) - } - u, err := serializer.NewDecoder().YamlToUnstructured(js) if err != nil { return e.ErrUpdateResource.AddErr(err) @@ -296,7 +286,7 @@ func CreateCommonEnvCfg(args *models.CreateUpdateCommonEnvCfgArgs, userName stri return 
e.ErrUpdateResource.AddErr(err) } - if err := updater.UpdateOrCreateSecret(secret, kubeClient); err != nil { + if err := updater.CreateOrUpdateSecretV2(context.TODO(), product.ClusterID, secret); err != nil { log.Error(err) return e.ErrUpdateResource.AddDesc(err.Error()) } @@ -315,7 +305,7 @@ func CreateCommonEnvCfg(args *models.CreateUpdateCommonEnvCfgArgs, userName stri return e.ErrUpdateResource.AddErr(err) } - err = updater.UpdateOrCreateUnstructured(u, kubeClient) + err = updater.CreateOrPatchIngressV2(context.TODO(), product.ClusterID, product.Namespace, "", yamlData) if err != nil { log.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) return e.ErrUpdateResource.AddErr(fmt.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err)) diff --git a/pkg/microservice/aslan/core/environment/service/environment.go b/pkg/microservice/aslan/core/environment/service/environment.go index 6a29bf1a76..f6e4ef294d 100644 --- a/pkg/microservice/aslan/core/environment/service/environment.go +++ b/pkg/microservice/aslan/core/environment/service/environment.go @@ -2802,7 +2802,7 @@ func createGroups(user, requestID string, args *commonmodels.Product, eventStart } }() - err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, args.EnvConfigs, false, kubeClient) + err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, args.ClusterID, args.EnvConfigs, false, kubeClient) if err != nil { args.Status = setting.ProductStatusFailed log.Errorf("initEnvConfigSet error :%s", err) @@ -3060,7 +3060,7 @@ func ensureKubeEnv(namespace, registryId, clusterID string, customLabels map[str } // 创建默认的镜像仓库secret - if err := commonservice.EnsureDefaultRegistrySecret(namespace, registryId, kubeClient, log); err != nil { + if err := commonservice.EnsureDefaultRegistrySecret(namespace, registryId, clusterID, log); err != nil { log.Errorf("[%s] get or create namespace 
error: %v", namespace, err) return e.ErrCreateSecret.AddDesc(e.CreateDefaultRegistryErrMsg) } diff --git a/pkg/microservice/aslan/core/environment/service/environment_creator.go b/pkg/microservice/aslan/core/environment/service/environment_creator.go index 43ee009359..de691d77ae 100644 --- a/pkg/microservice/aslan/core/environment/service/environment_creator.go +++ b/pkg/microservice/aslan/core/environment/service/environment_creator.go @@ -17,6 +17,7 @@ limitations under the License. package service import ( + "context" "errors" "fmt" "strings" @@ -174,7 +175,7 @@ func (creator *HelmProductCreator) Create(user, requestID string, args *ProductC // before create product, do install -dryRun to expose errors earlier dryRunClient := client.NewDryRunClient(kubeClient) - err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, args.EnvConfigs, true, dryRunClient) + err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, clusterID, args.EnvConfigs, true, dryRunClient) if err != nil { log.Errorf("failed to dyrRun env resource [%s][P:%s], the error is: %s", args.EnvName, args.ProductName, err) return e.ErrCreateEnv.AddErr(err) @@ -193,7 +194,7 @@ func (creator *HelmProductCreator) Create(user, requestID string, args *ProductC return e.ErrCreateEnv.AddDesc(err.Error()) } - err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, args.EnvConfigs, false, kubeClient) + err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, clusterID, args.EnvConfigs, false, kubeClient) if err != nil { log.Errorf("failed to helmInitEnvConfigSet [%s][P:%s], the error is: %s", args.EnvName, args.ProductName, err) if err := commonrepo.NewProductColl().UpdateStatusAndError(args.EnvName, args.ProductName, setting.ProductStatusFailed, err.Error()); err != nil { @@ -306,7 +307,7 @@ func (creator *K8sYamlProductCreator) Create(user, requestID string, args *Produ // before we apply yaml to k8s, we 
run kubectl apply --dry-run to expose problems early dryRunClient := client.NewDryRunClient(kubeClient) - err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, args.EnvConfigs, true, dryRunClient) + err = initEnvConfigSetAction(args.EnvName, args.Namespace, args.ProductName, user, clusterID, args.EnvConfigs, true, dryRunClient) if err != nil { return e.ErrCreateEnv.AddErr(err) } @@ -336,7 +337,7 @@ func (creator *K8sYamlProductCreator) Create(user, requestID string, args *Produ return nil } -func initEnvConfigSetAction(envName, namespace, productName, userName string, envResources []*models.CreateUpdateCommonEnvCfgArgs, dryRun bool, kubeClient client.Client) error { +func initEnvConfigSetAction(envName, namespace, productName, userName, clusterID string, envResources []*models.CreateUpdateCommonEnvCfgArgs, dryRun bool, kubeClient client.Client) error { errList := &multierror.Error{ ErrorFormat: func(es []error) string { format := "创建环境配置" @@ -372,7 +373,22 @@ func initEnvConfigSetAction(envName, namespace, productName, userName string, en continue } - err = updater.CreateOrPatchUnstructuredNeverAnnotation(u, kubeClient) + targetYAML, marshalErr := yaml.Marshal(u.UnstructuredContent()) + if marshalErr != nil { + log.Errorf("Failed to marshal %s %s to YAML: %v", u.GetKind(), u.GetName(), marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + switch u.GetKind() { + case setting.ConfigMap: + err = updater.CreateOrPatchConfigMapV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + case setting.Ingress: + err = updater.CreateOrPatchIngressV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + case setting.Secret: + err = updater.CreateOrPatchSecretV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + case setting.PersistentVolumeClaim: + err = updater.CreateOrPatchPVCV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + } if err != nil { log.Errorf("Failed to 
initEnvConfigSet %s, manifest is\n%v\n, error: %s", u.GetKind(), u, err) errList = multierror.Append(errList, err) diff --git a/pkg/microservice/aslan/core/environment/service/image.go b/pkg/microservice/aslan/core/environment/service/image.go index c3dac2839c..9052ceb4ad 100644 --- a/pkg/microservice/aslan/core/environment/service/image.go +++ b/pkg/microservice/aslan/core/environment/service/image.go @@ -115,7 +115,7 @@ func UpdateContainerImage(requestID, username string, args *UpdateContainerImage } for _, reg := range regs { if reg.RegProvider == config.RegistryTypeAWS { - if err := kube.CreateOrUpdateRegistrySecret(namespace, reg, false, kubeClient); err != nil { + if err := kube.CreateOrUpdateRegistrySecret(namespace, product.ClusterID, reg, false); err != nil { retErr := fmt.Errorf("failed to update pull secret for registry: %s, the error is: %s", reg.ID.Hex(), err) log.Errorf("%s\n", retErr.Error()) return retErr diff --git a/pkg/microservice/aslan/core/environment/service/ingress.go b/pkg/microservice/aslan/core/environment/service/ingress.go index 58212d44d4..e9bab8c85d 100644 --- a/pkg/microservice/aslan/core/environment/service/ingress.go +++ b/pkg/microservice/aslan/core/environment/service/ingress.go @@ -17,6 +17,7 @@ limitations under the License. 
package service import ( + "context" "encoding/json" "fmt" "sort" @@ -219,17 +220,12 @@ func UpdateOrCreateIngress(args *models.CreateUpdateCommonEnvCfgArgs, userName s return e.ErrUpdateResource.AddErr(err) } - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(product.ClusterID) - if err != nil { - return e.ErrUpdateResource.AddErr(err) - } - yamlData, err := ensureLabelAndNs(u, product.Namespace, args.ProductName) if err != nil { return e.ErrUpdateResource.AddErr(err) } - err = updater.UpdateOrCreateUnstructured(u, kubeClient) + err = updater.CreateOrPatchIngressV2(context.TODO(), product.ClusterID, product.Namespace, "", yamlData) if err != nil { log.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) return e.ErrUpdateResource.AddErr(fmt.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err)) diff --git a/pkg/microservice/aslan/core/environment/service/secret.go b/pkg/microservice/aslan/core/environment/service/secret.go index b40d9915bb..b69a65421d 100644 --- a/pkg/microservice/aslan/core/environment/service/secret.go +++ b/pkg/microservice/aslan/core/environment/service/secret.go @@ -17,6 +17,7 @@ limitations under the License. 
package service import ( + "context" "encoding/json" "sort" "sync" @@ -158,7 +159,7 @@ func UpdateSecret(args *models.CreateUpdateCommonEnvCfgArgs, userName string, lo return e.ErrUpdateResource.AddErr(err) } - err = updater.UpdateOrCreateSecret(secret, kubeClient) + err = updater.CreateOrUpdateSecretV2(context.TODO(), product.ClusterID, secret) if err != nil { log.Error(err) return e.ErrUpdateResource.AddDesc(err.Error()) diff --git a/pkg/microservice/aslan/core/environment/service/service.go b/pkg/microservice/aslan/core/environment/service/service.go index e32f73d4ec..a5186c3dfd 100644 --- a/pkg/microservice/aslan/core/environment/service/service.go +++ b/pkg/microservice/aslan/core/environment/service/service.go @@ -63,11 +63,6 @@ func RestartScale(args *RestartScaleArgs, production bool, _ *zap.SugaredLogger) return e.ErrScaleService.AddErr(fmt.Errorf("environment is sleeping")) } - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(prod.ClusterID) - if err != nil { - return err - } - // aws secrets needs to be refreshed regs, err := commonservice.ListRegistryNamespaces("", true, log.SugaredLogger()) if err != nil { @@ -76,7 +71,7 @@ func RestartScale(args *RestartScaleArgs, production bool, _ *zap.SugaredLogger) } for _, reg := range regs { if reg.RegProvider == config.RegistryTypeAWS { - if err := kube.CreateOrUpdateRegistrySecret(prod.Namespace, reg, false, kubeClient); err != nil { + if err := kube.CreateOrUpdateRegistrySecret(prod.Namespace, prod.ClusterID, reg, false); err != nil { retErr := fmt.Errorf("failed to update pull secret for registry: %s, the error is: %s", reg.ID.Hex(), err) log.Errorf("%s\n", retErr.Error()) return retErr @@ -414,7 +409,7 @@ func RestartService(envName string, args *SvcOptArgs, production bool, log *zap. 
} for _, reg := range regs { if reg.RegProvider == config.RegistryTypeAWS { - if err := kube.CreateOrUpdateRegistrySecret(productObj.Namespace, reg, false, kubeClient); err != nil { + if err := kube.CreateOrUpdateRegistrySecret(productObj.Namespace, productObj.ClusterID, reg, false); err != nil { retErr := fmt.Errorf("failed to update pull secret for registry: %s, the error is: %s", reg.ID.Hex(), err) log.Errorf("%s\n", retErr.Error()) return retErr diff --git a/pkg/microservice/aslan/core/service.go b/pkg/microservice/aslan/core/service.go index f78d6ca45c..6005ca36e9 100644 --- a/pkg/microservice/aslan/core/service.go +++ b/pkg/microservice/aslan/core/service.go @@ -86,10 +86,6 @@ func StartControllers(stopCh <-chan struct{}) { } func initRsaKey() { - client, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(setting.LocalClusterID) - if err != nil { - log.DPanic(err) - } clientset, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(setting.LocalClusterID) if err != nil { log.DPanic(err) @@ -102,7 +98,7 @@ func initRsaKey() { if err != nil { log.DPanic(err) } - err = kube.CreateOrUpdateRSASecret(publicKey, privateKey, client) + err = kube.CreateOrUpdateRSASecret(publicKey, privateKey, setting.LocalClusterID) if err != nil { log.DPanic(err) } diff --git a/pkg/microservice/aslan/core/system/service/registry.go b/pkg/microservice/aslan/core/system/service/registry.go index 519a9ff759..ef1080c08d 100644 --- a/pkg/microservice/aslan/core/system/service/registry.go +++ b/pkg/microservice/aslan/core/system/service/registry.go @@ -36,7 +36,6 @@ import ( "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/service/registry" commonutil "github.com/koderover/zadig/v2/pkg/microservice/aslan/core/common/util" "github.com/koderover/zadig/v2/pkg/setting" - "github.com/koderover/zadig/v2/pkg/tool/clientmanager" e "github.com/koderover/zadig/v2/pkg/tool/errors" "github.com/koderover/zadig/v2/pkg/util" ) @@ -193,13 +192,7 @@ func 
UpdateRegistryNamespace(username, id string, args *commonmodels.RegistryNam for _, env := range envs { if env.RegistryID == id { - kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(env.ClusterID) - if err != nil { - log.Errorf("[UpdateRegistryNamespace] GetKubeClient %s error: %v", env.ClusterID, err) - continue - } - - err = kube.CreateOrUpdateDefaultRegistrySecret(env.Namespace, args, kubeClient) + err = kube.CreateOrUpdateDefaultRegistrySecret(env.Namespace, env.ClusterID, args) if err != nil { log.Errorf("[UpdateRegistryNamespaces] CreateOrUpdateDefaultRegistrySecret, namespace: %s, regID: %s error: %s", env.Namespace, id, err) } diff --git a/pkg/tool/helmclient/helmclient.go b/pkg/tool/helmclient/helmclient.go index c6d44d0047..c5144e2d8b 100644 --- a/pkg/tool/helmclient/helmclient.go +++ b/pkg/tool/helmclient/helmclient.go @@ -88,6 +88,7 @@ func init() { type HelmClient struct { *hc.HelmClient kubeClient client.Client + ClusterID string Namespace string KubeVersion *helmchartutil.KubeVersion lock *sync.Mutex @@ -165,6 +166,7 @@ func NewClientFromNamespace(clusterID, namespace string) (*HelmClient, error) { return &HelmClient{ HelmClient: helmClient, kubeClient: kubeClient, + ClusterID: clusterID, Namespace: namespace, KubeVersion: kubeVersion, lock: &sync.Mutex{}, @@ -473,11 +475,8 @@ func (hClient *HelmClient) ensureUpgrade(maxHistoryCount int, releaseName string if maxHistoryCount <= 0 || len(releases) < maxHistoryCount { return nil } - if hClient.kubeClient == nil { - return errors.New("kubeClient is nil") - } secretName := fmt.Sprintf("%s.%s.v%d", storage.HelmStorageType, releaseName, releases[len(releases)-1].Version) - return updater.DeleteSecretWithName(hClient.Namespace, secretName, hClient.kubeClient) + return updater.DeleteSecretWithNameV2(context.TODO(), hClient.ClusterID, hClient.Namespace, secretName) } // getChart returns a chart matching the provided chart name and options. 
diff --git a/pkg/tool/kube/updater/base.go b/pkg/tool/kube/updater/base.go index 4c74e7f93d..2c9e305a48 100644 --- a/pkg/tool/kube/updater/base.go +++ b/pkg/tool/kube/updater/base.go @@ -27,6 +27,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/koderover/zadig/v2/pkg/tool/kube/getter" @@ -34,6 +36,10 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/kube/util" ) +func init() { + controllerruntime.SetLogger(klog.Background()) +} + func patchObject(obj client.Object, patchBytes []byte, cl client.Client) error { return cl.Patch(context.TODO(), obj, client.RawPatch(types.StrategicMergePatchType, patchBytes)) } diff --git a/pkg/tool/kube/updater/cloneset.go b/pkg/tool/kube/updater/cloneset.go deleted file mode 100644 index 924d2ca4dc..0000000000 --- a/pkg/tool/kube/updater/cloneset.go +++ /dev/null @@ -1,24 +0,0 @@ -package updater - -import ( - "context" - "fmt" - "github.com/openkruise/kruise-api/apps/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func ScaleCloneSet(ns, name string, replicas int, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas": %d}}`, replicas)) - return PatchCloneSet(ns, name, patchBytes, cl) -} - -func PatchCloneSet(ns, name string, patchBytes []byte, cl client.Client) error { - return cl.Patch(context.TODO(), &v1alpha1.CloneSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, client.RawPatch(types.MergePatchType, patchBytes)) -} diff --git a/pkg/tool/kube/updater/cloneset_v2.go b/pkg/tool/kube/updater/cloneset_v2.go index 9c87180611..195c7dde1e 100644 --- a/pkg/tool/kube/updater/cloneset_v2.go +++ b/pkg/tool/kube/updater/cloneset_v2.go @@ -27,13 +27,15 @@ import ( 
"github.com/koderover/zadig/v2/pkg/tool/clientmanager" ) -func ScaleCloneSetV2(ctx context.Context, clusterID, namespace, name string, replicas int) error { +// PatchCloneSetV2 applies a raw merge patch to a CloneSet. +// CloneSet is a CRD from OpenKruise so we use controller-runtime client +// (no typed kubernetes clientset API available for CRDs). +func PatchCloneSetV2(ctx context.Context, clusterID, namespace, name string, patchBytes []byte) error { c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } - patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas)) err = c.Patch(ctx, &v1alpha1.CloneSet{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -41,7 +43,12 @@ func ScaleCloneSetV2(ctx context.Context, clusterID, namespace, name string, rep }, }, client.RawPatch(types.MergePatchType, patchBytes)) if err != nil { - return fmt.Errorf("failed to scale CloneSet %s/%s: %w", namespace, name, err) + return fmt.Errorf("failed to patch CloneSet %s/%s: %w", namespace, name, err) } return nil } + +func ScaleCloneSetV2(ctx context.Context, clusterID, namespace, name string, replicas int) error { + patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas)) + return PatchCloneSetV2(ctx, clusterID, namespace, name, patchBytes) +} diff --git a/pkg/tool/kube/updater/clusterrole.go b/pkg/tool/kube/updater/clusterrole.go deleted file mode 100644 index 5028c1defb..0000000000 --- a/pkg/tool/kube/updater/clusterrole.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package updater - -import ( - "context" - - kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteClusterRoles(selector labels.Selector, clientset *kubernetes.Clientset) error { - version, err := clientset.Discovery().ServerVersion() - if err != nil { - return err - } - - deletePolicy := metav1.DeletePropagationForeground - if kubeclient.VersionLessThan122(version) { - err = clientset.RbacV1beta1().ClusterRoles().DeleteCollection(context.TODO(), metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, metav1.ListOptions{ - LabelSelector: selector.String(), - }) - } else { - err = clientset.RbacV1().ClusterRoles().DeleteCollection(context.TODO(), metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, metav1.ListOptions{ - LabelSelector: selector.String(), - }) - } - - return util.IgnoreNotFoundError(err) -} diff --git a/pkg/tool/kube/updater/clusterrole_v2.go b/pkg/tool/kube/updater/clusterrole_v2.go index 59a68fcefd..a170e33228 100644 --- a/pkg/tool/kube/updater/clusterrole_v2.go +++ b/pkg/tool/kube/updater/clusterrole_v2.go @@ -17,12 +17,18 @@ package updater import ( "context" + "encoding/json" "fmt" rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + 
"k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -57,3 +63,193 @@ func DeleteClusterRolesV2(ctx context.Context, clusterID string, opts ...DeleteO err = c.DeleteAllOf(ctx, &rbacv1.ClusterRole{}, deleteOpts) return util.IgnoreNotFoundError(err) } + +// CreateOrPatchClusterRoleV2 is cluster-scoped (no namespace). +func CreateOrPatchClusterRoleV2(ctx context.Context, clusterID, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj rbacv1.ClusterRole + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to ClusterRole: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("clusterrole name cannot be empty in target YAML") + } + + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj rbacv1.ClusterRole + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + 
_, createErr := c.RbacV1().ClusterRoles().Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&rbacv1.ClusterRole{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.RbacV1().ClusterRoles().Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("clusterrole operation failed after retries: %w", err) + } + + return nil +} + +// CreateOrPatchClusterRoleBindingV2 is cluster-scoped (no namespace). 
+func CreateOrPatchClusterRoleBindingV2(ctx context.Context, clusterID, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj rbacv1.ClusterRoleBinding + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to ClusterRoleBinding: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("clusterrolebinding name cannot be empty in target YAML") + } + + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj rbacv1.ClusterRoleBinding + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.RbacV1().ClusterRoleBindings().Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&rbacv1.ClusterRoleBinding{}) + 
if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.RbacV1().ClusterRoleBindings().Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("clusterrolebinding operation failed after retries: %w", err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/ingress_v2.go b/pkg/tool/kube/updater/ingress_v2.go index 0ba7e6206d..f92b091309 100644 --- a/pkg/tool/kube/updater/ingress_v2.go +++ b/pkg/tool/kube/updater/ingress_v2.go @@ -17,11 +17,21 @@ package updater import ( "context" + "encoding/json" "fmt" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/yaml" + kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" ) @@ -68,3 +78,202 @@ func DeleteIngressesV2(ctx context.Context, clusterID, namespace string, opts .. 
return nil } + +func CreateOrPatchIngressV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + version, err := c.Discovery().ServerVersion() + if err != nil { + return fmt.Errorf("failed to get server version: %w", err) + } + + if kubeclient.VersionLessThan122(version) { + return createOrPatchIngressBeta(ctx, c, namespace, originalYAML, targetYAML) + } + return createOrPatchIngressV1(ctx, c, namespace, originalYAML, targetYAML) +} + +func createOrPatchIngressV1(ctx context.Context, c kubernetes.Interface, namespace, originalYAML, targetYAML string) error { + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj networkingv1.Ingress + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to Ingress: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("ingress name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj networkingv1.Ingress + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := 
c.NetworkingV1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.NetworkingV1().Ingresses(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&networkingv1.Ingress{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.NetworkingV1().Ingresses(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("ingress operation failed after retries: %w", err) + } + + return nil +} + +func createOrPatchIngressBeta(ctx context.Context, c kubernetes.Interface, namespace, originalYAML, targetYAML string) error { + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj extensionsv1beta1.Ingress + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to Ingress (v1beta1): %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("ingress name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + 
originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj extensionsv1beta1.Ingress + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.ExtensionsV1beta1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.ExtensionsV1beta1().Ingresses(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&extensionsv1beta1.Ingress{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.ExtensionsV1beta1().Ingresses(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("ingress (v1beta1) operation failed after retries: %w", err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/pvc_v2.go b/pkg/tool/kube/updater/pvc_v2.go index 86193342c0..5cb96f8704 100644 --- a/pkg/tool/kube/updater/pvc_v2.go +++ 
b/pkg/tool/kube/updater/pvc_v2.go @@ -17,14 +17,19 @@ package updater import ( "context" + "encoding/json" "fmt" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -97,6 +102,102 @@ func CreatePVCV2(ctx context.Context, clusterID, namespace string, pvc *corev1.P return nil } +func CreateOrPatchPVCV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj corev1.PersistentVolumeClaim + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to PVC: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("PVC name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj corev1.PersistentVolumeClaim + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + 
originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&corev1.PersistentVolumeClaim{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.CoreV1().PersistentVolumeClaims(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("PVC operation failed after retries: %w", err) + } + + return nil +} + func UpdatePvcV2(ctx context.Context, clusterID, namespace, pvcName string, mutationFunc func(pvc *corev1.PersistentVolumeClaim) error) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { diff --git a/pkg/tool/kube/updater/secret.go b/pkg/tool/kube/updater/secret.go deleted file mode 100644 index b28a41fd42..0000000000 --- a/pkg/tool/kube/updater/secret.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package updater - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - "k8s.io/klog/v2" - controllerruntime "sigs.k8s.io/controller-runtime" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func init() { - controllerruntime.SetLogger(klog.Background()) -} - -func DeleteSecrets(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.CoreV1().Secrets(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func UpdateOrCreateSecret(s *corev1.Secret, cl client.Client) error { - return updateOrCreateObject(s, cl) -} - -func DeleteSecretWithName(ns, name string, cl client.Client) error { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - } - return deleteObject(secret, cl) -} diff --git a/pkg/tool/kube/updater/secret_v2.go b/pkg/tool/kube/updater/secret_v2.go index 7693eca9f0..259a1b9efc 100644 --- a/pkg/tool/kube/updater/secret_v2.go +++ b/pkg/tool/kube/updater/secret_v2.go @@ -17,13 +17,17 @@ package updater import ( "context" + "encoding/json" "fmt" 
corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "sigs.k8s.io/controller-runtime/pkg/client" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -38,67 +42,159 @@ func DeleteSecretsV2(ctx context.Context, clusterID, namespace string, opts ...D if config.name == "" && config.selector == "" { return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") } - if config.name != "" && config.selector != "" { return fmt.Errorf("cannot specify both name and selector simultaneously") } - cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + if config.name != "" { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: config.name, - }, - } - err = cl.Delete(ctx, svc) + err = c.CoreV1().Secrets(namespace).Delete(ctx, config.name, deleteOpts) return util.IgnoreNotFoundError(err) } - if config.selector != "" { - selector, err := labels.Parse(config.selector) - if err != nil { - return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) - } - - deploy := &corev1.Secret{} - - propagationPolicy := metav1.DeletePropagationBackground - deleteOpts := &client.DeleteAllOfOptions{ - DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, - ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, - } - - err = 
cl.DeleteAllOf(ctx, deploy, deleteOpts) - return util.IgnoreNotFoundError(err) + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) } - return fmt.Errorf("must specify either a name or a selector for deletion of the service to prevent accidental namespace wipeout") + err = c.CoreV1().Secrets(namespace).DeleteCollection(ctx, deleteOpts, metav1.ListOptions{ + LabelSelector: selector.String(), + }) + return util.IgnoreNotFoundError(err) } -func UpdateOrCreateSecretV2(ctx context.Context, clusterID string, s *corev1.Secret) error { - cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) +func DeleteSecretWithNameV2(ctx context.Context, clusterID, namespace, name string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } - if err := util.CreateApplyAnnotation(s); err != nil { - return fmt.Errorf("failed to create apply annotation: %w", err) + propagationPolicy := metav1.DeletePropagationForeground + err = c.CoreV1().Secrets(namespace).Delete(ctx, name, metav1.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + }) + return util.IgnoreNotFoundError(err) +} + +// CreateOrUpdateSecretV2 tries to update the secret first; if it doesn't exist, creates it. 
+func CreateOrUpdateSecretV2(ctx context.Context, clusterID string, s *corev1.Secret) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) } - err = cl.Update(ctx, s) + _, err = c.CoreV1().Secrets(s.Namespace).Update(ctx, s, metav1.UpdateOptions{}) if err == nil { return nil } if apierrors.IsNotFound(err) { - if createErr := cl.Create(ctx, s); createErr != nil { + _, createErr := c.CoreV1().Secrets(s.Namespace).Create(ctx, s, metav1.CreateOptions{}) + if createErr != nil { return fmt.Errorf("failed to create secret %s/%s: %w", s.Namespace, s.Name, createErr) } return nil } return fmt.Errorf("failed to update secret %s/%s: %w", s.Namespace, s.Name, err) } + +// CreateOrPatchSecretV2 implements a 3-way merge patch for Secret. +func CreateOrPatchSecretV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj corev1.Secret + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to Secret: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("secret name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj 
corev1.Secret + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().Secrets(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&corev1.Secret{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.CoreV1().Secrets(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("secret operation failed after retries: %w", err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/service.go b/pkg/tool/kube/updater/service.go deleted file mode 100644 index 186ca268b3..0000000000 --- a/pkg/tool/kube/updater/service.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package updater - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func DeleteServices(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - services, err := clientset.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) - if err != nil { - return err - } - - var lastErr error - deletePolicy := metav1.DeletePropagationForeground - for _, svc := range services.Items { - err := clientset.CoreV1().Services(namespace).Delete( - context.TODO(), - svc.Name, - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - ) - if err != nil { - } - lastErr = err - } - - return lastErr -} - -func CreateOrPatchService(s *corev1.Service, cl client.Client) error { - return createOrPatchObject(s, cl) -} - -func DeleteService(ns, name string, cl client.Client) error { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - } - return deleteObject(service, cl) -} diff --git a/pkg/tool/kube/updater/service_v2.go b/pkg/tool/kube/updater/service_v2.go index f22820c388..a2f5c3cef4 100644 --- a/pkg/tool/kube/updater/service_v2.go +++ b/pkg/tool/kube/updater/service_v2.go @@ -17,13 +17,17 @@ package updater import ( "context" + "encoding/json" "fmt" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/util/retry" - "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -42,41 +46,54 @@ func DeleteServicesV2(ctx context.Context, clusterID, namespace string, opts ... return fmt.Errorf("cannot specify both name and selector simultaneously") } - cl, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + if config.name != "" { - svc := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: config.name, - }, - } - err = cl.Delete(ctx, svc) + err = c.CoreV1().Services(namespace).Delete(ctx, config.name, deleteOpts) return util.IgnoreNotFoundError(err) } - if config.selector != "" { - selector, err := labels.Parse(config.selector) - if err != nil { - return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) - } + // Kubernetes Services don't support DeleteCollection, so list + delete individually + selector, err := labels.Parse(config.selector) + if err != nil { + return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) + } - deploy := &corev1.Service{} + services, err := c.CoreV1().Services(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return fmt.Errorf("failed to list services matching %q in %s: %w", config.selector, namespace, err) + } - propagationPolicy := metav1.DeletePropagationBackground - deleteOpts := &client.DeleteAllOfOptions{ - DeleteOptions: 
client.DeleteOptions{PropagationPolicy: &propagationPolicy}, - ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, + var lastErr error + for _, svc := range services.Items { + if err := c.CoreV1().Services(namespace).Delete(ctx, svc.Name, deleteOpts); err != nil { + lastErr = err } + } + return lastErr +} - err = cl.DeleteAllOf(ctx, deploy, deleteOpts) - return util.IgnoreNotFoundError(err) +func CreateServiceV2(ctx context.Context, clusterID, namespace string, svc *corev1.Service) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + svc.SetNamespace(namespace) + _, err = c.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create service %s/%s: %w", namespace, svc.Name, err) } - return fmt.Errorf("must specify either a name or a selector for deletion of the service to prevent accidental namespace wipeout") + return nil } // UpdateServiceV2 takes the cluster and resource info to identify a service, and uses the mutation function to update it with retry on conflict. @@ -103,16 +120,94 @@ func UpdateServiceV2(ctx context.Context, clusterID, namespace, serviceName stri return err } -func CreateServiceV2(ctx context.Context, clusterID, namespace string, svc *corev1.Service) error { +// CreateOrPatchServiceV2 implements a 3-way merge patch for Service. 
+func CreateOrPatchServiceV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } - svc.SetNamespace(namespace) - _, err = c.CoreV1().Services(namespace).Create(ctx, svc, metav1.CreateOptions{}) + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { - return fmt.Errorf("failed to create service %s/%s: %w", namespace, svc.Name, err) + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj corev1.Service + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to Service: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("service name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj corev1.Service + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + liveObj, err := c.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) + + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().Services(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + return createErr + } + if err != nil { + return fmt.Errorf("failed to get live state: %w", err) + } + + liveJSON, 
err := json.Marshal(liveObj) + if err != nil { + return fmt.Errorf("failed to marshal live object: %w", err) + } + + lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&corev1.Service{}) + if err != nil { + return fmt.Errorf("failed to create lookup patch meta: %w", err) + } + + patchBytes, err := strategicpatch.CreateThreeWayMergePatch( + originalJSONMutated, + targetJSONMutated, + liveJSON, + lookupPatchMeta, + true, + ) + if err != nil { + return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.CoreV1().Services(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + return err + }) + + if err != nil { + return fmt.Errorf("service operation failed after retries: %w", err) } return nil diff --git a/pkg/tool/kube/updater/serviceaccount.go b/pkg/tool/kube/updater/serviceaccount.go deleted file mode 100644 index 091bc47f61..0000000000 --- a/pkg/tool/kube/updater/serviceaccount.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/koderover/zadig/v2/pkg/tool/kube/util" -) - -func DeleteServiceAccounts(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - err := clientset.CoreV1().ServiceAccounts(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) - - return util.IgnoreNotFoundError(err) -} - -func CreateServiceAccount(sa *corev1.ServiceAccount, cl client.Client) error { - return createObject(sa, cl) -} diff --git a/pkg/tool/kube/updater/serviceaccount_v2.go b/pkg/tool/kube/updater/serviceaccount_v2.go index 6ddfab23ec..a45aac72d6 100644 --- a/pkg/tool/kube/updater/serviceaccount_v2.go +++ b/pkg/tool/kube/updater/serviceaccount_v2.go @@ -22,7 +22,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "sigs.k8s.io/controller-runtime/pkg/client" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -34,27 +33,36 @@ func DeleteServiceAccountsV2(ctx context.Context, clusterID, namespace string, o opt(config) } - if config.selector == "" { - return fmt.Errorf("must specify a selector for service account deletion") + if config.name == "" && config.selector == "" { + return fmt.Errorf("must specify either a name or a selector for deletion to prevent accidental namespace wipeout") + } + if config.name != "" && config.selector != "" { + return fmt.Errorf("cannot specify both name and selector simultaneously") } - c, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(clusterID) + c, err := 
clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + propagationPolicy := metav1.DeletePropagationForeground + deleteOpts := metav1.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + } + + if config.name != "" { + err = c.CoreV1().ServiceAccounts(namespace).Delete(ctx, config.name, deleteOpts) + return util.IgnoreNotFoundError(err) + } + selector, err := labels.Parse(config.selector) if err != nil { return fmt.Errorf("failed to parse selector %q: %w", config.selector, err) } - propagationPolicy := metav1.DeletePropagationForeground - deleteOpts := &client.DeleteAllOfOptions{ - DeleteOptions: client.DeleteOptions{PropagationPolicy: &propagationPolicy}, - ListOptions: client.ListOptions{LabelSelector: selector, Namespace: namespace}, - } - - err = c.DeleteAllOf(ctx, &corev1.ServiceAccount{}, deleteOpts) + err = c.CoreV1().ServiceAccounts(namespace).DeleteCollection(ctx, deleteOpts, metav1.ListOptions{ + LabelSelector: selector.String(), + }) return util.IgnoreNotFoundError(err) } diff --git a/pkg/tool/kube/updater/statefulset.go b/pkg/tool/kube/updater/statefulset.go deleted file mode 100644 index d7d372ec6b..0000000000 --- a/pkg/tool/kube/updater/statefulset.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2021 The KodeRover Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package updater - -import ( - "bytes" - "context" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func PatchStatefulSet(ns, name string, patchBytes []byte, cl client.Client) error { - return patchObject(&appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - }, patchBytes, cl) -} - -// TODO: LOU: it is not the right way to restart a statefulSet, since it is a hack and it -// will generate a new revision which will pollute the revision history. -func RestartStatefulSet(ns, name string, cl client.Client) error { - now := time.Now().Format(time.RFC3339Nano) - payload := bytes.NewBufferString("") - _ = restartPatchTemplate.Execute(payload, struct { - Time string - }{now}) - - if err := PatchStatefulSet(ns, name, payload.Bytes(), cl); err != nil { - return fmt.Errorf("failed to restart %s/deploy/%s: %v", ns, name, err) - } - - return nil -} - -func DeleteStatefulSets(namespace string, selector labels.Selector, clientset *kubernetes.Clientset) error { - deletePolicy := metav1.DeletePropagationForeground - return clientset.AppsV1().StatefulSets(namespace).DeleteCollection( - context.TODO(), - metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }, - metav1.ListOptions{ - LabelSelector: selector.String(), - }, - ) -} - -func UpdateStatefulSetImage(ns, name, container, image string, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, container, image)) - - return PatchStatefulSet(ns, name, patchBytes, cl) -} - -func UpdateStatefulSetInitImage(ns, name, container, image string, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"template":{"spec":{"initContainers":[{"name":"%s","image":"%s"}]}}}}`, container, image)) - - return PatchStatefulSet(ns, name, 
patchBytes, cl) -} - -func ScaleStatefulSet(ns, name string, replicas int, cl client.Client) error { - patchBytes := []byte(fmt.Sprintf(`{"spec":{"replicas": %d}}`, replicas)) - return PatchStatefulSet(ns, name, patchBytes, cl) -} - -func CreateOrPatchStatefulSet(sts *appsv1.StatefulSet, cl client.Client) error { - return createOrPatchObject(sts, cl) -} diff --git a/pkg/tool/kube/updater/unstructured.go b/pkg/tool/kube/updater/unstructured.go index 709093fc22..fa8a7493e8 100644 --- a/pkg/tool/kube/updater/unstructured.go +++ b/pkg/tool/kube/updater/unstructured.go @@ -26,6 +26,7 @@ func CreateOrPatchUnstructured(u *unstructured.Unstructured, cl client.Client) e return createOrPatchObject(u, cl) } +// remain as is func PatchUnstructured(u *unstructured.Unstructured, patchBytes []byte, patchType types.PatchType, cl client.Client) error { return PatchObject(u, patchBytes, patchType, cl) } @@ -34,10 +35,6 @@ func CreateOrPatchUnstructuredNeverAnnotation(u *unstructured.Unstructured, cl c return createOrPatchObjectNeverAnnotation(u, cl) } -func UpdateOrCreateUnstructured(u *unstructured.Unstructured, cl client.Client) error { - return updateOrCreateObject(u, cl) -} - func DeleteUnstructured(u *unstructured.Unstructured, cl client.Client) error { return deleteObjectWithDefaultOptions(u, cl) } From 60892188d08e15045bfff650182a10dc1ebc04c1 Mon Sep 17 00:00:00 2001 From: Min Min Date: Thu, 19 Mar 2026 14:00:46 +0800 Subject: [PATCH 08/21] add some debug logs to see what went wrong Signed-off-by: Min Min --- .../aslan/core/common/service/kube/apply.go | 28 ++++++++++++++++--- .../aslan/core/common/service/version.go | 3 ++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 2e2b766a8f..9b4300d847 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -869,7 +869,12 
@@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge errList = multierror.Append(errList, marshalErr) continue } - err = updater.CreateOrPatchDeploymentV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + err = updater.CreateOrPatchDeploymentV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), res, err) errList = multierror.Append(errList, err) @@ -909,7 +914,12 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge errList = multierror.Append(errList, marshalErr) continue } - err = updater.CreateOrPatchStatefulSetV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + err = updater.CreateOrPatchStatefulSetV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), res, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1005,7 +1015,12 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge errList = multierror.Append(errList, marshalErr) continue } - err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + 
err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1038,7 +1053,12 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge errList = multierror.Append(errList, marshalErr) continue } - err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, "", string(resYAML)) + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) diff --git a/pkg/microservice/aslan/core/common/service/version.go b/pkg/microservice/aslan/core/common/service/version.go index 6c80792d6b..ed8acfa3e8 100644 --- a/pkg/microservice/aslan/core/common/service/version.go +++ b/pkg/microservice/aslan/core/common/service/version.go @@ -442,6 +442,9 @@ func RollbackEnvServiceVersion(ctx *internalhandler.Context, projectName, envNam } preProdSvc.RenderedYaml = preResourceYaml + log.Infof("current yaml is: %s", preProdSvc.RenderedYaml) + log.Infof("rollback yaml is: %s", parsedYaml) + err = kube.CheckResourceAppliedByOtherEnv(parsedYaml, env, envSvcVersion.Service.ServiceName) if err != nil { return nil, e.ErrRollbackEnvServiceVersion.AddErr(err) From cbac87f6812bb43feb9d68794f031718a246fe87 Mon Sep 17 00:00:00 2001 From: Min Min Date: Thu, 19 Mar 2026 14:14:51 +0800 Subject: [PATCH 
09/21] even more debug logs to see what happens Signed-off-by: Min Min --- pkg/tool/kube/updater/deployment_v2.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/tool/kube/updater/deployment_v2.go b/pkg/tool/kube/updater/deployment_v2.go index b43246cf8e..d3823a31cd 100644 --- a/pkg/tool/kube/updater/deployment_v2.go +++ b/pkg/tool/kube/updater/deployment_v2.go @@ -35,6 +35,7 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" + "github.com/koderover/zadig/v2/pkg/tool/log" ) func RestartDeploymentV2(ctx context.Context, clusterID, namespace, name string) error { @@ -284,6 +285,11 @@ func CreateOrPatchDeploymentV2(ctx context.Context, clusterID, namespace, origin return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) } + log.Infof("[CreateOrPatchDeploymentV2] original yaml is: %s", string(originalJSONMutated)) + log.Infof("[CreateOrPatchDeploymentV2] target yaml is: %s", string(targetJSONMutated)) + log.Infof("[CreateOrPatchDeploymentV2] live yaml is: %s", string(liveJSON)) + log.Infof("[CreateOrPatchDeploymentV2] patch bytes is: %s", string(patchBytes)) + if string(patchBytes) == "{}" { return nil } From d24b76b8642962203057202758c1f02501f8f1d6 Mon Sep 17 00:00:00 2001 From: Min Min Date: Thu, 19 Mar 2026 15:07:31 +0800 Subject: [PATCH 10/21] change 3-way patch to 2-way patch to prevent overriding env resources Signed-off-by: Min Min --- pkg/tool/kube/updater/clusterrole_v2.go | 137 +++++++++--------------- pkg/tool/kube/updater/configmap_v2.go | 63 ++++------- pkg/tool/kube/updater/cronjob_v2.go | 123 ++++++++------------- pkg/tool/kube/updater/deployment_v2.go | 81 +++++--------- pkg/tool/kube/updater/ingress_v2.go | 137 +++++++++--------------- pkg/tool/kube/updater/pvc_v2.go | 68 +++++------- pkg/tool/kube/updater/secret_v2.go | 61 ++++------- pkg/tool/kube/updater/service_v2.go | 62 ++++------- pkg/tool/kube/updater/statefulset_v2.go | 70 +++++------- 9 files changed, 
284 insertions(+), 518 deletions(-) diff --git a/pkg/tool/kube/updater/clusterrole_v2.go b/pkg/tool/kube/updater/clusterrole_v2.go index a170e33228..a857ebffb7 100644 --- a/pkg/tool/kube/updater/clusterrole_v2.go +++ b/pkg/tool/kube/updater/clusterrole_v2.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -106,54 +105,36 @@ func CreateOrPatchClusterRoleV2(ctx context.Context, clusterID, originalYAML, ta } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.RbacV1().ClusterRoles().Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&rbacv1.ClusterRole{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.RbacV1().ClusterRoles().Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create clusterrole: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check clusterrole existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := 
strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.ClusterRole{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.RbacV1().ClusterRoles().Patch( - ctx, - name, - types.StrategicMergePatchType, - patchBytes, - metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.RbacV1().ClusterRoles().Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("clusterrole operation failed after retries: %w", err) + return fmt.Errorf("clusterrole patch failed: %w", err) } return nil @@ -201,54 +182,36 @@ func CreateOrPatchClusterRoleBindingV2(ctx context.Context, clusterID, originalY } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.RbacV1().ClusterRoleBindings().Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&rbacv1.ClusterRoleBinding{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.RbacV1().ClusterRoleBindings().Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return 
fmt.Errorf("failed to create clusterrolebinding: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check clusterrolebinding existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.ClusterRoleBinding{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.RbacV1().ClusterRoleBindings().Patch( - ctx, - name, - types.StrategicMergePatchType, - patchBytes, - metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.RbacV1().ClusterRoleBindings().Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("clusterrolebinding operation failed after retries: %w", err) + return fmt.Errorf("clusterrolebinding patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/configmap_v2.go b/pkg/tool/kube/updater/configmap_v2.go index 63b8b7726d..a96bae71f6 100644 --- a/pkg/tool/kube/updater/configmap_v2.go +++ b/pkg/tool/kube/updater/configmap_v2.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -117,7 +116,7 @@ func UpdateConfigMapV2(ctx context.Context, clusterID, namespace string, cm *cor return nil } -// CreateOrPatchConfigMapV2 implements a 3-way merge patch for ConfigMap, similar to CreateOrPatchDeploymentV2. +// CreateOrPatchConfigMapV2 implements a 2-way merge patch for ConfigMap. 
func CreateOrPatchConfigMapV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { @@ -161,50 +160,32 @@ func CreateOrPatchConfigMapV2(ctx context.Context, clusterID, namespace, origina } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.CoreV1().ConfigMaps(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&corev1.ConfigMap{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().ConfigMaps(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create configmap: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check configmap existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.ConfigMap{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.CoreV1().ConfigMaps(namespace).Patch( - ctx, name, 
types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.CoreV1().ConfigMaps(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("configmap operation failed after retries: %w", err) + return fmt.Errorf("configmap patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/cronjob_v2.go b/pkg/tool/kube/updater/cronjob_v2.go index 711afb4af3..c389175d92 100644 --- a/pkg/tool/kube/updater/cronjob_v2.go +++ b/pkg/tool/kube/updater/cronjob_v2.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" @@ -90,7 +89,7 @@ func DeleteCronJobsV2(ctx context.Context, clusterID, namespace string, opts ... return util.IgnoreNotFoundError(err) } -// CreateOrPatchCronJobV2 implements a 3-way merge patch for CronJob, similar to CreateOrPatchDeploymentV2. +// CreateOrPatchCronJobV2 implements a 2-way merge patch for CronJob, similar to CreateOrPatchDeploymentV2. // On clusters < 1.21, it falls back to batch/v1beta1 API. 
func CreateOrPatchCronJobV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) @@ -147,50 +146,32 @@ func createOrPatchCronJobV1(ctx context.Context, c *kubernetes.Clientset, namesp } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.BatchV1().CronJobs(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&batchv1.CronJob{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.BatchV1().CronJobs(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create cronjob: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check cronjob existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &batchv1.CronJob{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.BatchV1().CronJobs(namespace).Patch( - ctx, name, types.StrategicMergePatchType, patchBytes, 
metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.BatchV1().CronJobs(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("cronjob operation failed after retries: %w", err) + return fmt.Errorf("cronjob patch failed: %w", err) } return nil @@ -234,50 +215,32 @@ func createOrPatchCronJobBeta(ctx context.Context, c *kubernetes.Clientset, name } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.BatchV1beta1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.BatchV1beta1().CronJobs(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&batchv1beta1.CronJob{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.BatchV1beta1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.BatchV1beta1().CronJobs(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create cronjob (v1beta1): %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check cronjob existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, 
targetJSONMutated, &batchv1beta1.CronJob{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.BatchV1beta1().CronJobs(namespace).Patch( - ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.BatchV1beta1().CronJobs(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("cronjob operation failed after retries: %w", err) + return fmt.Errorf("cronjob (v1beta1) patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/deployment_v2.go b/pkg/tool/kube/updater/deployment_v2.go index d3823a31cd..bb1bb37c96 100644 --- a/pkg/tool/kube/updater/deployment_v2.go +++ b/pkg/tool/kube/updater/deployment_v2.go @@ -35,7 +35,6 @@ import ( "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" - "github.com/koderover/zadig/v2/pkg/tool/log" ) func RestartDeploymentV2(ctx context.Context, clusterID, namespace, name string) error { @@ -246,66 +245,36 @@ func CreateOrPatchDeploymentV2(ctx context.Context, clusterID, namespace, origin } } - // since there might be 409 conflict on when the object is being updated frequently, we use a retry on conflict to handle it - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - // getting the live object from the cluster - liveObj, err := c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) - - // if the object wasn't there, just deploy it - if apierrors.IsNotFound(err) { - _, createErr := c.AppsV1().Deployments(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - // otherwise, calculate the 3-way merge based on - // 1. the original yaml this system saved - // 2. 
the target yaml this system wants to create - // 3. the live state in the cluster - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&appsv1.Deployment{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.AppsV1().Deployments(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create deployment: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check deployment existence: %w", err) + } - log.Infof("[CreateOrPatchDeploymentV2] original yaml is: %s", string(originalJSONMutated)) - log.Infof("[CreateOrPatchDeploymentV2] target yaml is: %s", string(targetJSONMutated)) - log.Infof("[CreateOrPatchDeploymentV2] live yaml is: %s", string(liveJSON)) - log.Infof("[CreateOrPatchDeploymentV2] patch bytes is: %s", string(patchBytes)) - - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &appsv1.Deployment{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.AppsV1().Deployments(namespace).Patch( - ctx, - name, - types.StrategicMergePatchType, - patchBytes, - metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.AppsV1().Deployments(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) 
if err != nil { - return fmt.Errorf("deployment operation failed after retries: %w", err) + return fmt.Errorf("deployment patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/ingress_v2.go b/pkg/tool/kube/updater/ingress_v2.go index f92b091309..98d3b2c042 100644 --- a/pkg/tool/kube/updater/ingress_v2.go +++ b/pkg/tool/kube/updater/ingress_v2.go @@ -28,7 +28,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" @@ -134,54 +133,36 @@ func createOrPatchIngressV1(ctx context.Context, c kubernetes.Interface, namespa } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.NetworkingV1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.NetworkingV1().Ingresses(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&networkingv1.Ingress{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.NetworkingV1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.NetworkingV1().Ingresses(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create ingress: %w", createErr) } + return nil + } + 
if err != nil { + return fmt.Errorf("failed to check ingress existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &networkingv1.Ingress{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.NetworkingV1().Ingresses(namespace).Patch( - ctx, - name, - types.StrategicMergePatchType, - patchBytes, - metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.NetworkingV1().Ingresses(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("ingress operation failed after retries: %w", err) + return fmt.Errorf("ingress patch failed: %w", err) } return nil @@ -225,54 +206,36 @@ func createOrPatchIngressBeta(ctx context.Context, c kubernetes.Interface, names } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.ExtensionsV1beta1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.ExtensionsV1beta1().Ingresses(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&extensionsv1beta1.Ingress{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.ExtensionsV1beta1().Ingresses(namespace).Get(ctx, 
name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.ExtensionsV1beta1().Ingresses(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create ingress (v1beta1): %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check ingress existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &extensionsv1beta1.Ingress{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.ExtensionsV1beta1().Ingresses(namespace).Patch( - ctx, - name, - types.StrategicMergePatchType, - patchBytes, - metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.ExtensionsV1beta1().Ingresses(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("ingress (v1beta1) operation failed after retries: %w", err) + return fmt.Errorf("ingress (v1beta1) patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/pvc_v2.go b/pkg/tool/kube/updater/pvc_v2.go index 5cb96f8704..bad637dc81 100644 --- a/pkg/tool/kube/updater/pvc_v2.go +++ b/pkg/tool/kube/updater/pvc_v2.go @@ -145,54 +145,36 @@ func CreateOrPatchPVCV2(ctx context.Context, clusterID, namespace, originalYAML, } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to 
marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&corev1.PersistentVolumeClaim{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create PVC: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check PVC existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.PersistentVolumeClaim{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.CoreV1().PersistentVolumeClaims(namespace).Patch( - ctx, - name, - types.StrategicMergePatchType, - patchBytes, - metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.CoreV1().PersistentVolumeClaims(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("PVC operation failed after retries: %w", err) + return fmt.Errorf("PVC patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/secret_v2.go b/pkg/tool/kube/updater/secret_v2.go index 259a1b9efc..046edf0525 100644 --- a/pkg/tool/kube/updater/secret_v2.go +++ b/pkg/tool/kube/updater/secret_v2.go @@ -26,7 +26,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" 
"k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -150,50 +149,32 @@ func CreateOrPatchSecretV2(ctx context.Context, clusterID, namespace, originalYA } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.CoreV1().Secrets(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&corev1.Secret{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().Secrets(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create secret: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check secret existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.Secret{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.CoreV1().Secrets(namespace).Patch( - ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, - ) - return err - }) + if 
string(patchBytes) == "{}" { + return nil + } + _, err = c.CoreV1().Secrets(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("secret operation failed after retries: %w", err) + return fmt.Errorf("secret patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/service_v2.go b/pkg/tool/kube/updater/service_v2.go index a2f5c3cef4..07c4d6a2a1 100644 --- a/pkg/tool/kube/updater/service_v2.go +++ b/pkg/tool/kube/updater/service_v2.go @@ -120,7 +120,7 @@ func UpdateServiceV2(ctx context.Context, clusterID, namespace, serviceName stri return err } -// CreateOrPatchServiceV2 implements a 3-way merge patch for Service. +// CreateOrPatchServiceV2 implements a 2-way merge patch for Service. func CreateOrPatchServiceV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { @@ -164,50 +164,32 @@ func CreateOrPatchServiceV2(ctx context.Context, clusterID, namespace, originalY } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.CoreV1().Services(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&corev1.Service{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to 
calculate 3-way merge patch: %w", err) + _, err = c.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().Services(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create service: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check service existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.Service{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.CoreV1().Services(namespace).Patch( - ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.CoreV1().Services(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("service operation failed after retries: %w", err) + return fmt.Errorf("service patch failed: %w", err) } return nil diff --git a/pkg/tool/kube/updater/statefulset_v2.go b/pkg/tool/kube/updater/statefulset_v2.go index 525e798efe..33ff1690e3 100644 --- a/pkg/tool/kube/updater/statefulset_v2.go +++ b/pkg/tool/kube/updater/statefulset_v2.go @@ -200,7 +200,7 @@ func DeleteStatefulSetAndWaitV2(ctx context.Context, clusterID, namespace string return nil } -// CreateOrPatchStatefulSetV2 is used when the YAML is fully controlled by this system, it implements a 3-way merge patch for the statefulset. +// CreateOrPatchStatefulSetV2 is used when the YAML is fully controlled by this system, it implements a 2-way merge patch for the statefulset. // If we are simply editing the statefulset, use UpdateStatefulSetV2 instead. 
func CreateOrPatchStatefulSetV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) @@ -245,54 +245,36 @@ func CreateOrPatchStatefulSetV2(ctx context.Context, clusterID, namespace, origi } } - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - liveObj, err := c.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) - - if apierrors.IsNotFound(err) { - _, createErr := c.AppsV1().StatefulSets(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) - return createErr - } - if err != nil { - return fmt.Errorf("failed to get live state: %w", err) - } - - liveJSON, err := json.Marshal(liveObj) - if err != nil { - return fmt.Errorf("failed to marshal live object: %w", err) - } - - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(&appsv1.StatefulSet{}) - if err != nil { - return fmt.Errorf("failed to create lookup patch meta: %w", err) - } - - patchBytes, err := strategicpatch.CreateThreeWayMergePatch( - originalJSONMutated, - targetJSONMutated, - liveJSON, - lookupPatchMeta, - true, - ) - if err != nil { - return fmt.Errorf("failed to calculate 3-way merge patch: %w", err) + _, err = c.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.AppsV1().StatefulSets(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create statefulset: %w", createErr) } + return nil + } + if err != nil { + return fmt.Errorf("failed to check statefulset existence: %w", err) + } - if string(patchBytes) == "{}" { - return nil - } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &appsv1.StatefulSet{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } - _, err = c.AppsV1().StatefulSets(namespace).Patch( - ctx, - name, - 
types.StrategicMergePatchType, - patchBytes, - metav1.PatchOptions{}, - ) - return err - }) + if string(patchBytes) == "{}" { + return nil + } + _, err = c.AppsV1().StatefulSets(namespace).Patch( + ctx, + name, + types.StrategicMergePatchType, + patchBytes, + metav1.PatchOptions{}, + ) if err != nil { - return fmt.Errorf("statefulset operation failed after retries: %w", err) + return fmt.Errorf("statefulset patch failed: %w", err) } return nil From 386e91159f3bcec20bc418f3aa791d6d973e4de8 Mon Sep 17 00:00:00 2001 From: Min Min Date: Thu, 19 Mar 2026 16:29:20 +0800 Subject: [PATCH 11/21] added specific typed resource support instead of treating them in unstrctured to force 2-way merge update Signed-off-by: Min Min --- .../aslan/core/common/service/kube/apply.go | 45 +++++++++++ pkg/tool/kube/updater/pod_v2.go | 79 ++++++++++++++++++ pkg/tool/kube/updater/replicaset_v2.go | 79 ++++++++++++++++++ pkg/tool/kube/updater/role_v2.go | 80 +++++++++++++++++++ pkg/tool/kube/updater/rolebinding_v2.go | 80 +++++++++++++++++++ pkg/tool/kube/updater/serviceaccount_v2.go | 80 +++++++++++++++++++ 6 files changed, 443 insertions(+) diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 9b4300d847..8bba6787d0 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -1106,6 +1106,51 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge originalYAML = curRes.Manifest } err = updater.CreateOrPatchClusterRoleBindingV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML)) + if err != nil { + log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) + errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) + continue + } + case setting.ConfigMap, setting.Secret, 
setting.PersistentVolumeClaim, + setting.ServiceAccount, setting.Role, setting.RoleBinding, + setting.Pod, setting.ReplicaSet: + u.SetNamespace(namespace) + u.SetLabels(MergeLabels(labels, u.GetLabels())) + + logContent := fmt.Sprintf("Applying %s/%s in namespace %s", u.GetKind(), u.GetName(), namespace) + jobLogManager.SaveJobLog(logContent) + + targetYAML, marshalErr := yaml.Marshal(u.UnstructuredContent()) + if marshalErr != nil { + log.Errorf("Failed to marshal %s %s to YAML: %v", u.GetKind(), u.GetName(), marshalErr) + errList = multierror.Append(errList, marshalErr) + continue + } + gvkn := fmt.Sprintf("%s-%s", u.GetObjectKind().GroupVersionKind(), u.GetName()) + originalYAML := "" + if curRes, ok := curResourceMap[gvkn]; ok { + originalYAML = curRes.Manifest + } + + switch u.GetKind() { + case setting.ConfigMap: + err = updater.CreateOrPatchConfigMapV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + case setting.Secret: + err = updater.CreateOrPatchSecretV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + case setting.PersistentVolumeClaim: + err = updater.CreateOrPatchPVCV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + case setting.ServiceAccount: + err = updater.CreateOrPatchServiceAccountV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + case setting.Role: + err = updater.CreateOrPatchRoleV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + case setting.RoleBinding: + err = updater.CreateOrPatchRoleBindingV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + case setting.Pod: + err = updater.CreateOrPatchPodV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + case setting.ReplicaSet: + err = updater.CreateOrPatchReplicaSetV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, 
string(targetYAML)) + } + if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) diff --git a/pkg/tool/kube/updater/pod_v2.go b/pkg/tool/kube/updater/pod_v2.go index 4e4369802d..d157700066 100644 --- a/pkg/tool/kube/updater/pod_v2.go +++ b/pkg/tool/kube/updater/pod_v2.go @@ -17,13 +17,18 @@ package updater import ( "context" + "encoding/json" "fmt" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -104,3 +109,77 @@ func UpdatePodV2(ctx context.Context, clusterID, namespace, name string, mutatio return err } + +func CreateOrPatchPodV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj corev1.Pod + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to Pod: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("pod name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + 
originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj corev1.Pod + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + _, err = c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().Pods(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create pod: %w", createErr) + } + return nil + } + if err != nil { + return fmt.Errorf("failed to check pod existence: %w", err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.Pod{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.CoreV1().Pods(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + if err != nil { + return fmt.Errorf("pod patch failed: %w", err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/replicaset_v2.go b/pkg/tool/kube/updater/replicaset_v2.go index 621791442d..2849481d6f 100644 --- a/pkg/tool/kube/updater/replicaset_v2.go +++ b/pkg/tool/kube/updater/replicaset_v2.go @@ -17,12 +17,17 @@ package updater import ( "context" + "encoding/json" "fmt" appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" 
"github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -57,3 +62,77 @@ func DeleteReplicaSetsV2(ctx context.Context, clusterID, namespace string, opts err = c.DeleteAllOf(ctx, &appsv1.ReplicaSet{}, deleteOpts) return util.IgnoreNotFoundError(err) } + +func CreateOrPatchReplicaSetV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj appsv1.ReplicaSet + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to ReplicaSet: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("replicaset name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj appsv1.ReplicaSet + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + _, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.AppsV1().ReplicaSets(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return 
fmt.Errorf("failed to create replicaset: %w", createErr) + } + return nil + } + if err != nil { + return fmt.Errorf("failed to check replicaset existence: %w", err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &appsv1.ReplicaSet{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.AppsV1().ReplicaSets(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + if err != nil { + return fmt.Errorf("replicaset patch failed: %w", err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/role_v2.go b/pkg/tool/kube/updater/role_v2.go index e464c006a2..3acec95a7d 100644 --- a/pkg/tool/kube/updater/role_v2.go +++ b/pkg/tool/kube/updater/role_v2.go @@ -17,12 +17,17 @@ package updater import ( "context" + "encoding/json" "fmt" rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -57,3 +62,78 @@ func DeleteRolesV2(ctx context.Context, clusterID, namespace string, opts ...Del err = c.DeleteAllOf(ctx, &rbacv1.Role{}, deleteOpts) return util.IgnoreNotFoundError(err) } + +// CreateOrPatchRoleV2 implements a 2-way merge patch for Role. 
+func CreateOrPatchRoleV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj rbacv1.Role + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to Role: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("role name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj rbacv1.Role + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + _, err = c.RbacV1().Roles(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.RbacV1().Roles(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create role: %w", createErr) + } + return nil + } + if err != nil { + return fmt.Errorf("failed to check role existence: %w", err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.Role{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + 
} + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.RbacV1().Roles(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + if err != nil { + return fmt.Errorf("role patch failed: %w", err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/rolebinding_v2.go b/pkg/tool/kube/updater/rolebinding_v2.go index 61b5fdd675..25c870bb27 100644 --- a/pkg/tool/kube/updater/rolebinding_v2.go +++ b/pkg/tool/kube/updater/rolebinding_v2.go @@ -17,12 +17,17 @@ package updater import ( "context" + "encoding/json" "fmt" rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -57,3 +62,78 @@ func DeleteRoleBindingsV2(ctx context.Context, clusterID, namespace string, opts err = c.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, deleteOpts) return util.IgnoreNotFoundError(err) } + +// CreateOrPatchRoleBindingV2 implements a 2-way merge patch for RoleBinding. 
+func CreateOrPatchRoleBindingV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj rbacv1.RoleBinding + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to RoleBinding: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("role binding name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj rbacv1.RoleBinding + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + _, err = c.RbacV1().RoleBindings(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.RbacV1().RoleBindings(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create role binding: %w", createErr) + } + return nil + } + if err != nil { + return fmt.Errorf("failed to check role binding existence: %w", err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.RoleBinding{}) + if err != nil { 
+ return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.RbacV1().RoleBindings(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + if err != nil { + return fmt.Errorf("role binding patch failed: %w", err) + } + + return nil +} diff --git a/pkg/tool/kube/updater/serviceaccount_v2.go b/pkg/tool/kube/updater/serviceaccount_v2.go index a45aac72d6..afc93b00ec 100644 --- a/pkg/tool/kube/updater/serviceaccount_v2.go +++ b/pkg/tool/kube/updater/serviceaccount_v2.go @@ -17,11 +17,16 @@ package updater import ( "context" + "encoding/json" "fmt" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" "github.com/koderover/zadig/v2/pkg/tool/kube/util" @@ -80,3 +85,78 @@ func CreateServiceAccountV2(ctx context.Context, clusterID, namespace string, sa return nil } + +// CreateOrPatchServiceAccountV2 implements a 2-way merge patch for ServiceAccount. 
+func CreateOrPatchServiceAccountV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { + c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) + if err != nil { + return fmt.Errorf("failed to get kube client: %w", err) + } + + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) + if err != nil { + return fmt.Errorf("failed to convert target YAML to JSON: %w", err) + } + + var targetObj corev1.ServiceAccount + if err := json.Unmarshal(targetJSON, &targetObj); err != nil { + return fmt.Errorf("failed to unmarshal target JSON to ServiceAccount: %w", err) + } + + name := targetObj.GetName() + if name == "" { + return fmt.Errorf("service account name cannot be empty in target YAML") + } + + targetObj.SetNamespace(namespace) + targetJSONMutated, err := json.Marshal(targetObj) + if err != nil { + return fmt.Errorf("failed to re-marshal mutated target object: %w", err) + } + + originalJSONMutated := []byte("{}") + if originalYAML != "" { + originalJSON, err := yaml.YAMLToJSON([]byte(originalYAML)) + if err != nil { + return fmt.Errorf("failed to convert original YAML to JSON: %w", err) + } + + var originalObj corev1.ServiceAccount + if err := json.Unmarshal(originalJSON, &originalObj); err == nil { + originalObj.SetNamespace(namespace) + originalJSONMutated, _ = json.Marshal(originalObj) + } else { + return fmt.Errorf("failed to unmarshal original JSON: %w", err) + } + } + + _, err = c.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, createErr := c.CoreV1().ServiceAccounts(namespace).Create(ctx, &targetObj, metav1.CreateOptions{}) + if createErr != nil { + return fmt.Errorf("failed to create service account: %w", createErr) + } + return nil + } + if err != nil { + return fmt.Errorf("failed to check service account existence: %w", err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, 
&corev1.ServiceAccount{}) + if err != nil { + return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) + } + + if string(patchBytes) == "{}" { + return nil + } + + _, err = c.CoreV1().ServiceAccounts(namespace).Patch( + ctx, name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, + ) + if err != nil { + return fmt.Errorf("service account patch failed: %w", err) + } + + return nil +} From 500a91e1d34e7ab7c867d8d7852da3704dcac316 Mon Sep 17 00:00:00 2001 From: Min Min Date: Thu, 19 Mar 2026 18:29:19 +0800 Subject: [PATCH 12/21] add api support for overriding full yaml override Signed-off-by: Min Min --- pkg/microservice/aslan/config/consts.go | 11 ++++++++-- .../repository/models/wokflow_task_v4.go | 3 +++ .../common/repository/models/workflow_v4.go | 1 + .../aslan/core/common/service/kube/apply.go | 1 + .../aslan/core/common/service/version.go | 3 ++- .../jobcontroller/job_deploy.go | 6 ++++-- .../aslan/core/environment/handler/service.go | 2 ++ .../aslan/core/environment/handler/version.go | 3 ++- .../core/environment/service/environment.go | 20 +++++++++++-------- .../environment/service/environment_update.go | 8 ++++---- .../aslan/core/environment/service/k8s.go | 4 ++-- .../aslan/core/environment/service/types.go | 1 + .../workflow/controller/job/job_deploy.go | 1 + .../service/workflow/workflow_task_v4.go | 12 ++++++++--- 14 files changed, 53 insertions(+), 23 deletions(-) diff --git a/pkg/microservice/aslan/config/consts.go b/pkg/microservice/aslan/config/consts.go index 3137e079b6..bae6ed987e 100644 --- a/pkg/microservice/aslan/config/consts.go +++ b/pkg/microservice/aslan/config/consts.go @@ -760,8 +760,15 @@ const ( type ValueMergeStrategy string const ( - ValueMergeStrategyReuseValue = "reuse-values" - ValueMergeStrategyOverride = "override" + ValueMergeStrategyReuseValue ValueMergeStrategy = "reuse-values" + ValueMergeStrategyOverride ValueMergeStrategy= "override" +) + +type YAMLMergeStrategy string + +const ( + 
YAMLMergeStrategyMerge YAMLMergeStrategy = "merge" + YAMLMergeStrategyOverride YAMLMergeStrategy = "override" ) type SystemLanguage string diff --git a/pkg/microservice/aslan/core/common/repository/models/wokflow_task_v4.go b/pkg/microservice/aslan/core/common/repository/models/wokflow_task_v4.go index 8aeac2eb0c..af2aafb7b3 100644 --- a/pkg/microservice/aslan/core/common/repository/models/wokflow_task_v4.go +++ b/pkg/microservice/aslan/core/common/repository/models/wokflow_task_v4.go @@ -233,6 +233,8 @@ type JobTaskDeploySpec struct { Timeout int `bson:"timeout" json:"timeout" yaml:"timeout"` ReplaceResources []Resource `bson:"replace_resources" json:"replace_resources" yaml:"replace_resources"` RelatedPodLabels []map[string]string `bson:"-" json:"-" yaml:"-"` + // overrideResource is used to do a full yaml override instead of a 2-way merge patching for all the resources + OverrideResource bool `bson:"override_resource" json:"override_resource" yaml:"override_resource"` // for compatibility ServiceModule string `bson:"service_module" json:"service_module" yaml:"-"` Image string `bson:"image" json:"image" yaml:"-"` @@ -264,6 +266,7 @@ type JobTaskDeployRevertSpec struct { OverrideKVs string `bson:"override_kvs" json:"override_kvs" yaml:"override_kvs"` Revision int64 `bson:"revision" json:"revision" yaml:"revision"` RevisionCreateTime int64 `bson:"revision_create_time" json:"revision_create_time" yaml:"revision_create_time"` + OverrideResource bool `bson:"override_resource" json:"override_resource" yaml:"override_resource"` } type DeployServiceModule struct { diff --git a/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go b/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go index 43e59b0be8..254111370b 100644 --- a/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go +++ b/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go @@ -576,6 +576,7 @@ type DeployServiceVariableConfigList 
[]*DeployServiceVariableConfig type DeployVariableInfo struct { ValueMergeStrategy config.ValueMergeStrategy `bson:"value_merge_strategy,omitempty" json:"value_merge_strategy,omitempty" yaml:"value_merge_strategy,omitempty"` + YAMLMergeStrategy config.YAMLMergeStrategy `bson:"yaml_merge_strategy,omitempty" json:"yaml_merge_strategy,omitempty" yaml:"yaml_merge_strategy,omitempty"` VariableKVs []*commontypes.RenderVariableKV `bson:"variable_kvs" yaml:"variable_kvs" json:"variable_kvs"` OverrideKVs string `bson:"override_kvs" yaml:"override_kvs" json:"override_kvs"` // used for helm services, json-encoded string of kv value diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 8bba6787d0..fc1736204a 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -81,6 +81,7 @@ type ResourceApplyParam struct { JobLogContext *joblog.JobLogContext Uninstall bool WaitForUninstall bool + OverrideResource bool } func DeploymentSelectorLabelExists(resourceName, namespace string, informer informers.SharedInformerFactory, log *zap.SugaredLogger) bool { diff --git a/pkg/microservice/aslan/core/common/service/version.go b/pkg/microservice/aslan/core/common/service/version.go index ed8acfa3e8..89403210ca 100644 --- a/pkg/microservice/aslan/core/common/service/version.go +++ b/pkg/microservice/aslan/core/common/service/version.go @@ -276,7 +276,7 @@ type RollbackEnvServiceVersionData struct { HelmDeployStatusChan chan bool } -func RollbackEnvServiceVersion(ctx *internalhandler.Context, projectName, envName, serviceName string, revision int64, isHelmChart, isProduction bool, detail string, log *zap.SugaredLogger) (*RollbackEnvServiceVersionData, error) { +func RollbackEnvServiceVersion(ctx *internalhandler.Context, projectName, envName, serviceName string, revision int64, isHelmChart, isProduction, overrideResource bool, detail 
string, log *zap.SugaredLogger) (*RollbackEnvServiceVersionData, error) { envSvcVersion, err := mongodb.NewEnvServiceVersionColl().Find(projectName, envName, serviceName, isHelmChart, isProduction, revision) if err != nil { if mongodb.IsErrNoDocuments(err) { @@ -462,6 +462,7 @@ func RollbackEnvServiceVersion(ctx *internalhandler.Context, projectName, envNam Uninstall: false, AddZadigLabel: !isProduction, SharedEnvHandler: kube.EnsureUpdateZadigService, + OverrideResource: overrideResource, } unstructuredList, err := kube.CreateOrPatchResource(resourceApplyParam, log) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go index 07b8b1e7ad..080ff37025 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go @@ -282,7 +282,7 @@ func (c *DeployJobCtl) run(ctx context.Context) error { // if not only deploy image, we will redeploy service if !onlyDeployImage(c.jobTaskSpec.DeployContents) { - if err := c.updateSystemService(env, currentYaml, updatedYaml, c.jobTaskSpec.VariableKVs, revision, containers, candidateReplicaOverrides, updateRevision, c.jobTaskSpec.ServiceName); err != nil { + if err := c.updateSystemService(env, currentYaml, updatedYaml, c.jobTaskSpec.VariableKVs, revision, containers, candidateReplicaOverrides, updateRevision, c.jobTaskSpec.ServiceName, c.jobTaskSpec.OverrideResource); err != nil { logError(c.job, err.Error(), c.logger) return err } @@ -335,7 +335,8 @@ func reconcileReplicaOverridesForDeploy(currentYaml, candidateYaml string, curre } func (c *DeployJobCtl) updateSystemService(env *commonmodels.Product, currentYaml, updatedYaml string, variableKVs []*commontypes.RenderVariableKV, revision int, - containers []*commonmodels.Container, workLoads []*commonmodels.WorkLoad, 
updateRevision bool, serviceName string) error { + containers []*commonmodels.Container, workLoads []*commonmodels.WorkLoad, updateRevision bool, serviceName string, overrideResource bool) error { + addZadigLabel := !c.jobTaskSpec.Production if addZadigLabel { if !commonutil.ServiceDeployed(c.jobTaskSpec.ServiceName, env.ServiceDeployStrategy) && !updateRevision && @@ -360,6 +361,7 @@ func (c *DeployJobCtl) updateSystemService(env *commonmodels.Product, currentYam SharedEnvHandler: nil, ProductInfo: env, JobLogContext: &joblog.JobLogContext{WorkflowCtx: c.workflowCtx, JobTask: c.job}, + OverrideResource: overrideResource, }, c.logger) if err != nil { diff --git a/pkg/microservice/aslan/core/environment/handler/service.go b/pkg/microservice/aslan/core/environment/handler/service.go index 38628a4cf5..63d73ce859 100644 --- a/pkg/microservice/aslan/core/environment/handler/service.go +++ b/pkg/microservice/aslan/core/environment/handler/service.go @@ -365,6 +365,7 @@ func UpdateService(c *gin.Context) { envName := c.Param("name") projectKey := c.Query("projectName") production := c.Query("production") == "true" + overrideResource := c.Query("overrideResource") == "true" detail := fmt.Sprintf("环境名称:%s,服务名称:%s", envName, c.Param("serviceName")) detailEn := fmt.Sprintf("Environment Name: %s, Service Name: %s", envName, c.Param("serviceName")) @@ -424,6 +425,7 @@ func UpdateService(c *gin.Context) { ServiceRev: svcRev, UpdateBy: ctx.UserName, UpdateServiceTmpl: svcRev.UpdateServiceTmpl, + OverrideResource: overrideResource, } ctx.RespErr = service.UpdateService(args, ctx.Logger) diff --git a/pkg/microservice/aslan/core/environment/handler/version.go b/pkg/microservice/aslan/core/environment/handler/version.go index ce2f7c753d..a8b2df1872 100644 --- a/pkg/microservice/aslan/core/environment/handler/version.go +++ b/pkg/microservice/aslan/core/environment/handler/version.go @@ -320,6 +320,7 @@ func RollbackEnvServiceVersion(c *gin.Context) { return } production := 
c.Query("production") == "true" + overrideResource := c.Query("overrideResource") == "true" req := &RollbackEnvServiceVersionRequest{} if err := c.ShouldBindJSON(req); err != nil { @@ -382,5 +383,5 @@ func RollbackEnvServiceVersion(c *gin.Context) { detailEn := fmt.Sprintf("Environment: %s, Service: %s, Version: %d", envName, serviceName, revision) internalhandler.InsertDetailedOperationLog(c, ctx.UserName, projectKey, setting.OperationSceneEnv, "回滚", "环境-服务", detail, detailEn, "", types.RequestBodyTypeJSON, ctx.Logger, envName) - _, ctx.RespErr = commonservice.RollbackEnvServiceVersion(ctx, projectKey, envName, serviceName, revision, isHelmChart, production, req.Detail, ctx.Logger) + _, ctx.RespErr = commonservice.RollbackEnvServiceVersion(ctx, projectKey, envName, serviceName, revision, isHelmChart, production, overrideResource, req.Detail, ctx.Logger) } diff --git a/pkg/microservice/aslan/core/environment/service/environment.go b/pkg/microservice/aslan/core/environment/service/environment.go index f6e4ef294d..117c3fad57 100644 --- a/pkg/microservice/aslan/core/environment/service/environment.go +++ b/pkg/microservice/aslan/core/environment/service/environment.go @@ -536,9 +536,10 @@ func generateMobileCustomWorkflow(projectName, workflowName string, focalBasicIm } type UpdateServiceArg struct { - ServiceName string `json:"service_name"` - DeployStrategy string `json:"deploy_strategy"` - VariableKVs []*commontypes.RenderVariableKV `json:"variable_kvs"` + ServiceName string `json:"service_name"` + DeployStrategy string `json:"deploy_strategy"` + VariableKVs []*commontypes.RenderVariableKV `json:"variable_kvs"` + OverrideResource bool `json:"override_resource"` } type UpdateEnv struct { @@ -593,10 +594,12 @@ func UpdateMultipleK8sEnv(args []*UpdateEnv, envNames []string, productName, req } strategyMap := make(map[string]string) + overrideResourceMap := make(map[string]bool) updateSvcs := make([]*templatemodels.ServiceRender, 0) updateRevisionSvcs := make([]string, 
0) for _, svc := range arg.Services { strategyMap[svc.ServiceName] = svc.DeployStrategy + overrideResourceMap[svc.ServiceName] = svc.OverrideResource err = commontypes.ValidateRenderVariables(exitedProd.GlobalVariables, svc.VariableKVs) if err != nil { @@ -620,7 +623,7 @@ func UpdateMultipleK8sEnv(args []*UpdateEnv, envNames []string, productName, req // update env default variable, particular svcs from client are involved // svc revision will not be updated - err = updateK8sProduct(exitedProd, username, requestID, updateRevisionSvcs, filter, updateSvcs, strategyMap, force, exitedProd.GlobalVariables, log) + err = updateK8sProduct(exitedProd, username, requestID, updateRevisionSvcs, filter, updateSvcs, strategyMap, overrideResourceMap, force, exitedProd.GlobalVariables, log) if err != nil { log.Errorf("UpdateMultipleK8sEnv UpdateProductV2 err:%v", err) errList = multierror.Append(errList, err) @@ -647,7 +650,7 @@ func UpdateMultipleK8sEnv(args []*UpdateEnv, envNames []string, productName, req // TODO need optimize // cvm and k8s yaml projects should not be handled together -func updateProductImpl(updateRevisionSvcs []string, deployStrategy map[string]string, existedProd, updateProd *commonmodels.Product, filter svcUpgradeFilter, user string, log *zap.SugaredLogger) (err error) { +func updateProductImpl(updateRevisionSvcs []string, deployStrategy map[string]string, overrideResource map[string]bool, existedProd, updateProd *commonmodels.Product, filter svcUpgradeFilter, user string, log *zap.SugaredLogger) (err error) { productName := existedProd.ProductName envName := existedProd.EnvName namespace := existedProd.Namespace @@ -813,7 +816,7 @@ func updateProductImpl(updateRevisionSvcs []string, deployStrategy map[string]st updateProd, service, curEnv.GetServiceMap()[service.ServiceName], - !updateProd.Production, inf, kubeClient, istioClient, log) + !updateProd.Production, overrideResource[service.ServiceName], inf, kubeClient, istioClient, log) if errUpsertService != 
nil { service.Error = errUpsertService.Error() } else { @@ -2181,7 +2184,7 @@ func updateK8sProductVariable(productResp *commonmodels.Product, userName, reque } return false } - return updateK8sProduct(productResp, userName, requestID, nil, filter, productResp.ServiceRenders, nil, false, productResp.GlobalVariables, log) + return updateK8sProduct(productResp, userName, requestID, nil, filter, productResp.ServiceRenders, nil, nil, false, productResp.GlobalVariables, log) } func updateHelmProductVariable(productResp *commonmodels.Product, userName, requestID string, syncLock *cache.RedisLock, log *zap.SugaredLogger) error { @@ -2898,7 +2901,7 @@ func restartRelatedWorkloads(env *commonmodels.Product, service *commonmodels.Pr } // upsertService -func upsertService(env *commonmodels.Product, newService *commonmodels.ProductService, prevSvc *commonmodels.ProductService, addLabel bool, informer informers.SharedInformerFactory, +func upsertService(env *commonmodels.Product, newService *commonmodels.ProductService, prevSvc *commonmodels.ProductService, addLabel bool, overrideResource bool, informer informers.SharedInformerFactory, kubeClient client.Client, istioClient versionedclient.Interface, log *zap.SugaredLogger) ([]*unstructured.Unstructured, error) { isUpdate := prevSvc == nil errList := &multierror.Error{ @@ -2966,6 +2969,7 @@ func upsertService(env *commonmodels.Product, newService *commonmodels.ProductSe SharedEnvHandler: EnsureUpdateZadigService, IstioGrayscaleEnvHandler: kube.EnsureUpdateGrayscaleService, IsFromImportToDeploy: isFromImportToDeploy, + OverrideResource: overrideResource, } return kube.CreateOrPatchResource(resourceApplyParam, log) diff --git a/pkg/microservice/aslan/core/environment/service/environment_update.go b/pkg/microservice/aslan/core/environment/service/environment_update.go index ccd2cc5536..feec3334ce 100644 --- a/pkg/microservice/aslan/core/environment/service/environment_update.go +++ 
b/pkg/microservice/aslan/core/environment/service/environment_update.go @@ -191,7 +191,7 @@ func updateK8sSvcInAllEnvs(productName string, templateSvc *commonmodels.Service } return false } - err = updateK8sProduct(product, "system", "", []string{svcRender.ServiceName}, filter, []*templatemodels.ServiceRender{svcRender}, nil, false, product.GlobalVariables, log.SugaredLogger()) + err = updateK8sProduct(product, "system", "", []string{svcRender.ServiceName}, filter, []*templatemodels.ServiceRender{svcRender}, nil, nil, false, product.GlobalVariables, log.SugaredLogger()) if err != nil { retErr = multierror.Append(retErr, err) } @@ -199,7 +199,7 @@ func updateK8sSvcInAllEnvs(productName string, templateSvc *commonmodels.Service return retErr.ErrorOrNil() } -func updateK8sProduct(exitedProd *commonmodels.Product, user, requestID string, updateRevisionSvc []string, filter svcUpgradeFilter, updatedSvcs []*templatemodels.ServiceRender, deployStrategy map[string]string, +func updateK8sProduct(exitedProd *commonmodels.Product, user, requestID string, updateRevisionSvc []string, filter svcUpgradeFilter, updatedSvcs []*templatemodels.ServiceRender, deployStrategy map[string]string, overrideResource map[string]bool, force bool, globalVariables []*commontypes.GlobalVariableKV, log *zap.SugaredLogger) error { envName, productName := exitedProd.EnvName, exitedProd.ProductName kubeClient, err := clientmanager.NewKubeClientManager().GetControllerRuntimeClient(exitedProd.ClusterID) @@ -425,7 +425,7 @@ func updateK8sProduct(exitedProd *commonmodels.Product, user, requestID string, go func() { productErrMsg := "" - err = updateProductImpl(updateRevisionSvc, deployStrategy, exitedProd, updateProd, filter, user, log) + err = updateProductImpl(updateRevisionSvc, deployStrategy, overrideResource, exitedProd, updateProd, filter, user, log) if err != nil { log.Errorf("[%s][P:%s] failed to update product %#v", envName, productName, err) // 发送更新产品失败消息给用户 @@ -486,7 +486,7 @@ func 
updateCVMProduct(exitedProd *commonmodels.Product, user, requestID string, go func() { productErrMsg := "" - err = updateProductImpl(serviceNames, nil, exitedProd, updateProd, nil, user, log) + err = updateProductImpl(serviceNames, nil, nil, exitedProd, updateProd, nil, user, log) if err != nil { productErrMsg = err.Error() log.Errorf("[%s][P:%s] failed to update product %#v", envName, productName, err) diff --git a/pkg/microservice/aslan/core/environment/service/k8s.go b/pkg/microservice/aslan/core/environment/service/k8s.go index 9fe9651d3a..0d1299baba 100644 --- a/pkg/microservice/aslan/core/environment/service/k8s.go +++ b/pkg/microservice/aslan/core/environment/service/k8s.go @@ -209,7 +209,7 @@ func (k *K8sService) updateService(args *SvcOptArgs) error { prodinfo, newProductSvc, currentProductSvc, - !prodinfo.Production, inf, kubeClient, istioClient, k.log) + !prodinfo.Production, args.OverrideResource, inf, kubeClient, istioClient, k.log) if err != nil { k.log.Error(err) newProductSvc.Error = err.Error() @@ -677,7 +677,7 @@ func (k *K8sService) createGroup(username string, product *commonmodels.Product, updatableServiceNameList = append(updatableServiceNameList, group[i].ServiceName) go func(svc *commonmodels.ProductService) { defer wg.Done() - items, err := upsertService(prod, svc, nil, !prod.Production, informer, kubeClient, istioClient, k.log) + items, err := upsertService(prod, svc, nil, !prod.Production, false, informer, kubeClient, istioClient, k.log) if err != nil { lock.Lock() switch e := err.(type) { diff --git a/pkg/microservice/aslan/core/environment/service/types.go b/pkg/microservice/aslan/core/environment/service/types.go index f9f1decd08..7b6f66d304 100644 --- a/pkg/microservice/aslan/core/environment/service/types.go +++ b/pkg/microservice/aslan/core/environment/service/types.go @@ -73,6 +73,7 @@ type SvcOptArgs struct { ServiceRev *SvcRevision UpdateBy string UpdateServiceTmpl bool + OverrideResource bool } type PreviewServiceArgs struct { 
diff --git a/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go b/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go index 6c191b495b..fb98f45dad 100644 --- a/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go +++ b/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go @@ -489,6 +489,7 @@ func (j DeployJobController) ToTask(taskID int64) ([]*commonmodels.JobTask, erro VersionName: j.jobSpec.VersionName, DeployContents: j.jobSpec.DeployContents, Timeout: timeout, + OverrideResource: svc.YAMLMergeStrategy == config.YAMLMergeStrategyOverride, } for _, module := range svc.Modules { diff --git a/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task_v4.go b/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task_v4.go index 587d0e8745..0a0ada99d7 100644 --- a/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task_v4.go +++ b/pkg/microservice/aslan/core/workflow/service/workflow/workflow_task_v4.go @@ -1300,6 +1300,11 @@ type CommonRevertInput struct { Detail string `json:"detail"` } +type DeployRevertInput struct { + Detail string `json:"detail"` + OverrideResource bool `json:"override_resource"` +} + type SQLRevertInput struct { CommonRevertInput `json:",inline"` SQL string `json:"sql"` @@ -1337,7 +1342,7 @@ func RevertWorkflowTaskV4Job(ctx *internalhandler.Context, workflowName, jobName return err } - inputSpec := new(CommonRevertInput) + inputSpec := new(DeployRevertInput) err = commonmodels.IToi(input, inputSpec) if err != nil { return fmt.Errorf("failed to decode deploy revert job spec, error: %s", err) @@ -1378,9 +1383,10 @@ func RevertWorkflowTaskV4Job(ctx *internalhandler.Context, workflowName, jobName JobTaskCommonRevertSpec: commonmodels.JobTaskCommonRevertSpec{ Detail: inputSpec.Detail, }, + OverrideResource: inputSpec.OverrideResource, } - rollbackStatus, err := 
commonservice.RollbackEnvServiceVersion(ctx, task.ProjectName, jobTaskSpec.Env, jobTaskSpec.ServiceName, jobTaskSpec.OriginRevision, false, jobTaskSpec.Production, inputSpec.Detail, logger) + rollbackStatus, err := commonservice.RollbackEnvServiceVersion(ctx, task.ProjectName, jobTaskSpec.Env, jobTaskSpec.ServiceName, jobTaskSpec.OriginRevision, false, jobTaskSpec.Production, inputSpec.OverrideResource, inputSpec.Detail, logger) if err != nil { log.Errorf("failed to rollback env service version, error: %s", err) return err @@ -1501,7 +1507,7 @@ func RevertWorkflowTaskV4Job(ctx *internalhandler.Context, workflowName, jobName }, } - rollbackStatus, err := commonservice.RollbackEnvServiceVersion(ctx, task.ProjectName, jobTaskSpec.Env, jobTaskSpec.ServiceName, jobTaskSpec.OriginRevision, false, jobTaskSpec.IsProduction, inputSpec.Detail, logger) + rollbackStatus, err := commonservice.RollbackEnvServiceVersion(ctx, task.ProjectName, jobTaskSpec.Env, jobTaskSpec.ServiceName, jobTaskSpec.OriginRevision, false, jobTaskSpec.IsProduction, false, inputSpec.Detail, logger) if err != nil { log.Errorf("failed to rollback env service version, error: %s", err) return err From aae964416017d2191787cd0a61e5743b9358bba4 Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 10:22:18 +0800 Subject: [PATCH 13/21] debug Signed-off-by: Min Min --- .../aslan/core/common/repository/models/workflow_v4.go | 3 +++ .../workflow/service/workflow/controller/job/job_deploy.go | 1 + 2 files changed, 4 insertions(+) diff --git a/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go b/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go index 254111370b..427c1961b8 100644 --- a/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go +++ b/pkg/microservice/aslan/core/common/repository/models/workflow_v4.go @@ -481,6 +481,9 @@ type ZadigDeployJobSpec struct { ValueMergeStrategy config.ValueMergeStrategy `bson:"value_merge_strategy" 
json:"value_merge_strategy" yaml:"value_merge_strategy"` MergeStrategySource config.ParamSourceType `bson:"merge_strategy_source" json:"merge_strategy_source" yaml:"merge_strategy_source"` + // YAML deploy only field + YAMLMergeStrategy config.YAMLMergeStrategy `bson:"yaml_merge_strategy" json:"yaml_merge_strategy" yaml:"yaml_merge_strategy"` + // TODO: Deprecated in 2.3.0, this field is now used for saving the default service module info for deployment. DefaultServices []*ServiceAndImage `bson:"service_and_images" yaml:"service_and_images" json:"service_and_images"` } diff --git a/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go b/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go index fb98f45dad..ddc1341033 100644 --- a/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go +++ b/pkg/microservice/aslan/core/workflow/service/workflow/controller/job/job_deploy.go @@ -146,6 +146,7 @@ func (j DeployJobController) Update(useUserInput bool, ticket *commonmodels.Appr j.jobSpec.EnvSource = latestSpec.EnvSource j.jobSpec.ValueMergeStrategy = latestSpec.ValueMergeStrategy j.jobSpec.MergeStrategySource = latestSpec.MergeStrategySource + j.jobSpec.YAMLMergeStrategy = latestSpec.YAMLMergeStrategy // source is a bit tricky: if the saved args has a source of fromjob, but it has been change to runtime in the config // we need to not only update its source but also set services to empty slice. 
From ac06b9afc1bf3b171314cb0cbe958249280f936c Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 10:58:07 +0800 Subject: [PATCH 14/21] add override function to CreateOrPatchResource helper function Signed-off-by: Min Min --- .../aslan/core/common/service/kube/apply.go | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index fc1736204a..f9ef29960e 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -736,6 +736,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchIngressV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) @@ -760,6 +763,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchServiceV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) @@ -875,6 +881,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchDeploymentV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest 
is\n%v\n, error: %v", u.GetKind(), res, err) @@ -920,6 +929,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchStatefulSetV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), res, err) @@ -1021,6 +1033,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) @@ -1059,6 +1074,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) @@ -1083,6 +1101,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchClusterRoleV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) @@ -1106,6 +1127,9 @@ func CreateOrPatchResource(applyParam 
*ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } err = updater.CreateOrPatchClusterRoleBindingV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML)) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) @@ -1132,6 +1156,9 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + if applyParam.OverrideResource { + originalYAML = "" + } switch u.GetKind() { case setting.ConfigMap: From 5d38f3ff1300339838778044191005d722a6060f Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 15:06:41 +0800 Subject: [PATCH 15/21] remove update image only logic for yaml deploy Signed-off-by: Min Min --- .../jobcontroller/job_deploy.go | 75 +------------------ 1 file changed, 1 insertion(+), 74 deletions(-) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go index 080ff37025..7f6e8ea5d6 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go @@ -281,16 +281,7 @@ func (c *DeployJobCtl) run(ctx context.Context) error { c.ack() // if not only deploy image, we will redeploy service - if !onlyDeployImage(c.jobTaskSpec.DeployContents) { - if err := c.updateSystemService(env, currentYaml, updatedYaml, c.jobTaskSpec.VariableKVs, revision, containers, candidateReplicaOverrides, updateRevision, c.jobTaskSpec.ServiceName, c.jobTaskSpec.OverrideResource); err != nil { - logError(c.job, err.Error(), c.logger) - return err - } - - return nil - } - // if only deploy image, we only patch image. 
- if err := c.updateServiceModuleImages(ctx, resources, env); err != nil { + if err := c.updateSystemService(env, currentYaml, updatedYaml, c.jobTaskSpec.VariableKVs, revision, containers, candidateReplicaOverrides, updateRevision, c.jobTaskSpec.ServiceName, c.jobTaskSpec.OverrideResource); err != nil { logError(c.job, err.Error(), c.logger) return err } @@ -298,42 +289,6 @@ func (c *DeployJobCtl) run(ctx context.Context) error { return nil } -func onlyDeployImage(deployContents []config.DeployContent) bool { - return slices.Contains(deployContents, config.DeployImage) && len(deployContents) == 1 -} - -func reconcileReplicaOverridesForDeploy(currentYaml, candidateYaml string, currentWorkLoads []*commonmodels.WorkLoad) ([]*commonmodels.WorkLoad, error) { - _ = currentYaml - _ = currentWorkLoads - - candidateReplicaMap, err := kube.ExtractWorkloadReplicas(candidateYaml) - if err != nil { - return nil, err - } - - ret := make([]*commonmodels.WorkLoad, 0, len(candidateReplicaMap)) - keys := make([]string, 0, len(candidateReplicaMap)) - for key := range candidateReplicaMap { - keys = append(keys, key) - } - sort.Strings(keys) - - for _, key := range keys { - candidateReplica := candidateReplicaMap[key] - workloadType, workloadName := "", key - if parts := strings.SplitN(key, "/", 2); len(parts) == 2 { - workloadType = kube.NormalizeReplicaWorkloadType(parts[0]) - workloadName = parts[1] - } - ret, err = kube.UpsertWorkLoadsReplicas(ret, workloadType, workloadName, candidateReplica) - if err != nil { - return nil, err - } - } - - return ret, nil -} - func (c *DeployJobCtl) updateSystemService(env *commonmodels.Product, currentYaml, updatedYaml string, variableKVs []*commontypes.RenderVariableKV, revision int, containers []*commonmodels.Container, workLoads []*commonmodels.WorkLoad, updateRevision bool, serviceName string, overrideResource bool) error { @@ -658,34 +613,6 @@ Job: return replaceResources, relatedPodLabels, nil } -func (c *DeployJobCtl) 
updateServiceModuleImages(ctx context.Context, resources []*kube.WorkloadResource, env *commonmodels.Product) error { - jobTaskctx := &joblog.JobLogContext{ - WorkflowCtx: c.workflowCtx, - JobTask: c.job, - } - - errList := new(multierror.Error) - wg := sync.WaitGroup{} - for _, serviceModule := range c.jobTaskSpec.ServiceAndImages { - wg.Add(1) - go func(serviceModule *commonmodels.DeployServiceModule) { - defer wg.Done() - replaceResources, relatedPodLabels, err := UpdateExternalServiceModule(ctx, c.kubeClient, c.clientSet, resources, env, c.jobTaskSpec.ServiceName, serviceModule, "", c.workflowCtx.WorkflowTaskCreatorUsername, jobTaskctx, c.logger) - if err != nil { - errList = multierror.Append(errList, err) - } else { - c.jobTaskSpec.ReplaceResources = append(c.jobTaskSpec.ReplaceResources, replaceResources...) - c.jobTaskSpec.RelatedPodLabels = append(c.jobTaskSpec.RelatedPodLabels, relatedPodLabels...) - } - }(serviceModule) - } - wg.Wait() - if err := errList.ErrorOrNil(); err != nil { - return err - } - return nil -} - // 5.26 temporarily deactivate this function // Because these errors must exist for a short period of time in some cases func workLoadDeployStat(kubeClient client.Client, namespace string, labelMaps []map[string]string, ownerUID string, jobLogCtx *joblog.JobLogContext) error { From c11e56d55ec998b4bbbece6adfae907652382115 Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 15:12:26 +0800 Subject: [PATCH 16/21] added some debug logs Signed-off-by: Min Min --- pkg/microservice/aslan/core/common/service/kube/apply.go | 1 + .../service/workflowcontroller/jobcontroller/job_deploy.go | 5 ++--- pkg/tool/kube/updater/deployment_v2.go | 4 ++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index f9ef29960e..5124f5ffab 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ 
b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -881,6 +881,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } + log.Infof("OverrideResource: %v", applyParam.OverrideResource) if applyParam.OverrideResource { originalYAML = "" } diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go index 7f6e8ea5d6..c86a7d9543 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go @@ -21,10 +21,8 @@ import ( "fmt" "sort" "strings" - "sync" "time" - "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "go.uber.org/zap" "golang.org/x/exp/slices" @@ -226,6 +224,7 @@ func (c *DeployJobCtl) run(ctx context.Context) error { EnvName: c.jobTaskSpec.Env, ServiceName: c.jobTaskSpec.ServiceName, }) + if err != nil { msg := fmt.Sprintf("get current service yaml error: %v", err) logError(c.job, msg, c.logger) @@ -242,7 +241,7 @@ func (c *DeployJobCtl) run(ctx context.Context) error { IgnoreCurrentReplicaOverrides: updateRevision, Containers: containers, } - candidateYaml, revision, resources, err := kube.GenerateRenderedYaml(option) + candidateYaml, revision, _, err := kube.GenerateRenderedYaml(option) if err != nil { msg := fmt.Sprintf("generate service yaml error: %v", err) logError(c.job, msg, c.logger) diff --git a/pkg/tool/kube/updater/deployment_v2.go b/pkg/tool/kube/updater/deployment_v2.go index bb1bb37c96..bfac131684 100644 --- a/pkg/tool/kube/updater/deployment_v2.go +++ b/pkg/tool/kube/updater/deployment_v2.go @@ -262,6 +262,10 @@ func CreateOrPatchDeploymentV2(ctx context.Context, clusterID, namespace, origin return fmt.Errorf("failed to calculate 2-way merge patch: %w", 
err) } + fmt.Printf("originalJSONMutated: %s\n", string(originalJSONMutated)) + fmt.Printf("targetJSONMutated: %s\n", string(targetJSONMutated)) + fmt.Printf("patchBytes: %s\n", string(patchBytes)) + if string(patchBytes) == "{}" { return nil } From 2fd7891f84e534d1a80c14374903253751b67c13 Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 15:31:19 +0800 Subject: [PATCH 17/21] added debug logs for service Signed-off-by: Min Min --- pkg/tool/kube/updater/service_v2.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/tool/kube/updater/service_v2.go b/pkg/tool/kube/updater/service_v2.go index 07c4d6a2a1..d04a84a7b9 100644 --- a/pkg/tool/kube/updater/service_v2.go +++ b/pkg/tool/kube/updater/service_v2.go @@ -181,6 +181,10 @@ func CreateOrPatchServiceV2(ctx context.Context, clusterID, namespace, originalY return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) } + fmt.Printf("originalJSONMutated: %s\n", string(originalJSONMutated)) + fmt.Printf("targetJSONMutated: %s\n", string(targetJSONMutated)) + fmt.Printf("patchBytes: %s\n", string(patchBytes)) + if string(patchBytes) == "{}" { return nil } From 96667cf6cc8af787924a0d547de81358f61302fb Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 16:08:01 +0800 Subject: [PATCH 18/21] add update logic in replace resource instead of a patch Signed-off-by: Min Min --- .../aslan/core/common/service/kube/apply.go | 60 +++++-------------- .../environment/service/common_env_cfg.go | 2 +- .../service/environment_creator.go | 8 +-- .../aslan/core/environment/service/ingress.go | 2 +- pkg/tool/kube/updater/clusterrole_v2.go | 37 +++++++++++- pkg/tool/kube/updater/configmap_v2.go | 19 +++++- pkg/tool/kube/updater/cronjob_v2.go | 43 +++++++++++-- pkg/tool/kube/updater/deployment_v2.go | 22 +++++-- pkg/tool/kube/updater/ingress_v2.go | 43 +++++++++++-- pkg/tool/kube/updater/pod_v2.go | 18 +++++- pkg/tool/kube/updater/pvc_v2.go | 18 +++++- pkg/tool/kube/updater/replicaset_v2.go | 19 +++++- 
pkg/tool/kube/updater/role_v2.go | 19 +++++- pkg/tool/kube/updater/rolebinding_v2.go | 19 +++++- pkg/tool/kube/updater/secret_v2.go | 19 +++++- pkg/tool/kube/updater/service_v2.go | 22 +++++-- pkg/tool/kube/updater/serviceaccount_v2.go | 19 +++++- pkg/tool/kube/updater/statefulset_v2.go | 18 +++++- 18 files changed, 326 insertions(+), 81 deletions(-) diff --git a/pkg/microservice/aslan/core/common/service/kube/apply.go b/pkg/microservice/aslan/core/common/service/kube/apply.go index 5124f5ffab..9d56c4ea58 100644 --- a/pkg/microservice/aslan/core/common/service/kube/apply.go +++ b/pkg/microservice/aslan/core/common/service/kube/apply.go @@ -736,10 +736,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchIngressV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchIngressV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -763,10 +760,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchServiceV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchServiceV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", 
u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -881,11 +875,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - log.Infof("OverrideResource: %v", applyParam.OverrideResource) - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchDeploymentV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) + err = updater.CreateOrPatchDeploymentV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), res, err) errList = multierror.Append(errList, err) @@ -930,10 +920,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchStatefulSetV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) + err = updater.CreateOrPatchStatefulSetV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), res, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1034,10 +1021,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) + err = 
updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1075,10 +1059,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML)) + err = updater.CreateOrPatchCronJobV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(resYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), obj, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1102,10 +1083,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchClusterRoleV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchClusterRoleV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1128,10 +1106,7 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { 
originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } - err = updater.CreateOrPatchClusterRoleBindingV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchClusterRoleBindingV2(context.TODO(), productInfo.ClusterID, originalYAML, string(targetYAML), applyParam.OverrideResource) if err != nil { log.Errorf("Failed to create or update %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) errList = multierror.Append(errList, errors.Wrapf(err, "failed to create or update %s/%s", u.GetKind(), u.GetName())) @@ -1157,27 +1132,24 @@ func CreateOrPatchResource(applyParam *ResourceApplyParam, log *zap.SugaredLogge if curRes, ok := curResourceMap[gvkn]; ok { originalYAML = curRes.Manifest } - if applyParam.OverrideResource { - originalYAML = "" - } switch u.GetKind() { case setting.ConfigMap: - err = updater.CreateOrPatchConfigMapV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchConfigMapV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) case setting.Secret: - err = updater.CreateOrPatchSecretV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchSecretV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) case setting.PersistentVolumeClaim: - err = updater.CreateOrPatchPVCV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchPVCV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) case setting.ServiceAccount: - err = updater.CreateOrPatchServiceAccountV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchServiceAccountV2(context.TODO(), 
productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) case setting.Role: - err = updater.CreateOrPatchRoleV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchRoleV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) case setting.RoleBinding: - err = updater.CreateOrPatchRoleBindingV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchRoleBindingV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) case setting.Pod: - err = updater.CreateOrPatchPodV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchPodV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) case setting.ReplicaSet: - err = updater.CreateOrPatchReplicaSetV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML)) + err = updater.CreateOrPatchReplicaSetV2(context.TODO(), productInfo.ClusterID, namespace, originalYAML, string(targetYAML), applyParam.OverrideResource) } if err != nil { diff --git a/pkg/microservice/aslan/core/environment/service/common_env_cfg.go b/pkg/microservice/aslan/core/environment/service/common_env_cfg.go index 4ea1499c66..8d950d2d6e 100644 --- a/pkg/microservice/aslan/core/environment/service/common_env_cfg.go +++ b/pkg/microservice/aslan/core/environment/service/common_env_cfg.go @@ -305,7 +305,7 @@ func CreateCommonEnvCfg(args *models.CreateUpdateCommonEnvCfgArgs, userName stri return e.ErrUpdateResource.AddErr(err) } - err = updater.CreateOrPatchIngressV2(context.TODO(), product.ClusterID, product.Namespace, "", yamlData) + err = updater.CreateOrPatchIngressV2(context.TODO(), product.ClusterID, product.Namespace, "", yamlData, true) if err 
!= nil { log.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) return e.ErrUpdateResource.AddErr(fmt.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err)) diff --git a/pkg/microservice/aslan/core/environment/service/environment_creator.go b/pkg/microservice/aslan/core/environment/service/environment_creator.go index de691d77ae..41764ad7ee 100644 --- a/pkg/microservice/aslan/core/environment/service/environment_creator.go +++ b/pkg/microservice/aslan/core/environment/service/environment_creator.go @@ -381,13 +381,13 @@ func initEnvConfigSetAction(envName, namespace, productName, userName, clusterID } switch u.GetKind() { case setting.ConfigMap: - err = updater.CreateOrPatchConfigMapV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + err = updater.CreateOrPatchConfigMapV2(context.TODO(), clusterID, namespace, "", string(targetYAML), false) case setting.Ingress: - err = updater.CreateOrPatchIngressV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + err = updater.CreateOrPatchIngressV2(context.TODO(), clusterID, namespace, "", string(targetYAML), false) case setting.Secret: - err = updater.CreateOrPatchSecretV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + err = updater.CreateOrPatchSecretV2(context.TODO(), clusterID, namespace, "", string(targetYAML), false) case setting.PersistentVolumeClaim: - err = updater.CreateOrPatchPVCV2(context.TODO(), clusterID, namespace, "", string(targetYAML)) + err = updater.CreateOrPatchPVCV2(context.TODO(), clusterID, namespace, "", string(targetYAML), false) } if err != nil { log.Errorf("Failed to initEnvConfigSet %s, manifest is\n%v\n, error: %s", u.GetKind(), u, err) diff --git a/pkg/microservice/aslan/core/environment/service/ingress.go b/pkg/microservice/aslan/core/environment/service/ingress.go index e9bab8c85d..b54e1860b0 100644 --- a/pkg/microservice/aslan/core/environment/service/ingress.go 
+++ b/pkg/microservice/aslan/core/environment/service/ingress.go @@ -225,7 +225,7 @@ func UpdateOrCreateIngress(args *models.CreateUpdateCommonEnvCfgArgs, userName s return e.ErrUpdateResource.AddErr(err) } - err = updater.CreateOrPatchIngressV2(context.TODO(), product.ClusterID, product.Namespace, "", yamlData) + err = updater.CreateOrPatchIngressV2(context.TODO(), product.ClusterID, product.Namespace, "", yamlData, false) if err != nil { log.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err) return e.ErrUpdateResource.AddErr(fmt.Errorf("Failed to UpdateOrCreateIngress %s, manifest is\n%v\n, error: %v", u.GetKind(), u, err)) diff --git a/pkg/tool/kube/updater/clusterrole_v2.go b/pkg/tool/kube/updater/clusterrole_v2.go index a857ebffb7..7f08c4532f 100644 --- a/pkg/tool/kube/updater/clusterrole_v2.go +++ b/pkg/tool/kube/updater/clusterrole_v2.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -64,12 +65,16 @@ func DeleteClusterRolesV2(ctx context.Context, clusterID string, opts ...DeleteO } // CreateOrPatchClusterRoleV2 is cluster-scoped (no namespace). 
-func CreateOrPatchClusterRoleV2(ctx context.Context, clusterID, originalYAML, targetYAML string) error { +func CreateOrPatchClusterRoleV2(ctx context.Context, clusterID, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -117,6 +122,18 @@ func CreateOrPatchClusterRoleV2(ctx context.Context, clusterID, originalYAML, ta return fmt.Errorf("failed to check clusterrole existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get clusterrole for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.RbacV1().ClusterRoles().Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.ClusterRole{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) @@ -141,12 +158,16 @@ func CreateOrPatchClusterRoleV2(ctx context.Context, clusterID, originalYAML, ta } // CreateOrPatchClusterRoleBindingV2 is cluster-scoped (no namespace). 
-func CreateOrPatchClusterRoleBindingV2(ctx context.Context, clusterID, originalYAML, targetYAML string) error { +func CreateOrPatchClusterRoleBindingV2(ctx context.Context, clusterID, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -194,6 +215,18 @@ func CreateOrPatchClusterRoleBindingV2(ctx context.Context, clusterID, originalY return fmt.Errorf("failed to check clusterrolebinding existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get clusterrolebinding for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.RbacV1().ClusterRoleBindings().Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.ClusterRoleBinding{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/configmap_v2.go b/pkg/tool/kube/updater/configmap_v2.go index a96bae71f6..39c6c582a3 100644 --- a/pkg/tool/kube/updater/configmap_v2.go +++ b/pkg/tool/kube/updater/configmap_v2.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -117,12 +118,16 @@ func UpdateConfigMapV2(ctx context.Context, clusterID, namespace string, cm *cor } 
// CreateOrPatchConfigMapV2 implements a 2-way merge patch for ConfigMap. -func CreateOrPatchConfigMapV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchConfigMapV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -172,6 +177,18 @@ func CreateOrPatchConfigMapV2(ctx context.Context, clusterID, namespace, origina return fmt.Errorf("failed to check configmap existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get configmap for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.CoreV1().ConfigMaps(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.ConfigMap{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/cronjob_v2.go b/pkg/tool/kube/updater/cronjob_v2.go index c389175d92..781318e156 100644 --- a/pkg/tool/kube/updater/cronjob_v2.go +++ b/pkg/tool/kube/updater/cronjob_v2.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" @@ -91,7 +92,7 @@ func DeleteCronJobsV2(ctx 
context.Context, clusterID, namespace string, opts ... // CreateOrPatchCronJobV2 implements a 2-way merge patch for CronJob, similar to CreateOrPatchDeploymentV2. // On clusters < 1.21, it falls back to batch/v1beta1 API. -func CreateOrPatchCronJobV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchCronJobV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) @@ -103,12 +104,16 @@ func CreateOrPatchCronJobV2(ctx context.Context, clusterID, namespace, originalY } if kubeclient.VersionLessThan121(version) { - return createOrPatchCronJobBeta(ctx, c, namespace, originalYAML, targetYAML) + return createOrPatchCronJobBeta(ctx, c, namespace, originalYAML, targetYAML, resourceOverride) } - return createOrPatchCronJobV1(ctx, c, namespace, originalYAML, targetYAML) + return createOrPatchCronJobV1(ctx, c, namespace, originalYAML, targetYAML, resourceOverride) } -func createOrPatchCronJobV1(ctx context.Context, c *kubernetes.Clientset, namespace, originalYAML, targetYAML string) error { +func createOrPatchCronJobV1(ctx context.Context, c *kubernetes.Clientset, namespace, originalYAML, targetYAML string, resourceOverride bool) error { + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -158,6 +163,18 @@ func createOrPatchCronJobV1(ctx context.Context, c *kubernetes.Clientset, namesp return fmt.Errorf("failed to check cronjob existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get cronjob 
for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.BatchV1().CronJobs(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &batchv1.CronJob{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) @@ -177,7 +194,11 @@ func createOrPatchCronJobV1(ctx context.Context, c *kubernetes.Clientset, namesp return nil } -func createOrPatchCronJobBeta(ctx context.Context, c *kubernetes.Clientset, namespace, originalYAML, targetYAML string) error { +func createOrPatchCronJobBeta(ctx context.Context, c *kubernetes.Clientset, namespace, originalYAML, targetYAML string, resourceOverride bool) error { + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -227,6 +248,18 @@ func createOrPatchCronJobBeta(ctx context.Context, c *kubernetes.Clientset, name return fmt.Errorf("failed to check cronjob existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.BatchV1beta1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get cronjob (v1beta1) for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.BatchV1beta1().CronJobs(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &batchv1beta1.CronJob{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/deployment_v2.go b/pkg/tool/kube/updater/deployment_v2.go index bfac131684..31f1287f5b 100644 --- a/pkg/tool/kube/updater/deployment_v2.go +++ 
b/pkg/tool/kube/updater/deployment_v2.go @@ -202,12 +202,16 @@ func DeleteDeploymentAndWaitV2(ctx context.Context, clusterID, namespace string, // CreateOrPatchDeploymentV2 is used when the YAML is fully controlled by this system, it implements a 3-way merge patch for the deployment. // If we are simply editing the deployment, use UpdateDeploymentV2 instead. -func CreateOrPatchDeploymentV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchDeploymentV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -257,15 +261,23 @@ func CreateOrPatchDeploymentV2(ctx context.Context, clusterID, namespace, origin return fmt.Errorf("failed to check deployment existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.AppsV1().Deployments(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &appsv1.Deployment{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) } - fmt.Printf("originalJSONMutated: %s\n", string(originalJSONMutated)) - fmt.Printf("targetJSONMutated: %s\n", string(targetJSONMutated)) - fmt.Printf("patchBytes: %s\n", string(patchBytes)) - if string(patchBytes) == 
"{}" { return nil } diff --git a/pkg/tool/kube/updater/ingress_v2.go b/pkg/tool/kube/updater/ingress_v2.go index 98d3b2c042..d690e4affb 100644 --- a/pkg/tool/kube/updater/ingress_v2.go +++ b/pkg/tool/kube/updater/ingress_v2.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" kubeclient "github.com/koderover/zadig/v2/pkg/shared/kube/client" @@ -78,7 +79,7 @@ func DeleteIngressesV2(ctx context.Context, clusterID, namespace string, opts .. return nil } -func CreateOrPatchIngressV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchIngressV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) @@ -90,12 +91,16 @@ func CreateOrPatchIngressV2(ctx context.Context, clusterID, namespace, originalY } if kubeclient.VersionLessThan122(version) { - return createOrPatchIngressBeta(ctx, c, namespace, originalYAML, targetYAML) + return createOrPatchIngressBeta(ctx, c, namespace, originalYAML, targetYAML, resourceOverride) } - return createOrPatchIngressV1(ctx, c, namespace, originalYAML, targetYAML) + return createOrPatchIngressV1(ctx, c, namespace, originalYAML, targetYAML, resourceOverride) } -func createOrPatchIngressV1(ctx context.Context, c kubernetes.Interface, namespace, originalYAML, targetYAML string) error { +func createOrPatchIngressV1(ctx context.Context, c kubernetes.Interface, namespace, originalYAML, targetYAML string, resourceOverride bool) error { + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -145,6 +150,18 @@ func 
createOrPatchIngressV1(ctx context.Context, c kubernetes.Interface, namespa return fmt.Errorf("failed to check ingress existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.NetworkingV1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get ingress for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.NetworkingV1().Ingresses(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &networkingv1.Ingress{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) @@ -168,7 +185,11 @@ func createOrPatchIngressV1(ctx context.Context, c kubernetes.Interface, namespa return nil } -func createOrPatchIngressBeta(ctx context.Context, c kubernetes.Interface, namespace, originalYAML, targetYAML string) error { +func createOrPatchIngressBeta(ctx context.Context, c kubernetes.Interface, namespace, originalYAML, targetYAML string, resourceOverride bool) error { + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -218,6 +239,18 @@ func createOrPatchIngressBeta(ctx context.Context, c kubernetes.Interface, names return fmt.Errorf("failed to check ingress existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.ExtensionsV1beta1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get ingress (v1beta1) for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.ExtensionsV1beta1().Ingresses(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return 
err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &extensionsv1beta1.Ingress{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/pod_v2.go b/pkg/tool/kube/updater/pod_v2.go index d157700066..02881964f1 100644 --- a/pkg/tool/kube/updater/pod_v2.go +++ b/pkg/tool/kube/updater/pod_v2.go @@ -110,12 +110,16 @@ func UpdatePodV2(ctx context.Context, clusterID, namespace, name string, mutatio return err } -func CreateOrPatchPodV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchPodV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -165,6 +169,18 @@ func CreateOrPatchPodV2(ctx context.Context, clusterID, namespace, originalYAML, return fmt.Errorf("failed to check pod existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get pod for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.CoreV1().Pods(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.Pod{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/pvc_v2.go b/pkg/tool/kube/updater/pvc_v2.go index 
bad637dc81..e82e60950a 100644 --- a/pkg/tool/kube/updater/pvc_v2.go +++ b/pkg/tool/kube/updater/pvc_v2.go @@ -102,12 +102,16 @@ func CreatePVCV2(ctx context.Context, clusterID, namespace string, pvc *corev1.P return nil } -func CreateOrPatchPVCV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchPVCV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -157,6 +161,18 @@ func CreateOrPatchPVCV2(ctx context.Context, clusterID, namespace, originalYAML, return fmt.Errorf("failed to check PVC existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get PVC for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.CoreV1().PersistentVolumeClaims(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.PersistentVolumeClaim{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/replicaset_v2.go b/pkg/tool/kube/updater/replicaset_v2.go index 2849481d6f..026d6e7614 100644 --- a/pkg/tool/kube/updater/replicaset_v2.go +++ b/pkg/tool/kube/updater/replicaset_v2.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" 
+ "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -63,12 +64,16 @@ func DeleteReplicaSetsV2(ctx context.Context, clusterID, namespace string, opts return util.IgnoreNotFoundError(err) } -func CreateOrPatchReplicaSetV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchReplicaSetV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -118,6 +123,18 @@ func CreateOrPatchReplicaSetV2(ctx context.Context, clusterID, namespace, origin return fmt.Errorf("failed to check replicaset existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get replicaset for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.AppsV1().ReplicaSets(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &appsv1.ReplicaSet{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/role_v2.go b/pkg/tool/kube/updater/role_v2.go index 3acec95a7d..4495753f06 100644 --- a/pkg/tool/kube/updater/role_v2.go +++ b/pkg/tool/kube/updater/role_v2.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" + 
"k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -64,12 +65,16 @@ func DeleteRolesV2(ctx context.Context, clusterID, namespace string, opts ...Del } // CreateOrPatchRoleV2 implements a 2-way merge patch for Role. -func CreateOrPatchRoleV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchRoleV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -119,6 +124,18 @@ func CreateOrPatchRoleV2(ctx context.Context, clusterID, namespace, originalYAML return fmt.Errorf("failed to check role existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.RbacV1().Roles(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get role for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.RbacV1().Roles(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.Role{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/rolebinding_v2.go b/pkg/tool/kube/updater/rolebinding_v2.go index 25c870bb27..8665e3c9e4 100644 --- a/pkg/tool/kube/updater/rolebinding_v2.go +++ b/pkg/tool/kube/updater/rolebinding_v2.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" + 
"k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" @@ -64,12 +65,16 @@ func DeleteRoleBindingsV2(ctx context.Context, clusterID, namespace string, opts } // CreateOrPatchRoleBindingV2 implements a 2-way merge patch for RoleBinding. -func CreateOrPatchRoleBindingV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchRoleBindingV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -119,6 +124,18 @@ func CreateOrPatchRoleBindingV2(ctx context.Context, clusterID, namespace, origi return fmt.Errorf("failed to check role binding existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.RbacV1().RoleBindings(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get role binding for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.RbacV1().RoleBindings(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &rbacv1.RoleBinding{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/secret_v2.go b/pkg/tool/kube/updater/secret_v2.go index 046edf0525..27203c8710 100644 --- a/pkg/tool/kube/updater/secret_v2.go +++ b/pkg/tool/kube/updater/secret_v2.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" 
"k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -106,12 +107,16 @@ func CreateOrUpdateSecretV2(ctx context.Context, clusterID string, s *corev1.Sec } // CreateOrPatchSecretV2 implements a 3-way merge patch for Secret. -func CreateOrPatchSecretV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchSecretV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -161,6 +166,18 @@ func CreateOrPatchSecretV2(ctx context.Context, clusterID, namespace, originalYA return fmt.Errorf("failed to check secret existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get secret for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.CoreV1().Secrets(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.Secret{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/service_v2.go b/pkg/tool/kube/updater/service_v2.go index d04a84a7b9..6cd75f640e 100644 --- a/pkg/tool/kube/updater/service_v2.go +++ b/pkg/tool/kube/updater/service_v2.go @@ -121,12 +121,16 @@ func UpdateServiceV2(ctx context.Context, clusterID, 
namespace, serviceName stri } // CreateOrPatchServiceV2 implements a 2-way merge patch for Service. -func CreateOrPatchServiceV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchServiceV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -176,15 +180,23 @@ func CreateOrPatchServiceV2(ctx context.Context, clusterID, namespace, originalY return fmt.Errorf("failed to check service existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get service for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.CoreV1().Services(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.Service{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) } - fmt.Printf("originalJSONMutated: %s\n", string(originalJSONMutated)) - fmt.Printf("targetJSONMutated: %s\n", string(targetJSONMutated)) - fmt.Printf("patchBytes: %s\n", string(patchBytes)) - if string(patchBytes) == "{}" { return nil } diff --git a/pkg/tool/kube/updater/serviceaccount_v2.go b/pkg/tool/kube/updater/serviceaccount_v2.go index afc93b00ec..450bbebdc4 100644 --- a/pkg/tool/kube/updater/serviceaccount_v2.go +++ b/pkg/tool/kube/updater/serviceaccount_v2.go @@ -26,6 +26,7 @@ import ( 
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/retry" "sigs.k8s.io/yaml" "github.com/koderover/zadig/v2/pkg/tool/clientmanager" @@ -87,12 +88,16 @@ func CreateServiceAccountV2(ctx context.Context, clusterID, namespace string, sa } // CreateOrPatchServiceAccountV2 implements a 2-way merge patch for ServiceAccount. -func CreateOrPatchServiceAccountV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchServiceAccountV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -142,6 +147,18 @@ func CreateOrPatchServiceAccountV2(ctx context.Context, clusterID, namespace, or return fmt.Errorf("failed to check service account existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get service account for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.CoreV1().ServiceAccounts(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &corev1.ServiceAccount{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) diff --git a/pkg/tool/kube/updater/statefulset_v2.go b/pkg/tool/kube/updater/statefulset_v2.go index 33ff1690e3..4df3d37a78 100644 --- 
a/pkg/tool/kube/updater/statefulset_v2.go +++ b/pkg/tool/kube/updater/statefulset_v2.go @@ -202,12 +202,16 @@ func DeleteStatefulSetAndWaitV2(ctx context.Context, clusterID, namespace string // CreateOrPatchStatefulSetV2 is used when the YAML is fully controlled by this system, it implements a 2-way merge patch for the statefulset. // If we are simply editing the statefulset, use UpdateStatefulSetV2 instead. -func CreateOrPatchStatefulSetV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string) error { +func CreateOrPatchStatefulSetV2(ctx context.Context, clusterID, namespace, originalYAML, targetYAML string, resourceOverride bool) error { c, err := clientmanager.NewKubeClientManager().GetKubernetesClientSet(clusterID) if err != nil { return fmt.Errorf("failed to get kube client: %w", err) } + if resourceOverride { + originalYAML = "" + } + targetJSON, err := yaml.YAMLToJSON([]byte(targetYAML)) if err != nil { return fmt.Errorf("failed to convert target YAML to JSON: %w", err) @@ -257,6 +261,18 @@ func CreateOrPatchStatefulSetV2(ctx context.Context, clusterID, namespace, origi return fmt.Errorf("failed to check statefulset existence: %w", err) } + if resourceOverride { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + existing, err := c.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get statefulset for replace: %w", err) + } + targetObj.ResourceVersion = existing.ResourceVersion + _, err = c.AppsV1().StatefulSets(namespace).Update(ctx, &targetObj, metav1.UpdateOptions{}) + return err + }) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(originalJSONMutated, targetJSONMutated, &appsv1.StatefulSet{}) if err != nil { return fmt.Errorf("failed to calculate 2-way merge patch: %w", err) From 712d7ce210a31e440396046080e316c824853c1b Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 16:35:13 +0800 Subject: [PATCH 19/21] rebase main 
Signed-off-by: Min Min --- .../jobcontroller/job_deploy.go | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go index c86a7d9543..c847305928 100644 --- a/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go +++ b/pkg/microservice/aslan/core/common/service/workflowcontroller/jobcontroller/job_deploy.go @@ -288,6 +288,38 @@ func (c *DeployJobCtl) run(ctx context.Context) error { return nil } +func reconcileReplicaOverridesForDeploy(currentYaml, candidateYaml string, currentWorkLoads []*commonmodels.WorkLoad) ([]*commonmodels.WorkLoad, error) { + _ = currentYaml + _ = currentWorkLoads + + candidateReplicaMap, err := kube.ExtractWorkloadReplicas(candidateYaml) + if err != nil { + return nil, err + } + + ret := make([]*commonmodels.WorkLoad, 0, len(candidateReplicaMap)) + keys := make([]string, 0, len(candidateReplicaMap)) + for key := range candidateReplicaMap { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + candidateReplica := candidateReplicaMap[key] + workloadType, workloadName := "", key + if parts := strings.SplitN(key, "/", 2); len(parts) == 2 { + workloadType = kube.NormalizeReplicaWorkloadType(parts[0]) + workloadName = parts[1] + } + ret, err = kube.UpsertWorkLoadsReplicas(ret, workloadType, workloadName, candidateReplica) + if err != nil { + return nil, err + } + } + + return ret, nil +} + func (c *DeployJobCtl) updateSystemService(env *commonmodels.Product, currentYaml, updatedYaml string, variableKVs []*commontypes.RenderVariableKV, revision int, containers []*commonmodels.Container, workLoads []*commonmodels.WorkLoad, updateRevision bool, serviceName string, overrideResource bool) error { From fe442c5260bf49e3997ead316022c00b70f1fc10 Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 
Mar 2026 16:42:28 +0800 Subject: [PATCH 20/21] fix merge problems Signed-off-by: Min Min --- .../aslan/core/environment/service/service_scale.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/microservice/aslan/core/environment/service/service_scale.go b/pkg/microservice/aslan/core/environment/service/service_scale.go index dc2b45e224..eb6a196a95 100644 --- a/pkg/microservice/aslan/core/environment/service/service_scale.go +++ b/pkg/microservice/aslan/core/environment/service/service_scale.go @@ -51,7 +51,7 @@ func Scale(args *ScaleArgs, updateBy string, logger *zap.SugaredLogger) error { } if !project.IsK8sYamlProduct() { - return scaleWorkload(prod.Namespace, args.Type, args.Name, args.Number, kubeClient, logger) + return scaleWorkload(context.Background(), prod.ClusterID, prod.Namespace, args.Type, args.Name, args.Number, logger) } mutexAutoUpdate := cache.NewRedisLock(updateMultipleProductLockKey(args.ProductName)) @@ -113,7 +113,7 @@ func Scale(args *ScaleArgs, updateBy string, logger *zap.SugaredLogger) error { } if liveReplica != targetReplica { - if err := scaleWorkload(prod.Namespace, args.Type, args.Name, args.Number, kubeClient, logger); err != nil { + if err := scaleWorkload(context.TODO(), prod.ClusterID, prod.Namespace, args.Type, args.Name, args.Number, logger); err != nil { return e.ErrScaleService.AddErr(err) } } @@ -164,20 +164,20 @@ func OpenAPIScale(req *OpenAPIScaleServiceReq, updateBy string, logger *zap.Suga return Scale(args, updateBy, logger) } -func scaleWorkload(namespace, workloadType, workloadName string, replicas int, kubeClient client.Client, logger *zap.SugaredLogger) error { +func scaleWorkload(ctx context.Context, clusterID, namespace, workloadType, workloadName string, replicas int, logger *zap.SugaredLogger) error { switch kube.NormalizeReplicaWorkloadType(workloadType) { case setting.Deployment: - if err := updater.ScaleDeployment(namespace, workloadName, replicas, kubeClient); err != nil { + 
if err := updater.ScaleDeploymentV2(ctx, clusterID, namespace, workloadName, replicas); err != nil { logger.Errorf("failed to scale %s/deployment/%s to %d", namespace, workloadName, replicas) return err } case setting.StatefulSet: - if err := updater.ScaleStatefulSet(namespace, workloadName, replicas, kubeClient); err != nil { + if err := updater.ScaleStatefulSetV2(ctx, clusterID, namespace, workloadName, replicas); err != nil { logger.Errorf("failed to scale %s/statefulset/%s to %d", namespace, workloadName, replicas) return err } case setting.CloneSet: - if err := updater.ScaleCloneSet(namespace, workloadName, replicas, kubeClient); err != nil { + if err := updater.ScaleCloneSetV2(ctx, clusterID, namespace, workloadName, replicas); err != nil { logger.Errorf("failed to scale %s/cloneset/%s to %d", namespace, workloadName, replicas) return err } From aedc58700884188fb884c4642eb687cbcb6f035a Mon Sep 17 00:00:00 2001 From: Min Min Date: Fri, 20 Mar 2026 18:06:43 +0800 Subject: [PATCH 21/21] debug Signed-off-by: Min Min --- pkg/tool/kube/updater/deployment_v2.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/tool/kube/updater/deployment_v2.go b/pkg/tool/kube/updater/deployment_v2.go index 31f1287f5b..7b06ca408d 100644 --- a/pkg/tool/kube/updater/deployment_v2.go +++ b/pkg/tool/kube/updater/deployment_v2.go @@ -330,7 +330,7 @@ func UpdateDeploymentImageV2(ctx context.Context, clusterID, namespace, deployme "spec": map[string]interface{}{ "template": map[string]interface{}{ "spec": map[string]interface{}{ - "initContainers": []map[string]interface{}{ + "containers": []map[string]interface{}{ { "name": containerName, "image": newImage, @@ -371,7 +371,7 @@ func UpdateDeploymentInitImageV2(ctx context.Context, clusterID, namespace, depl "spec": map[string]interface{}{ "template": map[string]interface{}{ "spec": map[string]interface{}{ - "containers": []map[string]interface{}{ + "initContainers": []map[string]interface{}{ { "name": containerName, 
"image": newImage,