diff --git a/docs/primitives.md b/docs/primitives.md index dc2d4c2a..73df42da 100644 --- a/docs/primitives.md +++ b/docs/primitives.md @@ -129,6 +129,7 @@ Editors provide scoped, typed APIs for modifying specific parts of a resource: | `ContainerEditor` | Environment variables, arguments, resource limits, ports | | `PodSpecEditor` | Volumes, tolerations, node selectors, service account, security context | | `DeploymentSpecEditor` | Replicas, update strategy, label selectors | +| `DaemonSetSpecEditor` | Update strategy, min ready seconds, revision history limit | | `ConfigMapDataEditor` | `.data` entries — set, remove, deep-merge YAML patches, raw access | | `ObjectMetaEditor` | Labels and annotations on any Kubernetes object | @@ -154,6 +155,7 @@ have been applied. This means a single mutation can safely add a container and t | Primitive | Category | Documentation | | --------------------------- | -------- | ----------------------------------------- | | `pkg/primitives/deployment` | Workload | [deployment.md](primitives/deployment.md) | +| `pkg/primitives/daemonset` | Workload | [daemonset.md](primitives/daemonset.md) | | `pkg/primitives/configmap` | Static | [configmap.md](primitives/configmap.md) | ## Usage Examples diff --git a/docs/primitives/daemonset.md b/docs/primitives/daemonset.md new file mode 100644 index 00000000..33407503 --- /dev/null +++ b/docs/primitives/daemonset.md @@ -0,0 +1,250 @@ +# DaemonSet Primitive + +The `daemonset` primitive is the framework's built-in workload abstraction for managing Kubernetes `DaemonSet` +resources. It integrates fully with the component lifecycle and provides a rich mutation API for managing containers, +pod specs, and metadata. 
+ +## Capabilities + +| Capability | Detail | +| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | +| **Health tracking** | Verifies `ObservedGeneration` matches `Generation` before evaluating `NumberReady`; reports `Healthy`, `Creating`, `Updating`, or `Scaling` | +| **Graceful rollouts** | Reports rollout progress via `GraceStatus` for use with component-level grace periods (for example, configured with `WithGracePeriod`) | +| **Suspension** | Deletes the DaemonSet on suspend; reports `Suspended` | +| **Mutation pipeline** | Typed editors for metadata, DaemonSet spec, pod spec, and containers | +| **Flavors** | Preserves externally-managed fields (labels, annotations, pod template metadata) | + +## Building a DaemonSet Primitive + +```go +import "github.com/sourcehawk/operator-component-framework/pkg/primitives/daemonset" + +base := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "log-collector", + Namespace: owner.Namespace, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "log-collector"}, + }, + Template: corev1.PodTemplateSpec{ + // baseline pod template + }, + }, +} + +resource, err := daemonset.NewBuilder(base). + WithFieldApplicationFlavor(daemonset.PreserveCurrentLabels). + WithMutation(MyFeatureMutation(owner.Spec.Version)). + Build() +``` + +## Mutations + +Mutations are the primary mechanism for modifying a `DaemonSet` beyond its baseline. Each mutation is a named function +that receives a `*Mutator` and records edit intent through typed editors. + +The `Feature` field controls when a mutation applies. Leaving it nil applies the mutation unconditionally. 
A feature +with no version constraints and no `When()` conditions is also always enabled: + +```go +func MyFeatureMutation(version string) daemonset.Mutation { + return daemonset.Mutation{ + Name: "my-feature", + Feature: feature.NewResourceFeature(version, nil), // always enabled + Mutate: func(m *daemonset.Mutator) error { + // record edits here + return nil + }, + } +} +``` + +Mutations are applied in the order they are registered with the builder. If one mutation depends on a change made by +another, register the dependency first. + +### Boolean-gated mutations + +Use `When(bool)` to gate a mutation on a runtime condition: + +```go +func MonitoringMutation(version string, enabled bool) daemonset.Mutation { + return daemonset.Mutation{ + Name: "monitoring", + Feature: feature.NewResourceFeature(version, nil).When(enabled), + Mutate: func(m *daemonset.Mutator) error { + m.EnsureContainer(corev1.Container{ + Name: "metrics-exporter", + Image: "prom/node-exporter:v1.3.1", + }) + return nil + }, + } +} +``` + +## Internal Mutation Ordering + +Within a single mutation, edit operations are grouped into categories and applied in a fixed sequence regardless of the +order they are recorded. This ensures structural consistency across mutations. 
+ +| Step | Category | What it affects | +| ---- | --------------------------- | ----------------------------------------------------------------------- | +| 1 | DaemonSet metadata edits | Labels and annotations on the `DaemonSet` object | +| 2 | DaemonSetSpec edits | Update strategy, min ready seconds, revision history limit | +| 3 | Pod template metadata edits | Labels and annotations on the pod template | +| 4 | Pod spec edits | Volumes, tolerations, node selectors, service account, security context | +| 5 | Regular container presence | Adding or removing containers from `spec.template.spec.containers` | +| 6 | Regular container edits | Env vars, args, resources (snapshot taken after step 5) | +| 7 | Init container presence | Adding or removing containers from `spec.template.spec.initContainers` | +| 8 | Init container edits | Env vars, args, resources (snapshot taken after step 7) | + +Container edits (steps 6 and 8) are evaluated against a snapshot taken _after_ presence operations in the same mutation. +This means a single mutation can add a container and then configure it without selector resolution issues. + +## Editors + +### DaemonSetSpecEditor + +Controls DaemonSet-level settings via `m.EditDaemonSetSpec`. + +Available methods: `SetUpdateStrategy`, `SetMinReadySeconds`, `SetRevisionHistoryLimit`, `Raw`. + +```go +m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.SetMinReadySeconds(30) + e.SetRevisionHistoryLimit(5) + return nil +}) +``` + +For fields not covered by the typed API, use `Raw()`: + +```go +m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.Raw().UpdateStrategy = appsv1.DaemonSetUpdateStrategy{ + Type: appsv1.RollingUpdateDaemonSetStrategyType, + } + return nil +}) +``` + +### PodSpecEditor + +Manages pod-level configuration via `m.EditPodSpec`. 
+ +Available methods: `SetServiceAccountName`, `EnsureVolume`, `RemoveVolume`, `EnsureToleration`, `RemoveTolerations`, +`EnsureNodeSelector`, `RemoveNodeSelector`, `EnsureImagePullSecret`, `RemoveImagePullSecret`, `SetPriorityClassName`, +`SetHostNetwork`, `SetHostPID`, `SetHostIPC`, `SetSecurityContext`, `Raw`. + +```go +m.EditPodSpec(func(e *editors.PodSpecEditor) error { + e.SetServiceAccountName("log-collector-sa") + e.EnsureVolume(corev1.Volume{ + Name: "varlog", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{Path: "/var/log"}, + }, + }) + return nil +}) +``` + +### ContainerEditor + +Modifies individual containers via `m.EditContainers` or `m.EditInitContainers`. Always used in combination with a +[selector](../primitives.md#container-selectors). + +Available methods: `EnsureEnvVar`, `EnsureEnvVars`, `RemoveEnvVar`, `RemoveEnvVars`, `EnsureArg`, `EnsureArgs`, +`RemoveArg`, `RemoveArgs`, `SetResourceLimit`, `SetResourceRequest`, `SetResources`, `Raw`. + +```go +m.EditContainers(selectors.ContainerNamed("collector"), func(e *editors.ContainerEditor) error { + e.EnsureEnvVar(corev1.EnvVar{Name: "LOG_LEVEL", Value: "info"}) + e.SetResourceLimit(corev1.ResourceCPU, resource.MustParse("200m")) + return nil +}) +``` + +### ObjectMetaEditor + +Modifies labels and annotations. Use `m.EditObjectMetadata` to target the `DaemonSet` object itself, or +`m.EditPodTemplateMetadata` to target the pod template. + +Available methods: `EnsureLabel`, `RemoveLabel`, `EnsureAnnotation`, `RemoveAnnotation`, `Raw`. + +```go +m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error { + e.EnsureLabel("app.kubernetes.io/version", version) + return nil +}) +``` + +### Raw Escape Hatch + +All editors provide a `.Raw()` method for direct access to the underlying Kubernetes struct when the typed API is +insufficient. 
+ +## Convenience Methods + +The `Mutator` also exposes convenience wrappers that target all containers at once: + +| Method | Equivalent to | +| ----------------------------- | ------------------------------------------------------------- | +| `EnsureContainerEnvVar(ev)` | `EditContainers(AllContainers(), ...)` → `EnsureEnvVar(ev)` | +| `RemoveContainerEnvVar(name)` | `EditContainers(AllContainers(), ...)` → `RemoveEnvVar(name)` | +| `EnsureContainerArg(arg)` | `EditContainers(AllContainers(), ...)` → `EnsureArg(arg)` | +| `RemoveContainerArg(arg)` | `EditContainers(AllContainers(), ...)` → `RemoveArg(arg)` | + +## Suspension + +DaemonSets have no replicas field, so there is no clean in-place pause mechanism. By default, the DaemonSet is +**deleted** when the component is suspended and recreated when unsuspended. + +- `DefaultDeleteOnSuspendHandler` returns `true` +- `DefaultSuspendMutationHandler` is a no-op +- `DefaultSuspensionStatusHandler` always reports `Suspended` with reason `"DaemonSet deleted on suspend"` + +Override these handlers via `WithCustomSuspendDeletionDecision`, `WithCustomSuspendMutation`, and +`WithCustomSuspendStatus` if a different suspension strategy is required. + +## Status Handlers + +### ConvergingStatus + +`DefaultConvergingStatusHandler` considers a DaemonSet ready when `Status.NumberReady >= Status.DesiredNumberScheduled` +and `DesiredNumberScheduled > 0`. When `DesiredNumberScheduled` is zero (no matching nodes) and the controller has +observed the current generation (`ObservedGeneration >= Generation`), the DaemonSet is considered converged with the +reason "No nodes match the DaemonSet node selector". 
+ +### GraceStatus + +`DefaultGraceStatusHandler` categorizes health as: + +| Status | Condition | +| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `Healthy` | `DesiredNumberScheduled == 0` and `ObservedGeneration >= Generation` — no nodes match the selector | +| `Degraded` | `DesiredNumberScheduled == 0` but controller has not observed latest generation, or `DesiredNumberScheduled > 0 && NumberReady >= 1` but below desired | +| `Down` | `DesiredNumberScheduled > 0 && NumberReady == 0` | + +The `Healthy` status for zero desired pods reflects that having no matching nodes is a valid configuration state, not a +failure. The generation check ensures the controller has observed the latest spec before declaring health. + +## Guidance + +**`Feature: nil` applies unconditionally.** Omit `Feature` (leave it nil) for mutations that should always run. Use +`feature.NewResourceFeature(version, constraints)` when version-based gating is needed, and chain `.When(bool)` for +boolean conditions. + +**Register mutations in dependency order.** If mutation B relies on a container added by mutation A, register A first. +The internal ordering within each mutation handles intra-mutation dependencies automatically. + +**Prefer `EnsureContainer` over direct slice manipulation.** The mutator tracks presence operations so that selectors in +the same mutation resolve correctly and reconciliation remains idempotent. + +**Use selectors for precision.** Targeting `AllContainers()` when you only mean to modify the primary container can +cause unexpected behavior if sidecar containers are present. + +**DaemonSets are node-scoped.** Unlike Deployments, DaemonSets run one pod per qualifying node. Use node selectors, +tolerations, and affinities in the pod spec to control which nodes run the DaemonSet pods. 
diff --git a/examples/daemonset-primitive/README.md b/examples/daemonset-primitive/README.md new file mode 100644 index 00000000..9dcc2acb --- /dev/null +++ b/examples/daemonset-primitive/README.md @@ -0,0 +1,37 @@ +# DaemonSet Primitive Example + +This example demonstrates the usage of the `daemonset` primitive within the operator component framework. It shows how +to manage a Kubernetes DaemonSet as a component of a larger application, utilizing features like: + +- **Base Construction**: Initializing a DaemonSet with basic metadata and spec. +- **Feature Mutations**: Applying version-gated or conditional changes (sidecars, env vars, annotations) using the + `Mutator`. +- **Field Flavors**: Preserving labels and annotations that might be managed by external tools (e.g., ArgoCD, manual + edits). +- **Suspension**: Demonstrating the delete-on-suspend behavior unique to DaemonSets. +- **Data Extraction**: Harvesting information from the reconciled resource. + +## Directory Structure + +- `app/`: Defines the mock `ExampleApp` CRD and the controller that uses the component framework. +- `features/`: Contains modular feature definitions: + - `mutations.go`: sidecar injection, env vars, and version-based image updates. + - `flavors.go`: usage of `FieldApplicationFlavor` to preserve fields. +- `resources/`: Contains the central `NewDaemonSetResource` factory that assembles all features using the + `daemonset.Builder`. +- `main.go`: A standalone entry point that demonstrates a single reconciliation loop using a fake client. + +## Running the Example + +You can run this example directly using `go run`: + +```bash +go run examples/daemonset-primitive/main.go +``` + +This will: + +1. Initialize a fake Kubernetes client. +2. Create an `ExampleApp` owner object. +3. Reconcile the `ExampleApp` components through multiple spec changes. +4. Print the resulting status conditions. 
diff --git a/examples/daemonset-primitive/app/controller.go b/examples/daemonset-primitive/app/controller.go new file mode 100644 index 00000000..a44f9427 --- /dev/null +++ b/examples/daemonset-primitive/app/controller.go @@ -0,0 +1,54 @@ +// Package app provides a sample controller using the daemonset primitive. +package app + +import ( + "context" + + "github.com/sourcehawk/operator-component-framework/pkg/component" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ExampleController reconciles an ExampleApp object using the component framework. +type ExampleController struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + Metrics component.Recorder + + // NewDaemonSetResource is a factory function to create the daemonset resource. + // This allows us to inject the resource construction logic. + NewDaemonSetResource func(*ExampleApp) (component.Resource, error) +} + +// Reconcile performs the reconciliation for a single ExampleApp. +func (r *ExampleController) Reconcile(ctx context.Context, owner *ExampleApp) error { + // 1. Build the daemonset resource for this owner. + dsResource, err := r.NewDaemonSetResource(owner) + if err != nil { + return err + } + + // 2. Build the component that manages the daemonset. + comp, err := component.NewComponentBuilder(). + WithName("example-app"). + WithConditionType("AppReady"). + WithResource(dsResource, component.ResourceOptions{}). + Suspend(owner.Spec.Suspended). + Build() + if err != nil { + return err + } + + // 3. Execute the component reconciliation. 
+ resCtx := component.ReconcileContext{ + Client: r.Client, + Scheme: r.Scheme, + Recorder: r.Recorder, + Metrics: r.Metrics, + Owner: owner, + } + + return comp.Reconcile(ctx, resCtx) +} diff --git a/examples/daemonset-primitive/app/owner.go b/examples/daemonset-primitive/app/owner.go new file mode 100644 index 00000000..6b611a02 --- /dev/null +++ b/examples/daemonset-primitive/app/owner.go @@ -0,0 +1,20 @@ +package app + +import ( + sharedapp "github.com/sourcehawk/operator-component-framework/examples/shared/app" +) + +// ExampleApp re-exports the shared CRD type so callers in this package need no import alias. +type ExampleApp = sharedapp.ExampleApp + +// ExampleAppSpec re-exports the shared spec type. +type ExampleAppSpec = sharedapp.ExampleAppSpec + +// ExampleAppStatus re-exports the shared status type. +type ExampleAppStatus = sharedapp.ExampleAppStatus + +// ExampleAppList re-exports the shared list type. +type ExampleAppList = sharedapp.ExampleAppList + +// AddToScheme registers the ExampleApp types with the given scheme. +var AddToScheme = sharedapp.AddToScheme diff --git a/examples/daemonset-primitive/features/flavors.go b/examples/daemonset-primitive/features/flavors.go new file mode 100644 index 00000000..6026ea64 --- /dev/null +++ b/examples/daemonset-primitive/features/flavors.go @@ -0,0 +1,16 @@ +// Package features provides sample features for the daemonset primitive. +package features + +import ( + "github.com/sourcehawk/operator-component-framework/pkg/primitives/daemonset" +) + +// PreserveLabelsFlavor demonstrates using a flavor to keep external labels. +func PreserveLabelsFlavor() daemonset.FieldApplicationFlavor { + return daemonset.PreserveCurrentLabels +} + +// PreserveAnnotationsFlavor demonstrates using a flavor to keep external annotations. 
+func PreserveAnnotationsFlavor() daemonset.FieldApplicationFlavor { + return daemonset.PreserveCurrentAnnotations +} diff --git a/examples/daemonset-primitive/features/mutations.go b/examples/daemonset-primitive/features/mutations.go new file mode 100644 index 00000000..31e116d9 --- /dev/null +++ b/examples/daemonset-primitive/features/mutations.go @@ -0,0 +1,75 @@ +package features + +import ( + "fmt" + + "github.com/sourcehawk/operator-component-framework/pkg/feature" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/editors" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/selectors" + "github.com/sourcehawk/operator-component-framework/pkg/primitives/daemonset" + corev1 "k8s.io/api/core/v1" +) + +// TracingFeature adds a Jaeger sidecar to the daemonset. +func TracingFeature(enabled bool) daemonset.Mutation { + return daemonset.Mutation{ + Name: "Tracing", + Feature: feature.NewResourceFeature("any", nil).When(enabled), + Mutate: func(m *daemonset.Mutator) error { + m.EnsureContainer(corev1.Container{ + Name: "jaeger-agent", + Image: "jaegertracing/jaeger-agent:1.28", + }) + + m.EnsureContainerEnvVar(corev1.EnvVar{ + Name: "JAEGER_AGENT_HOST", + Value: "localhost", + }) + + return nil + }, + } +} + +// MetricsFeature adds an exporter sidecar and some annotations. +func MetricsFeature(enabled bool, port int) daemonset.Mutation { + return daemonset.Mutation{ + Name: "Metrics", + Feature: feature.NewResourceFeature("any", nil).When(enabled), + Mutate: func(m *daemonset.Mutator) error { + m.EnsureContainer(corev1.Container{ + Name: "prometheus-exporter", + Image: "prom/node-exporter:v1.3.1", + }) + + m.EditPodTemplateMetadata(func(meta *editors.ObjectMetaEditor) error { + meta.EnsureAnnotation("prometheus.io/scrape", "true") + meta.EnsureAnnotation("prometheus.io/port", fmt.Sprintf("%d", port)) + return nil + }) + + return nil + }, + } +} + +// VersionFeature sets the image version and a label. 
+func VersionFeature(version string) daemonset.Mutation { + return daemonset.Mutation{ + Name: "Version", + Feature: feature.NewResourceFeature(version, nil), + Mutate: func(m *daemonset.Mutator) error { + m.EditContainers(selectors.ContainerNamed("agent"), func(ce *editors.ContainerEditor) error { + ce.Raw().Image = fmt.Sprintf("my-agent:%s", version) + return nil + }) + + m.EditObjectMetadata(func(meta *editors.ObjectMetaEditor) error { + meta.EnsureLabel("app.kubernetes.io/version", version) + return nil + }) + + return nil + }, + } +} diff --git a/examples/daemonset-primitive/main.go b/examples/daemonset-primitive/main.go new file mode 100644 index 00000000..67f06ce3 --- /dev/null +++ b/examples/daemonset-primitive/main.go @@ -0,0 +1,122 @@ +// Package main is the entry point for the daemonset primitive example. +package main + +import ( + "context" + "fmt" + "os" + + ocm "github.com/sourcehawk/go-crd-condition-metrics/pkg/crd-condition-metrics" + "github.com/sourcehawk/operator-component-framework/examples/daemonset-primitive/app" + "github.com/sourcehawk/operator-component-framework/examples/daemonset-primitive/resources" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func main() { + // 1. Setup scheme and fake client for the example. + scheme := runtime.NewScheme() + if err := app.AddToScheme(scheme); err != nil { + fmt.Fprintf(os.Stderr, "failed to add to scheme: %v\n", err) + os.Exit(1) + } + if err := appsv1.AddToScheme(scheme); err != nil { + fmt.Fprintf(os.Stderr, "failed to add apps/v1 to scheme: %v\n", err) + os.Exit(1) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithStatusSubresource(&app.ExampleApp{}). + Build() + + // 2. Create an example Owner object. 
+ owner := &app.ExampleApp{ + Spec: app.ExampleAppSpec{ + Version: "1.2.3", + EnableTracing: true, + EnableMetrics: true, + Suspended: false, + }, + } + owner.Name = "my-example-app" + owner.Namespace = "default" + + if err := fakeClient.Create(context.Background(), owner); err != nil { + fmt.Fprintf(os.Stderr, "failed to create owner: %v\n", err) + os.Exit(1) + } + + // 3. Initialize our controller. + gauge := ocm.NewOperatorConditionsGauge("example") + controller := &app.ExampleController{ + Client: fakeClient, + Scheme: scheme, + Recorder: record.NewFakeRecorder(100), + Metrics: &ocm.ConditionMetricRecorder{ + Controller: "example-controller", + OperatorConditionsGauge: gauge, + }, + + // Pass the daemonset resource factory. + NewDaemonSetResource: resources.NewDaemonSetResource, + } + + // 4. Run reconciliation with multiple spec versions. + specs := []app.ExampleAppSpec{ + { + Version: "1.2.3", + EnableTracing: true, + EnableMetrics: true, + Suspended: false, + }, + { + Version: "1.2.4", // Version upgrade + EnableTracing: true, + EnableMetrics: true, + Suspended: false, + }, + { + Version: "1.2.4", + EnableTracing: false, // Disable tracing + EnableMetrics: true, + Suspended: false, + }, + { + Version: "1.2.4", + EnableTracing: false, + EnableMetrics: true, + Suspended: true, // Suspend the app + }, + } + + ctx := context.Background() + + for i, spec := range specs { + fmt.Printf("\n--- Step %d: Applying Spec: Version=%s, Tracing=%v, Metrics=%v, Suspended=%v ---\n", + i+1, spec.Version, spec.EnableTracing, spec.EnableMetrics, spec.Suspended) + + // Update owner spec + owner.Spec = spec + if err := fakeClient.Update(ctx, owner); err != nil { + fmt.Fprintf(os.Stderr, "failed to update owner: %v\n", err) + os.Exit(1) + } + + fmt.Println("Running reconciliation...") + if err := controller.Reconcile(ctx, owner); err != nil { + fmt.Fprintf(os.Stderr, "reconciliation failed: %v\n", err) + os.Exit(1) + } + + // Inspect the owner conditions. 
+ for _, cond := range owner.Status.Conditions { + fmt.Printf("Condition: %s, Status: %s, Reason: %s\n", + cond.Type, cond.Status, cond.Reason) + } + } + + fmt.Println("\nReconciliation sequence completed successfully!") +} diff --git a/examples/daemonset-primitive/resources/daemonset.go b/examples/daemonset-primitive/resources/daemonset.go new file mode 100644 index 00000000..b503b5b0 --- /dev/null +++ b/examples/daemonset-primitive/resources/daemonset.go @@ -0,0 +1,80 @@ +// Package resources provides resource implementations for the daemonset primitive example. +package resources + +import ( + "fmt" + + "github.com/sourcehawk/operator-component-framework/examples/daemonset-primitive/app" + "github.com/sourcehawk/operator-component-framework/examples/daemonset-primitive/features" + "github.com/sourcehawk/operator-component-framework/pkg/component" + "github.com/sourcehawk/operator-component-framework/pkg/primitives/daemonset" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" +) + +// NewDaemonSetResource constructs a daemonset primitive resource with all the features. +func NewDaemonSetResource(owner *app.ExampleApp) (component.Resource, error) { + // 1. Create the base daemonset object. + base := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: owner.Name + "-daemonset", + Namespace: owner.Namespace, + Labels: map[string]string{ + "app": owner.Name, + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": owner.Name, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": owner.Name, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "agent", + Image: "my-agent:latest", // Will be overwritten by VersionFeature + }, + }, + }, + }, + }, + } + + // 2. Initialize the daemonset builder. 
+ builder := daemonset.NewBuilder(base) + + // 3. Apply mutations (features) based on the owner spec. + builder.WithMutation(features.VersionFeature(owner.Spec.Version)) + builder.WithMutation(features.TracingFeature(owner.Spec.EnableTracing)) + builder.WithMutation(features.MetricsFeature(owner.Spec.EnableMetrics, 9090)) + + // 4. Configure flavors (e.g., preserve labels/annotations if they were modified externally). + builder.WithFieldApplicationFlavor(features.PreserveLabelsFlavor()) + builder.WithFieldApplicationFlavor(features.PreserveAnnotationsFlavor()) + + // 5. Data extraction (optional). + builder.WithDataExtractor(func(d appsv1.DaemonSet) error { + fmt.Printf("Reconciling desired DaemonSet object: %s/%s\n", d.Namespace, d.Name) + + // Print the complete daemonset resource object as yaml + y, err := yaml.Marshal(d) + if err != nil { + return fmt.Errorf("failed to marshal daemonset to yaml: %w", err) + } + fmt.Printf("Complete DaemonSet Resource:\n---\n%s\n---\n", string(y)) + + return nil + }) + + // 6. Build the final resource. + return builder.Build() +} diff --git a/pkg/mutation/editors/daemonsetspec.go b/pkg/mutation/editors/daemonsetspec.go new file mode 100644 index 00000000..1e5ab4f6 --- /dev/null +++ b/pkg/mutation/editors/daemonsetspec.go @@ -0,0 +1,38 @@ +package editors + +import ( + appsv1 "k8s.io/api/apps/v1" +) + +// DaemonSetSpecEditor provides a typed API for mutating a Kubernetes DaemonSetSpec. +type DaemonSetSpecEditor struct { + spec *appsv1.DaemonSetSpec +} + +// NewDaemonSetSpecEditor creates a new DaemonSetSpecEditor for the given DaemonSetSpec. +func NewDaemonSetSpecEditor(spec *appsv1.DaemonSetSpec) *DaemonSetSpecEditor { + return &DaemonSetSpecEditor{spec: spec} +} + +// Raw returns the underlying *appsv1.DaemonSetSpec. +// +// This is an escape hatch for cases where the typed API is insufficient. 
+func (e *DaemonSetSpecEditor) Raw() *appsv1.DaemonSetSpec { + return e.spec +} + +// SetUpdateStrategy sets the update strategy for the DaemonSet. +func (e *DaemonSetSpecEditor) SetUpdateStrategy(strategy appsv1.DaemonSetUpdateStrategy) { + e.spec.UpdateStrategy = strategy +} + +// SetMinReadySeconds sets the minimum number of seconds for which a newly created pod +// should be ready without any of its containers crashing, for it to be considered available. +func (e *DaemonSetSpecEditor) SetMinReadySeconds(seconds int32) { + e.spec.MinReadySeconds = seconds +} + +// SetRevisionHistoryLimit sets the number of old DaemonSet revisions to retain to allow rollback. +func (e *DaemonSetSpecEditor) SetRevisionHistoryLimit(limit int32) { + e.spec.RevisionHistoryLimit = &limit +} diff --git a/pkg/mutation/editors/daemonsetspec_test.go b/pkg/mutation/editors/daemonsetspec_test.go new file mode 100644 index 00000000..0262dc45 --- /dev/null +++ b/pkg/mutation/editors/daemonsetspec_test.go @@ -0,0 +1,40 @@ +package editors + +import ( + "testing" + + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" +) + +func TestDaemonSetSpecEditor(t *testing.T) { + t.Run("SetUpdateStrategy", func(t *testing.T) { + spec := &appsv1.DaemonSetSpec{} + editor := NewDaemonSetSpecEditor(spec) + strategy := appsv1.DaemonSetUpdateStrategy{ + Type: appsv1.RollingUpdateDaemonSetStrategyType, + } + editor.SetUpdateStrategy(strategy) + assert.Equal(t, appsv1.RollingUpdateDaemonSetStrategyType, spec.UpdateStrategy.Type) + }) + + t.Run("SetMinReadySeconds", func(t *testing.T) { + spec := &appsv1.DaemonSetSpec{} + editor := NewDaemonSetSpecEditor(spec) + editor.SetMinReadySeconds(30) + assert.Equal(t, int32(30), spec.MinReadySeconds) + }) + + t.Run("SetRevisionHistoryLimit", func(t *testing.T) { + spec := &appsv1.DaemonSetSpec{} + editor := NewDaemonSetSpecEditor(spec) + editor.SetRevisionHistoryLimit(5) + assert.Equal(t, int32(5), *spec.RevisionHistoryLimit) + }) + + t.Run("Raw", func(t 
*testing.T) { + spec := &appsv1.DaemonSetSpec{} + editor := NewDaemonSetSpecEditor(spec) + assert.Equal(t, spec, editor.Raw()) + }) +} diff --git a/pkg/primitives/daemonset/builder.go b/pkg/primitives/daemonset/builder.go new file mode 100644 index 00000000..50774586 --- /dev/null +++ b/pkg/primitives/daemonset/builder.go @@ -0,0 +1,211 @@ +// Package daemonset provides a builder and resource for managing Kubernetes DaemonSets. +package daemonset + +import ( + "fmt" + + "github.com/sourcehawk/operator-component-framework/internal/generic" + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + "github.com/sourcehawk/operator-component-framework/pkg/feature" + appsv1 "k8s.io/api/apps/v1" +) + +// Builder is a configuration helper for creating and customizing a DaemonSet Resource. +// +// It provides a fluent API for registering mutations, status handlers, and +// data extractors. This builder ensures that the resulting Resource is +// properly initialized and validated before use in a reconciliation loop. +type Builder struct { + base *generic.WorkloadBuilder[*appsv1.DaemonSet, *Mutator] +} + +// NewBuilder initializes a new Builder with the provided DaemonSet object. +// +// The DaemonSet object passed here serves as the "desired base state". During +// reconciliation, the Resource will attempt to make the cluster's state match +// this base state, modified by any registered mutations. +// +// The provided DaemonSet must have at least a Name and Namespace set, which +// is validated during the Build() call. +func NewBuilder(daemonset *appsv1.DaemonSet) *Builder { + identityFunc := func(d *appsv1.DaemonSet) string { + return fmt.Sprintf("apps/v1/DaemonSet/%s/%s", d.Namespace, d.Name) + } + + base := generic.NewWorkloadBuilder[*appsv1.DaemonSet, *Mutator]( + daemonset, + identityFunc, + DefaultFieldApplicator, + NewMutator, + ) + + base. + WithCustomConvergeStatus(DefaultConvergingStatusHandler). 
+ WithCustomGraceStatus(DefaultGraceStatusHandler). + WithCustomSuspendStatus(DefaultSuspensionStatusHandler). + WithCustomSuspendMutation(DefaultSuspendMutationHandler). + WithCustomSuspendDeletionDecision(DefaultDeleteOnSuspendHandler) + + return &Builder{ + base: base, + } +} + +// WithMutation registers a feature-based mutation for the DaemonSet. +// +// Mutations are applied sequentially during the Mutate() phase of reconciliation. +// They are typically used by Features to inject environment variables, +// arguments, or other configuration into the DaemonSet's containers. +// +// Since mutations are often version-gated, the provided feature.Mutation +// should contain the logic to determine if and how the mutation is applied +// based on the component's current version or configuration. +func (b *Builder) WithMutation(m Mutation) *Builder { + b.base.WithMutation(feature.Mutation[*Mutator](m)) + return b +} + +// WithCustomFieldApplicator sets a custom strategy for applying the desired +// state to the existing DaemonSet in the cluster. +// +// There is a default field applicator (DefaultFieldApplicator) that overwrites +// the entire spec of the current object with the desired state. Using a custom +// applicator is necessary when: +// - Other controllers manage specific fields that should be preserved. +// - Sidecar injectors add containers or volumes that should be preserved. +// - Defaulting webhooks add fields that would otherwise cause perpetual diffs. +// +// The applicator function receives both the 'current' object from the API +// server and the 'desired' object from the Resource. It is responsible for +// merging the desired changes into the current object. +// +// If a custom applicator is set, it overrides the default baseline application +// logic. Post-application flavors and mutations are still applied afterward. 
+func (b *Builder) WithCustomFieldApplicator( + applicator func(current *appsv1.DaemonSet, desired *appsv1.DaemonSet) error, +) *Builder { + b.base.WithCustomFieldApplicator(applicator) + return b +} + +// WithFieldApplicationFlavor registers a reusable post-application "flavor" for +// the DaemonSet. +// +// Flavors are applied in the order they are registered, after the baseline field +// applicator (default or custom) has already run. They are typically used to +// preserve selected live fields from the current object that should not be +// overwritten by the desired state. +// +// If the provided flavor is nil, it is ignored. +func (b *Builder) WithFieldApplicationFlavor(flavor FieldApplicationFlavor) *Builder { + b.base.WithFieldApplicationFlavor(generic.FieldApplicationFlavor[*appsv1.DaemonSet](flavor)) + return b +} + +// WithCustomConvergeStatus overrides the default logic for determining if the +// DaemonSet has reached its desired state. +// +// The default behavior uses DefaultConvergingStatusHandler, which: +// - Treats the DaemonSet as converged when DesiredNumberScheduled == 0 as soon as +// status.ObservedGeneration is greater than or equal to metadata.Generation. +// - When DesiredNumberScheduled > 0, treats the DaemonSet as converged once +// status.NumberReady is greater than or equal to status.DesiredNumberScheduled. +// +// If you want to augment the default behavior, you can call DefaultConvergingStatusHandler +// within your custom handler. +func (b *Builder) WithCustomConvergeStatus( + handler func(concepts.ConvergingOperation, *appsv1.DaemonSet) (concepts.AliveStatusWithReason, error), +) *Builder { + b.base.WithCustomConvergeStatus(handler) + return b +} + +// WithCustomGraceStatus overrides how the DaemonSet reports its health while +// it is still converging. +// +// The default behavior uses DefaultGraceStatusHandler. 
+// +// If you want to augment the default behavior, you can call DefaultGraceStatusHandler +// within your custom handler. +func (b *Builder) WithCustomGraceStatus( + handler func(*appsv1.DaemonSet) (concepts.GraceStatusWithReason, error), +) *Builder { + b.base.WithCustomGraceStatus(handler) + return b +} + +// WithCustomSuspendStatus overrides how the progress of suspension is reported. +// +// The default behavior uses DefaultSuspensionStatusHandler, which always reports +// Suspended because DaemonSets are deleted on suspension. +// +// If you want to augment the default behavior, you can call DefaultSuspensionStatusHandler +// within your custom handler. +func (b *Builder) WithCustomSuspendStatus( + handler func(*appsv1.DaemonSet) (concepts.SuspensionStatusWithReason, error), +) *Builder { + b.base.WithCustomSuspendStatus(handler) + return b +} + +// WithCustomSuspendMutation defines how the DaemonSet should be modified when +// the component is suspended. +// +// The default behavior uses DefaultSuspendMutationHandler, which is a no-op +// because DaemonSets are deleted on suspension rather than mutated. +// +// If you want to augment the default behavior, you can call DefaultSuspendMutationHandler +// within your custom handler. +func (b *Builder) WithCustomSuspendMutation( + handler func(*Mutator) error, +) *Builder { + b.base.WithCustomSuspendMutation(handler) + return b +} + +// WithCustomSuspendDeletionDecision overrides the decision of whether to delete +// the DaemonSet when the component is suspended. +// +// The default behavior uses DefaultDeleteOnSuspendHandler, which returns true +// because DaemonSets have no replicas field and cannot be scaled down. +// +// If you want to augment the default behavior, you can call DefaultDeleteOnSuspendHandler +// within your custom handler. 
+func (b *Builder) WithCustomSuspendDeletionDecision( + handler func(*appsv1.DaemonSet) bool, +) *Builder { + b.base.WithCustomSuspendDeletionDecision(handler) + return b +} + +// WithDataExtractor registers a function to harvest information from the +// DaemonSet after it has been successfully reconciled. +// +// This is useful for capturing auto-generated fields (like names or assigned +// IPs) and making them available to other components or resources via the +// framework's data extraction mechanism. +func (b *Builder) WithDataExtractor( + extractor func(appsv1.DaemonSet) error, +) *Builder { + if extractor != nil { + b.base.WithDataExtractor(func(d *appsv1.DaemonSet) error { + return extractor(*d) + }) + } + return b +} + +// Build validates the configuration and returns the initialized Resource. +// +// It ensures that: +// - A base DaemonSet object was provided. +// - The DaemonSet has both a name and a namespace set. +// +// If validation fails, an error is returned and the Resource should not be used. 
+func (b *Builder) Build() (*Resource, error) { + genericRes, err := b.base.Build() + if err != nil { + return nil, err + } + return &Resource{base: genericRes}, nil +} diff --git a/pkg/primitives/daemonset/builder_test.go b/pkg/primitives/daemonset/builder_test.go new file mode 100644 index 00000000..b37c088c --- /dev/null +++ b/pkg/primitives/daemonset/builder_test.go @@ -0,0 +1,272 @@ +package daemonset + +import ( + "errors" + "testing" + + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestBuilder(t *testing.T) { + t.Parallel() + + t.Run("Build validation", func(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + daemonset *appsv1.DaemonSet + expectedErr string + }{ + { + name: "nil daemonset", + daemonset: nil, + expectedErr: "object cannot be nil", + }, + { + name: "empty name", + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, + }, + expectedErr: "object name cannot be empty", + }, + { + name: "empty namespace", + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + }, + }, + expectedErr: "object namespace cannot be empty", + }, + { + name: "valid daemonset", + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + }, + expectedErr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, err := NewBuilder(tt.daemonset).Build() + if tt.expectedErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErr) + assert.Nil(t, res) + } else { + require.NoError(t, err) + require.NotNil(t, res) + assert.Equal(t, "apps/v1/DaemonSet/test-ns/test-ds", res.Identity()) + } + }) + } + }) + + t.Run("WithMutation", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + m := Mutation{ + Name: "test-mutation", + } + res, err := NewBuilder(ds). + WithMutation(m). + Build() + require.NoError(t, err) + assert.Len(t, res.base.Mutations, 1) + assert.Equal(t, "test-mutation", res.base.Mutations[0].Name) + }) + + t.Run("WithCustomFieldApplicator", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + applied := false + applicator := func(_ *appsv1.DaemonSet, _ *appsv1.DaemonSet) error { + applied = true + return nil + } + res, err := NewBuilder(ds). + WithCustomFieldApplicator(applicator). + Build() + require.NoError(t, err) + require.NotNil(t, res.base.CustomFieldApplicator) + _ = res.base.CustomFieldApplicator(nil, nil) + assert.True(t, applied) + }) + + t.Run("WithFieldApplicationFlavor", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + res, err := NewBuilder(ds). + WithFieldApplicationFlavor(PreserveCurrentLabels). + WithFieldApplicationFlavor(nil). + Build() + require.NoError(t, err) + assert.Len(t, res.base.FieldFlavors, 1) + }) + + t.Run("WithCustomConvergeStatus", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + handler := func(_ concepts.ConvergingOperation, _ *appsv1.DaemonSet) (concepts.AliveStatusWithReason, error) { + return concepts.AliveStatusWithReason{Status: concepts.AliveConvergingStatusUpdating}, nil + } + res, err := NewBuilder(ds). + WithCustomConvergeStatus(handler). 
+ Build() + require.NoError(t, err) + require.NotNil(t, res.base.ConvergingStatusHandler) + status, err := res.base.ConvergingStatusHandler(concepts.ConvergingOperationUpdated, nil) + require.NoError(t, err) + assert.Equal(t, concepts.AliveConvergingStatusUpdating, status.Status) + }) + + t.Run("WithCustomGraceStatus", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + handler := func(_ *appsv1.DaemonSet) (concepts.GraceStatusWithReason, error) { + return concepts.GraceStatusWithReason{Status: concepts.GraceStatusHealthy}, nil + } + res, err := NewBuilder(ds). + WithCustomGraceStatus(handler). + Build() + require.NoError(t, err) + require.NotNil(t, res.base.GraceStatusHandler) + status, err := res.base.GraceStatusHandler(nil) + require.NoError(t, err) + assert.Equal(t, concepts.GraceStatusHealthy, status.Status) + }) + + t.Run("WithCustomSuspendStatus", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + handler := func(_ *appsv1.DaemonSet) (concepts.SuspensionStatusWithReason, error) { + return concepts.SuspensionStatusWithReason{Status: concepts.SuspensionStatusSuspended}, nil + } + res, err := NewBuilder(ds). + WithCustomSuspendStatus(handler). + Build() + require.NoError(t, err) + require.NotNil(t, res.base.SuspendStatusHandler) + status, err := res.base.SuspendStatusHandler(nil) + require.NoError(t, err) + assert.Equal(t, concepts.SuspensionStatusSuspended, status.Status) + }) + + t.Run("WithCustomSuspendMutation", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + handler := func(_ *Mutator) error { + return errors.New("suspend error") + } + res, err := NewBuilder(ds). + WithCustomSuspendMutation(handler). 
+ Build() + require.NoError(t, err) + require.NotNil(t, res.base.SuspendMutationHandler) + err = res.base.SuspendMutationHandler(nil) + assert.EqualError(t, err, "suspend error") + }) + + t.Run("WithCustomSuspendDeletionDecision", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + handler := func(_ *appsv1.DaemonSet) bool { + return false + } + res, err := NewBuilder(ds). + WithCustomSuspendDeletionDecision(handler). + Build() + require.NoError(t, err) + require.NotNil(t, res.base.DeleteOnSuspendHandler) + assert.False(t, res.base.DeleteOnSuspendHandler(nil)) + }) + + t.Run("WithDataExtractor", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + called := false + extractor := func(_ appsv1.DaemonSet) error { + called = true + return nil + } + res, err := NewBuilder(ds). + WithDataExtractor(extractor). + Build() + require.NoError(t, err) + assert.Len(t, res.base.DataExtractors, 1) + err = res.base.DataExtractors[0](&appsv1.DaemonSet{}) + require.NoError(t, err) + assert.True(t, called) + }) + + t.Run("WithDataExtractor nil", func(t *testing.T) { + t.Parallel() + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + res, err := NewBuilder(ds). + WithDataExtractor(nil). 
+ Build() + require.NoError(t, err) + assert.Len(t, res.base.DataExtractors, 0) + }) +} diff --git a/pkg/primitives/daemonset/flavors.go b/pkg/primitives/daemonset/flavors.go new file mode 100644 index 00000000..c70313ae --- /dev/null +++ b/pkg/primitives/daemonset/flavors.go @@ -0,0 +1,48 @@ +package daemonset + +import ( + "github.com/sourcehawk/operator-component-framework/pkg/flavors" + "github.com/sourcehawk/operator-component-framework/pkg/flavors/utils" + appsv1 "k8s.io/api/apps/v1" +) + +// FieldApplicationFlavor defines a function signature for applying "flavors" to a resource. +// A flavor typically preserves certain fields from the current (live) object after the +// baseline field application has occurred. +type FieldApplicationFlavor flavors.FieldApplicationFlavor[*appsv1.DaemonSet] + +// PreserveCurrentLabels ensures that any labels present on the current live +// DaemonSet but missing from the applied (desired) object are preserved. +// If a label exists in both, the applied value wins. +func PreserveCurrentLabels(applied, current, desired *appsv1.DaemonSet) error { + return flavors.PreserveCurrentLabels[*appsv1.DaemonSet]()(applied, current, desired) +} + +// PreserveCurrentAnnotations ensures that any annotations present on the current +// live DaemonSet but missing from the applied (desired) object are preserved. +// If an annotation exists in both, the applied value wins. +func PreserveCurrentAnnotations(applied, current, desired *appsv1.DaemonSet) error { + return flavors.PreserveCurrentAnnotations[*appsv1.DaemonSet]()(applied, current, desired) +} + +// PreserveCurrentPodTemplateLabels ensures that any labels present on the +// current live DaemonSet's pod template but missing from the applied +// (desired) object's pod template are preserved. +// If a label exists in both, the applied value wins. +// +// Note: pod template metadata changes may affect the rollout hash of the DaemonSet. 
+func PreserveCurrentPodTemplateLabels(applied, current, _ *appsv1.DaemonSet) error { + applied.Spec.Template.Labels = utils.PreserveMap(applied.Spec.Template.Labels, current.Spec.Template.Labels) + return nil +} + +// PreserveCurrentPodTemplateAnnotations ensures that any annotations present +// on the current live DaemonSet's pod template but missing from the applied +// (desired) object's pod template are preserved. +// If an annotation exists in both, the applied value wins. +// +// Note: pod template metadata changes may affect the rollout hash of the DaemonSet. +func PreserveCurrentPodTemplateAnnotations(applied, current, _ *appsv1.DaemonSet) error { + applied.Spec.Template.Annotations = utils.PreserveMap(applied.Spec.Template.Annotations, current.Spec.Template.Annotations) + return nil +} diff --git a/pkg/primitives/daemonset/flavors_test.go b/pkg/primitives/daemonset/flavors_test.go new file mode 100644 index 00000000..cbf830e9 --- /dev/null +++ b/pkg/primitives/daemonset/flavors_test.go @@ -0,0 +1,139 @@ +package daemonset + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestMutate_OrderingAndFlavors(t *testing.T) { + desired := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + Labels: map[string]string{"app": "desired"}, + }, + } + + t.Run("flavors run after baseline applicator", func(t *testing.T) { + current := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + Labels: map[string]string{"extra": "preserved"}, + }, + } + + res, _ := NewBuilder(desired). + WithFieldApplicationFlavor(PreserveCurrentLabels). 
+ Build() + + err := res.Mutate(current) + require.NoError(t, err) + + assert.Equal(t, "desired", current.Labels["app"]) + assert.Equal(t, "preserved", current.Labels["extra"]) + }) + + t.Run("flavors run in registration order", func(t *testing.T) { + current := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + + var order []string + flavor1 := func(_, _, _ *appsv1.DaemonSet) error { + order = append(order, "flavor1") + return nil + } + flavor2 := func(_, _, _ *appsv1.DaemonSet) error { + order = append(order, "flavor2") + return nil + } + + res, _ := NewBuilder(desired). + WithFieldApplicationFlavor(flavor1). + WithFieldApplicationFlavor(flavor2). + Build() + + err := res.Mutate(current) + require.NoError(t, err) + assert.Equal(t, []string{"flavor1", "flavor2"}, order) + }) + + t.Run("flavor error is returned with context", func(t *testing.T) { + current := &appsv1.DaemonSet{} + flavorErr := errors.New("boom") + flavor := func(_, _, _ *appsv1.DaemonSet) error { + return flavorErr + } + + res, _ := NewBuilder(desired). + WithFieldApplicationFlavor(flavor). 
+ Build() + + err := res.Mutate(current) + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to apply field application flavor") + assert.True(t, errors.Is(err, flavorErr)) + }) +} + +func TestDefaultFlavors(t *testing.T) { + t.Run("PreserveCurrentLabels", func(t *testing.T) { + applied := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"keep": "applied", "overlap": "applied"}}} + current := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"extra": "current", "overlap": "current"}}} + + err := PreserveCurrentLabels(applied, current, nil) + require.NoError(t, err) + assert.Equal(t, "applied", applied.Labels["keep"]) + assert.Equal(t, "applied", applied.Labels["overlap"]) + assert.Equal(t, "current", applied.Labels["extra"]) + }) + + t.Run("PreserveCurrentAnnotations", func(t *testing.T) { + applied := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"keep": "applied"}}} + current := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"extra": "current"}}} + + err := PreserveCurrentAnnotations(applied, current, nil) + require.NoError(t, err) + assert.Equal(t, "applied", applied.Annotations["keep"]) + assert.Equal(t, "current", applied.Annotations["extra"]) + }) + + t.Run("PreserveCurrentPodTemplateLabels", func(t *testing.T) { + applied := &appsv1.DaemonSet{Spec: appsv1.DaemonSetSpec{Template: corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"keep": "applied"}}}}} + current := &appsv1.DaemonSet{Spec: appsv1.DaemonSetSpec{Template: corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"extra": "current"}}}}} + + err := PreserveCurrentPodTemplateLabels(applied, current, nil) + require.NoError(t, err) + assert.Equal(t, "applied", applied.Spec.Template.Labels["keep"]) + assert.Equal(t, "current", applied.Spec.Template.Labels["extra"]) + }) + + t.Run("PreserveCurrentPodTemplateAnnotations", func(t 
*testing.T) { + applied := &appsv1.DaemonSet{Spec: appsv1.DaemonSetSpec{Template: corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"keep": "applied"}}}}} + current := &appsv1.DaemonSet{Spec: appsv1.DaemonSetSpec{Template: corev1.PodTemplateSpec{ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{"extra": "current"}}}}} + + err := PreserveCurrentPodTemplateAnnotations(applied, current, nil) + require.NoError(t, err) + assert.Equal(t, "applied", applied.Spec.Template.Annotations["keep"]) + assert.Equal(t, "current", applied.Spec.Template.Annotations["extra"]) + }) + + t.Run("handles nil maps safely", func(t *testing.T) { + applied := &appsv1.DaemonSet{} + current := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"extra": "current"}}} + + err := PreserveCurrentLabels(applied, current, nil) + require.NoError(t, err) + assert.Equal(t, "current", applied.Labels["extra"]) + }) +} diff --git a/pkg/primitives/daemonset/handlers.go b/pkg/primitives/daemonset/handlers.go new file mode 100644 index 00000000..e081c682 --- /dev/null +++ b/pkg/primitives/daemonset/handlers.go @@ -0,0 +1,139 @@ +package daemonset + +import ( + "fmt" + + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + appsv1 "k8s.io/api/apps/v1" +) + +// DefaultConvergingStatusHandler is the default logic for determining if a DaemonSet has reached its desired state. +// +// It considers a DaemonSet ready when the DaemonSet controller has observed the current generation +// (Status.ObservedGeneration >= ObjectMeta.Generation) and either: +// - Status.NumberReady >= Status.DesiredNumberScheduled and DesiredNumberScheduled > 0, or +// - DesiredNumberScheduled is zero (no matching nodes is a valid converged state). 
+// +// If the controller has not yet observed the latest spec, the handler reports Creating (when the +// resource was just created) or Updating (otherwise) to avoid falsely reporting health based on +// stale status fields. +// +// This function is used as the default handler by the Resource if no custom handler is registered via +// Builder.WithCustomConvergeStatus. It can be reused within custom handlers to augment the default behavior. +func DefaultConvergingStatusHandler( + op concepts.ConvergingOperation, ds *appsv1.DaemonSet, +) (concepts.AliveStatusWithReason, error) { + if status := concepts.StaleGenerationStatus( + op, ds.Status.ObservedGeneration, ds.Generation, "daemonset", + ); status != nil { + return *status, nil + } + + desired := ds.Status.DesiredNumberScheduled + + if desired == 0 { + return concepts.AliveStatusWithReason{ + Status: concepts.AliveConvergingStatusHealthy, + Reason: "No nodes match the DaemonSet node selector", + }, nil + } + + if ds.Status.NumberReady >= desired { + return concepts.AliveStatusWithReason{ + Status: concepts.AliveConvergingStatusHealthy, + Reason: "All pods are ready", + }, nil + } + + var status concepts.AliveConvergingStatus + switch op { + case concepts.ConvergingOperationCreated: + status = concepts.AliveConvergingStatusCreating + case concepts.ConvergingOperationUpdated: + status = concepts.AliveConvergingStatusUpdating + default: + status = concepts.AliveConvergingStatusScaling + } + + return concepts.AliveStatusWithReason{ + Status: status, + Reason: fmt.Sprintf("Waiting for pods: %d/%d ready", ds.Status.NumberReady, desired), + }, nil +} + +// DefaultGraceStatusHandler provides a default health assessment of the DaemonSet when it has not yet +// reached full readiness. 
+// +// It categorizes the current state into: +// - GraceStatusHealthy: DesiredNumberScheduled is zero, the controller has observed the current +// generation (Status.ObservedGeneration >= Generation), and no nodes match the selector; this is a +// valid configuration state, not a failure. +// - GraceStatusDegraded: DesiredNumberScheduled is zero but the controller has not yet observed the +// current generation, or DesiredNumberScheduled > 0 and at least one pod is ready, but below desired. +// - GraceStatusDown: DesiredNumberScheduled > 0 and no pods are ready. +// +// This function is used as the default handler by the Resource if no custom handler is registered via +// Builder.WithCustomGraceStatus. It can be reused within custom handlers to augment the default behavior. +func DefaultGraceStatusHandler(ds *appsv1.DaemonSet) (concepts.GraceStatusWithReason, error) { + if ds.Status.DesiredNumberScheduled == 0 { + if ds.Status.ObservedGeneration >= ds.Generation { + return concepts.GraceStatusWithReason{ + Status: concepts.GraceStatusHealthy, + Reason: "No nodes match the DaemonSet node selector", + }, nil + } + + return concepts.GraceStatusWithReason{ + Status: concepts.GraceStatusDegraded, + Reason: "Waiting for DaemonSet controller to observe latest generation", + }, nil + } + + if ds.Status.NumberReady >= 1 { + return concepts.GraceStatusWithReason{ + Status: concepts.GraceStatusDegraded, + Reason: "DaemonSet partially available", + }, nil + } + + return concepts.GraceStatusWithReason{ + Status: concepts.GraceStatusDown, + Reason: "No pods are ready", + }, nil +} + +// DefaultDeleteOnSuspendHandler provides the default decision of whether to delete the DaemonSet +// when the parent component is suspended. +// +// It always returns true because DaemonSets have no replicas field and cannot be +// scaled to zero in-place. 
+// +// This function is used as the default handler by the Resource if no custom handler is registered via +// Builder.WithCustomSuspendDeletionDecision. It can be reused within custom handlers. +func DefaultDeleteOnSuspendHandler(_ *appsv1.DaemonSet) bool { + return true +} + +// DefaultSuspendMutationHandler provides the default mutation applied to a DaemonSet when the component is suspended. +// +// It is a no-op because DaemonSets are deleted on suspension rather than mutated. +// +// This function is used as the default handler by the Resource if no custom handler is registered via +// Builder.WithCustomSuspendMutation. It can be reused within custom handlers. +func DefaultSuspendMutationHandler(_ *Mutator) error { + return nil +} + +// DefaultSuspensionStatusHandler monitors the progress of the suspension process. +// +// It always reports Suspended with a reason indicating that the DaemonSet is deleted +// on suspension, because there is no in-place scale-down mechanism for DaemonSets. +// +// This function is used as the default handler by the Resource if no custom handler is registered via +// Builder.WithCustomSuspendStatus. It can be reused within custom handlers. 
+func DefaultSuspensionStatusHandler(_ *appsv1.DaemonSet) (concepts.SuspensionStatusWithReason, error) { + return concepts.SuspensionStatusWithReason{ + Status: concepts.SuspensionStatusSuspended, + Reason: "DaemonSet deleted on suspend", + }, nil +} diff --git a/pkg/primitives/daemonset/handlers_test.go b/pkg/primitives/daemonset/handlers_test.go new file mode 100644 index 00000000..00709df1 --- /dev/null +++ b/pkg/primitives/daemonset/handlers_test.go @@ -0,0 +1,243 @@ +package daemonset + +import ( + "testing" + + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestDefaultConvergingStatusHandler(t *testing.T) { + tests := []struct { + name string + op concepts.ConvergingOperation + daemonset *appsv1.DaemonSet + wantStatus concepts.AliveConvergingStatus + wantReason string + }{ + { + name: "ready with all pods scheduled", + op: concepts.ConvergingOperationUpdated, + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 1}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 3, + ObservedGeneration: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusHealthy, + wantReason: "All pods are ready", + }, + { + name: "ready with more than desired", + op: concepts.ConvergingOperationUpdated, + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 1}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 5, + ObservedGeneration: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusHealthy, + wantReason: "All pods are ready", + }, + { + name: "stale observed generation with desired pods ready reports updating not healthy", + op: concepts.ConvergingOperationUpdated, + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 2}, + Status: appsv1.DaemonSetStatus{ + 
DesiredNumberScheduled: 3, + NumberReady: 3, + ObservedGeneration: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusUpdating, + wantReason: "Waiting for daemonset controller to observe latest spec", + }, + { + name: "stale observed generation after create", + op: concepts.ConvergingOperationCreated, + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 2}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 3, + ObservedGeneration: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusCreating, + wantReason: "Waiting for daemonset controller to observe latest spec", + }, + { + name: "stale observed generation with no operation", + op: concepts.ConvergingOperationNone, + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 2}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 3, + ObservedGeneration: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusUpdating, + wantReason: "Waiting for daemonset controller to observe latest spec", + }, + { + name: "creating", + op: concepts.ConvergingOperationCreated, + daemonset: &appsv1.DaemonSet{ + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusCreating, + wantReason: "Waiting for pods: 1/3 ready", + }, + { + name: "updating", + op: concepts.ConvergingOperationUpdated, + daemonset: &appsv1.DaemonSet{ + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusUpdating, + wantReason: "Waiting for pods: 1/3 ready", + }, + { + name: "scaling", + op: concepts.ConvergingOperation("Scaling"), + daemonset: &appsv1.DaemonSet{ + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusScaling, + wantReason: "Waiting for pods: 1/3 ready", + }, + { + name: "zero desired with observed generation 
is healthy", + op: concepts.ConvergingOperationCreated, + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 1}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 0, + NumberReady: 0, + ObservedGeneration: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusHealthy, + wantReason: "No nodes match the DaemonSet node selector", + }, + { + name: "zero desired with stale generation is not ready", + op: concepts.ConvergingOperationCreated, + daemonset: &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 2}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 0, + NumberReady: 0, + ObservedGeneration: 1, + }, + }, + wantStatus: concepts.AliveConvergingStatusCreating, + wantReason: "Waiting for daemonset controller to observe latest spec", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := DefaultConvergingStatusHandler(tt.op, tt.daemonset) + require.NoError(t, err) + assert.Equal(t, tt.wantStatus, got.Status) + assert.Equal(t, tt.wantReason, got.Reason) + }) + } +} + +func TestDefaultGraceStatusHandler(t *testing.T) { + t.Run("healthy when no nodes match selector and generation observed", func(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 1}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 0, + NumberReady: 0, + ObservedGeneration: 1, + }, + } + got, err := DefaultGraceStatusHandler(ds) + require.NoError(t, err) + assert.Equal(t, concepts.GraceStatusHealthy, got.Status) + assert.Equal(t, "No nodes match the DaemonSet node selector", got.Reason) + }) + + t.Run("degraded when no nodes match selector but generation stale", func(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Generation: 2}, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 0, + NumberReady: 0, + ObservedGeneration: 1, + }, + } + got, err := DefaultGraceStatusHandler(ds) + require.NoError(t, err) + assert.Equal(t, 
concepts.GraceStatusDegraded, got.Status) + assert.Equal(t, "Waiting for DaemonSet controller to observe latest generation", got.Reason) + }) + + t.Run("degraded (some ready)", func(t *testing.T) { + ds := &appsv1.DaemonSet{ + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 1, + }, + } + got, err := DefaultGraceStatusHandler(ds) + require.NoError(t, err) + assert.Equal(t, concepts.GraceStatusDegraded, got.Status) + assert.Equal(t, "DaemonSet partially available", got.Reason) + }) + + t.Run("down (none ready)", func(t *testing.T) { + ds := &appsv1.DaemonSet{ + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 0, + }, + } + got, err := DefaultGraceStatusHandler(ds) + require.NoError(t, err) + assert.Equal(t, concepts.GraceStatusDown, got.Status) + assert.Equal(t, "No pods are ready", got.Reason) + }) +} + +func TestDefaultDeleteOnSuspendHandler(t *testing.T) { + ds := &appsv1.DaemonSet{} + assert.True(t, DefaultDeleteOnSuspendHandler(ds)) +} + +func TestDefaultSuspendMutationHandler(t *testing.T) { + ds := &appsv1.DaemonSet{} + mutator := NewMutator(ds) + err := DefaultSuspendMutationHandler(mutator) + require.NoError(t, err) +} + +func TestDefaultSuspensionStatusHandler(t *testing.T) { + ds := &appsv1.DaemonSet{} + got, err := DefaultSuspensionStatusHandler(ds) + require.NoError(t, err) + assert.Equal(t, concepts.SuspensionStatusSuspended, got.Status) + assert.Equal(t, "DaemonSet deleted on suspend", got.Reason) +} diff --git a/pkg/primitives/daemonset/mutator.go b/pkg/primitives/daemonset/mutator.go new file mode 100644 index 00000000..fa8a9593 --- /dev/null +++ b/pkg/primitives/daemonset/mutator.go @@ -0,0 +1,456 @@ +package daemonset + +import ( + "github.com/sourcehawk/operator-component-framework/pkg/feature" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/editors" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/selectors" + appsv1 "k8s.io/api/apps/v1" + corev1 
"k8s.io/api/core/v1" +) + +// Mutation defines a mutation that is applied to a daemonset Mutator +// only if its associated feature.ResourceFeature is enabled. +type Mutation feature.Mutation[*Mutator] + +type containerEdit struct { + selector selectors.ContainerSelector + edit func(*editors.ContainerEditor) error +} + +type containerPresenceOp struct { + name string + container *corev1.Container // nil for remove +} + +type featurePlan struct { + daemonsetMetadataEdits []func(*editors.ObjectMetaEditor) error + daemonsetSpecEdits []func(*editors.DaemonSetSpecEditor) error + podTemplateMetadataEdits []func(*editors.ObjectMetaEditor) error + podSpecEdits []func(*editors.PodSpecEditor) error + containerPresence []containerPresenceOp + containerEdits []containerEdit + initContainerPresence []containerPresenceOp + initContainerEdits []containerEdit +} + +// Mutator is a high-level helper for modifying a Kubernetes DaemonSet. +// +// It uses a "plan-and-apply" pattern: mutations are recorded first, and then +// applied to the DaemonSet in a single controlled pass when Apply() is called. +// +// This approach ensures that mutations are applied consistently and minimizes +// repeated scans of the underlying Kubernetes structures. +// +// The Mutator maintains feature boundaries: each feature's mutations are planned +// together and applied in the order the features were registered. +type Mutator struct { + current *appsv1.DaemonSet + + plans []featurePlan + active *featurePlan +} + +// NewMutator creates a new Mutator for the given DaemonSet. +// +// It is typically used within a Feature's Mutation logic to express desired +// changes to the DaemonSet. BeginFeature must be called before registering +// any mutations. +func NewMutator(current *appsv1.DaemonSet) *Mutator { + return &Mutator{ + current: current, + } +} + +// BeginFeature starts a new feature planning scope. 
All subsequent mutation +// registrations will be grouped into this feature's plan until another +// BeginFeature is called. +// +// This is used to ensure that mutations from different features are applied +// in registration order while maintaining internal category ordering within +// each feature. +func (m *Mutator) BeginFeature() { + m.plans = append(m.plans, featurePlan{}) + m.active = &m.plans[len(m.plans)-1] +} + +// EditContainers records a mutation for containers matching the given selector. +// +// Planning: +// All container edits are stored and executed during Apply(). +// +// Execution Order: +// - Within a feature, edits are applied in registration order. +// - Overall, container edits are executed AFTER container presence operations within the same feature. +// +// Selection: +// - The selector determines which containers the edit function will be called for. +// - If either selector or edit function is nil, the registration is ignored. +// - Selectors are intended to target containers defined by the baseline resource structure or added by earlier presence operations. +// - Selector matching is evaluated against a snapshot taken after the current feature's container presence operations are applied. +// - Mutations should not rely on earlier edits in the SAME feature phase changing which selectors match. +func (m *Mutator) EditContainers(selector selectors.ContainerSelector, edit func(*editors.ContainerEditor) error) { + if selector == nil || edit == nil { + return + } + m.active.containerEdits = append(m.active.containerEdits, containerEdit{ + selector: selector, + edit: edit, + }) +} + +// EditInitContainers records a mutation for init containers matching the given selector. +// +// Planning: +// All init container edits are stored and executed during Apply(). +// +// Execution Order: +// - Within a feature, edits are applied in registration order. +// - Overall, init container edits apply only to spec.template.spec.initContainers. 
+// - They run in their own category during Apply(), after init container presence operations within the same feature. +// +// Selection: +// - The selector determines which init containers the edit function will be called for. +// - If either selector or edit function is nil, the registration is ignored. +// - Selector matching is evaluated against a snapshot taken after the current feature's init container presence operations are applied. +func (m *Mutator) EditInitContainers(selector selectors.ContainerSelector, edit func(*editors.ContainerEditor) error) { + if selector == nil || edit == nil { + return + } + m.active.initContainerEdits = append(m.active.initContainerEdits, containerEdit{ + selector: selector, + edit: edit, + }) +} + +// EnsureContainer records that a regular container must be present in the DaemonSet. +// If a container with the same name exists, it is replaced; otherwise, it is appended. +func (m *Mutator) EnsureContainer(container corev1.Container) { + m.active.containerPresence = append(m.active.containerPresence, containerPresenceOp{ + name: container.Name, + container: &container, + }) +} + +// RemoveContainer records that a regular container should be removed by name. +func (m *Mutator) RemoveContainer(name string) { + m.active.containerPresence = append(m.active.containerPresence, containerPresenceOp{ + name: name, + container: nil, + }) +} + +// RemoveContainers records that multiple regular containers should be removed by name. +func (m *Mutator) RemoveContainers(names []string) { + for _, name := range names { + m.RemoveContainer(name) + } +} + +// EnsureInitContainer records that an init container must be present in the DaemonSet. +// If an init container with the same name exists, it is replaced; otherwise, it is appended. 
+func (m *Mutator) EnsureInitContainer(container corev1.Container) { + m.active.initContainerPresence = append(m.active.initContainerPresence, containerPresenceOp{ + name: container.Name, + container: &container, + }) +} + +// RemoveInitContainer records that an init container should be removed by name. +func (m *Mutator) RemoveInitContainer(name string) { + m.active.initContainerPresence = append(m.active.initContainerPresence, containerPresenceOp{ + name: name, + container: nil, + }) +} + +// RemoveInitContainers records that multiple init containers should be removed by name. +func (m *Mutator) RemoveInitContainers(names []string) { + for _, name := range names { + m.RemoveInitContainer(name) + } +} + +// EditDaemonSetSpec records a mutation for the DaemonSet's top-level spec. +// +// Planning: +// All DaemonSet spec edits are stored and executed during Apply(). +// +// Execution Order: +// - Within a feature, edits are applied in registration order. +// - Overall, DaemonSet spec edits are executed AFTER daemonset-metadata edits but BEFORE pod template/spec/container edits within the same feature. +// +// If the edit function is nil, the registration is ignored. +func (m *Mutator) EditDaemonSetSpec(edit func(*editors.DaemonSetSpecEditor) error) { + if edit == nil { + return + } + m.active.daemonsetSpecEdits = append(m.active.daemonsetSpecEdits, edit) +} + +// EditPodSpec records a mutation for the DaemonSet's pod spec. +// +// Planning: +// All pod spec edits are stored and executed during Apply(). +// +// Execution Order: +// - Within a feature, edits are applied in registration order. +// - Overall, pod spec edits are executed AFTER metadata edits but BEFORE container edits within the same feature. +// +// If the edit function is nil, the registration is ignored. 
+func (m *Mutator) EditPodSpec(edit func(*editors.PodSpecEditor) error) { + if edit == nil { + return + } + m.active.podSpecEdits = append(m.active.podSpecEdits, edit) +} + +// EditPodTemplateMetadata records a mutation for the DaemonSet's pod template metadata. +// +// Planning: +// All pod template metadata edits are stored and executed during Apply(). +// +// Execution Order: +// - Within a feature, edits are applied in registration order. +// - Overall, pod template metadata edits are executed AFTER daemonset-metadata/spec edits but BEFORE pod spec/container edits within the same feature. +// +// If the edit function is nil, the registration is ignored. +func (m *Mutator) EditPodTemplateMetadata(edit func(*editors.ObjectMetaEditor) error) { + if edit == nil { + return + } + m.active.podTemplateMetadataEdits = append(m.active.podTemplateMetadataEdits, edit) +} + +// EditObjectMetadata records a mutation for the DaemonSet's own metadata. +// +// Planning: +// All object metadata edits are stored and executed during Apply(). +// +// Execution Order: +// - Within a feature, edits are applied in registration order. +// - Overall, object metadata edits are executed BEFORE all other categories within the same feature. +// +// If the edit function is nil, the registration is ignored. +func (m *Mutator) EditObjectMetadata(edit func(*editors.ObjectMetaEditor) error) { + if edit == nil { + return + } + m.active.daemonsetMetadataEdits = append(m.active.daemonsetMetadataEdits, edit) +} + +// EnsureContainerEnvVar records that an environment variable must be present +// in all containers of the DaemonSet. +// +// This is a convenience wrapper over EditContainers. +func (m *Mutator) EnsureContainerEnvVar(ev corev1.EnvVar) { + m.EditContainers(selectors.AllContainers(), func(e *editors.ContainerEditor) error { + e.EnsureEnvVar(ev) + return nil + }) +} + +// RemoveContainerEnvVar records that an environment variable should be +// removed from all containers of the DaemonSet. 
+// +// This is a convenience wrapper over EditContainers. +func (m *Mutator) RemoveContainerEnvVar(name string) { + m.EditContainers(selectors.AllContainers(), func(e *editors.ContainerEditor) error { + e.RemoveEnvVar(name) + return nil + }) +} + +// RemoveContainerEnvVars records that multiple environment variables should be +// removed from all containers of the DaemonSet. +// +// This is a convenience wrapper over EditContainers. +func (m *Mutator) RemoveContainerEnvVars(names []string) { + m.EditContainers(selectors.AllContainers(), func(e *editors.ContainerEditor) error { + e.RemoveEnvVars(names) + return nil + }) +} + +// EnsureContainerArg records that a command-line argument must be present +// in all containers of the DaemonSet. +// +// This is a convenience wrapper over EditContainers. +func (m *Mutator) EnsureContainerArg(arg string) { + m.EditContainers(selectors.AllContainers(), func(e *editors.ContainerEditor) error { + e.EnsureArg(arg) + return nil + }) +} + +// RemoveContainerArg records that a command-line argument should be +// removed from all containers of the DaemonSet. +// +// This is a convenience wrapper over EditContainers. +func (m *Mutator) RemoveContainerArg(arg string) { + m.EditContainers(selectors.AllContainers(), func(e *editors.ContainerEditor) error { + e.RemoveArg(arg) + return nil + }) +} + +// RemoveContainerArgs records that multiple command-line arguments should be +// removed from all containers of the DaemonSet. +// +// This is a convenience wrapper over EditContainers. +func (m *Mutator) RemoveContainerArgs(args []string) { + m.EditContainers(selectors.AllContainers(), func(e *editors.ContainerEditor) error { + e.RemoveArgs(args) + return nil + }) +} + +// Apply executes all recorded mutation intents on the underlying DaemonSet. +// +// Execution Order: +// Features are applied in the order they were registered. +// Within each feature, mutations are applied in this fixed category order: +// 1. Object metadata edits +// 2. 
DaemonSetSpec edits +// 3. Pod template metadata edits +// 4. Pod spec edits +// 5. Regular container presence operations +// 6. Regular container edits +// 7. Init container presence operations +// 8. Init container edits +// +// Within each category of a single feature, edits are applied in their registration order. +// +// Selection & Identity: +// - Container selectors target containers in the state they are in at the start of that feature's +// container phase (after presence operations of the SAME feature have been applied). +// - Selector matching within a phase is evaluated against a snapshot of containers at the start +// of that phase, not the progressively mutated live containers. +// - Later features observe the DaemonSet as modified by all previous features. +// +// Timing: +// No changes are made to the DaemonSet until Apply() is called. +// Selectors and edit functions are executed during this pass. +func (m *Mutator) Apply() error { + for _, plan := range m.plans { + // 1. Object metadata + if len(plan.daemonsetMetadataEdits) > 0 { + editor := editors.NewObjectMetaEditor(&m.current.ObjectMeta) + for _, edit := range plan.daemonsetMetadataEdits { + if err := edit(editor); err != nil { + return err + } + } + } + + // 2. DaemonSetSpec + if len(plan.daemonsetSpecEdits) > 0 { + editor := editors.NewDaemonSetSpecEditor(&m.current.Spec) + for _, edit := range plan.daemonsetSpecEdits { + if err := edit(editor); err != nil { + return err + } + } + } + + // 3. Pod template metadata + if len(plan.podTemplateMetadataEdits) > 0 { + editor := editors.NewObjectMetaEditor(&m.current.Spec.Template.ObjectMeta) + for _, edit := range plan.podTemplateMetadataEdits { + if err := edit(editor); err != nil { + return err + } + } + } + + // 4. Pod spec + if len(plan.podSpecEdits) > 0 { + editor := editors.NewPodSpecEditor(&m.current.Spec.Template.Spec) + for _, edit := range plan.podSpecEdits { + if err := edit(editor); err != nil { + return err + } + } + } + + // 5. 
Regular container presence + for _, op := range plan.containerPresence { + applyPresenceOp(&m.current.Spec.Template.Spec.Containers, op) + } + + // 6. Regular container edits + if len(plan.containerEdits) > 0 { + // Take snapshot of containers AFTER presence ops but BEFORE applying any edits for stable selector matching + snapshots := make([]corev1.Container, len(m.current.Spec.Template.Spec.Containers)) + for i := range m.current.Spec.Template.Spec.Containers { + m.current.Spec.Template.Spec.Containers[i].DeepCopyInto(&snapshots[i]) + } + + for i := range m.current.Spec.Template.Spec.Containers { + container := &m.current.Spec.Template.Spec.Containers[i] + snapshot := &snapshots[i] + editor := editors.NewContainerEditor(container) + for _, ce := range plan.containerEdits { + if ce.selector(i, snapshot) { + if err := ce.edit(editor); err != nil { + return err + } + } + } + } + } + + // 7. Init container presence + for _, op := range plan.initContainerPresence { + applyPresenceOp(&m.current.Spec.Template.Spec.InitContainers, op) + } + + // 8. 
Init container edits + if len(plan.initContainerEdits) > 0 { + // Take snapshot of init containers AFTER presence ops but BEFORE applying any edits + snapshots := make([]corev1.Container, len(m.current.Spec.Template.Spec.InitContainers)) + for i := range m.current.Spec.Template.Spec.InitContainers { + m.current.Spec.Template.Spec.InitContainers[i].DeepCopyInto(&snapshots[i]) + } + + for i := range m.current.Spec.Template.Spec.InitContainers { + container := &m.current.Spec.Template.Spec.InitContainers[i] + snapshot := &snapshots[i] + editor := editors.NewContainerEditor(container) + for _, ce := range plan.initContainerEdits { + if ce.selector(i, snapshot) { + if err := ce.edit(editor); err != nil { + return err + } + } + } + } + } + } + + return nil +} + +func applyPresenceOp(containers *[]corev1.Container, op containerPresenceOp) { + found := -1 + for i, c := range *containers { + if c.Name == op.name { + found = i + break + } + } + + if op.container == nil { + // Remove + if found != -1 { + *containers = append((*containers)[:found], (*containers)[found+1:]...) 
+ } + return + } + + // Ensure + if found != -1 { + (*containers)[found] = *op.container + } else { + *containers = append(*containers, *op.container) + } +} diff --git a/pkg/primitives/daemonset/mutator_test.go b/pkg/primitives/daemonset/mutator_test.go new file mode 100644 index 00000000..0186f7a9 --- /dev/null +++ b/pkg/primitives/daemonset/mutator_test.go @@ -0,0 +1,648 @@ +package daemonset + +import ( + "errors" + "testing" + + "github.com/sourcehawk/operator-component-framework/pkg/mutation/editors" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/selectors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestMutator_EnvVars(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Env: []corev1.EnvVar{ + {Name: "KEEP", Value: "stay"}, + {Name: "CHANGE", Value: "old"}, + {Name: "REMOVE", Value: "gone"}, + }, + }, + }, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + m.EnsureContainerEnvVar(corev1.EnvVar{Name: "CHANGE", Value: "new"}) + m.EnsureContainerEnvVar(corev1.EnvVar{Name: "ADD", Value: "added"}) + m.RemoveContainerEnvVars([]string{"REMOVE", "NONEXISTENT"}) + + err := m.Apply() + require.NoError(t, err) + + env := ds.Spec.Template.Spec.Containers[0].Env + assert.Len(t, env, 3) + + findEnv := func(name string) *corev1.EnvVar { + for i := range env { + if env[i].Name == name { + return &env[i] + } + } + return nil + } + + assert.NotNil(t, findEnv("KEEP")) + assert.Equal(t, "stay", findEnv("KEEP").Value) + assert.Equal(t, "new", findEnv("CHANGE").Value) + assert.Equal(t, "added", findEnv("ADD").Value) + assert.Nil(t, findEnv("REMOVE")) +} + +func TestMutator_Args(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: 
corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Args: []string{"--keep", "--change=old", "--remove"}, + }, + }, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + m.EnsureContainerArg("--change=new") + m.EnsureContainerArg("--add") + m.RemoveContainerArgs([]string{"--remove", "--nonexistent"}) + + err := m.Apply() + require.NoError(t, err) + + args := ds.Spec.Template.Spec.Containers[0].Args + assert.Contains(t, args, "--keep") + assert.Contains(t, args, "--change=old") + assert.Contains(t, args, "--change=new") + assert.Contains(t, args, "--add") + assert.NotContains(t, args, "--remove") +} + +func TestNewMutator(t *testing.T) { + ds := &appsv1.DaemonSet{} + m := NewMutator(ds) + assert.NotNil(t, m) + assert.Equal(t, ds, m.current) + assert.Empty(t, m.plans, "NewMutator must not create any plans") + assert.Nil(t, m.active, "active plan must not be set") +} + +func TestBeginFeature_AddsExactlyOnePlan(t *testing.T) { + ds := &appsv1.DaemonSet{} + m := NewMutator(ds) + + m.BeginFeature() + require.Len(t, m.plans, 1, "BeginFeature must add exactly one plan") + assert.Equal(t, &m.plans[0], m.active, "active must point to the new plan") + + m.BeginFeature() + require.Len(t, m.plans, 2) + assert.Equal(t, &m.plans[1], m.active) +} + +func TestBeginFeature_IsolatesFeaturePlans(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "app"}}, + }, + }, + }, + } + m := NewMutator(ds) + + // Record mutations in the first feature plan + m.BeginFeature() + m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.SetMinReadySeconds(10) + return nil + }) + m.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "v1" + return nil + }) + + // Start a new feature and record different mutations + m.BeginFeature() + 
m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.SetMinReadySeconds(20) + return nil + }) + + // First plan should have its edits, second plan should have its own + assert.Len(t, m.plans[0].daemonsetSpecEdits, 1, "first plan should have one spec edit") + assert.Len(t, m.plans[0].containerEdits, 1, "first plan should have one container edit") + assert.Len(t, m.plans[1].daemonsetSpecEdits, 1, "second plan should have one spec edit") + assert.Empty(t, m.plans[1].containerEdits, "second plan should have no container edits") +} + +func TestMutator_SingleFeature_PlanCount(t *testing.T) { + ds := &appsv1.DaemonSet{} + m := NewMutator(ds) + m.BeginFeature() + m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.SetMinReadySeconds(10) + return nil + }) + + require.NoError(t, m.Apply()) + assert.Len(t, m.plans, 1, "no extra plans should be created during Apply") + assert.Equal(t, int32(10), ds.Spec.MinReadySeconds) +} + +func TestMutator_EditContainers(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "c1"}, + {Name: "c2"}, + }, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + m.EditContainers(selectors.ContainerNamed("c1"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "c1-image" + return nil + }) + m.EditContainers(selectors.AllContainers(), func(e *editors.ContainerEditor) error { + e.EnsureEnvVar(corev1.EnvVar{Name: "GLOBAL", Value: "true"}) + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + assert.Equal(t, "c1-image", ds.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, "", ds.Spec.Template.Spec.Containers[1].Image) + assert.Equal(t, "GLOBAL", ds.Spec.Template.Spec.Containers[0].Env[0].Name) + assert.Equal(t, "GLOBAL", ds.Spec.Template.Spec.Containers[1].Env[0].Name) +} + +func TestMutator_EditPodSpec(t *testing.T) { + ds := &appsv1.DaemonSet{} + m := 
NewMutator(ds) + m.BeginFeature() + m.EditPodSpec(func(e *editors.PodSpecEditor) error { + e.Raw().ServiceAccountName = "my-sa" + return nil + }) + + err := m.Apply() + require.NoError(t, err) + assert.Equal(t, "my-sa", ds.Spec.Template.Spec.ServiceAccountName) +} + +func TestMutator_EditDaemonSetSpec(t *testing.T) { + ds := &appsv1.DaemonSet{} + m := NewMutator(ds) + m.BeginFeature() + m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.SetMinReadySeconds(10) + e.SetRevisionHistoryLimit(5) + return nil + }) + + err := m.Apply() + require.NoError(t, err) + assert.Equal(t, int32(10), ds.Spec.MinReadySeconds) + assert.Equal(t, int32(5), *ds.Spec.RevisionHistoryLimit) +} + +func TestMutator_EditMetadata(t *testing.T) { + ds := &appsv1.DaemonSet{} + m := NewMutator(ds) + m.BeginFeature() + m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error { + e.Raw().Labels = map[string]string{"ds": "label"} + return nil + }) + m.EditPodTemplateMetadata(func(e *editors.ObjectMetaEditor) error { + e.Raw().Annotations = map[string]string{"pod": "ann"} + return nil + }) + + err := m.Apply() + require.NoError(t, err) + assert.Equal(t, "label", ds.Labels["ds"]) + assert.Equal(t, "ann", ds.Spec.Template.Annotations["pod"]) +} + +func TestMutator_Errors(t *testing.T) { + ds := &appsv1.DaemonSet{} + m := NewMutator(ds) + m.BeginFeature() + m.EditPodSpec(func(_ *editors.PodSpecEditor) error { + return errors.New("boom") + }) + + err := m.Apply() + assert.Error(t, err) + assert.Equal(t, "boom", err.Error()) +} + +func TestMutator_Order(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"orig": "label"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "main"}}, + }, + }, + }, + } + + var order []string + + m := NewMutator(ds) + m.BeginFeature() + m.EditContainers(selectors.AllContainers(), func(_ *editors.ContainerEditor) error { + 
order = append(order, "container") + return nil + }) + m.EditPodSpec(func(_ *editors.PodSpecEditor) error { + order = append(order, "podspec") + return nil + }) + m.EditPodTemplateMetadata(func(_ *editors.ObjectMetaEditor) error { + order = append(order, "podmeta") + return nil + }) + m.EditDaemonSetSpec(func(_ *editors.DaemonSetSpecEditor) error { + order = append(order, "dsspec") + return nil + }) + m.EditObjectMetadata(func(_ *editors.ObjectMetaEditor) error { + order = append(order, "dsmeta") + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + expected := []string{"dsmeta", "dsspec", "podmeta", "podspec", "container"} + assert.Equal(t, expected, order) +} + +func TestMutator_InitContainers(t *testing.T) { + const newImage = "new-image" + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "init-1", Image: "old-image"}, + }, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + m.EditInitContainers(selectors.ContainerNamed("init-1"), func(e *editors.ContainerEditor) error { + e.Raw().Image = newImage + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + assert.Equal(t, newImage, ds.Spec.Template.Spec.InitContainers[0].Image) +} + +func TestMutator_ContainerPresence(t *testing.T) { + const newImage = "new-image" + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "app", Image: "app-image"}, + {Name: "sidecar", Image: "sidecar-image"}, + }, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + m.EnsureContainer(corev1.Container{Name: "app", Image: "app-new-image"}) + m.RemoveContainer("sidecar") + m.EnsureContainer(corev1.Container{Name: "new-container", Image: newImage}) + + err := m.Apply() + require.NoError(t, err) + + require.Len(t, ds.Spec.Template.Spec.Containers, 2) + assert.Equal(t, "app", 
ds.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "app-new-image", ds.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, "new-container", ds.Spec.Template.Spec.Containers[1].Name) + assert.Equal(t, newImage, ds.Spec.Template.Spec.Containers[1].Image) +} + +func TestMutator_InitContainerPresence(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "init-1", Image: "init-1-image"}, + }, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + m.EnsureInitContainer(corev1.Container{Name: "init-2", Image: "init-2-image"}) + m.RemoveInitContainers([]string{"init-1"}) + + err := m.Apply() + require.NoError(t, err) + + require.Len(t, ds.Spec.Template.Spec.InitContainers, 1) + assert.Equal(t, "init-2", ds.Spec.Template.Spec.InitContainers[0].Name) +} + +func TestMutator_SelectorSnapshotSemantics(t *testing.T) { + const appV2 = "app-v2" + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "app", Image: "app-image"}, + }, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + + m.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + e.Raw().Name = appV2 + return nil + }) + + m.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "app-image-updated" + return nil + }) + + m.EditContainers(selectors.ContainerNamed(appV2), func(e *editors.ContainerEditor) error { + e.Raw().Image = "should-not-be-set" + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + assert.Equal(t, appV2, ds.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "app-image-updated", ds.Spec.Template.Spec.Containers[0].Image) +} + +func TestMutator_Ordering_PresenceBeforeEdit(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: 
appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{}, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + + m.EditContainers(selectors.ContainerNamed("new-app"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "edited-image" + return nil + }) + + m.EnsureContainer(corev1.Container{Name: "new-app", Image: "original-image"}) + + err := m.Apply() + require.NoError(t, err) + + require.Len(t, ds.Spec.Template.Spec.Containers, 1) + assert.Equal(t, "edited-image", ds.Spec.Template.Spec.Containers[0].Image) +} + +func TestMutator_NilSafety(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "main"}}, + }, + }, + }, + } + m := NewMutator(ds) + m.BeginFeature() + + m.EditContainers(nil, func(_ *editors.ContainerEditor) error { return nil }) + m.EditContainers(selectors.AllContainers(), nil) + m.EditPodSpec(nil) + m.EditPodTemplateMetadata(nil) + m.EditObjectMetadata(nil) + m.EditDaemonSetSpec(nil) + + err := m.Apply() + assert.NoError(t, err) +} + +func TestMutator_CrossFeatureOrdering(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "app", Image: "v1"}}, + }, + }, + }, + } + + m := NewMutator(ds) + + // Feature A: sets min ready seconds, image to v2 + m.BeginFeature() + m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.SetMinReadySeconds(10) + return nil + }) + m.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "v2" + return nil + }) + + // Feature B: sets min ready seconds, image to v3 + m.BeginFeature() + m.EditDaemonSetSpec(func(e *editors.DaemonSetSpecEditor) error { + e.SetMinReadySeconds(20) + return nil + }) + m.EditContainers(selectors.ContainerNamed("app"), func(e 
*editors.ContainerEditor) error { + e.Raw().Image = "v3" + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + assert.Equal(t, int32(20), ds.Spec.MinReadySeconds) + assert.Equal(t, "v3", ds.Spec.Template.Spec.Containers[0].Image) +} + +func TestMutator_WithinFeatureCategoryOrdering(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "original-name"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "app"}}, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + + var executionOrder []string + + m.EditContainers(selectors.AllContainers(), func(_ *editors.ContainerEditor) error { + executionOrder = append(executionOrder, "container") + return nil + }) + m.EditPodSpec(func(_ *editors.PodSpecEditor) error { + executionOrder = append(executionOrder, "podspec") + return nil + }) + m.EditPodTemplateMetadata(func(_ *editors.ObjectMetaEditor) error { + executionOrder = append(executionOrder, "podmeta") + return nil + }) + m.EditDaemonSetSpec(func(_ *editors.DaemonSetSpecEditor) error { + executionOrder = append(executionOrder, "daemonsetspec") + return nil + }) + m.EditObjectMetadata(func(_ *editors.ObjectMetaEditor) error { + executionOrder = append(executionOrder, "daemonsetmeta") + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + expectedOrder := []string{ + "daemonsetmeta", + "daemonsetspec", + "podmeta", + "podspec", + "container", + } + assert.Equal(t, expectedOrder, executionOrder) +} + +func TestMutator_CrossFeatureVisibility(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "app"}}, + }, + }, + }, + } + + m := NewMutator(ds) + + m.BeginFeature() + m.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + e.Raw().Name = "app-v2" + return nil + }) + + 
m.BeginFeature() + m.EditContainers(selectors.ContainerNamed("app-v2"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "v2-image" + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + assert.Equal(t, "app-v2", ds.Spec.Template.Spec.Containers[0].Name) + assert.Equal(t, "v2-image", ds.Spec.Template.Spec.Containers[0].Image) +} + +func TestMutator_InitContainer_OrderingAndSnapshots(t *testing.T) { + ds := &appsv1.DaemonSet{ + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{}, + }, + }, + }, + } + + m := NewMutator(ds) + m.BeginFeature() + + m.EnsureInitContainer(corev1.Container{Name: "init-1", Image: "v1"}) + + m.EditInitContainers(selectors.ContainerNamed("init-1"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "v1-edited" + return nil + }) + + m.EditInitContainers(selectors.ContainerNamed("init-1"), func(e *editors.ContainerEditor) error { + e.Raw().Name = "init-1-renamed" + return nil + }) + + m.EditInitContainers(selectors.ContainerNamed("init-1"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "v1-final" + return nil + }) + + err := m.Apply() + require.NoError(t, err) + + require.Len(t, ds.Spec.Template.Spec.InitContainers, 1) + assert.Equal(t, "init-1-renamed", ds.Spec.Template.Spec.InitContainers[0].Name) + assert.Equal(t, "v1-final", ds.Spec.Template.Spec.InitContainers[0].Image) +} diff --git a/pkg/primitives/daemonset/resource.go b/pkg/primitives/daemonset/resource.go new file mode 100644 index 00000000..f8bdfb0c --- /dev/null +++ b/pkg/primitives/daemonset/resource.go @@ -0,0 +1,136 @@ +package daemonset + +import ( + "github.com/sourcehawk/operator-component-framework/internal/generic" + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + appsv1 "k8s.io/api/apps/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// DefaultFieldApplicator replaces current with a deep copy of desired while +// 
preserving server-managed metadata (ResourceVersion, UID, Generation, etc.), +// shared-controller fields (OwnerReferences, Finalizers), and the Status +// subresource from the original current object. +func DefaultFieldApplicator(current, desired *appsv1.DaemonSet) error { + original := current.DeepCopy() + *current = *desired.DeepCopy() + generic.PreserveServerManagedFields(current, original) + generic.PreserveStatus(current, original) + return nil +} + +// Resource is a high-level abstraction for managing a Kubernetes DaemonSet within a controller's +// reconciliation loop. +// +// It implements several component interfaces to integrate with the operator-component-framework: +// - component.Resource: for basic identity and mutation behavior. +// - component.Alive: for health and readiness tracking. +// - component.Suspendable: for graceful deactivation via deletion. +// - component.DataExtractable: for exporting information after successful reconciliation. +// +// This resource handles the lifecycle of a DaemonSet, including initial creation, +// updates via feature mutations, and status monitoring. +type Resource struct { + base *generic.WorkloadResource[*appsv1.DaemonSet, *Mutator] +} + +// Identity returns a unique identifier for the DaemonSet in the format +// "apps/v1/DaemonSet/<namespace>/<name>". +// +// This identifier is used by the framework's internal tracking and recording +// mechanisms to distinguish this specific DaemonSet from other resources +// managed by the same component. +func (r *Resource) Identity() string { + return r.base.Identity() +} + +// Object returns a copy of the underlying Kubernetes DaemonSet object. +// +// The returned object implements the client.Object interface, making it +// fully compatible with controller-runtime's Client for operations like +// Get, Create, Update, and Patch. +// +// This method is called by the framework to obtain the current state +// of the resource before applying mutations. 
+func (r *Resource) Object() (client.Object, error) { + return r.base.Object() +} + +// Mutate transforms the current state of a Kubernetes DaemonSet into the desired state. +// +// The mutation process follows a specific order: +// 1. Core State: The current object is reset to the desired base state, or +// modified via a customFieldApplicator if one is configured. +// 2. Feature Mutations: All registered feature-based mutations are applied, +// allowing for granular, version-gated changes to the DaemonSet. +// 3. Suspension: If the resource is in a suspending state, the suspension +// logic is applied. +// +// This method is invoked by the framework during the "Update" phase of +// reconciliation. It ensures that the in-memory object reflects all +// configuration and feature requirements before it is sent to the API server. +func (r *Resource) Mutate(current client.Object) error { + return r.base.Mutate(current) +} + +// ConvergingStatus evaluates if the DaemonSet has successfully reached its desired state. +// +// By default, it uses DefaultConvergingStatusHandler, which first ensures that +// status.ObservedGeneration is at least the DaemonSet's metadata.Generation. +// Once the generation has been observed: +// - If DesiredNumberScheduled is zero, the DaemonSet is considered converged (no pods expected). +// - If DesiredNumberScheduled is greater than zero, the DaemonSet is considered converged when +// NumberReady is greater than or equal to DesiredNumberScheduled. +// +// The return value includes a descriptive status (Healthy, Creating, Updating, or Scaling) +// and a human-readable reason, which are used to update the component's conditions. +func (r *Resource) ConvergingStatus(op concepts.ConvergingOperation) (concepts.AliveStatusWithReason, error) { + return r.base.ConvergingStatus(op) +} + +// GraceStatus provides a health assessment of the DaemonSet when it has not yet +// reached full readiness. 
+// +// By default, it uses DefaultGraceStatusHandler, which categorizes the current state into: +// - GraceStatusHealthy: DesiredNumberScheduled is zero (no matching nodes). +// - GraceStatusDegraded: Some pods are ready but below the desired count. +// - GraceStatusDown: No pods are ready. +func (r *Resource) GraceStatus() (concepts.GraceStatusWithReason, error) { + return r.base.GraceStatus() +} + +// DeleteOnSuspend determines whether the DaemonSet should be deleted from the +// cluster when the parent component is suspended. +// +// By default, it uses DefaultDeleteOnSuspendHandler, which returns true because +// DaemonSets have no replicas field and cannot be scaled to zero in-place. +func (r *Resource) DeleteOnSuspend() bool { + return r.base.DeleteOnSuspend() +} + +// Suspend triggers the deactivation of the DaemonSet. +// +// It registers a mutation that will be executed during the next Mutate call. +// The default behavior uses DefaultSuspendMutationHandler, which is a no-op +// because the DaemonSet is deleted on suspension rather than mutated. +func (r *Resource) Suspend() error { + return r.base.Suspend() +} + +// SuspensionStatus monitors the progress of the suspension process. +// +// By default, it uses DefaultSuspensionStatusHandler, which always reports +// Suspended because the DaemonSet is deleted on suspension. +func (r *Resource) SuspensionStatus() (concepts.SuspensionStatusWithReason, error) { + return r.base.SuspensionStatus() +} + +// ExtractData executes registered data extraction functions to harvest information +// from the reconciled DaemonSet. +// +// This is called by the framework after a successful reconciliation of the +// resource. It allows the component to export details that might be needed +// by other resources or higher-level controllers. 
+func (r *Resource) ExtractData() error { + return r.base.ExtractData() +} diff --git a/pkg/primitives/daemonset/resource_test.go b/pkg/primitives/daemonset/resource_test.go new file mode 100644 index 00000000..54d74f39 --- /dev/null +++ b/pkg/primitives/daemonset/resource_test.go @@ -0,0 +1,499 @@ +package daemonset + +import ( + "errors" + "testing" + + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + "github.com/sourcehawk/operator-component-framework/pkg/feature" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/editors" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/selectors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestResource_Identity(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + res, _ := NewBuilder(ds).Build() + + assert.Equal(t, "apps/v1/DaemonSet/test-ns/test-ds", res.Identity()) +} + +func TestResource_Object(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ds", + Namespace: "test-ns", + }, + } + res, _ := NewBuilder(ds).Build() + + obj, err := res.Object() + require.NoError(t, err) + + got, ok := obj.(*appsv1.DaemonSet) + require.True(t, ok) + assert.Equal(t, ds.Name, got.Name) + assert.Equal(t, ds.Namespace, got.Namespace) + + // Ensure it's a deep copy + got.Name = "changed" + assert.Equal(t, "test-ds", ds.Name) +} + +func TestResource_Mutate(t *testing.T) { + desired := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + Labels: map[string]string{"app": "test"}, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + Template: corev1.PodTemplateSpec{ + 
ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"app": "test"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "web", Image: "nginx"}, + }, + }, + }, + }, + } + + res, _ := NewBuilder(desired). + WithMutation(Mutation{ + Name: "test-mutation", + Feature: feature.NewResourceFeature("v1", nil).When(true), + Mutate: func(m *Mutator) error { + m.EnsureContainerEnvVar(corev1.EnvVar{Name: "FOO", Value: "BAR"}) + return nil + }, + }). + Build() + + current := &appsv1.DaemonSet{} + err := res.Mutate(current) + require.NoError(t, err) + + assert.Equal(t, "test", current.Labels["app"]) + assert.Equal(t, "BAR", current.Spec.Template.Spec.Containers[0].Env[0].Value) +} + +func TestResource_Mutate_FeatureOrdering(t *testing.T) { + desired := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "app", Image: "v1"}, + }, + }, + }, + }, + } + + res, _ := NewBuilder(desired). + WithMutation(Mutation{ + Name: "feature-a", + Feature: feature.NewResourceFeature("v1", nil).When(true), + Mutate: func(m *Mutator) error { + m.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "v2" + return nil + }) + return nil + }, + }). + WithMutation(Mutation{ + Name: "feature-b", + Feature: feature.NewResourceFeature("v1", nil).When(true), + Mutate: func(m *Mutator) error { + m.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + if e.Raw().Image == "v2" { + e.Raw().Image = "v3" + } + return nil + }) + return nil + }, + }). 
+ Build() + + current := &appsv1.DaemonSet{} + err := res.Mutate(current) + require.NoError(t, err) + + assert.Equal(t, "v3", current.Spec.Template.Spec.Containers[0].Image) +} + +type mockHandlers struct { + mock.Mock +} + +func (m *mockHandlers) ConvergingStatus(op concepts.ConvergingOperation, d *appsv1.DaemonSet) (concepts.AliveStatusWithReason, error) { + args := m.Called(op, d) + return args.Get(0).(concepts.AliveStatusWithReason), args.Error(1) +} + +func (m *mockHandlers) GraceStatus(d *appsv1.DaemonSet) (concepts.GraceStatusWithReason, error) { + args := m.Called(d) + return args.Get(0).(concepts.GraceStatusWithReason), args.Error(1) +} + +func (m *mockHandlers) SuspensionStatus(d *appsv1.DaemonSet) (concepts.SuspensionStatusWithReason, error) { + args := m.Called(d) + return args.Get(0).(concepts.SuspensionStatusWithReason), args.Error(1) +} + +func (m *mockHandlers) Suspend(mut *Mutator) error { + args := m.Called(mut) + return args.Error(0) +} + +func (m *mockHandlers) DeleteOnSuspend(d *appsv1.DaemonSet) bool { + args := m.Called(d) + return args.Bool(0) +} + +func TestResource_Status(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Status: appsv1.DaemonSetStatus{ + DesiredNumberScheduled: 3, + NumberReady: 2, + }, + } + + t.Run("ConvergingStatus calls handler", func(t *testing.T) { + m := &mockHandlers{} + statusReady := concepts.AliveStatusWithReason{Status: concepts.AliveConvergingStatusHealthy} + m.On("ConvergingStatus", concepts.ConvergingOperationUpdated, ds).Return(statusReady, nil) + + res, _ := NewBuilder(ds). + WithCustomConvergeStatus(m.ConvergingStatus). 
+ Build() + + status, err := res.ConvergingStatus(concepts.ConvergingOperationUpdated) + require.NoError(t, err) + m.AssertExpectations(t) + assert.Equal(t, concepts.AliveConvergingStatusHealthy, status.Status) + }) + + t.Run("ConvergingStatus uses default", func(t *testing.T) { + res, err := NewBuilder(ds).Build() + require.NoError(t, err) + status, err := res.ConvergingStatus(concepts.ConvergingOperationUpdated) + require.NoError(t, err) + assert.Equal(t, concepts.AliveConvergingStatusUpdating, status.Status) + }) + + t.Run("GraceStatus calls handler", func(t *testing.T) { + m := &mockHandlers{} + statusReady := concepts.GraceStatusWithReason{Status: concepts.GraceStatusHealthy} + m.On("GraceStatus", ds).Return(statusReady, nil) + + res, _ := NewBuilder(ds). + WithCustomGraceStatus(m.GraceStatus). + Build() + + status, err := res.GraceStatus() + require.NoError(t, err) + m.AssertExpectations(t) + assert.Equal(t, concepts.GraceStatusHealthy, status.Status) + }) + + t.Run("GraceStatus uses default", func(t *testing.T) { + res, err := NewBuilder(ds).Build() + require.NoError(t, err) + status, err := res.GraceStatus() + require.NoError(t, err) + assert.Equal(t, concepts.GraceStatusDegraded, status.Status) + }) +} + +func TestResource_DeleteOnSuspend(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + t.Run("calls handler", func(t *testing.T) { + m := &mockHandlers{} + m.On("DeleteOnSuspend", ds).Return(false) + + res, err := NewBuilder(ds). + WithCustomSuspendDeletionDecision(m.DeleteOnSuspend). 
+ Build() + require.NoError(t, err) + assert.False(t, res.DeleteOnSuspend()) + m.AssertExpectations(t) + }) + + t.Run("uses default", func(t *testing.T) { + res, err := NewBuilder(ds).Build() + require.NoError(t, err) + assert.True(t, res.DeleteOnSuspend()) + }) +} + +func TestResource_Suspend(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "app", Image: "nginx"}, + }, + }, + }, + }, + } + + t.Run("Suspend registers mutation and Mutate applies it using default handler", func(t *testing.T) { + res, err := NewBuilder(ds).Build() + require.NoError(t, err) + err = res.Suspend() + require.NoError(t, err) + + current := ds.DeepCopy() + err = res.Mutate(current) + require.NoError(t, err) + + // Default suspend mutation is a no-op for DaemonSets + assert.Equal(t, "nginx", current.Spec.Template.Spec.Containers[0].Image) + }) + + t.Run("Suspend uses custom mutation handler", func(t *testing.T) { + m := &mockHandlers{} + m.On("Suspend", mock.Anything).Return(nil).Run(func(args mock.Arguments) { + mut := args.Get(0).(*Mutator) + mut.EditContainers(selectors.ContainerNamed("app"), func(e *editors.ContainerEditor) error { + e.Raw().Image = "paused" + return nil + }) + }) + + res, err := NewBuilder(ds). + WithCustomSuspendMutation(m.Suspend). 
+ Build() + require.NoError(t, err) + err = res.Suspend() + require.NoError(t, err) + + current := ds.DeepCopy() + err = res.Mutate(current) + require.NoError(t, err) + + m.AssertExpectations(t) + assert.Equal(t, "paused", current.Spec.Template.Spec.Containers[0].Image) + }) +} + +func TestResource_SuspensionStatus(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + } + + t.Run("calls handler", func(t *testing.T) { + m := &mockHandlers{} + statusSuspended := concepts.SuspensionStatusWithReason{Status: concepts.SuspensionStatusSuspended} + m.On("SuspensionStatus", ds).Return(statusSuspended, nil) + + res, err := NewBuilder(ds). + WithCustomSuspendStatus(m.SuspensionStatus). + Build() + require.NoError(t, err) + status, err := res.SuspensionStatus() + require.NoError(t, err) + m.AssertExpectations(t) + assert.Equal(t, concepts.SuspensionStatusSuspended, status.Status) + }) + + t.Run("uses default", func(t *testing.T) { + res, err := NewBuilder(ds).Build() + require.NoError(t, err) + status, err := res.SuspensionStatus() + require.NoError(t, err) + assert.Equal(t, concepts.SuspensionStatusSuspended, status.Status) + }) +} + +func TestResource_ExtractData(t *testing.T) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "web", Image: "nginx:latest"}}, + }, + }, + }, + } + + extractedImage := "" + res, err := NewBuilder(ds). + WithDataExtractor(func(d appsv1.DaemonSet) error { + extractedImage = d.Spec.Template.Spec.Containers[0].Image + return nil + }). 
+ Build() + require.NoError(t, err) + + err = res.ExtractData() + require.NoError(t, err) + assert.Equal(t, "nginx:latest", extractedImage) +} + +func TestDefaultFieldApplicator_PreservesServerManagedFields(t *testing.T) { + current := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + ResourceVersion: "12345", + UID: "abc-def", + Generation: 3, + OwnerReferences: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Pod", Name: "other-owner", UID: "other-uid"}, + }, + Finalizers: []string{"finalizer.example.com"}, + }, + } + desired := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + Labels: map[string]string{"app": "test"}, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test"}, + }, + }, + } + + err := DefaultFieldApplicator(current, desired) + require.NoError(t, err) + + // Desired spec and labels are applied + assert.Equal(t, map[string]string{"app": "test"}, current.Spec.Selector.MatchLabels) + assert.Equal(t, "test", current.Labels["app"]) + + // Server-managed fields are preserved + assert.Equal(t, "12345", current.ResourceVersion) + assert.Equal(t, "abc-def", string(current.UID)) + assert.Equal(t, int64(3), current.Generation) + + // Shared-controller fields are preserved + assert.Len(t, current.OwnerReferences, 1) + assert.Equal(t, "other-owner", current.OwnerReferences[0].Name) + assert.Equal(t, []string{"finalizer.example.com"}, current.Finalizers) +} + +func TestDefaultFieldApplicator_PreservesStatus(t *testing.T) { + current := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Status: appsv1.DaemonSetStatus{ + CurrentNumberScheduled: 3, + DesiredNumberScheduled: 3, + NumberReady: 3, + NumberAvailable: 3, + UpdatedNumberScheduled: 3, + }, + } + desired := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + 
Spec: appsv1.DaemonSetSpec{ + MinReadySeconds: 30, + }, + } + + err := DefaultFieldApplicator(current, desired) + require.NoError(t, err) + + // Desired spec is applied + assert.Equal(t, int32(30), current.Spec.MinReadySeconds) + + // Status from the live object is preserved + assert.Equal(t, int32(3), current.Status.CurrentNumberScheduled) + assert.Equal(t, int32(3), current.Status.DesiredNumberScheduled) + assert.Equal(t, int32(3), current.Status.NumberReady) + assert.Equal(t, int32(3), current.Status.NumberAvailable) + assert.Equal(t, int32(3), current.Status.UpdatedNumberScheduled) +} + +func TestResource_CustomFieldApplicator(t *testing.T) { + desired := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + Labels: map[string]string{"app": "test"}, + }, + Spec: appsv1.DaemonSetSpec{ + MinReadySeconds: 30, + }, + } + + applicatorCalled := false + res, _ := NewBuilder(desired). + WithCustomFieldApplicator(func(current *appsv1.DaemonSet, desired *appsv1.DaemonSet) error { + applicatorCalled = true + current.Name = desired.Name + current.Namespace = desired.Namespace + // Only apply MinReadySeconds, ignore labels + current.Spec.MinReadySeconds = desired.Spec.MinReadySeconds + return nil + }). + Build() + + current := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"external": "label"}, + }, + } + err := res.Mutate(current) + require.NoError(t, err) + + assert.True(t, applicatorCalled) + assert.Equal(t, int32(30), current.Spec.MinReadySeconds) + assert.Equal(t, "label", current.Labels["external"], "External label should be preserved") + assert.NotContains(t, current.Labels, "app", "Desired label should NOT be applied by custom applicator") + + t.Run("returns error", func(t *testing.T) { + res, _ := NewBuilder(desired). + WithCustomFieldApplicator(func(_ *appsv1.DaemonSet, _ *appsv1.DaemonSet) error { + return errors.New("applicator error") + }). 
+ Build() + + err := res.Mutate(&appsv1.DaemonSet{}) + require.Error(t, err) + assert.Contains(t, err.Error(), "applicator error") + }) +}