diff --git a/Makefile b/Makefile index f1c483d9..cf346ae7 100644 --- a/Makefile +++ b/Makefile @@ -124,6 +124,7 @@ run-examples: ## Run all examples to verify they execute without error. go run ./examples/configmap-primitive/. go run ./examples/rolebinding-primitive/. go run ./examples/custom-resource-implementation/. + go run ./examples/hpa-primitive/. go run ./examples/clusterrolebinding-primitive/. ##@ E2E Testing diff --git a/docs/primitives.md b/docs/primitives.md index 675f0fe2..ef0359d9 100644 --- a/docs/primitives.md +++ b/docs/primitives.md @@ -152,6 +152,7 @@ have been applied. This means a single mutation can safely add a container and t | `pkg/primitives/clusterrole` | Static | [clusterrole.md](primitives/clusterrole.md) | | `pkg/primitives/clusterrolebinding` | Static | [clusterrolebinding.md](primitives/clusterrolebinding.md) | | `pkg/primitives/pvc` | Integration | [pvc.md](primitives/pvc.md) | +| `pkg/primitives/hpa` | Integration | [hpa.md](primitives/hpa.md) | ## Usage Examples diff --git a/docs/primitives/hpa.md b/docs/primitives/hpa.md new file mode 100644 index 00000000..f624aacb --- /dev/null +++ b/docs/primitives/hpa.md @@ -0,0 +1,347 @@ +# HorizontalPodAutoscaler (HPA) Primitive + +The `hpa` primitive is the framework's built-in integration abstraction for managing Kubernetes +`HorizontalPodAutoscaler` resources (`autoscaling/v2`). It integrates with the component lifecycle as an Operational, +Suspendable resource and provides a structured mutation API for configuring autoscaling behavior. 
+ +## Capabilities + +| Capability | Detail | +| ----------------------- | ------------------------------------------------------------------------------------------------------------- | +| **Operational status** | Inspects `ScalingActive` and `AbleToScale` conditions to report `Operational`, `Pending`, or `Failing` | +| **Suspension (delete)** | Deletes the HPA on suspend to prevent it from scaling the target back up; recreated on resume | +| **Mutation pipeline** | Typed editors for HPA spec (metrics, scale target, behavior) and object metadata | +| **Data extraction** | Allows custom extraction from the reconciled HPA object via a registered data extractor (`WithDataExtractor`) | + +## Building an HPA Primitive + +```go +import "github.com/sourcehawk/operator-component-framework/pkg/primitives/hpa" + +base := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "web-hpa", + Namespace: owner.Namespace, + }, + Spec: autoscalingv2.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "web", + }, + MinReplicas: ptr.To(int32(2)), + MaxReplicas: 10, + }, +} + +resource, err := hpa.NewBuilder(base). + WithMutation(CPUMetricMutation(owner.Spec.Version)). + Build() +``` + +## Mutations + +Mutations are the primary mechanism for modifying an HPA beyond its baseline. Each mutation is a named function that +receives a `*Mutator` and records edit intent through typed editors. + +The `Feature` field controls when a mutation applies. Leaving it nil applies the mutation unconditionally. 
A feature +with no version constraints and no `When()` conditions is also always enabled: + +```go +func CPUMetricMutation(version string) hpa.Mutation { + return hpa.Mutation{ + Name: "cpu-metric", + Feature: feature.NewResourceFeature(version, nil), // always enabled + Mutate: func(m *hpa.Mutator) error { + // record edits here + return nil + }, + } +} +``` + +Mutations are applied in the order they are registered with the builder. If one mutation depends on a change made by +another, register the dependency first. + +### Boolean-gated mutations + +Use `When(bool)` to gate a mutation on a runtime condition: + +```go +func CustomMetricsMutation(version string, enabled bool) hpa.Mutation { + return hpa.Mutation{ + Name: "custom-metrics", + Feature: feature.NewResourceFeature(version, nil).When(enabled), + Mutate: func(m *hpa.Mutator) error { + m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.EnsureMetric(autoscalingv2.MetricSpec{ + Type: autoscalingv2.PodsMetricSourceType, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{Name: "requests_per_second"}, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("100")), + }, + }, + }) + return nil + }) + return nil + }, + } +} +``` + +### Version-gated mutations + +Pass a `[]feature.VersionConstraint` to gate on a semver range: + +```go +var legacyConstraint = mustSemverConstraint("< 2.0.0") + +func LegacyScalingMutation(version string) hpa.Mutation { + return hpa.Mutation{ + Name: "legacy-scaling", + Feature: feature.NewResourceFeature( + version, + []feature.VersionConstraint{legacyConstraint}, + ), + Mutate: func(m *hpa.Mutator) error { + m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.SetMaxReplicas(5) // legacy apps limited to 5 replicas + return nil + }) + return nil + }, + } +} +``` + +All version constraints and `When()` conditions must be satisfied for a mutation to apply. 
+ +## Internal Mutation Ordering + +Within a single mutation, edit operations are grouped into categories and applied in a fixed sequence regardless of the +order they are recorded: + +| Step | Category | What it affects | +| ---- | -------------- | -------------------------------------------------------------- | +| 1 | Metadata edits | Labels and annotations on the `HorizontalPodAutoscaler` object | +| 2 | HPA spec edits | Scale target ref, min/max replicas, metrics, behavior | + +## Editors + +### HPASpecEditor + +Controls HPA-level settings via `m.EditHPASpec`. + +Available methods: `SetScaleTargetRef`, `SetMinReplicas`, `SetMaxReplicas`, `EnsureMetric`, `RemoveMetric`, +`SetBehavior`, `Raw`. + +```go +m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.SetMinReplicas(ptr.To(int32(2))) + e.SetMaxReplicas(10) + e.EnsureMetric(autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To(int32(80)), + }, + }, + }) + return nil +}) +``` + +#### EnsureMetric + +`EnsureMetric` upserts a metric based on its full metric identity, not just type and name. Matching rules: + +| Metric type | Match key | +| ----------------- | --------------------------------------------------------------------------------------------------------- | +| Resource | `Resource.Name` (e.g. 
`cpu`, `memory`) | +| Pods | `Pods.Metric.Name` + `Pods.Metric.Selector` (label selector; `nil` is a distinct identity) | +| Object | `Object.DescribedObject` (`APIVersion`, `Kind`, `Name`) + `Object.Metric.Name` + `Object.Metric.Selector` | +| ContainerResource | `ContainerResource.Name` + `ContainerResource.Container` | +| External | `External.Metric.Name` + `External.Metric.Selector` (label selector; `nil` is a distinct identity) | + +If a matching entry exists it is replaced; otherwise the metric is appended. Be aware that different selectors or +described objects result in different metric identities, even if the metric names are the same. + +#### RemoveMetric + +`RemoveMetric(type, name)` removes all metrics matching the given type and name. For ContainerResource metrics, all +container variants of the named resource are removed. + +#### SetBehavior + +`SetBehavior` sets the autoscaling behavior (stabilization windows, scaling policies). Pass `nil` to remove custom +behavior and use Kubernetes defaults. + +```go +m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.SetBehavior(&autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To(int32(300)), + }, + }) + return nil +}) +``` + +For fields not covered by the typed API, use `Raw()`: + +```go +m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.Raw().MinReplicas = ptr.To(int32(1)) + return nil +}) +``` + +### ObjectMetaEditor + +Modifies labels and annotations via `m.EditObjectMetadata`. + +Available methods: `EnsureLabel`, `RemoveLabel`, `EnsureAnnotation`, `RemoveAnnotation`, `Raw`. 
+ +```go +m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error { + e.EnsureLabel("app.kubernetes.io/managed-by", "my-operator") + e.EnsureAnnotation("autoscaling.example.io/policy", "aggressive") + return nil +}) +``` + +### Raw Escape Hatch + +All editors provide a `.Raw()` method for direct access to the underlying Kubernetes struct when the typed API is +insufficient. + +## Operational Status + +The default operational status handler inspects `Status.Conditions`: + +| Status | Condition | +| ------------- | ------------------------------------------------------- | +| `Operational` | `ScalingActive` is `True` | +| `Pending` | Conditions absent, or `ScalingActive` is `Unknown` | +| `Failing` | `ScalingActive` is `False`, or `AbleToScale` is `False` | + +`AbleToScale = False` takes precedence over `ScalingActive = True` because an HPA that cannot actually scale is not +operationally healthy regardless of what the scaling-active condition reports. + +Override with `WithCustomOperationalStatus`: + +```go +hpa.NewBuilder(base). + WithCustomOperationalStatus(func(op concepts.ConvergingOperation, h *autoscalingv2.HorizontalPodAutoscaler) (concepts.OperationalStatusWithReason, error) { + status, err := hpa.DefaultOperationalStatusHandler(op, h) + if err != nil { + return status, err + } + // Add custom logic + return status, nil + }) +``` + +## Suspension + +HPA has no native suspend field. The default behavior is **delete on suspend**: the HPA is removed when the component is +suspended (`DefaultDeleteOnSuspendHandler` returns `true`). A retained HPA would conflict with the suspension of its +scale target (e.g. a Deployment scaled to zero) because the Kubernetes HPA controller continuously enforces +`minReplicas` and would scale the target back up. Deleting the HPA prevents this interference. On resume the framework +recreates the HPA with the desired spec. 
+ +The default suspension status handler reports `Suspended` immediately with the reason +`"HorizontalPodAutoscaler suspended to prevent scaling interference"`. Override this handler with +`WithCustomSuspendStatus` if you need a reason that reflects custom deletion behaviour. + +Override with `WithCustomSuspendDeletionDecision` if you want to retain the HPA during suspension (e.g. when the scale +target is managed externally and will not be present during suspension): + +```go +hpa.NewBuilder(base). + WithCustomSuspendDeletionDecision(func(_ *autoscalingv2.HorizontalPodAutoscaler) bool { + return false // keep HPA during suspension + }) +``` + +## Full Example: CPU and Memory Autoscaling + +```go +func AutoscalingMutation(version string) hpa.Mutation { + return hpa.Mutation{ + Name: "autoscaling-config", + Feature: feature.NewResourceFeature(version, nil), + Mutate: func(m *hpa.Mutator) error { + m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.SetMinReplicas(ptr.To(int32(2))) + e.SetMaxReplicas(10) + + // CPU-based scaling + e.EnsureMetric(autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To(int32(70)), + }, + }, + }) + + // Memory-based scaling + e.EnsureMetric(autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceMemory, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To(int32(80)), + }, + }, + }) + + // Conservative scale-down + e.SetBehavior(&autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To(int32(300)), + }, + }) + + return nil + }) + + m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error { + 
e.EnsureLabel("app.kubernetes.io/version", version) + return nil + }) + + return nil + }, + } +} +``` + +Note: although `EditObjectMetadata` is called after `EditHPASpec` in the source, metadata edits are applied first per +the internal ordering. Order your source calls for readability — the framework handles execution order. + +## Guidance + +**`Feature: nil` applies unconditionally.** Omit `Feature` (leave it nil) for mutations that should always run. Use +`feature.NewResourceFeature(version, constraints)` when version-based gating is needed, and chain `.When(bool)` for +boolean conditions. + +**Register mutations in dependency order.** If mutation B relies on a metric added by mutation A, register A first. + +**Use `EnsureMetric` for idempotent metric management.** The editor matches by full metric identity (type, name, +selector, and described object where applicable), so repeated calls with the same identity update rather than duplicate. + +**HPA deletion on suspend is the default.** The primitive's default `DeleteOnSuspend` decision removes the HPA during +component suspension (matching the "Suspension (delete)" capability). This prevents the Kubernetes HPA controller from +scaling the target back up while it is suspended. On resume the framework recreates the HPA with the desired spec. If +you need the HPA to be retained during suspension — for example, when the scale target is managed externally and will +not be present — override `WithCustomSuspendDeletionDecision` to return `false`. diff --git a/examples/hpa-primitive/README.md b/examples/hpa-primitive/README.md new file mode 100644 index 00000000..2f22716f --- /dev/null +++ b/examples/hpa-primitive/README.md @@ -0,0 +1,35 @@ +# HPA Primitive Example + +This example demonstrates the usage of the `hpa` primitive within the operator component framework. 
It shows how to
+manage a Kubernetes HorizontalPodAutoscaler as a component of a larger application, utilizing features like:
+
+- **Base Construction**: Initializing an HPA with a scale target ref, min/max replicas, and labels.
+- **Feature Mutations**: Applying version-gated or conditional changes (CPU metrics, memory metrics, scaling behavior)
+  using the `Mutator`.
+- **Operational Status**: Reporting HPA health based on `ScalingActive` and `AbleToScale` conditions.
+- **Suspension (Delete)**: Demonstrating delete-on-suspend behavior — the HPA is removed during suspension to prevent it
+  from scaling the target back up.
+- **Data Extraction**: Harvesting information from the reconciled resource.
+
+## Directory Structure
+
+- `app/`: Defines the mock `ExampleApp` CRD and the controller that uses the component framework.
+- `features/`: Contains modular feature definitions:
+  - `mutations.go`: CPU metric, memory metric, and scale behavior feature mutations.
+- `resources/`: Contains the central `NewHPAResource` factory that assembles all features using the `hpa.Builder`.
+- `main.go`: A standalone entry point that demonstrates a reconciliation loop using a fake client.
+
+## Running the Example
+
+You can run this example directly using `go run`:
+
+```bash
+go run ./examples/hpa-primitive/.
+```
+
+This will:
+
+1. Initialize a fake Kubernetes client.
+2. Create an `ExampleApp` owner object.
+3. Reconcile the `ExampleApp` components through multiple spec changes.
+4. Print the resulting status conditions and HPA state.
diff --git a/examples/hpa-primitive/app/controller.go b/examples/hpa-primitive/app/controller.go
new file mode 100644
index 00000000..4d552058
--- /dev/null
+++ b/examples/hpa-primitive/app/controller.go
@@ -0,0 +1,54 @@
+// Package app provides a sample controller using the HPA primitive.
+package app + +import ( + "context" + + "github.com/sourcehawk/operator-component-framework/pkg/component" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ExampleController reconciles an ExampleApp object using the component framework. +type ExampleController struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder + Metrics component.Recorder + + // NewHPAResource is a factory function to create the HPA resource. + // This allows us to inject the resource construction logic. + NewHPAResource func(*ExampleApp) (component.Resource, error) +} + +// Reconcile performs the reconciliation for a single ExampleApp. +func (r *ExampleController) Reconcile(ctx context.Context, owner *ExampleApp) error { + // 1. Build the HPA resource for this owner. + hpaResource, err := r.NewHPAResource(owner) + if err != nil { + return err + } + + // 2. Build the component that manages the HPA. + comp, err := component.NewComponentBuilder(). + WithName("example-app"). + WithConditionType("AppReady"). + WithResource(hpaResource, component.ResourceOptions{}). + Suspend(owner.Spec.Suspended). + Build() + if err != nil { + return err + } + + // 3. Execute the component reconciliation. + resCtx := component.ReconcileContext{ + Client: r.Client, + Scheme: r.Scheme, + Recorder: r.Recorder, + Metrics: r.Metrics, + Owner: owner, + } + + return comp.Reconcile(ctx, resCtx) +} diff --git a/examples/hpa-primitive/app/owner.go b/examples/hpa-primitive/app/owner.go new file mode 100644 index 00000000..6b611a02 --- /dev/null +++ b/examples/hpa-primitive/app/owner.go @@ -0,0 +1,20 @@ +package app + +import ( + sharedapp "github.com/sourcehawk/operator-component-framework/examples/shared/app" +) + +// ExampleApp re-exports the shared CRD type so callers in this package need no import alias. +type ExampleApp = sharedapp.ExampleApp + +// ExampleAppSpec re-exports the shared spec type. 
+type ExampleAppSpec = sharedapp.ExampleAppSpec + +// ExampleAppStatus re-exports the shared status type. +type ExampleAppStatus = sharedapp.ExampleAppStatus + +// ExampleAppList re-exports the shared list type. +type ExampleAppList = sharedapp.ExampleAppList + +// AddToScheme registers the ExampleApp types with the given scheme. +var AddToScheme = sharedapp.AddToScheme diff --git a/examples/hpa-primitive/features/mutations.go b/examples/hpa-primitive/features/mutations.go new file mode 100644 index 00000000..35c612fd --- /dev/null +++ b/examples/hpa-primitive/features/mutations.go @@ -0,0 +1,83 @@ +// Package features provides sample features for the HPA primitive example. +package features + +import ( + "github.com/sourcehawk/operator-component-framework/pkg/feature" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/editors" + "github.com/sourcehawk/operator-component-framework/pkg/primitives/hpa" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" +) + +// CPUMetricFeature configures CPU-based autoscaling with the given utilization target. +func CPUMetricFeature(version string, targetUtilization int32) hpa.Mutation { + return hpa.Mutation{ + Name: "CPUMetric", + Feature: feature.NewResourceFeature(version, nil), + Mutate: func(m *hpa.Mutator) error { + m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.EnsureMetric(autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To(targetUtilization), + }, + }, + }) + return nil + }) + + m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error { + e.EnsureLabel("app.kubernetes.io/version", version) + return nil + }) + + return nil + }, + } +} + +// MemoryMetricFeature adds memory-based autoscaling when enabled. 
+func MemoryMetricFeature(enabled bool, targetUtilization int32) hpa.Mutation { + return hpa.Mutation{ + Name: "MemoryMetric", + Feature: feature.NewResourceFeature("any", nil).When(enabled), + Mutate: func(m *hpa.Mutator) error { + m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.EnsureMetric(autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceMemory, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To(targetUtilization), + }, + }, + }) + return nil + }) + return nil + }, + } +} + +// ScaleBehaviorFeature configures conservative scale-down behavior. +func ScaleBehaviorFeature() hpa.Mutation { + return hpa.Mutation{ + Name: "ScaleBehavior", + Mutate: func(m *hpa.Mutator) error { + m.EditHPASpec(func(e *editors.HPASpecEditor) error { + e.SetBehavior(&autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To(int32(300)), + }, + }) + return nil + }) + return nil + }, + } +} diff --git a/examples/hpa-primitive/main.go b/examples/hpa-primitive/main.go new file mode 100644 index 00000000..fe087c9b --- /dev/null +++ b/examples/hpa-primitive/main.go @@ -0,0 +1,118 @@ +// Package main is the entry point for the HPA primitive example. +package main + +import ( + "context" + "fmt" + "os" + + ocm "github.com/sourcehawk/go-crd-condition-metrics/pkg/crd-condition-metrics" + "github.com/sourcehawk/operator-component-framework/examples/hpa-primitive/app" + "github.com/sourcehawk/operator-component-framework/examples/hpa-primitive/resources" + autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func main() { + // 1. Setup scheme and fake client for the example. 
+ scheme := runtime.NewScheme() + if err := app.AddToScheme(scheme); err != nil { + fmt.Fprintf(os.Stderr, "failed to add to scheme: %v\n", err) + os.Exit(1) + } + if err := autoscalingv2.AddToScheme(scheme); err != nil { + fmt.Fprintf(os.Stderr, "failed to add autoscaling/v2 to scheme: %v\n", err) + os.Exit(1) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithStatusSubresource(&app.ExampleApp{}). + Build() + + // 2. Create an example Owner object. + owner := &app.ExampleApp{ + Spec: app.ExampleAppSpec{ + Version: "1.2.3", + EnableTracing: false, + EnableMetrics: true, + Suspended: false, + }, + } + owner.Name = "my-example-app" + owner.Namespace = "default" + + if err := fakeClient.Create(context.Background(), owner); err != nil { + fmt.Fprintf(os.Stderr, "failed to create owner: %v\n", err) + os.Exit(1) + } + + // 3. Initialize our controller. + gauge := ocm.NewOperatorConditionsGauge("example") + controller := &app.ExampleController{ + Client: fakeClient, + Scheme: scheme, + Recorder: record.NewFakeRecorder(100), + Metrics: &ocm.ConditionMetricRecorder{ + Controller: "example-controller", + OperatorConditionsGauge: gauge, + }, + + // Pass the HPA resource factory. + NewHPAResource: resources.NewHPAResource, + } + + // 4. Run reconciliation with multiple spec versions. 
+ specs := []app.ExampleAppSpec{ + { + Version: "1.2.3", + EnableMetrics: true, + Suspended: false, + }, + { + Version: "1.2.4", // Version upgrade + EnableMetrics: true, + Suspended: false, + }, + { + Version: "1.2.4", + EnableMetrics: false, // Disable memory metric + Suspended: false, + }, + { + Version: "1.2.4", + EnableMetrics: false, + Suspended: true, // Suspend the app (HPA is deleted to prevent scaling interference) + }, + } + + ctx := context.Background() + + for i, spec := range specs { + fmt.Printf("\n--- Step %d: Applying Spec: Version=%s, Metrics=%v, Suspended=%v ---\n", + i+1, spec.Version, spec.EnableMetrics, spec.Suspended) + + // Update owner spec + owner.Spec = spec + if err := fakeClient.Update(ctx, owner); err != nil { + fmt.Fprintf(os.Stderr, "failed to update owner: %v\n", err) + os.Exit(1) + } + + fmt.Println("Running reconciliation...") + if err := controller.Reconcile(ctx, owner); err != nil { + fmt.Fprintf(os.Stderr, "reconciliation failed: %v\n", err) + os.Exit(1) + } + + // Inspect the owner conditions. + for _, cond := range owner.Status.Conditions { + fmt.Printf("Condition: %s, Status: %s, Reason: %s\n", + cond.Type, cond.Status, cond.Reason) + } + } + + fmt.Println("\nReconciliation sequence completed successfully!") +} diff --git a/examples/hpa-primitive/resources/hpa.go b/examples/hpa-primitive/resources/hpa.go new file mode 100644 index 00000000..101dd948 --- /dev/null +++ b/examples/hpa-primitive/resources/hpa.go @@ -0,0 +1,74 @@ +// Package resources provides resource implementations for the HPA primitive example. 
+package resources + +import ( + "fmt" + + "github.com/sourcehawk/operator-component-framework/examples/hpa-primitive/app" + "github.com/sourcehawk/operator-component-framework/examples/hpa-primitive/features" + "github.com/sourcehawk/operator-component-framework/pkg/component" + "github.com/sourcehawk/operator-component-framework/pkg/primitives/hpa" + autoscalingv2 "k8s.io/api/autoscaling/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/yaml" +) + +// NewHPAResource constructs an HPA primitive resource with all the features. +func NewHPAResource(owner *app.ExampleApp) (component.Resource, error) { + // 1. Create the base HPA object. + base := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: owner.Name + "-hpa", + Namespace: owner.Namespace, + Labels: map[string]string{ + "app": owner.Name, + }, + }, + Spec: autoscalingv2.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: owner.Name + "-deployment", + }, + MinReplicas: ptr.To(int32(2)), + MaxReplicas: 10, + }, + } + + // 2. Initialize the HPA builder. + builder := hpa.NewBuilder(base) + + // 3. Apply mutations (features) based on the owner spec. + builder.WithMutation(features.CPUMetricFeature(owner.Spec.Version, 70)) + builder.WithMutation(features.MemoryMetricFeature(owner.Spec.EnableMetrics, 80)) + builder.WithMutation(features.ScaleBehaviorFeature()) + + // 4. Configure data extraction. + builder.WithDataExtractor(func(h autoscalingv2.HorizontalPodAutoscaler) error { + fmt.Printf("HPA %s: min=%d, max=%d, metrics=%d\n", + h.Name, + derefInt32(h.Spec.MinReplicas, 1), + h.Spec.MaxReplicas, + len(h.Spec.Metrics), + ) + + y, err := yaml.Marshal(h) + if err != nil { + return fmt.Errorf("failed to marshal HPA to yaml: %w", err) + } + fmt.Printf("Complete HPA Resource:\n---\n%s\n---\n", string(y)) + + return nil + }) + + // 5. Build the final resource. 
	return builder.Build()
+}
+
+func derefInt32(p *int32, defaultVal int32) int32 {
+	if p != nil {
+		return *p
+	}
+	return defaultVal
+}
diff --git a/pkg/mutation/editors/hpaspec.go b/pkg/mutation/editors/hpaspec.go
new file mode 100644
index 00000000..8ad039d5
--- /dev/null
+++ b/pkg/mutation/editors/hpaspec.go
@@ -0,0 +1,198 @@
+package editors
+
+import (
+	"maps"
+	"slices"
+	"strings"
+
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// HPASpecEditor provides a typed API for mutating a Kubernetes HorizontalPodAutoscalerSpec.
+type HPASpecEditor struct {
+	spec *autoscalingv2.HorizontalPodAutoscalerSpec
+}
+
+// NewHPASpecEditor creates a new HPASpecEditor for the given HorizontalPodAutoscalerSpec.
+func NewHPASpecEditor(spec *autoscalingv2.HorizontalPodAutoscalerSpec) *HPASpecEditor {
+	return &HPASpecEditor{spec: spec}
+}
+
+// Raw returns the underlying *autoscalingv2.HorizontalPodAutoscalerSpec.
+//
+// This is an escape hatch for cases where the typed API is insufficient.
+func (e *HPASpecEditor) Raw() *autoscalingv2.HorizontalPodAutoscalerSpec {
+	return e.spec
+}
+
+// SetScaleTargetRef sets the reference to the resource being scaled.
+func (e *HPASpecEditor) SetScaleTargetRef(ref autoscalingv2.CrossVersionObjectReference) {
+	e.spec.ScaleTargetRef = ref
+}
+
+// SetMinReplicas sets the lower bound for the number of replicas.
+// Passing nil removes the lower bound (Kubernetes defaults to 1).
+func (e *HPASpecEditor) SetMinReplicas(n *int32) {
+	e.spec.MinReplicas = n
+}
+
+// SetMaxReplicas sets the upper bound for the number of replicas.
+func (e *HPASpecEditor) SetMaxReplicas(n int32) {
+	e.spec.MaxReplicas = n
+}
+
+// EnsureMetric upserts a metric in the spec's Metrics slice.
+//
+// Matching is performed by MetricSpec.Type. Within that type, matching is refined
+// by the full identity of the metric source:
+// - Resource: matched by Resource.Name
+// - Pods: matched by Pods.Metric (Name + Selector)
+// - Object: matched by Object.DescribedObject + Object.Metric (Name + Selector)
+// - ContainerResource: matched by ContainerResource.Name + ContainerResource.Container
+// - External: matched by External.Metric (Name + Selector)
+//
+// If a matching entry exists it is replaced; otherwise the metric is appended.
+func (e *HPASpecEditor) EnsureMetric(metric autoscalingv2.MetricSpec) {
+	for i, existing := range e.spec.Metrics {
+		if metricsMatch(existing, metric) {
+			e.spec.Metrics[i] = metric
+			return
+		}
+	}
+	e.spec.Metrics = append(e.spec.Metrics, metric)
+}
+
+// RemoveMetric removes all metrics matching the given type and name.
+//
+// For Resource metrics, name corresponds to the resource name (e.g. "cpu").
+// For Pods, Object, and External metrics, name corresponds to the metric name.
+// For ContainerResource metrics, name corresponds to the resource name; all
+// container variants of that resource are removed.
+//
+// This method removes every metric entry that matches the type and name,
+// regardless of selector or described object. For fine-grained removal of a
+// single metric identity, use [HPASpecEditor.Raw] and modify the Metrics slice directly.
+//
+// If no matching metric is found, this is a no-op.
+func (e *HPASpecEditor) RemoveMetric(metricType autoscalingv2.MetricSourceType, name string) {
+	filtered := e.spec.Metrics[:0]
+	for _, m := range e.spec.Metrics {
+		if m.Type == metricType && metricName(m) == name {
+			continue
+		}
+		filtered = append(filtered, m)
+	}
+	e.spec.Metrics = filtered
+}
+
+// SetBehavior sets the autoscaling behavior configuration.
+// Passing nil removes custom behavior (Kubernetes uses defaults).
+func (e *HPASpecEditor) SetBehavior(behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) {
+	e.spec.Behavior = behavior
+}
+
+// metricsMatch reports whether two MetricSpec values target the same metric.
+func metricsMatch(a, b autoscalingv2.MetricSpec) bool {
+	if a.Type != b.Type {
+		return false
+	}
+	switch a.Type {
+	case autoscalingv2.ResourceMetricSourceType:
+		if a.Resource == nil || b.Resource == nil {
+			return false
+		}
+		return a.Resource.Name == b.Resource.Name
+	case autoscalingv2.PodsMetricSourceType:
+		if a.Pods == nil || b.Pods == nil {
+			return false
+		}
+		return metricIdentifiersMatch(a.Pods.Metric, b.Pods.Metric)
+	case autoscalingv2.ObjectMetricSourceType:
+		if a.Object == nil || b.Object == nil {
+			return false
+		}
+		return a.Object.DescribedObject == b.Object.DescribedObject &&
+			metricIdentifiersMatch(a.Object.Metric, b.Object.Metric)
+	case autoscalingv2.ContainerResourceMetricSourceType:
+		if a.ContainerResource == nil || b.ContainerResource == nil {
+			return false
+		}
+		return a.ContainerResource.Name == b.ContainerResource.Name &&
+			a.ContainerResource.Container == b.ContainerResource.Container
+	case autoscalingv2.ExternalMetricSourceType:
+		if a.External == nil || b.External == nil {
+			return false
+		}
+		return metricIdentifiersMatch(a.External.Metric, b.External.Metric)
+	default:
+		return false
+	}
+}
+
+// metricIdentifiersMatch reports whether two MetricIdentifier values match.
+// It compares both Name and Selector. Selector comparison is semantic: label
+// selectors that differ only in the ordering of MatchExpressions or expression
+// Values are treated as equal.
+func metricIdentifiersMatch(a, b autoscalingv2.MetricIdentifier) bool {
+	return a.Name == b.Name && labelSelectorsEqual(a.Selector, b.Selector)
+}
+
+// labelSelectorsEqual performs a semantic comparison of two LabelSelectors.
+// Unlike reflect.DeepEqual it is insensitive to the ordering of
+// MatchExpressions and the Values within each expression.
+func labelSelectorsEqual(a, b *metav1.LabelSelector) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	if a == nil || b == nil {
+		return false
+	}
+	if !maps.Equal(a.MatchLabels, b.MatchLabels) {
+		return false
+	}
+	if len(a.MatchExpressions) != len(b.MatchExpressions) {
+		return false
+	}
+
+	// Build a canonical key for each expression and compare as sorted sets.
+	canon := func(exprs []metav1.LabelSelectorRequirement) []string {
+		keys := make([]string, len(exprs))
+		for i, e := range exprs {
+			vals := slices.Clone(e.Values)
+			slices.Sort(vals)
+			keys[i] = e.Key + "\x00" + string(e.Operator) + "\x00" + strings.Join(vals, "\x00")
+		}
+		slices.Sort(keys)
+		return keys
+	}
+
+	return slices.Equal(canon(a.MatchExpressions), canon(b.MatchExpressions))
+}
+
+// metricName extracts the identifying name from a MetricSpec.
+func metricName(m autoscalingv2.MetricSpec) string {
+	switch m.Type {
+	case autoscalingv2.ResourceMetricSourceType:
+		if m.Resource != nil {
+			return string(m.Resource.Name)
+		}
+	case autoscalingv2.PodsMetricSourceType:
+		if m.Pods != nil {
+			return m.Pods.Metric.Name
+		}
+	case autoscalingv2.ObjectMetricSourceType:
+		if m.Object != nil {
+			return m.Object.Metric.Name
+		}
+	case autoscalingv2.ContainerResourceMetricSourceType:
+		if m.ContainerResource != nil {
+			return string(m.ContainerResource.Name)
+		}
+	case autoscalingv2.ExternalMetricSourceType:
+		if m.External != nil {
+			return m.External.Metric.Name
+		}
+	}
+	return ""
+}
diff --git a/pkg/mutation/editors/hpaspec_test.go b/pkg/mutation/editors/hpaspec_test.go
new file mode 100644
index 00000000..26704f85
--- /dev/null
+++ b/pkg/mutation/editors/hpaspec_test.go
@@ -0,0 +1,404 @@
+package editors
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func int32Ptr(v int32) *int32 { return &v }
+
+func resourcePtr(s string) *resource.Quantity {
+	q := resource.MustParse(s)
+	return &q
+}
+
+func TestHPASpecEditor_SetScaleTargetRef(t *testing.T) {
+	spec := &autoscalingv2.HorizontalPodAutoscalerSpec{}
+	e := NewHPASpecEditor(spec)
+
+	ref := autoscalingv2.CrossVersionObjectReference{
+		APIVersion: "apps/v1",
+		Kind:       "Deployment",
+		Name:       "web",
+	}
+	e.SetScaleTargetRef(ref)
+
+	assert.Equal(t, ref, spec.ScaleTargetRef)
+}
+
+func TestHPASpecEditor_SetMinReplicas(t *testing.T) {
+	spec := &autoscalingv2.HorizontalPodAutoscalerSpec{}
+	e := NewHPASpecEditor(spec)
+
+	e.SetMinReplicas(int32Ptr(2))
+	require.NotNil(t, spec.MinReplicas)
+	assert.Equal(t, int32(2), *spec.MinReplicas)
+
+	e.SetMinReplicas(nil)
+	assert.Nil(t, spec.MinReplicas)
+}
+
+func TestHPASpecEditor_SetMaxReplicas(t *testing.T) {
+	spec := &autoscalingv2.HorizontalPodAutoscalerSpec{}
+	e := NewHPASpecEditor(spec)
+
+	e.SetMaxReplicas(10)
+	assert.Equal(t, int32(10), spec.MaxReplicas)
+}
+
+func TestHPASpecEditor_EnsureMetric_Resource(t *testing.T) {
+	spec := &autoscalingv2.HorizontalPodAutoscalerSpec{}
+	e := NewHPASpecEditor(spec)
+
+	cpuMetric := autoscalingv2.MetricSpec{
+		Type: autoscalingv2.ResourceMetricSourceType,
+		Resource: &autoscalingv2.ResourceMetricSource{
+			Name: corev1.ResourceCPU,
+			Target: autoscalingv2.MetricTarget{
+				Type:               autoscalingv2.UtilizationMetricType,
+				AverageUtilization: int32Ptr(80),
+			},
+		},
+	}
+
+	// Add CPU metric
+	e.EnsureMetric(cpuMetric)
+	require.Len(t, spec.Metrics, 1)
+	assert.Equal(t, int32(80), *spec.Metrics[0].Resource.Target.AverageUtilization)
+
+	// Update CPU metric (upsert by resource name)
+	updatedCPU := autoscalingv2.MetricSpec{
+		Type: autoscalingv2.ResourceMetricSourceType,
+		Resource: &autoscalingv2.ResourceMetricSource{
+			Name: corev1.ResourceCPU,
+			Target: autoscalingv2.MetricTarget{
+				Type:               autoscalingv2.UtilizationMetricType,
+				AverageUtilization: int32Ptr(50),
+			},
+		},
+	}
+
e.EnsureMetric(updatedCPU) + require.Len(t, spec.Metrics, 1) + assert.Equal(t, int32(50), *spec.Metrics[0].Resource.Target.AverageUtilization) + + // Add memory metric (different resource name) + memMetric := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceMemory, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: resourcePtr("500Mi"), + }, + }, + } + e.EnsureMetric(memMetric) + assert.Len(t, spec.Metrics, 2) +} + +func TestHPASpecEditor_EnsureMetric_Pods(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + metric := autoscalingv2.MetricSpec{ + Type: autoscalingv2.PodsMetricSourceType, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{Name: "requests_per_second"}, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: resourcePtr("100"), + }, + }, + } + e.EnsureMetric(metric) + require.Len(t, spec.Metrics, 1) + + // Upsert same metric name + updated := autoscalingv2.MetricSpec{ + Type: autoscalingv2.PodsMetricSourceType, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{Name: "requests_per_second"}, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: resourcePtr("200"), + }, + }, + } + e.EnsureMetric(updated) + assert.Len(t, spec.Metrics, 1) +} + +func TestHPASpecEditor_EnsureMetric_Object(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + metric := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + Metric: autoscalingv2.MetricIdentifier{Name: "queue_length"}, + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "v1", + Kind: "Service", + Name: "worker", + }, + Target: 
autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: resourcePtr("30"), + }, + }, + } + e.EnsureMetric(metric) + require.Len(t, spec.Metrics, 1) + assert.Equal(t, "queue_length", spec.Metrics[0].Object.Metric.Name) +} + +func TestHPASpecEditor_EnsureMetric_Object_DifferentDescribedObject(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + // Add metric for Service "worker" + m1 := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + Metric: autoscalingv2.MetricIdentifier{Name: "queue_length"}, + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "v1", Kind: "Service", Name: "worker", + }, + Target: autoscalingv2.MetricTarget{Type: autoscalingv2.ValueMetricType, Value: resourcePtr("30")}, + }, + } + e.EnsureMetric(m1) + require.Len(t, spec.Metrics, 1) + + // Same metric name but different described object -> separate entry + m2 := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + Metric: autoscalingv2.MetricIdentifier{Name: "queue_length"}, + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "v1", Kind: "Service", Name: "processor", + }, + Target: autoscalingv2.MetricTarget{Type: autoscalingv2.ValueMetricType, Value: resourcePtr("50")}, + }, + } + e.EnsureMetric(m2) + assert.Len(t, spec.Metrics, 2, "different DescribedObject should create a separate metric entry") +} + +func TestHPASpecEditor_EnsureMetric_External_DifferentSelector(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + // Add external metric with selector + m1 := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ExternalMetricSourceType, + External: &autoscalingv2.ExternalMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "pubsub_undelivered", + Selector: 
&metav1.LabelSelector{MatchLabels: map[string]string{"topic": "orders"}}, + }, + Target: autoscalingv2.MetricTarget{Type: autoscalingv2.ValueMetricType, Value: resourcePtr("100")}, + }, + } + e.EnsureMetric(m1) + require.Len(t, spec.Metrics, 1) + + // Same metric name but different selector -> separate entry + m2 := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ExternalMetricSourceType, + External: &autoscalingv2.ExternalMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "pubsub_undelivered", + Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"topic": "events"}}, + }, + Target: autoscalingv2.MetricTarget{Type: autoscalingv2.ValueMetricType, Value: resourcePtr("200")}, + }, + } + e.EnsureMetric(m2) + assert.Len(t, spec.Metrics, 2, "different selector should create a separate metric entry") +} + +func TestHPASpecEditor_EnsureMetric_External(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + metric := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ExternalMetricSourceType, + External: &autoscalingv2.ExternalMetricSource{ + Metric: autoscalingv2.MetricIdentifier{Name: "pubsub_undelivered"}, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: resourcePtr("100"), + }, + }, + } + e.EnsureMetric(metric) + require.Len(t, spec.Metrics, 1) + assert.Equal(t, "pubsub_undelivered", spec.Metrics[0].External.Metric.Name) +} + +func TestHPASpecEditor_EnsureMetric_ContainerResource(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + metric := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ContainerResourceMetricSourceType, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: corev1.ResourceCPU, + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: int32Ptr(70), + }, + }, + } + e.EnsureMetric(metric) + require.Len(t, 
spec.Metrics, 1) + + // Different container same resource -> separate entry + metric2 := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ContainerResourceMetricSourceType, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: corev1.ResourceCPU, + Container: "sidecar", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: int32Ptr(50), + }, + }, + } + e.EnsureMetric(metric2) + assert.Len(t, spec.Metrics, 2) +} + +func TestHPASpecEditor_EnsureMetric_SelectorOrderInsensitive(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + // Add external metric with two match expressions in order A, B. + m1 := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ExternalMetricSourceType, + External: &autoscalingv2.ExternalMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "queue_depth", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "worker"}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: "env", Operator: metav1.LabelSelectorOpIn, Values: []string{"staging", "prod"}}, + {Key: "region", Operator: metav1.LabelSelectorOpIn, Values: []string{"us-east-1"}}, + }, + }, + }, + Target: autoscalingv2.MetricTarget{Type: autoscalingv2.ValueMetricType, Value: resourcePtr("50")}, + }, + } + e.EnsureMetric(m1) + require.Len(t, spec.Metrics, 1) + + // Upsert with same selector but expressions in order B, A and values reversed. 
+ m2 := autoscalingv2.MetricSpec{ + Type: autoscalingv2.ExternalMetricSourceType, + External: &autoscalingv2.ExternalMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "queue_depth", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "worker"}, + MatchExpressions: []metav1.LabelSelectorRequirement{ + {Key: "region", Operator: metav1.LabelSelectorOpIn, Values: []string{"us-east-1"}}, + {Key: "env", Operator: metav1.LabelSelectorOpIn, Values: []string{"prod", "staging"}}, + }, + }, + }, + Target: autoscalingv2.MetricTarget{Type: autoscalingv2.ValueMetricType, Value: resourcePtr("100")}, + }, + } + e.EnsureMetric(m2) + assert.Len(t, spec.Metrics, 1, "semantically equal selectors in different order should upsert, not append") + assert.Equal(t, resourcePtr("100").String(), spec.Metrics[0].External.Target.Value.String()) +} + +func TestHPASpecEditor_RemoveMetric(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{ + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: int32Ptr(80), + }, + }, + }, + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceMemory, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: int32Ptr(70), + }, + }, + }, + }, + } + e := NewHPASpecEditor(spec) + + e.RemoveMetric(autoscalingv2.ResourceMetricSourceType, "cpu") + require.Len(t, spec.Metrics, 1) + assert.Equal(t, corev1.ResourceMemory, spec.Metrics[0].Resource.Name) +} + +func TestHPASpecEditor_RemoveMetric_NoMatch(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{ + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: 
&autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + }, + }, + }, + } + e := NewHPASpecEditor(spec) + + e.RemoveMetric(autoscalingv2.ResourceMetricSourceType, "memory") + assert.Len(t, spec.Metrics, 1) +} + +func TestHPASpecEditor_SetBehavior(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{} + e := NewHPASpecEditor(spec) + + stabilization := int32(300) + behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &stabilization, + }, + } + e.SetBehavior(behavior) + require.NotNil(t, spec.Behavior) + assert.Equal(t, int32(300), *spec.Behavior.ScaleDown.StabilizationWindowSeconds) + + e.SetBehavior(nil) + assert.Nil(t, spec.Behavior) +} + +func TestHPASpecEditor_Raw(t *testing.T) { + spec := &autoscalingv2.HorizontalPodAutoscalerSpec{ + MaxReplicas: 5, + } + e := NewHPASpecEditor(spec) + + raw := e.Raw() + assert.Equal(t, int32(5), raw.MaxReplicas) + + raw.MaxReplicas = 10 + assert.Equal(t, int32(10), spec.MaxReplicas) +} diff --git a/pkg/primitives/hpa/builder.go b/pkg/primitives/hpa/builder.go new file mode 100644 index 00000000..53e6de06 --- /dev/null +++ b/pkg/primitives/hpa/builder.go @@ -0,0 +1,138 @@ +package hpa + +import ( + "fmt" + + "github.com/sourcehawk/operator-component-framework/internal/generic" + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + "github.com/sourcehawk/operator-component-framework/pkg/feature" + autoscalingv2 "k8s.io/api/autoscaling/v2" +) + +// Builder is a configuration helper for creating and customizing an HPA Resource. +// +// It provides a fluent API for registering mutations, status handlers, and +// data extractors. This builder ensures that the resulting Resource is +// properly initialized and validated before use in a reconciliation loop. 
+type Builder struct { + base *generic.IntegrationBuilder[*autoscalingv2.HorizontalPodAutoscaler, *Mutator] +} + +// NewBuilder initializes a new Builder with the provided HorizontalPodAutoscaler object. +// +// The HPA object passed here serves as the "desired base state". During +// reconciliation, the Resource will attempt to make the cluster's state match +// this base state, modified by any registered mutations. +// +// The provided HPA must have at least a Name and Namespace set, which +// is validated during the Build() call. +func NewBuilder(hpa *autoscalingv2.HorizontalPodAutoscaler) *Builder { + identityFunc := func(h *autoscalingv2.HorizontalPodAutoscaler) string { + return fmt.Sprintf("autoscaling/v2/HorizontalPodAutoscaler/%s/%s", h.Namespace, h.Name) + } + + base := generic.NewIntegrationBuilder[*autoscalingv2.HorizontalPodAutoscaler, *Mutator]( + hpa, + identityFunc, + NewMutator, + ) + + base. + WithCustomOperationalStatus(DefaultOperationalStatusHandler). + WithCustomSuspendStatus(DefaultSuspensionStatusHandler). + WithCustomSuspendMutation(DefaultSuspendMutationHandler). + WithCustomSuspendDeletionDecision(DefaultDeleteOnSuspendHandler) + + return &Builder{ + base: base, + } +} + +// WithMutation registers a feature-based mutation for the HPA. +// +// Mutations are applied sequentially during the Mutate() phase of reconciliation. +// A mutation with a nil Feature is applied unconditionally; one with a non-nil +// Feature is applied only when that feature is enabled. +func (b *Builder) WithMutation(m Mutation) *Builder { + b.base.WithMutation(feature.Mutation[*Mutator](m)) + return b +} + +// WithCustomOperationalStatus overrides the default logic for determining the +// HPA's operational status. +// +// The default behavior uses DefaultOperationalStatusHandler, which inspects +// HPA conditions (ScalingActive, AbleToScale) to determine status. 
+func (b *Builder) WithCustomOperationalStatus( + handler func(concepts.ConvergingOperation, *autoscalingv2.HorizontalPodAutoscaler) (concepts.OperationalStatusWithReason, error), +) *Builder { + b.base.WithCustomOperationalStatus(handler) + return b +} + +// WithCustomSuspendStatus overrides how the progress of suspension is reported. +// +// The default behavior uses DefaultSuspensionStatusHandler, which reports +// Suspended immediately because deletion is handled by the framework. +func (b *Builder) WithCustomSuspendStatus( + handler func(*autoscalingv2.HorizontalPodAutoscaler) (concepts.SuspensionStatusWithReason, error), +) *Builder { + b.base.WithCustomSuspendStatus(handler) + return b +} + +// WithCustomSuspendMutation defines how the HPA should be modified when +// the component is suspended. +// +// The default behavior uses DefaultSuspendMutationHandler, which is a no-op +// since the HPA is deleted on suspend (no spec mutations are needed). +func (b *Builder) WithCustomSuspendMutation( + handler func(*Mutator) error, +) *Builder { + b.base.WithCustomSuspendMutation(handler) + return b +} + +// WithCustomSuspendDeletionDecision overrides the decision of whether to delete +// the HPA when the component is suspended. +// +// The default behavior uses DefaultDeleteOnSuspendHandler, which returns true. +// The HPA is deleted to prevent the Kubernetes HPA controller from scaling the +// target back up during suspension. On resume the framework recreates the HPA. +func (b *Builder) WithCustomSuspendDeletionDecision( + handler func(*autoscalingv2.HorizontalPodAutoscaler) bool, +) *Builder { + b.base.WithCustomSuspendDeletionDecision(handler) + return b +} + +// WithDataExtractor registers a function to read values from the HPA after +// it has been successfully reconciled. +// +// The extractor receives a value copy of the reconciled HPA. This is useful +// for surfacing generated or updated fields to other components or resources. 
+// +// A nil extractor is ignored. +func (b *Builder) WithDataExtractor( + extractor func(autoscalingv2.HorizontalPodAutoscaler) error, +) *Builder { + if extractor != nil { + b.base.WithDataExtractor(func(h *autoscalingv2.HorizontalPodAutoscaler) error { + return extractor(*h) + }) + } + return b +} + +// Build validates the configuration and returns the initialized Resource. +// +// It returns an error if: +// - No HPA object was provided. +// - The HPA is missing a Name or Namespace. +func (b *Builder) Build() (*Resource, error) { + genericRes, err := b.base.Build() + if err != nil { + return nil, err + } + return &Resource{base: genericRes}, nil +} diff --git a/pkg/primitives/hpa/builder_test.go b/pkg/primitives/hpa/builder_test.go new file mode 100644 index 00000000..3f20e4f4 --- /dev/null +++ b/pkg/primitives/hpa/builder_test.go @@ -0,0 +1,213 @@ +package hpa + +import ( + "errors" + "testing" + + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + autoscalingv2 "k8s.io/api/autoscaling/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestBuilder(t *testing.T) { + t.Parallel() + + t.Run("Build validation", func(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + hpa *autoscalingv2.HorizontalPodAutoscaler + expectedErr string + }{ + { + name: "nil HPA", + hpa: nil, + expectedErr: "object cannot be nil", + }, + { + name: "empty name", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + }, + }, + expectedErr: "object name cannot be empty", + }, + { + name: "empty namespace", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + }, + }, + expectedErr: "object namespace cannot be empty", + }, + { + name: "valid HPA", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: 
"test-ns", + }, + }, + expectedErr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, err := NewBuilder(tt.hpa).Build() + if tt.expectedErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErr) + assert.Nil(t, res) + } else { + require.NoError(t, err) + require.NotNil(t, res) + assert.Equal(t, "autoscaling/v2/HorizontalPodAutoscaler/test-ns/test-hpa", res.Identity()) + } + }) + } + }) + + t.Run("WithMutation", func(t *testing.T) { + t.Parallel() + hpa := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "test-ns", + }, + } + m := Mutation{ + Name: "test-mutation", + } + res, err := NewBuilder(hpa). + WithMutation(m). + Build() + require.NoError(t, err) + assert.Len(t, res.base.Mutations, 1) + assert.Equal(t, "test-mutation", res.base.Mutations[0].Name) + }) + + t.Run("WithCustomOperationalStatus", func(t *testing.T) { + t.Parallel() + hpa := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "test-ns", + }, + } + handler := func(_ concepts.ConvergingOperation, _ *autoscalingv2.HorizontalPodAutoscaler) (concepts.OperationalStatusWithReason, error) { + return concepts.OperationalStatusWithReason{Status: concepts.OperationalStatusOperational}, nil + } + res, err := NewBuilder(hpa). + WithCustomOperationalStatus(handler). 
+ Build() + require.NoError(t, err) + require.NotNil(t, res.base.OperationalStatusHandler) + status, err := res.base.OperationalStatusHandler(concepts.ConvergingOperationNone, nil) + require.NoError(t, err) + assert.Equal(t, concepts.OperationalStatusOperational, status.Status) + }) + + t.Run("WithCustomSuspendStatus", func(t *testing.T) { + t.Parallel() + hpa := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "test-ns", + }, + } + handler := func(_ *autoscalingv2.HorizontalPodAutoscaler) (concepts.SuspensionStatusWithReason, error) { + return concepts.SuspensionStatusWithReason{Status: concepts.SuspensionStatusSuspended}, nil + } + res, err := NewBuilder(hpa). + WithCustomSuspendStatus(handler). + Build() + require.NoError(t, err) + require.NotNil(t, res.base.SuspendStatusHandler) + status, err := res.base.SuspendStatusHandler(nil) + require.NoError(t, err) + assert.Equal(t, concepts.SuspensionStatusSuspended, status.Status) + }) + + t.Run("WithCustomSuspendMutation", func(t *testing.T) { + t.Parallel() + hpa := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "test-ns", + }, + } + handler := func(_ *Mutator) error { + return errors.New("suspend error") + } + res, err := NewBuilder(hpa). + WithCustomSuspendMutation(handler). + Build() + require.NoError(t, err) + require.NotNil(t, res.base.SuspendMutationHandler) + err = res.base.SuspendMutationHandler(nil) + assert.EqualError(t, err, "suspend error") + }) + + t.Run("WithCustomSuspendDeletionDecision", func(t *testing.T) { + t.Parallel() + hpa := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "test-ns", + }, + } + handler := func(_ *autoscalingv2.HorizontalPodAutoscaler) bool { + return false + } + res, err := NewBuilder(hpa). + WithCustomSuspendDeletionDecision(handler). 
+ Build() + require.NoError(t, err) + require.NotNil(t, res.base.DeleteOnSuspendHandler) + assert.False(t, res.base.DeleteOnSuspendHandler(nil)) + }) + + t.Run("WithDataExtractor", func(t *testing.T) { + t.Parallel() + hpa := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "test-ns", + }, + } + called := false + extractor := func(_ autoscalingv2.HorizontalPodAutoscaler) error { + called = true + return nil + } + res, err := NewBuilder(hpa). + WithDataExtractor(extractor). + Build() + require.NoError(t, err) + assert.Len(t, res.base.DataExtractors, 1) + err = res.base.DataExtractors[0](&autoscalingv2.HorizontalPodAutoscaler{}) + require.NoError(t, err) + assert.True(t, called) + }) + + t.Run("WithDataExtractor nil", func(t *testing.T) { + t.Parallel() + hpa := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "test-ns", + }, + } + res, err := NewBuilder(hpa). + WithDataExtractor(nil). + Build() + require.NoError(t, err) + assert.Len(t, res.base.DataExtractors, 0) + }) +} diff --git a/pkg/primitives/hpa/handlers.go b/pkg/primitives/hpa/handlers.go new file mode 100644 index 00000000..dc3ddac0 --- /dev/null +++ b/pkg/primitives/hpa/handlers.go @@ -0,0 +1,146 @@ +package hpa + +import ( + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" +) + +// DefaultOperationalStatusHandler is the default logic for determining the operational status +// of a HorizontalPodAutoscaler. +// +// It inspects Status.Conditions to classify the HPA's state: +// - OperationalStatusOperational: condition ScalingActive is True. +// - OperationalStatusPending: conditions are absent, ScalingActive is missing, or ScalingActive is Unknown. +// - OperationalStatusFailing: condition ScalingActive is False, or condition AbleToScale is False. 
+// +// This function is used as the default handler by the Resource if no custom handler is registered +// via Builder.WithCustomOperationalStatus. It can be reused within custom handlers to augment +// the default behavior. +func DefaultOperationalStatusHandler( + _ concepts.ConvergingOperation, hpa *autoscalingv2.HorizontalPodAutoscaler, +) (concepts.OperationalStatusWithReason, error) { + scalingActive := findCondition(hpa.Status.Conditions, autoscalingv2.ScalingActive) + ableToScale := findCondition(hpa.Status.Conditions, autoscalingv2.AbleToScale) + + // Check for failing conditions first + if ableToScale != nil && ableToScale.Status == corev1.ConditionFalse { + return concepts.OperationalStatusWithReason{ + Status: concepts.OperationalStatusFailing, + Reason: conditionReason(ableToScale, "AbleToScale is False"), + }, nil + } + + if scalingActive != nil { + switch scalingActive.Status { + case corev1.ConditionTrue: + return concepts.OperationalStatusWithReason{ + Status: concepts.OperationalStatusOperational, + Reason: conditionReason(scalingActive, "ScalingActive is True"), + }, nil + case corev1.ConditionFalse: + return concepts.OperationalStatusWithReason{ + Status: concepts.OperationalStatusFailing, + Reason: conditionReason(scalingActive, "ScalingActive is False"), + }, nil + case corev1.ConditionUnknown: + return concepts.OperationalStatusWithReason{ + Status: concepts.OperationalStatusPending, + Reason: conditionReason(scalingActive, "ScalingActive is Unknown"), + }, nil + default: + return concepts.OperationalStatusWithReason{ + Status: concepts.OperationalStatusPending, + Reason: conditionReason(scalingActive, "ScalingActive has unrecognized status"), + }, nil + } + } + + // Distinguish between no conditions at all and ScalingActive missing/unrecognized + if len(hpa.Status.Conditions) == 0 { + return concepts.OperationalStatusWithReason{ + Status: concepts.OperationalStatusPending, + Reason: "Waiting for HPA conditions to be populated", + }, nil + } 
+ + return concepts.OperationalStatusWithReason{ + Status: concepts.OperationalStatusPending, + Reason: "Waiting for ScalingActive condition on HPA", + }, nil +} + +// DefaultDeleteOnSuspendHandler provides the default decision of whether to delete the HPA +// when the parent component is suspended. +// +// It always returns true. A retained HPA would conflict with the suspension of its scale +// target (e.g. a Deployment scaled to zero) because the Kubernetes HPA controller +// continuously enforces minReplicas and would scale the target back up. Deleting the HPA +// prevents this interference and guarantees clean suspension semantics. On resume the +// framework recreates the HPA with the desired spec. +// +// Override this via Builder.WithCustomSuspendDeletionDecision if your use case requires +// the HPA to be retained during suspension. +// +// This function is used as the default handler by the Resource if no custom handler is registered +// via Builder.WithCustomSuspendDeletionDecision. It can be reused within custom handlers. +func DefaultDeleteOnSuspendHandler(_ *autoscalingv2.HorizontalPodAutoscaler) bool { + return true +} + +// DefaultSuspendMutationHandler provides the default mutation applied to an HPA when +// the component is suspended. +// +// It is a no-op. The default suspension behavior deletes the HPA +// (DefaultDeleteOnSuspendHandler returns true), so no spec mutations are needed before +// deletion. +// +// This function is used as the default handler by the Resource if no custom handler is registered +// via Builder.WithCustomSuspendMutation. It can be reused within custom handlers. +func DefaultSuspendMutationHandler(_ *Mutator) error { + return nil +} + +// DefaultSuspensionStatusHandler reports the suspension status of the HPA. +// +// It always returns Suspended immediately. The default suspension behaviour deletes the +// HPA to prevent it from interfering with the scale target's suspension. 
Because +// deletion is handled by the framework after this status is reported, no additional +// work is required and the status is always Suspended. +// +// The reason string is intentionally deletion-agnostic so that this handler remains +// accurate even when the deletion decision is overridden via +// Builder.WithCustomSuspendDeletionDecision. If you need a reason that reflects your +// custom deletion behaviour, override this handler via Builder.WithCustomSuspendStatus. +// +// This function is used as the default handler by the Resource if no custom handler is registered +// via Builder.WithCustomSuspendStatus. It can be reused within custom handlers. +func DefaultSuspensionStatusHandler( + _ *autoscalingv2.HorizontalPodAutoscaler, +) (concepts.SuspensionStatusWithReason, error) { + return concepts.SuspensionStatusWithReason{ + Status: concepts.SuspensionStatusSuspended, + Reason: "HorizontalPodAutoscaler suspended to prevent scaling interference", + }, nil +} + +// findCondition returns the first condition matching the given type, or nil. +func findCondition( + conditions []autoscalingv2.HorizontalPodAutoscalerCondition, + condType autoscalingv2.HorizontalPodAutoscalerConditionType, +) *autoscalingv2.HorizontalPodAutoscalerCondition { + for i := range conditions { + if conditions[i].Type == condType { + return &conditions[i] + } + } + return nil +} + +// conditionReason returns the condition's message if present, otherwise a fallback. 
+func conditionReason(cond *autoscalingv2.HorizontalPodAutoscalerCondition, fallback string) string { + if cond.Message != "" { + return cond.Message + } + return fallback +} diff --git a/pkg/primitives/hpa/handlers_test.go b/pkg/primitives/hpa/handlers_test.go new file mode 100644 index 00000000..7c4e976e --- /dev/null +++ b/pkg/primitives/hpa/handlers_test.go @@ -0,0 +1,167 @@ +package hpa + +import ( + "testing" + + "github.com/sourcehawk/operator-component-framework/pkg/component/concepts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" +) + +func TestDefaultOperationalStatusHandler(t *testing.T) { + tests := []struct { + name string + hpa *autoscalingv2.HorizontalPodAutoscaler + wantStatus concepts.OperationalStatus + }{ + { + name: "operational when ScalingActive is True", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionTrue, + Message: "the HPA was able to successfully calculate a replica count", + }, + }, + }, + }, + wantStatus: concepts.OperationalStatusOperational, + }, + { + name: "pending when no conditions", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{}, + }, + wantStatus: concepts.OperationalStatusPending, + }, + { + name: "pending when conditions present but ScalingActive missing", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.AbleToScale, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + wantStatus: concepts.OperationalStatusPending, + }, + { + name: "pending when ScalingActive is Unknown", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + Status: 
autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionUnknown, + }, + }, + }, + }, + wantStatus: concepts.OperationalStatusPending, + }, + { + name: "failing when ScalingActive is False", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionFalse, + Message: "the HPA target is missing", + }, + }, + }, + }, + wantStatus: concepts.OperationalStatusFailing, + }, + { + name: "failing when AbleToScale is False", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionTrue, + }, + { + Type: autoscalingv2.AbleToScale, + Status: corev1.ConditionFalse, + Message: "the HPA controller was unable to update the target scale", + }, + }, + }, + }, + wantStatus: concepts.OperationalStatusFailing, + }, + { + name: "failing when AbleToScale is False takes precedence over ScalingActive True", + hpa: &autoscalingv2.HorizontalPodAutoscaler{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.AbleToScale, + Status: corev1.ConditionFalse, + }, + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + wantStatus: concepts.OperationalStatusFailing, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := DefaultOperationalStatusHandler(concepts.ConvergingOperationNone, tt.hpa) + require.NoError(t, err) + assert.Equal(t, tt.wantStatus, got.Status) + }) + } +} + +func TestDefaultOperationalStatusHandler_UsesConditionMessage(t *testing.T) { + hpa := 
&autoscalingv2.HorizontalPodAutoscaler{ + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{ + { + Type: autoscalingv2.ScalingActive, + Status: corev1.ConditionTrue, + Message: "scaling is active and healthy", + }, + }, + }, + } + got, err := DefaultOperationalStatusHandler(concepts.ConvergingOperationNone, hpa) + require.NoError(t, err) + assert.Equal(t, "scaling is active and healthy", got.Reason) +} + +func TestDefaultDeleteOnSuspendHandler(t *testing.T) { + hpa := &autoscalingv2.HorizontalPodAutoscaler{} + assert.True(t, DefaultDeleteOnSuspendHandler(hpa)) +} + +func TestDefaultSuspendMutationHandler(t *testing.T) { + hpa := newTestHPA() + m := NewMutator(hpa) + err := DefaultSuspendMutationHandler(m) + require.NoError(t, err) +} + +func TestDefaultSuspensionStatusHandler(t *testing.T) { + hpa := &autoscalingv2.HorizontalPodAutoscaler{} + got, err := DefaultSuspensionStatusHandler(hpa) + require.NoError(t, err) + assert.Equal(t, concepts.SuspensionStatusSuspended, got.Status) + assert.Equal(t, "HorizontalPodAutoscaler suspended to prevent scaling interference", got.Reason) +} diff --git a/pkg/primitives/hpa/mutator.go b/pkg/primitives/hpa/mutator.go new file mode 100644 index 00000000..4cffd7f6 --- /dev/null +++ b/pkg/primitives/hpa/mutator.go @@ -0,0 +1,105 @@ +// Package hpa provides a builder and resource for managing Kubernetes HorizontalPodAutoscalers. +package hpa + +import ( + "github.com/sourcehawk/operator-component-framework/pkg/feature" + "github.com/sourcehawk/operator-component-framework/pkg/mutation/editors" + autoscalingv2 "k8s.io/api/autoscaling/v2" +) + +// Mutation defines a mutation that is applied to an HPA Mutator +// only if its associated feature gate is enabled. 
+type Mutation feature.Mutation[*Mutator] + +type featurePlan struct { + metadataEdits []func(*editors.ObjectMetaEditor) error + hpaSpecEdits []func(*editors.HPASpecEditor) error +} + +// Mutator is a high-level helper for modifying a Kubernetes HorizontalPodAutoscaler. +// +// It uses a "plan-and-apply" pattern: mutations are recorded first, then +// applied to the HPA in a single controlled pass when Apply() is called. +// +// The Mutator maintains feature boundaries: each feature's mutations are planned +// together and applied in the order the features were registered. +// +// Mutator implements editors.ObjectMutator. +type Mutator struct { + hpa *autoscalingv2.HorizontalPodAutoscaler + + plans []featurePlan + active *featurePlan +} + +// NewMutator creates a new Mutator for the given HorizontalPodAutoscaler. +// BeginFeature must be called before registering any mutations. +func NewMutator(hpa *autoscalingv2.HorizontalPodAutoscaler) *Mutator { + return &Mutator{ + hpa: hpa, + } +} + +// BeginFeature starts a new feature planning scope. All subsequent mutation +// registrations will be grouped into this feature's plan. +func (m *Mutator) BeginFeature() { + m.plans = append(m.plans, featurePlan{}) + m.active = &m.plans[len(m.plans)-1] +} + +// EditObjectMetadata records a mutation for the HPA's own metadata. +// +// Metadata edits are applied before HPA spec edits within the same feature. +// A nil edit function is ignored. +func (m *Mutator) EditObjectMetadata(edit func(*editors.ObjectMetaEditor) error) { + if edit == nil { + return + } + m.active.metadataEdits = append(m.active.metadataEdits, edit) +} + +// EditHPASpec records a mutation for the HPA's spec. +// +// HPA spec edits are applied after metadata edits within the same feature. +// A nil edit function is ignored. 
+func (m *Mutator) EditHPASpec(edit func(*editors.HPASpecEditor) error) { + if edit == nil { + return + } + m.active.hpaSpecEdits = append(m.active.hpaSpecEdits, edit) +} + +// Apply executes all recorded mutation intents on the underlying HPA. +// +// Execution order across all registered features: +// +// 1. Metadata edits (in registration order within each feature) +// 2. HPA spec edits (in registration order within each feature) +// +// Features are applied in the order they were registered. Later features observe +// the HPA as modified by all previous features. +func (m *Mutator) Apply() error { + for _, plan := range m.plans { + // 1. Metadata edits + if len(plan.metadataEdits) > 0 { + editor := editors.NewObjectMetaEditor(&m.hpa.ObjectMeta) + for _, edit := range plan.metadataEdits { + if err := edit(editor); err != nil { + return err + } + } + } + + // 2. HPA spec edits + if len(plan.hpaSpecEdits) > 0 { + editor := editors.NewHPASpecEditor(&m.hpa.Spec) + for _, edit := range plan.hpaSpecEdits { + if err := edit(editor); err != nil { + return err + } + } + } + } + + return nil +} diff --git a/pkg/primitives/hpa/mutator_test.go b/pkg/primitives/hpa/mutator_test.go new file mode 100644 index 00000000..d11c9260 --- /dev/null +++ b/pkg/primitives/hpa/mutator_test.go @@ -0,0 +1,230 @@ +package hpa + +import ( + "errors" + "testing" + + "github.com/sourcehawk/operator-component-framework/pkg/mutation/editors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// compile-time check that Mutator implements editors.ObjectMutator. 
var _ editors.ObjectMutator = (*Mutator)(nil)

// newTestHPA returns a minimal HPA fixture shared by the mutator tests.
func newTestHPA() *autoscalingv2.HorizontalPodAutoscaler {
	return &autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-hpa",
			Namespace: "default",
		},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			MaxReplicas: 10,
		},
	}
}

// --- EditObjectMetadata ---

func TestMutator_EditObjectMetadata(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error {
		e.EnsureLabel("app", "myapp")
		return nil
	})
	require.NoError(t, m.Apply())
	assert.Equal(t, "myapp", hpa.Labels["app"])
}

func TestMutator_EditObjectMetadata_Nil(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	// A nil edit must be silently ignored, not applied or errored on.
	m.EditObjectMetadata(nil)
	assert.NoError(t, m.Apply())
}

func TestMutator_EditObjectMetadata_Error(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	m.EditObjectMetadata(func(_ *editors.ObjectMetaEditor) error {
		return errors.New("metadata error")
	})
	assert.EqualError(t, m.Apply(), "metadata error")
}

// --- EditHPASpec ---

func TestMutator_EditHPASpec(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	m.EditHPASpec(func(e *editors.HPASpecEditor) error {
		e.SetMaxReplicas(20)
		return nil
	})
	require.NoError(t, m.Apply())
	assert.Equal(t, int32(20), hpa.Spec.MaxReplicas)
}

func TestMutator_EditHPASpec_Nil(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	// A nil edit must be silently ignored, not applied or errored on.
	m.EditHPASpec(nil)
	assert.NoError(t, m.Apply())
}

func TestMutator_EditHPASpec_Error(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	m.EditHPASpec(func(_ *editors.HPASpecEditor) error {
		return errors.New("spec error")
	})
	assert.EqualError(t, m.Apply(), "spec error")
}

func TestMutator_EditHPASpec_EnsureMetric(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	m.EditHPASpec(func(e *editors.HPASpecEditor) error {
		e.EnsureMetric(autoscalingv2.MetricSpec{
			Type: autoscalingv2.ResourceMetricSourceType,
			Resource: &autoscalingv2.ResourceMetricSource{
				Name: corev1.ResourceCPU,
				Target: autoscalingv2.MetricTarget{
					Type:               autoscalingv2.UtilizationMetricType,
					AverageUtilization: int32Ptr(80),
				},
			},
		})
		return nil
	})
	require.NoError(t, m.Apply())
	require.Len(t, hpa.Spec.Metrics, 1)
	assert.Equal(t, corev1.ResourceCPU, hpa.Spec.Metrics[0].Resource.Name)
}

// --- Execution order ---

func TestMutator_ExecutionOrder(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()

	var order []string

	// Register spec edit first, metadata second — metadata must still run first
	m.EditHPASpec(func(e *editors.HPASpecEditor) error {
		order = append(order, "spec")
		e.SetMaxReplicas(5)
		return nil
	})
	m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error {
		order = append(order, "metadata")
		e.EnsureLabel("test", "value")
		return nil
	})

	require.NoError(t, m.Apply())
	require.Equal(t, []string{"metadata", "spec"}, order)
}

// --- Multiple features ---

func TestMutator_MultipleFeatures(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)

	m.BeginFeature()
	m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error {
		e.EnsureLabel("feature", "one")
		return nil
	})

	m.BeginFeature()
	m.EditObjectMetadata(func(e *editors.ObjectMetaEditor) error {
		// Second feature overwrites the label
		e.EnsureLabel("feature", "two")
		return nil
	})
	m.EditHPASpec(func(e *editors.HPASpecEditor) error {
		e.SetMaxReplicas(42)
		return nil
	})

	require.NoError(t, m.Apply())
	assert.Equal(t, "two", hpa.Labels["feature"])
	assert.Equal(t, int32(42), hpa.Spec.MaxReplicas)
}

// --- Constructor and feature plan invariants ---

func TestNewMutator_InitializesNoPlan(t *testing.T) {
	hpa := newTestHPA()
	m :=
NewMutator(hpa)

	assert.Empty(t, m.plans, "NewMutator must not create any plans")
	assert.Nil(t, m.active, "active plan must not be set")
}

func TestBeginFeature_AddsExactlyOnePlan(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)

	m.BeginFeature()
	require.Len(t, m.plans, 1, "BeginFeature must add exactly one plan")
	assert.Equal(t, &m.plans[0], m.active, "active must point to the new plan")

	m.BeginFeature()
	require.Len(t, m.plans, 2)
	assert.Equal(t, &m.plans[1], m.active)
}

func TestBeginFeature_IsolatesFeaturePlans(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)

	// Record a mutation in the first feature plan
	m.BeginFeature()
	m.EditHPASpec(func(e *editors.HPASpecEditor) error {
		e.SetMaxReplicas(5)
		return nil
	})

	// Start a new feature and record a different mutation
	m.BeginFeature()
	m.EditHPASpec(func(e *editors.HPASpecEditor) error {
		e.SetMaxReplicas(10)
		return nil
	})

	// The initial plan should have exactly one spec edit
	assert.Len(t, m.plans[0].hpaSpecEdits, 1, "initial plan should have one edit")
	// The second plan should also have exactly one spec edit
	assert.Len(t, m.plans[1].hpaSpecEdits, 1, "second plan should have one edit")
}

func TestMutator_SingleFeature_PlanCount(t *testing.T) {
	hpa := newTestHPA()
	m := NewMutator(hpa)
	m.BeginFeature()
	m.EditHPASpec(func(e *editors.HPASpecEditor) error {
		e.SetMaxReplicas(5)
		return nil
	})

	require.NoError(t, m.Apply())
	assert.Len(t, m.plans, 1, "no extra plans should be created during Apply")
	assert.Equal(t, int32(5), hpa.Spec.MaxReplicas)
}

// int32Ptr returns a pointer to v; test-only helper for optional int32 fields.
func int32Ptr(v int32) *int32 { return &v }
diff --git a/pkg/primitives/hpa/resource.go b/pkg/primitives/hpa/resource.go
new file mode 100644
index 00000000..cfb9fc01
--- /dev/null
+++ b/pkg/primitives/hpa/resource.go
@@ -0,0 +1,87 @@
package hpa

import (
	"github.com/sourcehawk/operator-component-framework/internal/generic"
	"github.com/sourcehawk/operator-component-framework/pkg/component/concepts"
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Resource is a high-level abstraction for managing a Kubernetes HorizontalPodAutoscaler
// within a controller's reconciliation loop.
//
// It implements the following component interfaces:
//   - component.Resource: for basic identity and mutation behaviour.
//   - component.Operational: for reporting operational status based on HPA conditions.
//   - component.Suspendable: for default delete-on-suspend behaviour that removes the HPA to
//     prevent it from scaling the target back up during suspension.
//   - component.DataExtractable: for exporting values after successful reconciliation.
//
// All methods delegate to the embedded generic IntegrationResource; this type
// exists to give the HPA primitive a concrete, documented public surface.
type Resource struct {
	// base carries the generic integration-resource machinery (object storage,
	// mutation pipeline, status handlers) specialized for HPA and Mutator.
	base *generic.IntegrationResource[*autoscalingv2.HorizontalPodAutoscaler, *Mutator]
}

// Identity returns a unique identifier for the HPA in the format
// "autoscaling/v2/HorizontalPodAutoscaler//".
func (r *Resource) Identity() string {
	return r.base.Identity()
}

// Object returns a deep copy of the underlying Kubernetes HorizontalPodAutoscaler object.
//
// The returned object implements client.Object, making it compatible with
// controller-runtime's Client for Create, Update, and Patch operations.
func (r *Resource) Object() (client.Object, error) {
	return r.base.Object()
}

// Mutate transforms the current state of a Kubernetes HorizontalPodAutoscaler into the desired state.
//
// All registered feature-gated mutations are applied in order.
//
// This method is invoked by the framework during the Update phase of reconciliation.
func (r *Resource) Mutate(current client.Object) error {
	return r.base.Mutate(current)
}

// ConvergingStatus reports the HPA's operational status using the configured handler.
//
// By default, it uses DefaultOperationalStatusHandler, which inspects HPA conditions
// to determine if the autoscaler is active, pending, or failing.
func (r *Resource) ConvergingStatus(op concepts.ConvergingOperation) (concepts.OperationalStatusWithReason, error) {
	return r.base.ConvergingStatus(op)
}

// DeleteOnSuspend determines whether the HPA should be deleted from the cluster
// when the parent component is suspended.
//
// By default, it uses DefaultDeleteOnSuspendHandler, which returns true. The HPA is
// deleted to prevent the Kubernetes HPA controller from scaling the target back up
// while it is suspended. On resume the framework recreates the HPA with the desired spec.
func (r *Resource) DeleteOnSuspend() bool {
	return r.base.DeleteOnSuspend()
}

// Suspend registers the configured suspension mutation for the next mutate cycle.
//
// For HPA, the default suspension mutation is a no-op since the HPA is deleted on
// suspend (no spec mutations are needed before deletion).
func (r *Resource) Suspend() error {
	return r.base.Suspend()
}

// SuspensionStatus reports the suspension status of the HPA.
//
// By default, it uses DefaultSuspensionStatusHandler, which reports Suspended
// immediately because deletion is handled by the framework after this status is reported.
func (r *Resource) SuspensionStatus() (concepts.SuspensionStatusWithReason, error) {
	return r.base.SuspensionStatus()
}

// ExtractData executes all registered data extractor functions against a deep copy
// of the reconciled HPA.
//
// This is called by the framework after successful reconciliation, allowing the
// component to read generated or updated values from the HPA.
+func (r *Resource) ExtractData() error { + return r.base.ExtractData() +} diff --git a/pkg/primitives/hpa/resource_test.go b/pkg/primitives/hpa/resource_test.go new file mode 100644 index 00000000..9b95d845 --- /dev/null +++ b/pkg/primitives/hpa/resource_test.go @@ -0,0 +1,38 @@ +package hpa + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + autoscalingv2 "k8s.io/api/autoscaling/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestResource_ObjectAndMutate(t *testing.T) { + base := &autoscalingv2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "default", + }, + Spec: autoscalingv2.HorizontalPodAutoscalerSpec{ + MinReplicas: ptrInt32(2), + MaxReplicas: 10, + }, + } + + res, err := NewBuilder(base).Build() + require.NoError(t, err) + + obj, err := res.Object() + require.NoError(t, err) + require.NoError(t, res.Mutate(obj)) + + got := obj.(*autoscalingv2.HorizontalPodAutoscaler) + assert.Equal(t, int32(2), *got.Spec.MinReplicas) + assert.Equal(t, int32(10), got.Spec.MaxReplicas) +} + +func ptrInt32(v int32) *int32 { + return &v +}