From ad6a68b7af3f2373062411952ceedcf4ddba3e76 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Wed, 6 Aug 2025 16:47:01 +0200 Subject: [PATCH 01/12] Errorf # Conflicts: # internal/uniqueness/leaderelection.go diff --git a/internal/evaluator/logger.go b/internal/evaluator/logger.go index fd0ffa72..d8b5a892 100644 --- a/internal/evaluator/logger.go +++ b/internal/evaluator/logger.go @@ -18,6 +18,8 @@ package evaluator import ( + "context" + "github.com/elastic/elastic-agent-libs/logp" "github.com/open-policy-agent/opa/v1/logging" "go.uber.org/zap" @@ -54,7 +56,7 @@ func (l *logger) Info(fmt string, a ...any) { } func (l *logger) Error(fmt string, a ...any) { - l.log.Errorf(fmt, a...) + l.log.Errorf(context.TODO(), fmt, a...) } func (l *logger) Warn(fmt string, a ...any) { diff --git a/internal/flavors/assetinventory/strategy_aws.go b/internal/flavors/assetinventory/strategy_aws.go index ed5b78f8..b3ce4750 100644 --- a/internal/flavors/assetinventory/strategy_aws.go +++ b/internal/flavors/assetinventory/strategy_aws.go @@ -103,7 +103,7 @@ func tryListingBuckets(ctx context.Context, log *clog.Logger, roleConfig awssdk. 
return true } if !strings.Contains(err.Error(), "not authorized to perform: sts:AssumeRole") { - log.Errorf("Expected a 403 autorization error, but got: %v", err) + log.Errorf(ctx, "Expected a 403 authorization error, but got: %v", err) } return false } diff --git a/internal/flavors/benchmark/aws_org.go b/internal/flavors/benchmark/aws_org.go index 5e47b88a..a98d59b5 100644 --- a/internal/flavors/benchmark/aws_org.go +++ b/internal/flavors/benchmark/aws_org.go @@ -148,7 +148,7 @@ func (a *AWSOrg) getAwsAccounts(ctx context.Context, log *clog.Logger, cfgCloudb if identity.Account == rootIdentity.Account { cfg, err := a.pickManagementAccountRole(ctx, log, stsClient, cfgCloudbeatRoot, identity) if err != nil { - log.Errorf("error picking roles for account %s: %s", identity.Account, err) + log.Errorf(ctx, "error picking roles for account %s: %s", identity.Account, err) continue } awsConfig = cfg @@ -205,7 +205,7 @@ func (a *AWSOrg) pickManagementAccountRole(ctx context.Context, log *clog.Logger if foundTagValue == scanSettingTagValue { _, err := a.IAMProvider.GetRole(ctx, memberRole) if err != nil { - log.Errorf("Management Account should be scanned (%s: %s), but %q role is missing: %s", scanSettingTagKey, foundTagValue, memberRole, err) + log.Errorf(ctx, "Management Account should be scanned (%s: %s), but %q role is missing: %s", scanSettingTagKey, foundTagValue, memberRole, err) } } diff --git a/internal/flavors/benchmark/eks.go b/internal/flavors/benchmark/eks.go index d8f3801e..ddece011 100644 --- a/internal/flavors/benchmark/eks.go +++ b/internal/flavors/benchmark/eks.go @@ -73,7 +73,7 @@ func (k *EKS) initialize(ctx context.Context, log *clog.Logger, cfg *config.Conf } benchmarkHelper := NewK8sBenchmarkHelper(log, cfg, kubeClient) - k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) + k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) //nolint:contextcheck awsConfig, awsIdentity, err := k.getEksAwsConfig(ctx, cfg) if err != nil { diff --git 
a/internal/flavors/benchmark/k8s.go b/internal/flavors/benchmark/k8s.go index 4617bfc2..fc700f96 100644 --- a/internal/flavors/benchmark/k8s.go +++ b/internal/flavors/benchmark/k8s.go @@ -66,7 +66,7 @@ func (k *K8S) initialize(ctx context.Context, log *clog.Logger, cfg *config.Conf } benchmarkHelper := NewK8sBenchmarkHelper(log, cfg, kubeClient) - k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) + k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) //nolint:contextcheck dp, err := benchmarkHelper.GetK8sDataProvider(ctx, k8s.KubernetesClusterNameProvider{KubeClient: kubeClient}) if err != nil { diff --git a/internal/flavors/benchmark/k8s_helper.go b/internal/flavors/benchmark/k8s_helper.go index 13cbd88f..08f1e751 100644 --- a/internal/flavors/benchmark/k8s_helper.go +++ b/internal/flavors/benchmark/k8s_helper.go @@ -48,7 +48,7 @@ func NewK8sBenchmarkHelper(log *clog.Logger, cfg *config.Config, client client_g func (h *K8SBenchmarkHelper) GetK8sDataProvider(ctx context.Context, clusterNameProvider k8s.ClusterNameProviderAPI) (dataprovider.CommonDataProvider, error) { clusterName, err := clusterNameProvider.GetClusterName(ctx, h.cfg) if err != nil { - h.log.Errorf("failed to get cluster name: %v", err) + h.log.Errorf(ctx, "failed to get cluster name: %v", err) } serverVersion, err := h.client.Discovery().ServerVersion() diff --git a/internal/infra/clog/clog.go b/internal/infra/clog/clog.go index d220a56d..7c7fde88 100644 --- a/internal/infra/clog/clog.go +++ b/internal/infra/clog/clog.go @@ -31,13 +31,14 @@ type Logger struct { *logp.Logger } -func (l *Logger) Errorf(template string, args ...any) { +func (l *Logger) Errorf(ctx context.Context, template string, args ...any) { + spanCtx := trace.SpanContextFromContext(ctx) // Downgrade context.Canceled errors to warning level if hasErrorType(context.Canceled, args...) { - l.Warnf(template, args...) + l.WithSpanContext(spanCtx).Warnf(template, args...) 
return } - l.Logger.Errorf(template, args...) + l.WithSpanContext(spanCtx).Logger.Errorf(template, args...) } func (l *Logger) Error(args ...any) { diff --git a/internal/infra/clog/clog_test.go b/internal/infra/clog/clog_test.go index 92469e86..8bd5a11f 100644 --- a/internal/infra/clog/clog_test.go +++ b/internal/infra/clog/clog_test.go @@ -46,8 +46,8 @@ func (s *LoggerTestSuite) TestErrorfWithContextCanceled() { logger := NewLogger("test") err := context.Canceled - logger.Errorf("some error: %s", err) // error with context.Canceled - logger.Errorf("some error: %s", err.Error()) // error string with context Canceled + logger.Errorf(context.TODO(), "some error: %s", err) // error with context.Canceled + logger.Errorf(context.TODO(), "some error: %s", err.Error()) // error string with context Canceled logs := logp.ObserverLogs().TakeAll() if s.Len(logs, 2) { @@ -62,7 +62,7 @@ func (s *LoggerTestSuite) TestLogErrorfWithoutContextCanceled() { logger := NewLogger("test") err := errors.New("oops") - logger.Errorf("some error: %s", err) + logger.Errorf(context.TODO(), "some error: %s", err) logs := logp.ObserverLogs().TakeAll() if s.Len(logs, 1) { diff --git a/internal/inventory/awsfetcher/fetcher_ec2_instance.go b/internal/inventory/awsfetcher/fetcher_ec2_instance.go index 0c881b2e..d75bd83e 100644 --- a/internal/inventory/awsfetcher/fetcher_ec2_instance.go +++ b/internal/inventory/awsfetcher/fetcher_ec2_instance.go @@ -53,7 +53,7 @@ func (e *ec2InstanceFetcher) Fetch(ctx context.Context, assetChannel chan<- inve instances, err := e.provider.DescribeInstances(ctx) if err != nil { - e.logger.Errorf("Could not list ec2 instances: %v", err) + e.logger.Errorf(ctx, "Could not list ec2 instances: %v", err) return } diff --git a/internal/inventory/awsfetcher/fetcher_elb.go b/internal/inventory/awsfetcher/fetcher_elb.go index 5acc55b7..72fa994a 100644 --- a/internal/inventory/awsfetcher/fetcher_elb.go +++ b/internal/inventory/awsfetcher/fetcher_elb.go @@ -73,7 +73,7 @@ func (f 
*elbFetcher) fetch(ctx context.Context, resourceName string, function el awsResources, err := function(ctx) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/awsfetcher/fetcher_iam_policy.go b/internal/inventory/awsfetcher/fetcher_iam_policy.go index b04c1db6..36ad2f64 100644 --- a/internal/inventory/awsfetcher/fetcher_iam_policy.go +++ b/internal/inventory/awsfetcher/fetcher_iam_policy.go @@ -54,7 +54,7 @@ func (i *iamPolicyFetcher) Fetch(ctx context.Context, assetChannel chan<- invent policies, err := i.provider.GetPolicies(ctx) if err != nil { - i.logger.Errorf("Could not list policies: %v", err) + i.logger.Errorf(ctx, "Could not list policies: %v", err) if len(policies) == 0 { return } @@ -67,7 +67,7 @@ func (i *iamPolicyFetcher) Fetch(ctx context.Context, assetChannel chan<- invent policy, ok := resource.(iam.Policy) if !ok { - i.logger.Errorf("Could not get info about policy: %s", resource.GetResourceArn()) + i.logger.Errorf(ctx, "Could not get info about policy: %s", resource.GetResourceArn()) continue } diff --git a/internal/inventory/awsfetcher/fetcher_iam_role.go b/internal/inventory/awsfetcher/fetcher_iam_role.go index e2d60459..e3ee419f 100644 --- a/internal/inventory/awsfetcher/fetcher_iam_role.go +++ b/internal/inventory/awsfetcher/fetcher_iam_role.go @@ -54,7 +54,7 @@ func (i *iamRoleFetcher) Fetch(ctx context.Context, assetChannel chan<- inventor roles, err := i.provider.ListRoles(ctx) if err != nil { - i.logger.Errorf("Could not list roles: %v", err) + i.logger.Errorf(ctx, "Could not list roles: %v", err) if len(roles) == 0 { return } diff --git a/internal/inventory/awsfetcher/fetcher_iam_user.go b/internal/inventory/awsfetcher/fetcher_iam_user.go index 6221b765..b6c4ac36 100644 --- a/internal/inventory/awsfetcher/fetcher_iam_user.go +++ b/internal/inventory/awsfetcher/fetcher_iam_user.go @@ -53,7 +53,7 @@ func 
(i *iamUserFetcher) Fetch(ctx context.Context, assetChannel chan<- inventor users, err := i.provider.GetUsers(ctx) if err != nil { - i.logger.Errorf("Could not list users: %v", err) + i.logger.Errorf(ctx, "Could not list users: %v", err) if len(users) == 0 { return } @@ -66,7 +66,7 @@ func (i *iamUserFetcher) Fetch(ctx context.Context, assetChannel chan<- inventor user, ok := resource.(iam.User) if !ok { - i.logger.Errorf("Could not get info about user: %s", resource.GetResourceArn()) + i.logger.Errorf(ctx, "Could not get info about user: %s", resource.GetResourceArn()) continue } diff --git a/internal/inventory/awsfetcher/fetcher_lambda.go b/internal/inventory/awsfetcher/fetcher_lambda.go index 54698553..10847d03 100644 --- a/internal/inventory/awsfetcher/fetcher_lambda.go +++ b/internal/inventory/awsfetcher/fetcher_lambda.go @@ -73,7 +73,7 @@ func (s *lambdaFetcher) fetch(ctx context.Context, resourceName string, function awsResources, err := function(ctx) if err != nil { - s.logger.Errorf("Could not fetch %s: %v", resourceName, err) + s.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/awsfetcher/fetcher_networking.go b/internal/inventory/awsfetcher/fetcher_networking.go index 97220b90..ece19ca3 100644 --- a/internal/inventory/awsfetcher/fetcher_networking.go +++ b/internal/inventory/awsfetcher/fetcher_networking.go @@ -88,7 +88,7 @@ func (s *networkingFetcher) fetch(ctx context.Context, resourceName string, func awsResources, err := function(ctx) if err != nil { - s.logger.Errorf("Could not fetch %s: %v", resourceName, err) + s.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/awsfetcher/fetcher_rds.go b/internal/inventory/awsfetcher/fetcher_rds.go index 85e4f1b7..990cfc1b 100644 --- a/internal/inventory/awsfetcher/fetcher_rds.go +++ b/internal/inventory/awsfetcher/fetcher_rds.go @@ -55,7 +55,7 @@ func (s *rdsFetcher) Fetch(ctx context.Context, 
assetChannel chan<- inventory.As awsResources, err := s.provider.DescribeDBInstances(ctx) if err != nil { - s.logger.Errorf("Could not list RDS Instances: %v", err) + s.logger.Errorf(ctx, "Could not list RDS Instances: %v", err) if len(awsResources) == 0 { return } diff --git a/internal/inventory/awsfetcher/fetcher_s3_bucket.go b/internal/inventory/awsfetcher/fetcher_s3_bucket.go index 1a0e4fa4..4c349802 100644 --- a/internal/inventory/awsfetcher/fetcher_s3_bucket.go +++ b/internal/inventory/awsfetcher/fetcher_s3_bucket.go @@ -55,7 +55,7 @@ func (s *s3BucketFetcher) Fetch(ctx context.Context, assetChannel chan<- invento awsBuckets, err := s.provider.DescribeBuckets(ctx) if err != nil { - s.logger.Errorf("Could not list s3 buckets: %v", err) + s.logger.Errorf(ctx, "Could not list s3 buckets: %v", err) if len(awsBuckets) == 0 { return } diff --git a/internal/inventory/awsfetcher/fetcher_sns.go b/internal/inventory/awsfetcher/fetcher_sns.go index a6d8819f..0e22d7bd 100644 --- a/internal/inventory/awsfetcher/fetcher_sns.go +++ b/internal/inventory/awsfetcher/fetcher_sns.go @@ -52,7 +52,7 @@ func (s *snsFetcher) Fetch(ctx context.Context, assetChannel chan<- inventory.As awsResources, err := s.provider.ListTopicsWithSubscriptions(ctx) if err != nil { - s.logger.Errorf("Could not fetch SNS Topics: %v", err) + s.logger.Errorf(ctx, "Could not fetch SNS Topics: %v", err) return } diff --git a/internal/inventory/azurefetcher/fetcher_account.go b/internal/inventory/azurefetcher/fetcher_account.go index f3481caa..263b75ce 100644 --- a/internal/inventory/azurefetcher/fetcher_account.go +++ b/internal/inventory/azurefetcher/fetcher_account.go @@ -67,7 +67,7 @@ func (f *accountFetcher) fetch(ctx context.Context, resourceName string, functio azureAssets, err := function(ctx) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git 
a/internal/inventory/azurefetcher/fetcher_activedirectory.go b/internal/inventory/azurefetcher/fetcher_activedirectory.go index bc78b6a3..9c8d5219 100644 --- a/internal/inventory/azurefetcher/fetcher_activedirectory.go +++ b/internal/inventory/azurefetcher/fetcher_activedirectory.go @@ -63,7 +63,7 @@ func (f *activedirectoryFetcher) fetchServicePrincipals(ctx context.Context, ass items, err := f.provider.ListServicePrincipals(ctx) if err != nil { - f.logger.Errorf("Could not fetch Service Principals: %v", err) + f.logger.Errorf(ctx, "Could not fetch Service Principals: %v", err) } for _, item := range items { @@ -94,7 +94,7 @@ func (f *activedirectoryFetcher) fetchDirectoryRoles(ctx context.Context, assetC items, err := f.provider.ListDirectoryRoles(ctx) if err != nil { - f.logger.Errorf("Could not fetch Directory Roles: %v", err) + f.logger.Errorf(ctx, "Could not fetch Directory Roles: %v", err) } for _, item := range items { @@ -124,7 +124,7 @@ func (f *activedirectoryFetcher) fetchGroups(ctx context.Context, assetChan chan items, err := f.provider.ListGroups(ctx) if err != nil { - f.logger.Errorf("Could not fetch Groups: %v", err) + f.logger.Errorf(ctx, "Could not fetch Groups: %v", err) } for _, item := range items { @@ -160,7 +160,7 @@ func (f *activedirectoryFetcher) fetchUsers(ctx context.Context, assetChan chan< items, err := f.provider.ListUsers(ctx) if err != nil { - f.logger.Errorf("Could not fetch Users: %v", err) + f.logger.Errorf(ctx, "Could not fetch Users: %v", err) } for _, item := range items { diff --git a/internal/inventory/azurefetcher/fetcher_resource_graph.go b/internal/inventory/azurefetcher/fetcher_resource_graph.go index f1a2af13..af0c732b 100644 --- a/internal/inventory/azurefetcher/fetcher_resource_graph.go +++ b/internal/inventory/azurefetcher/fetcher_resource_graph.go @@ -78,7 +78,7 @@ func (f *resourceGraphFetcher) fetch(ctx context.Context, resourceName, resource azureAssets, err := f.provider.ListAllAssetTypesByName(ctx, 
resourceGroup, []string{resourceType}) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/azurefetcher/fetcher_storage.go b/internal/inventory/azurefetcher/fetcher_storage.go index 263f1f23..fca2ae5a 100644 --- a/internal/inventory/azurefetcher/fetcher_storage.go +++ b/internal/inventory/azurefetcher/fetcher_storage.go @@ -74,7 +74,7 @@ func (f *storageFetcher) Fetch(ctx context.Context, assetChan chan<- inventory.A storageAccounts, err := f.listStorageAccounts(ctx) if err != nil { - f.logger.Errorf("Could not fetch anything: %v", err) + f.logger.Errorf(ctx, "Could not fetch anything: %v", err) return } @@ -108,7 +108,7 @@ func (f *storageFetcher) fetch(ctx context.Context, storageAccounts []azurelib.A azureAssets, err := function(ctx, storageAccounts) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/launcher/launcher.go b/internal/launcher/launcher.go index 8670dbf3..8c38768b 100644 --- a/internal/launcher/launcher.go +++ b/internal/launcher/launcher.go @@ -21,6 +21,7 @@ package launcher import ( + "context" "errors" "fmt" "sync" @@ -99,7 +100,7 @@ func (l *launcher) Run(b *beat.Beat) error { l.log.Infof("Waiting for initial reconfiguration from Fleet server...") update, err := l.reconfigureWait(reconfigureWaitTimeout) if err != nil { - l.log.Errorf("Failed while waiting for the initial reconfiguration from Fleet server: %v", err) + l.log.Errorf(context.TODO(), "Failed while waiting for the initial reconfiguration from Fleet server: %v", err) return err } @@ -122,7 +123,7 @@ func (l *launcher) run() error { l.log.Info("Launcher stopped after timeout") case err == nil: // unexpected default: - l.log.Errorf("Launcher stopped by error: %v", err) + l.log.Errorf(context.TODO(), "Launcher stopped by error: 
%v", err) } l.reloader.Stop() @@ -298,7 +299,7 @@ func (l *launcher) reconfigureWait(timeout time.Duration) (*config.C, error) { if l.validator != nil { err := l.validator.Validate(update) if err != nil { - l.log.Errorf("Config update validation failed: %v", err) + l.log.Errorf(context.TODO(), "Config update validation failed: %v", err) healthErr := &BeaterUnhealthyError{} if errors.As(err, healthErr) { l.beat.Manager.UpdateStatus(status.Degraded, healthErr.Error()) diff --git a/internal/resources/fetching/cycle/cache.go b/internal/resources/fetching/cycle/cache.go index 3e2b13cd..040185e4 100644 --- a/internal/resources/fetching/cycle/cache.go +++ b/internal/resources/fetching/cycle/cache.go @@ -57,7 +57,7 @@ func (c *Cache[T]) GetValue(ctx context.Context, cycle Metadata, fetch func(cont if c.lastCycle.Sequence < 0 { return result, err } - c.log.Errorf("Failed to renew, using cached value: %v", err) + c.log.Errorf(ctx, "Failed to renew, using cached value: %v", err) } else { c.cachedValue = result c.lastCycle = cycle diff --git a/internal/resources/fetching/fetchers/aws/ecr_fetcher.go b/internal/resources/fetching/fetchers/aws/ecr_fetcher.go index d9af08df..2dd77591 100644 --- a/internal/resources/fetching/fetchers/aws/ecr_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/ecr_fetcher.go @@ -74,7 +74,7 @@ func (f *EcrFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) er podsList, err := f.kubeClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{}) if err != nil { - f.log.Errorf("failed to get pods - %v", err) + f.log.Errorf(ctx, "failed to get pods - %v", err) return err } @@ -96,7 +96,7 @@ func (f *EcrFetcher) describePodImagesRepositories(ctx context.Context, podsList // Add configuration describedRepo, err := describer.Provider.DescribeRepositories(ctx, repositories, region) if err != nil { - f.log.Errorf("could not retrieve pod's aws repositories for region %s: %v", region, err) + f.log.Errorf(ctx, "could not retrieve pod's aws 
repositories for region %s: %v", region, err) } else { awsRepositories = append(awsRepositories, describedRepo...) } diff --git a/internal/resources/fetching/fetchers/aws/iam_fetcher.go b/internal/resources/fetching/fetchers/aws/iam_fetcher.go index 2490afc8..3ee96bf0 100644 --- a/internal/resources/fetching/fetchers/aws/iam_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/iam_fetcher.go @@ -62,35 +62,35 @@ func (f IAMFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) err pwdPolicy, err := f.iamProvider.GetPasswordPolicy(ctx) if err != nil { - f.log.Errorf("Unable to fetch PasswordPolicy, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch PasswordPolicy, error: %v", err) } else { iamResources = append(iamResources, pwdPolicy) } users, err := f.iamProvider.GetUsers(ctx) if err != nil { - f.log.Errorf("Unable to fetch IAM users, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch IAM users, error: %v", err) } else { iamResources = append(iamResources, users...) } policies, err := f.iamProvider.GetPolicies(ctx) if err != nil { - f.log.Errorf("Unable to fetch IAM policies, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch IAM policies, error: %v", err) } else { iamResources = append(iamResources, policies...) 
} serverCertificates, err := f.iamProvider.ListServerCertificates(ctx) if err != nil { - f.log.Errorf("Unable to fetch IAM server certificates, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch IAM server certificates, error: %v", err) } else { iamResources = append(iamResources, serverCertificates) } accessAnalyzers, err := f.iamProvider.GetAccessAnalyzers(ctx) if err != nil { - f.log.Errorf("Unable to fetch access access analyzers, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch access analyzers, error: %v", err) } else { iamResources = append(iamResources, accessAnalyzers) } diff --git a/internal/resources/fetching/fetchers/aws/kms_fetcher.go b/internal/resources/fetching/fetchers/aws/kms_fetcher.go index 7f6917b5..e829f0de 100644 --- a/internal/resources/fetching/fetchers/aws/kms_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/kms_fetcher.go @@ -52,7 +52,7 @@ func (f *KmsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) er keys, err := f.kms.DescribeSymmetricKeys(ctx) if err != nil { - f.log.Errorf("failed to describe keys from KMS: %v", err) + f.log.Errorf(ctx, "failed to describe keys from KMS: %v", err) return nil } diff --git a/internal/resources/fetching/fetchers/aws/logging_fetcher.go b/internal/resources/fetching/fetchers/aws/logging_fetcher.go index b010a8a7..d67ed5b7 100644 --- a/internal/resources/fetching/fetchers/aws/logging_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/logging_fetcher.go @@ -67,7 +67,7 @@ func (f LoggingFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) f.log.Debug("Starting LoggingFetcher.Fetch") trails, err := f.loggingProvider.DescribeTrails(ctx) if err != nil { - f.log.Errorf("failed to describe trails: %v", err) + f.log.Errorf(ctx, "failed to describe trails: %v", err) } for _, resource := range trails { @@ -81,7 +81,7 @@ func (f LoggingFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) configs, err := 
f.configserviceProvider.DescribeConfigRecorders(ctx) if err != nil { - f.log.Errorf("failed to describe config recorders: %v", err) + f.log.Errorf(ctx, "failed to describe config recorders: %v", err) return nil } diff --git a/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go b/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go index aa667727..10857f29 100644 --- a/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go @@ -61,7 +61,7 @@ func (m MonitoringFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metada m.log.Debug("Starting MonitoringFetcher.Fetch") out, err := m.provider.AggregateResources(ctx) if err != nil { - m.log.Errorf("failed to aggregate monitoring resources: %v", err) + m.log.Errorf(ctx, "failed to aggregate monitoring resources: %v", err) } if out != nil { m.resourceCh <- fetching.ResourceInfo{ @@ -71,7 +71,7 @@ func (m MonitoringFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metada } hubs, err := m.securityhub.Describe(ctx) if err != nil { - m.log.Errorf("failed to describe security hub: %v", err) + m.log.Errorf(ctx, "failed to describe security hub: %v", err) return nil } diff --git a/internal/resources/fetching/fetchers/aws/network_fetcher.go b/internal/resources/fetching/fetchers/aws/network_fetcher.go index 4dd42adc..ed0ed2c1 100644 --- a/internal/resources/fetching/fetchers/aws/network_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/network_fetcher.go @@ -97,23 +97,23 @@ func (f NetworkFetcher) aggregateResources(ctx context.Context, client ec2.Elast var resources []awslib.AwsResource nacl, err := client.DescribeNetworkAcl(ctx) if err != nil { - f.log.Errorf("failed to describe network acl: %v", err) + f.log.Errorf(ctx, "failed to describe network acl: %v", err) } resources = append(resources, nacl...) 
securityGroups, err := client.DescribeSecurityGroups(ctx) if err != nil { - f.log.Errorf("failed to describe security groups: %v", err) + f.log.Errorf(ctx, "failed to describe security groups: %v", err) } resources = append(resources, securityGroups...) vpcs, err := client.DescribeVpcs(ctx) if err != nil { - f.log.Errorf("failed to describe vpcs: %v", err) + f.log.Errorf(ctx, "failed to describe vpcs: %v", err) } resources = append(resources, vpcs...) ebsEncryption, err := client.GetEbsEncryptionByDefault(ctx) if err != nil { - f.log.Errorf("failed to get ebs encryption by default: %v", err) + f.log.Errorf(ctx, "failed to get ebs encryption by default: %v", err) } if ebsEncryption != nil { diff --git a/internal/resources/fetching/fetchers/aws/rds_fetcher.go b/internal/resources/fetching/fetchers/aws/rds_fetcher.go index d2c36837..99f0ab01 100644 --- a/internal/resources/fetching/fetchers/aws/rds_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/rds_fetcher.go @@ -53,7 +53,7 @@ func (f *RdsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) er f.log.Info("Starting RdsFetcher.Fetch") dbInstances, err := f.provider.DescribeDBInstances(ctx) if err != nil { - f.log.Errorf("failed to load some DB instances from rds: %v", err) + f.log.Errorf(ctx, "failed to load some DB instances from rds: %v", err) } for _, dbInstance := range dbInstances { diff --git a/internal/resources/fetching/fetchers/aws/s3_fetcher.go b/internal/resources/fetching/fetchers/aws/s3_fetcher.go index c28a2599..8225956a 100644 --- a/internal/resources/fetching/fetchers/aws/s3_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/s3_fetcher.go @@ -49,7 +49,7 @@ func (f *S3Fetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) err f.log.Info("Starting S3Fetcher.Fetch") buckets, err := f.s3.DescribeBuckets(ctx) if err != nil { - f.log.Errorf("failed to load buckets from S3: %v", err) + f.log.Errorf(ctx, "failed to load buckets from S3: %v", err) return nil } diff --git 
a/internal/resources/fetching/fetchers/azure/assets_fetcher.go b/internal/resources/fetching/fetchers/azure/assets_fetcher.go index 768e7cb1..7a30164e 100644 --- a/internal/resources/fetching/fetchers/azure/assets_fetcher.go +++ b/internal/resources/fetching/fetchers/azure/assets_fetcher.go @@ -92,7 +92,7 @@ func (f *AzureAssetsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Meta // Fetching all types even if non-existent in asset group for simplicity r, err := f.provider.ListAllAssetTypesByName(ctx, assetGroup, slices.Collect(maps.Keys(AzureAssetTypeToTypePair))) if err != nil { - f.log.Errorf("AzureAssetsFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) + f.log.Errorf(ctx, "AzureAssetsFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) errAgg = errors.Join(errAgg, err) continue } @@ -101,7 +101,7 @@ func (f *AzureAssetsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Meta subscriptions, err := f.provider.GetSubscriptions(ctx, cycleMetadata) if err != nil { - f.log.Errorf("Error fetching subscription information: %v", err) + f.log.Errorf(ctx, "Error fetching subscription information: %v", err) } for _, e := range f.enrichers { diff --git a/internal/resources/fetching/fetchers/azure/batch_fetcher.go b/internal/resources/fetching/fetchers/azure/batch_fetcher.go index 7496552b..182fdf09 100644 --- a/internal/resources/fetching/fetchers/azure/batch_fetcher.go +++ b/internal/resources/fetching/fetchers/azure/batch_fetcher.go @@ -70,7 +70,7 @@ func (f *AzureBatchAssetFetcher) Fetch(ctx context.Context, cycleMetadata cycle. 
for _, assetGroup := range AzureBatchAssetGroups { r, err := f.provider.ListAllAssetTypesByName(ctx, assetGroup, slices.Collect(maps.Keys(AzureBatchAssets))) if err != nil { - f.log.Errorf("AzureBatchAssetFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) + f.log.Errorf(ctx, "AzureBatchAssetFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) errAgg = errors.Join(errAgg, err) continue } diff --git a/internal/resources/fetching/fetchers/azure/security_fetcher.go b/internal/resources/fetching/fetchers/azure/security_fetcher.go index 8e69ca5f..4c27efff 100644 --- a/internal/resources/fetching/fetchers/azure/security_fetcher.go +++ b/internal/resources/fetching/fetchers/azure/security_fetcher.go @@ -66,7 +66,7 @@ func (f *AzureSecurityAssetFetcher) Fetch(ctx context.Context, cycleMetadata cyc for assetType, fn := range fetches { securityContacts, err := fn(ctx, sub.ShortID) if err != nil { - f.log.Errorf("AzureSecurityAssetFetcher.Fetch failed to fetch %s for subscription %s: %s", assetType, sub.ShortID, err.Error()) + f.log.Errorf(ctx, "AzureSecurityAssetFetcher.Fetch failed to fetch %s for subscription %s: %s", assetType, sub.ShortID, err.Error()) errs = append(errs, err) continue } diff --git a/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go b/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go index 0bbd9bf8..fd79d813 100644 --- a/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go @@ -99,20 +99,22 @@ func NewFsFetcher(log *clog.Logger, ch chan fetching.ResourceInfo, patterns []st } } -func (f *FileSystemFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) error { +func (f *FileSystemFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) error { f.log.Debug("Starting FileSystemFetcher.Fetch") // Input files might contain glob pattern for _, filePattern := range f.patterns { matchedFiles, 
err := Glob(filePattern) if err != nil { - f.log.Errorf("Failed to find matched glob for %s, error: %+v", filePattern, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Failed to find matched glob for %s, error: %+v", filePattern, err) } for _, file := range matchedFiles { - resource, err := f.fetchSystemResource(file) + resource, err := f.fetchSystemResource(ctx, file) if err != nil { - f.log.Errorf("Unable to fetch fileSystemResource for file %v", file) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Unable to fetch fileSystemResource for file %v", file) continue } @@ -123,17 +125,17 @@ func (f *FileSystemFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadat return nil } -func (f *FileSystemFetcher) fetchSystemResource(filePath string) (*FSResource, error) { +func (f *FileSystemFetcher) fetchSystemResource(ctx context.Context, filePath string) (*FSResource, error) { info, err := os.Stat(filePath) if err != nil { return nil, fmt.Errorf("failed to fetch %s, error: %w", filePath, err) } - resourceInfo, _ := f.fromFileInfo(info, filePath) + resourceInfo, _ := f.fromFileInfo(ctx, info, filePath) return resourceInfo, nil } -func (f *FileSystemFetcher) fromFileInfo(info os.FileInfo, path string) (*FSResource, error) { +func (f *FileSystemFetcher) fromFileInfo(ctx context.Context, info os.FileInfo, path string) (*FSResource, error) { if info == nil { return nil, nil } @@ -172,7 +174,7 @@ func (f *FileSystemFetcher) fromFileInfo(info os.FileInfo, path string) (*FSReso return &FSResource{ EvalResource: data, - ElasticCommon: f.createFileCommonData(stat, data, path), + ElasticCommon: f.createFileCommonData(ctx, stat, data, path), }, nil } @@ -232,7 +234,7 @@ func getFSSubType(fileInfo os.FileInfo) string { return FileSubType } -func (f *FileSystemFetcher) createFileCommonData(stat *syscall.Stat_t, data EvalFSResource, path string) FileCommonData { +func (f *FileSystemFetcher) 
createFileCommonData(ctx context.Context, stat *syscall.Stat_t, data EvalFSResource, path string) FileCommonData { cd := FileCommonData{ Name: data.Name, Mode: data.Mode, @@ -250,7 +252,8 @@ func (f *FileSystemFetcher) createFileCommonData(stat *syscall.Stat_t, data Eval t, err := times.Stat(path) if err != nil { - f.log.Errorf("failed to get file time data (file %s), error - %s", path, err.Error()) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "failed to get file time data (file %s), error - %s", path, err.Error()) } else { cd.Accessed = t.AccessTime() cd.Mtime = t.ModTime() diff --git a/internal/resources/fetching/fetchers/k8s/kube_fetcher.go b/internal/resources/fetching/fetchers/k8s/kube_fetcher.go index ae43039e..f472aef4 100644 --- a/internal/resources/fetching/fetchers/k8s/kube_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/kube_fetcher.go @@ -163,7 +163,7 @@ func (f *KubeFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) err return fmt.Errorf("could not initate Kubernetes watchers: %w", err) } - getKubeData(f.log, f.watchers, f.resourceCh, cycleMetadata) + getKubeData(f.log, f.watchers, f.resourceCh, cycleMetadata) //nolint:contextcheck return nil } diff --git a/internal/resources/fetching/fetchers/k8s/kube_provider.go b/internal/resources/fetching/fetchers/k8s/kube_provider.go index c2ca3ad5..b5010907 100644 --- a/internal/resources/fetching/fetchers/k8s/kube_provider.go +++ b/internal/resources/fetching/fetchers/k8s/kube_provider.go @@ -18,6 +18,7 @@ package fetchers import ( + "context" "reflect" "github.com/elastic/elastic-agent-autodiscover/kubernetes" @@ -54,13 +55,13 @@ func getKubeData(log *clog.Logger, watchers []kubernetes.Watcher, resCh chan fet resource, ok := r.(kubernetes.Resource) if !ok { - log.Errorf("Bad resource: %#v does not implement kubernetes.Resource", r) + log.Errorf(context.TODO(), "Bad resource: %#v does not implement kubernetes.Resource", r) continue } err := 
addTypeInformationToKubeResource(resource) if err != nil { - log.Errorf("Bad resource: %v", err) + log.Errorf(context.TODO(), "Bad resource: %v", err) continue } // See https://github.com/kubernetes/kubernetes/issues/3030 resCh <- fetching.ResourceInfo{Resource: K8sResource{log, resource}, CycleMetadata: cycleMetadata} @@ -108,7 +109,7 @@ func (r K8sResource) GetElasticCommonData() (map[string]any, error) { func getK8sObjectMeta(log *clog.Logger, k8sObj reflect.Value) metav1.ObjectMeta { metadata, ok := k8sObj.FieldByName(k8sObjMetadataField).Interface().(metav1.ObjectMeta) if !ok { - log.Errorf("Failed to retrieve object metadata, Resource: %#v", k8sObj) + log.Errorf(context.TODO(), "Failed to retrieve object metadata, Resource: %#v", k8sObj) return metav1.ObjectMeta{} } @@ -118,7 +119,7 @@ func getK8sObjectMeta(log *clog.Logger, k8sObj reflect.Value) metav1.ObjectMeta func getK8sSubType(log *clog.Logger, k8sObj reflect.Value) string { typeMeta, ok := k8sObj.FieldByName(k8sTypeMetadataField).Interface().(metav1.TypeMeta) if !ok { - log.Errorf("Failed to retrieve type metadata, Resource: %#v", k8sObj) + log.Errorf(context.TODO(), "Failed to retrieve type metadata, Resource: %#v", k8sObj) return "" } diff --git a/internal/resources/fetching/fetchers/k8s/process_fetcher.go b/internal/resources/fetching/fetchers/k8s/process_fetcher.go index d1b931af..ff6f6d9d 100644 --- a/internal/resources/fetching/fetchers/k8s/process_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/process_fetcher.go @@ -129,7 +129,7 @@ func NewProcessFetcher(log *clog.Logger, ch chan fetching.ResourceInfo, processe } } -func (f *ProcessesFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) error { +func (f *ProcessesFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) error { f.log.Debug("Starting ProcessesFetcher.Fetch") pids, err := proc.ListFS(f.Fs) @@ -142,7 +142,8 @@ func (f *ProcessesFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata for _, p := 
range pids { stat, err := proc.ReadStatFS(f.Fs, p) if err != nil { - f.log.Errorf("error while reading /proc//stat for process %s: %s", p, err.Error()) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "error while reading /proc//stat for process %s: %s", p, err.Error()) continue } @@ -159,39 +160,43 @@ func (f *ProcessesFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata continue } - fetchedResource := f.fetchProcessData(stat, processConfig, p, cmd) + fetchedResource := f.fetchProcessData(ctx, stat, processConfig, p, cmd) f.resourceCh <- fetching.ResourceInfo{Resource: fetchedResource, CycleMetadata: cycleMetadata} } return nil } -func (f *ProcessesFetcher) fetchProcessData(procStat proc.ProcStat, processConf ProcessInputConfiguration, processId string, cmd string) fetching.Resource { - configMap := f.getProcessConfigurationFile(processConf, cmd, procStat.Name) +func (f *ProcessesFetcher) fetchProcessData(ctx context.Context, procStat proc.ProcStat, processConf ProcessInputConfiguration, processId string, cmd string) fetching.Resource { + configMap := f.getProcessConfigurationFile(ctx, processConf, cmd, procStat.Name) evalRes := EvalProcResource{PID: processId, Cmd: cmd, Stat: procStat, ExternalData: configMap} - procCd := f.createProcCommonData(procStat, cmd, processId) + procCd := f.createProcCommonData(ctx, procStat, cmd, processId) return ProcResource{EvalResource: evalRes, ElasticCommon: procCd} } -func (f *ProcessesFetcher) createProcCommonData(stat proc.ProcStat, cmd string, pid string) ProcCommonData { +func (f *ProcessesFetcher) createProcCommonData(ctx context.Context, stat proc.ProcStat, cmd string, pid string) ProcCommonData { processID, err := strconv.ParseInt(pid, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse PID, pid: %s", pid) + // FIXME: This should be a context from the function signature. 
+ f.log.Errorf(ctx, "Couldn't parse PID, pid: %s", pid) } startTime, err := strconv.ParseUint(stat.StartTime, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse stat.StartTime, startTime: %s", stat.StartTime) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't parse stat.StartTime, startTime: %s", stat.StartTime) } pgid, err := strconv.ParseInt(stat.Group, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse stat.Group, Group: %s, Error: %v", stat.Group, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't parse stat.Group, Group: %s, Error: %v", stat.Group, err) } ppid, err := strconv.ParseInt(stat.Parent, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse stat.Parent, Parent: %s, Error: %v", stat.Parent, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't parse stat.Parent, Parent: %s, Error: %v", stat.Parent, err) } sysUptime, err := proc.ReadUptimeFS(f.Fs) @@ -219,7 +224,7 @@ func (f *ProcessesFetcher) createProcCommonData(stat proc.ProcStat, cmd string, // getProcessConfigurationFile - reads the configuration file associated with a process. // As an input this function receives a ProcessInputConfiguration that contains ConfigFileArguments, a string array that represents some process flags // The function extracts the configuration file associated with each flag and returns it. 
-func (f *ProcessesFetcher) getProcessConfigurationFile(processConfig ProcessInputConfiguration, cmd string, processName string) map[string]any { +func (f *ProcessesFetcher) getProcessConfigurationFile(ctx context.Context, processConfig ProcessInputConfiguration, cmd string, processName string) map[string]any { configMap := make(map[string]any) for _, argument := range processConfig.ConfigFileArguments { // The regex extracts the cmd line flag(argument) value @@ -232,7 +237,8 @@ func (f *ProcessesFetcher) getProcessConfigurationFile(processConfig ProcessInpu groupMatches := matcher.FindStringSubmatch(cmd) if len(groupMatches) < 2 { - f.log.Errorf("Couldn't find a configuration file associated with flag %s for process %s", argument, processName) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't find a configuration file associated with flag %s for process %s", argument, processName) continue } argValue := matcher.FindStringSubmatch(cmd)[1] @@ -240,12 +246,14 @@ func (f *ProcessesFetcher) getProcessConfigurationFile(processConfig ProcessInpu data, err := fs.ReadFile(f.Fs, argValue) if err != nil { - f.log.Errorf("Failed to read file configuration for process %s, error - %+v", processName, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Failed to read file configuration for process %s, error - %+v", processName, err) continue } configFile, err := f.readConfigurationFile(argValue, data) if err != nil { - f.log.Errorf("Failed to parse file configuration for process %s, error - %+v", processName, err) + // FIXME: This should be a context from the function signature. 
+ f.log.Errorf(ctx, "Failed to parse file configuration for process %s, error - %+v", processName, err) continue } configMap[argument] = configFile diff --git a/internal/resources/fetching/manager/manager.go b/internal/resources/fetching/manager/manager.go index ea6ba252..a52dc4f2 100644 --- a/internal/resources/fetching/manager/manager.go +++ b/internal/resources/fetching/manager/manager.go @@ -75,7 +75,7 @@ func (m *Manager) Stop() { func (m *Manager) fetchAndSleep(ctx context.Context) { counter, err := observability.MeterFromContext(ctx, scopeName).Int64Counter("cloudbeat.fetcher.manager.cycles") if err != nil { - m.log.Errorf("Failed to create fetcher manager cycles counter: %v", err) + m.log.Errorf(ctx, "Failed to create fetcher manager cycles counter: %v", err) } // set immediate exec for first time run @@ -124,7 +124,7 @@ func (m *Manager) fetchIteration(ctx context.Context) { defer wg.Done() err := m.fetchSingle(ctx, k, cycle.Metadata{Sequence: seq}) if err != nil { - logger.Errorf("Error running fetcher for key %s: %v", k, err) + logger.Errorf(ctx, "Error running fetcher for key %s: %v", k, err) } }(key) } diff --git a/internal/resources/fetching/registry/registry.go b/internal/resources/fetching/registry/registry.go index d0257b28..27e17df1 100644 --- a/internal/resources/fetching/registry/registry.go +++ b/internal/resources/fetching/registry/registry.go @@ -119,7 +119,7 @@ func (r *registry) Update(ctx context.Context) { } fm, err := r.updater(ctx) if err != nil { - r.log.Errorf("Failed to update registry: %v", err) + r.log.Errorf(ctx, "Failed to update registry: %v", err) return } r.reg = fm diff --git a/internal/resources/providers/aws_cis/logging/provider.go b/internal/resources/providers/aws_cis/logging/provider.go index 79d365af..52047294 100644 --- a/internal/resources/providers/aws_cis/logging/provider.go +++ b/internal/resources/providers/aws_cis/logging/provider.go @@ -52,17 +52,17 @@ func (p *Provider) DescribeTrails(ctx context.Context) 
([]awslib.AwsResource, er } bucketPolicy, policyErr := p.s3Provider.GetBucketPolicy(ctx, info.Trail.S3BucketName, *info.Trail.HomeRegion) if policyErr != nil { - p.log.Errorf("Error getting bucket policy for bucket %s: %v", *info.Trail.S3BucketName, policyErr) + p.log.Errorf(ctx, "Error getting bucket policy for bucket %s: %v", *info.Trail.S3BucketName, policyErr) } aclGrants, aclErr := p.s3Provider.GetBucketACL(ctx, info.Trail.S3BucketName, *info.Trail.HomeRegion) if aclErr != nil { - p.log.Errorf("Error getting bucket ACL for bucket %s: %v", *info.Trail.S3BucketName, aclErr) + p.log.Errorf(ctx, "Error getting bucket ACL for bucket %s: %v", *info.Trail.S3BucketName, aclErr) } bucketLogging, loggingErr := p.s3Provider.GetBucketLogging(ctx, info.Trail.S3BucketName, *info.Trail.HomeRegion) if loggingErr != nil { - p.log.Errorf("Error getting bucket logging for bucket %s: %v", *info.Trail.S3BucketName, loggingErr) + p.log.Errorf(ctx, "Error getting bucket logging for bucket %s: %v", *info.Trail.S3BucketName, loggingErr) } enrichedTrails = append(enrichedTrails, EnrichedTrail{ diff --git a/internal/resources/providers/aws_cis/monitoring/monitoring.go b/internal/resources/providers/aws_cis/monitoring/monitoring.go index 3a0076ab..9bab3203 100644 --- a/internal/resources/providers/aws_cis/monitoring/monitoring.go +++ b/internal/resources/providers/aws_cis/monitoring/monitoring.go @@ -98,11 +98,11 @@ func (p *Provider) AggregateResources(ctx context.Context) (*Resource, error) { } metrics, err := p.Cloudwatchlogs.DescribeMetricFilters(ctx, info.Trail.HomeRegion, logGroup) if err != nil { - p.Log.Errorf("failed to describe metric filters for cloudwatchlog log group arn %s: %v", *info.Trail.CloudWatchLogsLogGroupArn, err) + p.Log.Errorf(ctx, "failed to describe metric filters for cloudwatchlog log group arn %s: %v", *info.Trail.CloudWatchLogsLogGroupArn, err) continue } - parsedMetrics := p.parserMetrics(metrics) + parsedMetrics := p.parserMetrics(ctx, metrics) names := 
filterNamesFromMetrics(metrics) if len(names) == 0 { @@ -117,7 +117,7 @@ func (p *Provider) AggregateResources(ctx context.Context) (*Resource, error) { for _, name := range names { alarms, err := p.Cloudwatch.DescribeAlarms(ctx, info.Trail.HomeRegion, []string{name}) if err != nil { - p.Log.Errorf("failed to describe alarms for cloudwatch filter %v: %v", names, err) + p.Log.Errorf(ctx, "failed to describe alarms for cloudwatch filter %v: %v", names, err) continue } topics := p.getSubscriptionForAlarms(ctx, info.Trail.HomeRegion, alarms) @@ -133,7 +133,7 @@ func (p *Provider) AggregateResources(ctx context.Context) (*Resource, error) { return &Resource{Items: items}, nil } -func (p *Provider) parserMetrics(metrics []cloudwatchlogs_types.MetricFilter) []MetricFilter { +func (p *Provider) parserMetrics(ctx context.Context, metrics []cloudwatchlogs_types.MetricFilter) []MetricFilter { parsedMetrics := make([]MetricFilter, 0, len(metrics)) for _, m := range metrics { if m.FilterPattern == nil { @@ -145,7 +145,8 @@ func (p *Provider) parserMetrics(metrics []cloudwatchlogs_types.MetricFilter) [] exp, err := parseFilterPattern(*m.FilterPattern) if err != nil { - p.Log.Errorf("failed to parse metric filter pattern: %v (pattern: %s)", err, *m.FilterPattern) + // FIXME: This should be a context from the function signature. 
+ p.Log.Errorf(ctx, "failed to parse metric filter pattern: %v (pattern: %s)", err, *m.FilterPattern) parsedMetrics = append(parsedMetrics, MetricFilter{ MetricFilter: m, }) @@ -166,7 +167,7 @@ func (p *Provider) getSubscriptionForAlarms(ctx context.Context, region *string, for _, action := range alarm.AlarmActions { subscriptions, err := p.Sns.ListSubscriptionsByTopic(ctx, pointers.Deref(region), action) if err != nil { - p.Log.Errorf("failed to list subscriptions for topic %s: %v", action, err) + p.Log.Errorf(ctx, "failed to list subscriptions for topic %s: %v", action, err) continue } for _, topic := range subscriptions { diff --git a/internal/resources/providers/awslib/account_provider.go b/internal/resources/providers/awslib/account_provider.go index 8ccb8cd6..815bfa29 100644 --- a/internal/resources/providers/awslib/account_provider.go +++ b/internal/resources/providers/awslib/account_provider.go @@ -64,7 +64,7 @@ func listAccounts(ctx context.Context, log *clog.Logger, client organizationsAPI organization, err := getOUInfoForAccount(ctx, client, organizationIdToName, account.Id) if err != nil { - log.Errorf("failed to get organizational unit info for account %s: %v", *account.Id, err) + log.Errorf(ctx, "failed to get organizational unit info for account %s: %v", *account.Id, err) } accounts = append(accounts, cloud.Identity{ Provider: "aws", diff --git a/internal/resources/providers/awslib/all_region_selector.go b/internal/resources/providers/awslib/all_region_selector.go index 14bad6e1..5ef32be4 100644 --- a/internal/resources/providers/awslib/all_region_selector.go +++ b/internal/resources/providers/awslib/all_region_selector.go @@ -47,7 +47,7 @@ func (s *allRegionSelector) Regions(ctx context.Context, cfg aws.Config) ([]stri output, err := s.client.DescribeRegions(ctx, nil) if err != nil { - log.Errorf("Failed getting available regions: %v", err) + log.Errorf(ctx, "Failed getting available regions: %v", err) return nil, err } diff --git 
a/internal/resources/providers/awslib/cached_region_selector.go b/internal/resources/providers/awslib/cached_region_selector.go index 07a8feae..a7a870ad 100644 --- a/internal/resources/providers/awslib/cached_region_selector.go +++ b/internal/resources/providers/awslib/cached_region_selector.go @@ -94,12 +94,12 @@ func (s *cachedRegionSelector) Regions(ctx context.Context, cfg aws.Config) ([]s var output []string output, err := s.client.Regions(ctx, cfg) if err != nil { - log.Errorf("Failed getting regions: %v", err) + log.Errorf(ctx, "Failed getting regions: %v", err) return nil, err } if !s.setCache(output) { - log.Errorf("Failed setting regions cache") + log.Errorf(ctx, "Failed setting regions cache") } return output, nil } diff --git a/internal/resources/providers/awslib/cloudtrail/provider.go b/internal/resources/providers/awslib/cloudtrail/provider.go index 02b2102a..6dc8d78d 100644 --- a/internal/resources/providers/awslib/cloudtrail/provider.go +++ b/internal/resources/providers/awslib/cloudtrail/provider.go @@ -57,12 +57,12 @@ func (p Provider) DescribeTrails(ctx context.Context) ([]TrailInfo, error) { } status, err := p.getTrailStatus(ctx, trail) if err != nil { - p.log.Errorf("failed to get trail status %s %v", *trail.TrailARN, err.Error()) + p.log.Errorf(ctx, "failed to get trail status %s %v", *trail.TrailARN, err.Error()) } selectors, err := p.getEventSelectors(ctx, trail) if err != nil { - p.log.Errorf("failed to get trail event selector %s %v", *trail.TrailARN, err.Error()) + p.log.Errorf(ctx, "failed to get trail event selector %s %v", *trail.TrailARN, err.Error()) } result = append(result, TrailInfo{ diff --git a/internal/resources/providers/awslib/configservice/provider.go b/internal/resources/providers/awslib/configservice/provider.go index 04a42f0e..d773ac3a 100644 --- a/internal/resources/providers/awslib/configservice/provider.go +++ b/internal/resources/providers/awslib/configservice/provider.go @@ -29,7 +29,7 @@ func (p *Provider) 
DescribeConfigRecorders(ctx context.Context) ([]awslib.AwsRes configs, err := awslib.MultiRegionFetch(ctx, p.clients, func(ctx context.Context, region string, c Client) (awslib.AwsResource, error) { recorderList, err := c.DescribeConfigurationRecorders(ctx, nil) if err != nil { - p.log.Errorf("Error fetching AWS Config recorders: %v", err) + p.log.Errorf(ctx, "Error fetching AWS Config recorders: %v", err) return nil, err } diff --git a/internal/resources/providers/awslib/current_region_selector.go b/internal/resources/providers/awslib/current_region_selector.go index d58ee76e..c3c3d29f 100644 --- a/internal/resources/providers/awslib/current_region_selector.go +++ b/internal/resources/providers/awslib/current_region_selector.go @@ -39,7 +39,7 @@ func (s *currentRegionSelector) Regions(ctx context.Context, cfg aws.Config) ([] metadata, err := s.client.GetMetadata(ctx, cfg) if err != nil { - log.Errorf("Failed getting current region: %v", err) + log.Errorf(ctx, "Failed getting current region: %v", err) return nil, err } diff --git a/internal/resources/providers/awslib/ec2/provider.go b/internal/resources/providers/awslib/ec2/provider.go index 31d0bb7d..e7a07c0c 100644 --- a/internal/resources/providers/awslib/ec2/provider.go +++ b/internal/resources/providers/awslib/ec2/provider.go @@ -362,7 +362,7 @@ func (p *Provider) IterOwnedSnapshots(ctx context.Context, before time.Time) ite return nil, nil }) if err != nil { - p.log.Errorf("Error listing owned snapshots: %v", err) + p.log.Errorf(ctx, "Error listing owned snapshots: %v", err) } } } @@ -492,7 +492,7 @@ func (p *Provider) DescribeVolumes(ctx context.Context, instances []*Ec2Instance var result []*Volume for _, vol := range allVolumes { if len(vol.Attachments) != 1 { - p.log.Errorf("Volume %s has %d attachments", *vol.VolumeId, len(vol.Attachments)) + p.log.Errorf(ctx, "Volume %s has %d attachments", *vol.VolumeId, len(vol.Attachments)) continue } @@ -564,7 +564,7 @@ func (p *Provider) DescribeVpcs(ctx 
context.Context) ([]awslib.AwsResource, erro }, }}) if err != nil { - p.log.Errorf("Error fetching flow logs for VPC %s: %v", *vpc.VpcId, err.Error()) + p.log.Errorf(ctx, "Error fetching flow logs for VPC %s: %v", *vpc.VpcId, err.Error()) continue } diff --git a/internal/resources/providers/awslib/elb_v2/provider_v2.go b/internal/resources/providers/awslib/elb_v2/provider_v2.go index d7e8699d..e719b85c 100644 --- a/internal/resources/providers/awslib/elb_v2/provider_v2.go +++ b/internal/resources/providers/awslib/elb_v2/provider_v2.go @@ -54,7 +54,7 @@ func (p *Provider) DescribeLoadBalancers(ctx context.Context) ([]awslib.AwsResou } listeners, err := p.describeListeners(ctx, region, loadBalancer.GetResourceArn()) if err != nil { - p.log.Errorf("Error fetching listeners for %s: %v", loadBalancer.GetResourceArn(), err) + p.log.Errorf(ctx, "Error fetching listeners for %s: %v", loadBalancer.GetResourceArn(), err) } else { loadBalancer.Listeners = listeners } diff --git a/internal/resources/providers/awslib/iam/policy.go b/internal/resources/providers/awslib/iam/policy.go index f406ea22..9cec7eb1 100644 --- a/internal/resources/providers/awslib/iam/policy.go +++ b/internal/resources/providers/awslib/iam/policy.go @@ -223,7 +223,7 @@ func (p Provider) listInlinePolicies(ctx context.Context, identity *string) ([]P UserName: identity, }) if err != nil { - p.log.Errorf("fail to get inline policy for user: %s, policy name: %s", *identity, policyNames[i]) + p.log.Errorf(ctx, "fail to get inline policy for user: %s, policy name: %s", *identity, policyNames[i]) policies = append(policies, PolicyDocument{PolicyName: policyNames[i]}) continue } diff --git a/internal/resources/providers/awslib/iam/role_policy.go b/internal/resources/providers/awslib/iam/role_policy.go index 65d683c9..a5c0f4ac 100644 --- a/internal/resources/providers/awslib/iam/role_policy.go +++ b/internal/resources/providers/awslib/iam/role_policy.go @@ -41,7 +41,7 @@ func (p Provider) 
GetIAMRolePermissions(ctx context.Context, roleName string) ([ policy, err := p.client.GetRolePolicy(ctx, input) if err != nil { - p.log.Errorf("Failed to get policy %s: %v", *policyId.PolicyName, err) + p.log.Errorf(ctx, "Failed to get policy %s: %v", *policyId.PolicyName, err) continue } diff --git a/internal/resources/providers/awslib/iam/root_account.go b/internal/resources/providers/awslib/iam/root_account.go index a2400bf2..67483b3c 100644 --- a/internal/resources/providers/awslib/iam/root_account.go +++ b/internal/resources/providers/awslib/iam/root_account.go @@ -27,7 +27,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" ) -func (p Provider) getRootAccountUser(rootAccount *CredentialReport) *types.User { +func (p Provider) getRootAccountUser(ctx context.Context, rootAccount *CredentialReport) *types.User { if rootAccount == nil { p.log.Error("no root account entry was provided") return nil @@ -35,7 +35,7 @@ func (p Provider) getRootAccountUser(rootAccount *CredentialReport) *types.User rootDate, err := time.Parse(time.RFC3339, rootAccount.UserCreation) if err != nil { - p.log.Errorf("fail to parse root account user creation, error: %v", err) + p.log.Errorf(ctx, "fail to parse root account user creation, error: %v", err) return nil } @@ -45,7 +45,7 @@ func (p Provider) getRootAccountUser(rootAccount *CredentialReport) *types.User if rootAccount.PasswordLastUsed != "no_information" && rootAccount.PasswordLastUsed != "N/A" { pwdLastUsed, err = time.Parse(time.RFC3339, rootAccount.PasswordLastUsed) if err != nil { - p.log.Errorf("fail to parse root account password last used, error: %v", err) + p.log.Errorf(ctx, "fail to parse root account password last used, error: %v", err) return nil } } diff --git a/internal/resources/providers/awslib/iam/user.go b/internal/resources/providers/awslib/iam/user.go index 3305bedf..34617a86 100644 --- a/internal/resources/providers/awslib/iam/user.go +++ b/internal/resources/providers/awslib/iam/user.go @@ -54,7 
+54,7 @@ func (p Provider) GetUsers(ctx context.Context) ([]awslib.AwsResource, error) { return nil, err } - rootUser := p.getRootAccountUser(credentialReport[rootAccount]) + rootUser := p.getRootAccountUser(ctx, credentialReport[rootAccount]) if rootUser != nil { apiUsers = append(apiUsers, *rootUser) } @@ -80,23 +80,23 @@ func (p Provider) GetUsers(ctx context.Context) ([]awslib.AwsResource, error) { mfaDevices, err := p.getMFADevices(ctx, apiUser, userAccount) if err != nil { - p.log.Errorf("fail to list mfa device for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to list mfa device for user: %s, error: %v", username, err) } pwdEnabled, err := isPasswordEnabled(userAccount) if err != nil { - p.log.Errorf("fail to parse PasswordEnabled for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to parse PasswordEnabled for user: %s, error: %v", username, err) pwdEnabled = false } inlinePolicies, err := p.listInlinePolicies(ctx, apiUser.UserName) if err != nil && !isRootUser(username) { - p.log.Errorf("fail to list inline policies for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to list inline policies for user: %s, error: %v", username, err) } attachedPolicies, err := p.listAttachedPolicies(ctx, apiUser.UserName) if err != nil && !isRootUser(username) { - p.log.Errorf("fail to list attached policies for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to list attached policies for user: %s, error: %v", username, err) } users = append(users, User{ diff --git a/internal/resources/providers/awslib/multi_region.go b/internal/resources/providers/awslib/multi_region.go index 4b2adfb3..07ef6c7b 100644 --- a/internal/resources/providers/awslib/multi_region.go +++ b/internal/resources/providers/awslib/multi_region.go @@ -53,7 +53,7 @@ func (w *MultiRegionClientFactory[T]) NewMultiRegionClients(ctx context.Context, clientsMap := make(map[string]T, 0) regionList, err := selector.Regions(ctx, cfg) if err != nil { - 
log.Errorf("Region '%s' selected after failure to retrieve aws regions: %v", cfg.Region, err) + log.Errorf(ctx, "Region '%s' selected after failure to retrieve aws regions: %v", cfg.Region, err) regionList = []string{cfg.Region} } for _, region := range regionList { diff --git a/internal/resources/providers/awslib/rds/provider.go b/internal/resources/providers/awslib/rds/provider.go index f1d133d6..7c0c90ca 100644 --- a/internal/resources/providers/awslib/rds/provider.go +++ b/internal/resources/providers/awslib/rds/provider.go @@ -52,7 +52,7 @@ func (p Provider) DescribeDBInstances(ctx context.Context) ([]awslib.AwsResource for { output, err := c.DescribeDBInstances(ctx, dbInstancesInput) if err != nil { - p.log.Errorf("Could not describe DB instances. Error: %v", err) + p.log.Errorf(ctx, "Could not describe DB instances. Error: %v", err) return result, err } @@ -89,7 +89,7 @@ func (p Provider) getDBInstanceSubnets(ctx context.Context, region string, dbIns resultSubnet := Subnet{ID: *subnet.SubnetIdentifier, RouteTable: nil} routeTableForSubnet, err := p.ec2.GetRouteTableForSubnet(ctx, region, *subnet.SubnetIdentifier, *dbInstance.DBSubnetGroup.VpcId) if err != nil { - p.log.Errorf("Could not get route table for subnet %s of DB %s. Error: %v", *subnet.SubnetIdentifier, *dbInstance.DBInstanceIdentifier, err) + p.log.Errorf(ctx, "Could not get route table for subnet %s of DB %s. 
Error: %v", *subnet.SubnetIdentifier, *dbInstance.DBInstanceIdentifier, err) } else { var routes []Route for _, route := range routeTableForSubnet.Routes { diff --git a/internal/resources/providers/awslib/s3/provider.go b/internal/resources/providers/awslib/s3/provider.go index 4d176baf..6f871a8c 100644 --- a/internal/resources/providers/awslib/s3/provider.go +++ b/internal/resources/providers/awslib/s3/provider.go @@ -65,7 +65,7 @@ func (p Provider) DescribeBuckets(ctx context.Context) ([]awslib.AwsResource, er } clientBuckets, err := defaultClient.ListBuckets(ctx, &s3Client.ListBucketsInput{}) if err != nil { - p.log.Errorf("Could not list s3 buckets: %v", err) + p.log.Errorf(ctx, "Could not list s3 buckets: %v", err) return nil, err } @@ -77,7 +77,7 @@ func (p Provider) DescribeBuckets(ctx context.Context) ([]awslib.AwsResource, er accountPublicAccessBlockConfig, accountPublicAccessBlockErr := p.getAccountPublicAccessBlock(ctx) if accountPublicAccessBlockErr != nil { - p.log.Errorf("Could not get account public access block configuration. Err: %v", accountPublicAccessBlockErr) + p.log.Errorf(ctx, "Could not get account public access block configuration. Err: %v", accountPublicAccessBlockErr) } bucketsRegionsMapping := p.getBucketsRegionMapping(ctx, clientBuckets.Buckets) @@ -87,22 +87,22 @@ func (p Provider) DescribeBuckets(ctx context.Context) ([]awslib.AwsResource, er // of the flow, so we should keep describing the bucket even if getting these objects fails. sseAlgorithm, encryptionErr := p.getBucketEncryptionAlgorithm(ctx, bucket.Name, region) if encryptionErr != nil { - p.log.Errorf("Could not get encryption for bucket %s. Error: %v", *bucket.Name, encryptionErr) + p.log.Errorf(ctx, "Could not get encryption for bucket %s. Error: %v", *bucket.Name, encryptionErr) } bucketPolicy, policyErr := p.GetBucketPolicy(ctx, bucket.Name, region) if policyErr != nil { - p.log.Errorf("Could not get bucket policy for bucket %s. 
Error: %v", *bucket.Name, policyErr) + p.log.Errorf(ctx, "Could not get bucket policy for bucket %s. Error: %v", *bucket.Name, policyErr) } bucketVersioning, versioningErr := p.getBucketVersioning(ctx, bucket.Name, region) if versioningErr != nil { - p.log.Errorf("Could not get bucket versioning for bucket %s. Err: %v", *bucket.Name, versioningErr) + p.log.Errorf(ctx, "Could not get bucket versioning for bucket %s. Err: %v", *bucket.Name, versioningErr) } publicAccessBlockConfiguration, publicAccessBlockErr := p.getPublicAccessBlock(ctx, bucket.Name, region) if publicAccessBlockErr != nil { - p.log.Errorf("Could not get public access block configuration for bucket %s. Err: %v", *bucket.Name, publicAccessBlockErr) + p.log.Errorf(ctx, "Could not get public access block configuration for bucket %s. Err: %v", *bucket.Name, publicAccessBlockErr) } result = append(result, BucketDescription{ @@ -191,7 +191,7 @@ func (p Provider) getBucketsRegionMapping(ctx context.Context, buckets []types.B // If we could not get the Region for a bucket, additional API calls for resources will probably fail, we should // not describe this bucket. if regionErr != nil { - p.log.Errorf("Could not get bucket location for bucket %s. Not describing this bucket. Error: %v", *clientBucket.Name, regionErr) + p.log.Errorf(ctx, "Could not get bucket location for bucket %s. Not describing this bucket. Error: %v", *clientBucket.Name, regionErr) continue } diff --git a/internal/resources/providers/awslib/sns/provider.go b/internal/resources/providers/awslib/sns/provider.go index e6d1c5c2..f17b4407 100644 --- a/internal/resources/providers/awslib/sns/provider.go +++ b/internal/resources/providers/awslib/sns/provider.go @@ -48,7 +48,7 @@ func (p *Provider) ListTopics(ctx context.Context) ([]types.Topic, error) { for { output, err := c.ListTopics(ctx, input) if err != nil { - p.log.Errorf("Could not list SNS Topics. Error: %s", err) + p.log.Errorf(ctx, "Could not list SNS Topics. 
Error: %s", err) return nil, err } all = append(all, output.Topics...) @@ -93,7 +93,7 @@ func (p *Provider) ListTopicsWithSubscriptions(ctx context.Context) ([]awslib.Aw for { output, err := c.ListTopics(ctx, input) if err != nil { - p.log.Errorf("Could not list SNS Topics. Error: %s", err) + p.log.Errorf(ctx, "Could not list SNS Topics. Error: %s", err) return nil, err } @@ -104,7 +104,7 @@ func (p *Provider) ListTopicsWithSubscriptions(ctx context.Context) ([]awslib.Aw } subscriptions, err := p.ListSubscriptionsByTopic(ctx, region, topicInfo.GetResourceArn()) if err != nil { - p.log.Errorf("Could not list SNS Subscriptions for Topic %q. Error: %s", topicInfo.GetResourceArn(), err) + p.log.Errorf(ctx, "Could not list SNS Subscriptions for Topic %q. Error: %s", topicInfo.GetResourceArn(), err) } else { topicInfo.Subscriptions = subscriptions } diff --git a/internal/resources/providers/azurelib/inventory/storage_provider.go b/internal/resources/providers/azurelib/inventory/storage_provider.go index 2f9da7e6..d94279d8 100644 --- a/internal/resources/providers/azurelib/inventory/storage_provider.go +++ b/internal/resources/providers/azurelib/inventory/storage_provider.go @@ -155,12 +155,12 @@ func (p *storageAccountProvider) ListStorageAccounts(ctx context.Context, storag for _, saID := range storageAccountsSubscriptionsIds { res, err := p.client.AssetAccountStorage(ctx, saID, nil) if err != nil { - p.log.Errorf("error while fetching storage accounts for subscriptionId: %s, error: %v", saID, err) + p.log.Errorf(ctx, "error while fetching storage accounts for subscriptionId: %s, error: %v", saID, err) continue } storageAccountsAssets, err := transformStorageAccounts(res, saID) if err != nil { - p.log.Errorf("error while transforming storage for subscriptionId: %s, error: %v", saID, err) + p.log.Errorf(ctx, "error while transforming storage for subscriptionId: %s, error: %v", saID, err) continue } assets = append(assets, storageAccountsAssets...) 
@@ -256,7 +256,7 @@ func (p *storageAccountProvider) ListStorageAccountFileServices(ctx context.Cont for _, item := range response.Value { properties, err := maps.AsMapStringAny(item.FileServiceProperties) if err != nil { - p.log.Errorf("error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) } assets = append(assets, AzureAsset{ @@ -288,7 +288,7 @@ func (p *storageAccountProvider) ListStorageAccountFileShares(ctx context.Contex fileShares, err := transformFileShares(responses, sa) if err != nil { - p.log.Errorf("error while transforming azure file share for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure file share for storage accounts %s: %v", sa.Id, err) } assets = append(assets, fileShares...) @@ -307,7 +307,7 @@ func (p *storageAccountProvider) ListStorageAccountQueues(ctx context.Context, s queues, err := transformQueues(responses, sa) if err != nil { - p.log.Errorf("error while transforming azure queues for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure queues for storage accounts %s: %v", sa.Id, err) } assets = append(assets, queues...) 
@@ -327,7 +327,7 @@ func (p *storageAccountProvider) ListStorageAccountQueueServices(ctx context.Con for _, item := range response.Value { properties, err := maps.AsMapStringAny(item.QueueServiceProperties) if err != nil { - p.log.Errorf("error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) } assets = append(assets, AzureAsset{ @@ -358,7 +358,7 @@ func (p *storageAccountProvider) ListStorageAccountTables(ctx context.Context, s tables, err := transformTables(responses, sa) if err != nil { - p.log.Errorf("error while transforming azure tables for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure tables for storage accounts %s: %v", sa.Id, err) } assets = append(assets, tables...) @@ -377,7 +377,7 @@ func (p *storageAccountProvider) ListStorageAccountTableServices(ctx context.Con for _, item := range response.Value { properties, err := maps.AsMapStringAny(item.TableServiceProperties) if err != nil { - p.log.Errorf("error while transforming azure table services for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure table services for storage accounts %s: %v", sa.Id, err) } assets = append(assets, AzureAsset{ diff --git a/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go b/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go index 3bafd14d..e9e07a9d 100644 --- a/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go +++ b/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go @@ -97,7 +97,7 @@ func (rl *AssetsInventoryRateLimiter) Wait(ctx context.Context, method string, r if limiter != nil { err := limiter.Wait(ctx) if err != nil { - rl.log.Errorf("Failed to wait for project quota on method: %s, request: %v, error: %v", method, req, err) + rl.log.Errorf(ctx, "Failed to wait for project quota on 
method: %s, request: %v, error: %v", method, req, err) } } } diff --git a/internal/resources/providers/gcplib/inventory/provider.go b/internal/resources/providers/gcplib/inventory/provider.go index 44228070..21bcb441 100644 --- a/internal/resources/providers/gcplib/inventory/provider.go +++ b/internal/resources/providers/gcplib/inventory/provider.go @@ -413,7 +413,7 @@ func (p *Provider) getAllAssets(ctx context.Context, out chan<- *ExtendedGcpAsse } if err != nil { - p.log.Errorf("Error fetching GCP %v of types: %v for %v: %v\n", req.ContentType, req.AssetTypes, req.Parent, err) + p.log.Errorf(ctx, "Error fetching GCP %v of types: %v for %v: %v\n", req.ContentType, req.AssetTypes, req.Parent, err) return } diff --git a/internal/resources/providers/gcplib/inventory/resource_manager.go b/internal/resources/providers/gcplib/inventory/resource_manager.go index 4dc0ae55..de8159e6 100644 --- a/internal/resources/providers/gcplib/inventory/resource_manager.go +++ b/internal/resources/providers/gcplib/inventory/resource_manager.go @@ -55,7 +55,7 @@ func NewResourceManagerWrapper(ctx context.Context, log *clog.Logger, gcpConfig getProjectDisplayName: func(ctx context.Context, parent string) string { prj, err := crmService.Projects.Get(parent).Context(ctx).Do() if err != nil { - log.Errorf("error fetching GCP Project: %s, error: %s", parent, err) + log.Errorf(ctx, "error fetching GCP Project: %s, error: %s", parent, err) return "" } return prj.DisplayName @@ -63,7 +63,7 @@ func NewResourceManagerWrapper(ctx context.Context, log *clog.Logger, gcpConfig getOrganizationDisplayName: func(ctx context.Context, parent string) string { org, err := crmService.Organizations.Get(parent).Context(ctx).Do() if err != nil { - log.Errorf("error fetching GCP Org: %s, error: %s", parent, err) + log.Errorf(ctx, "error fetching GCP Org: %s, error: %s", parent, err) return "" } return org.DisplayName @@ -81,7 +81,7 @@ func (c *ResourceManagerWrapper) GetCloudMetadata(ctx context.Context, asset 
*as if valid { return cloudAccountMetadata } - c.log.Errorf("error casting cloud account metadata for key: %s", key) + c.log.Errorf(ctx, "error casting cloud account metadata for key: %s", key) } cloudAccountMetadata := c.getMetadata(ctx, orgId, projectId) c.accountMetadataCache.Store(key, cloudAccountMetadata) diff --git a/internal/resources/providers/msgraph/provider.go b/internal/resources/providers/msgraph/provider.go index e0d5f1fb..7a572827 100644 --- a/internal/resources/providers/msgraph/provider.go +++ b/internal/resources/providers/msgraph/provider.go @@ -94,7 +94,7 @@ func (p *provider) ListServicePrincipals(ctx context.Context) ([]*models.Service return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Service Principals: %v", err) + p.log.Errorf(ctx, "error iterating over Service Principals: %v", err) } return items, nil } @@ -122,7 +122,7 @@ func (p *provider) ListDirectoryRoles(ctx context.Context) ([]*models.DirectoryR return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Directory Roles: %v", err) + p.log.Errorf(ctx, "error iterating over Directory Roles: %v", err) } return items, nil } @@ -150,7 +150,7 @@ func (p *provider) ListGroups(ctx context.Context) ([]*models.Group, error) { return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Groups: %v", err) + p.log.Errorf(ctx, "error iterating over Groups: %v", err) } return items, nil } @@ -178,7 +178,7 @@ func (p *provider) ListUsers(ctx context.Context) ([]*models.User, error) { return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Users: %v", err) + p.log.Errorf(ctx, "error iterating over Users: %v", err) } return items, nil } diff --git a/internal/uniqueness/leaderelection.go b/internal/uniqueness/leaderelection.go index da0acf3b..8b8fcc28 100644 --- a/internal/uniqueness/leaderelection.go +++ 
b/internal/uniqueness/leaderelection.go @@ -74,13 +74,13 @@ func (m *LeaderelectionManager) Run(ctx context.Context) error { leConfig, err := m.buildConfig(newCtx) if err != nil { - m.log.Errorf("Fail building leader election config: %v", err) + m.log.Errorf(ctx, "Fail building leader election config: %v", err) return err } m.leader, err = le.NewLeaderElector(leConfig) if err != nil { - m.log.Errorf("Fail to create a new leader elector: %v", err) + m.log.Errorf(ctx, "Fail to create a new leader elector: %v", err) return err } diff --git a/internal/vulnerability/events_creator.go b/internal/vulnerability/events_creator.go index 1f57416e..d88bd755 100644 --- a/internal/vulnerability/events_creator.go +++ b/internal/vulnerability/events_creator.go @@ -203,7 +203,7 @@ func (e EventsCreator) CreateEvents(ctx context.Context, scanResults chan []Resu events := make([]beat.Event, 0, len(data)) for _, res := range data { - events = append(events, e.generateEvent(res.reportResult, res.vulnerability, res.snapshot.Instance, res.seq)) + events = append(events, e.generateEvent(ctx, res.reportResult, res.vulnerability, res.snapshot.Instance, res.seq)) } select { @@ -220,7 +220,7 @@ func (e EventsCreator) GetChan() chan []beat.Event { return e.ch } -func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTypes.DetectedVulnerability, instance ec2.Ec2Instance, seq time.Time) beat.Event { +func (e EventsCreator) generateEvent(ctx context.Context, reportResult trivyTypes.Result, vul trivyTypes.DetectedVulnerability, instance ec2.Ec2Instance, seq time.Time) beat.Event { timestamp := time.Now().UTC() sequence := seq.Unix() @@ -251,7 +251,7 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy // TODO: Should we fail the event if we can't enrich the cloud section? 
if err != nil { - e.log.Errorf("failed to enrich cloud section: %v", err) + e.log.Errorf(ctx, "failed to enrich cloud section: %v", err) } hostSec, err := convertStructToMapStr(HostSection{ @@ -269,7 +269,7 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy // TODO: Should we fail the event if we can't enrich the host section? if err != nil { - e.log.Errorf("failed to enrich host section: %v", err) + e.log.Errorf(ctx, "failed to enrich host section: %v", err) } networkSec, err := convertStructToMapStr(NetworkSection{ @@ -280,7 +280,7 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy // TODO: Should we fail the event if we can't enrich the network section? if err != nil { - e.log.Errorf("failed to enrich network section: %v", err) + e.log.Errorf(ctx, "failed to enrich network section: %v", err) } event := beat.Event{ @@ -344,12 +344,12 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy err = e.cloudDataProvider.EnrichEvent(&event, fetching.ResourceMetadata{Region: instance.Region}) if err != nil { - e.log.Errorf("failed to enrich event with benchmark data provider: %v", err) + e.log.Errorf(ctx, "failed to enrich event with benchmark data provider: %v", err) } err = e.commonDataProvider.EnrichEvent(&event) if err != nil { - e.log.Errorf("failed to enrich event with global data provider: %v", err) + e.log.Errorf(ctx, "failed to enrich event with global data provider: %v", err) } return event diff --git a/internal/vulnerability/fetcher.go b/internal/vulnerability/fetcher.go index cdd275dd..dd66db8d 100644 --- a/internal/vulnerability/fetcher.go +++ b/internal/vulnerability/fetcher.go @@ -51,14 +51,14 @@ func (f VulnerabilityFetcher) FetchInstances(ctx context.Context) error { f.log.Info("Starting VulnerabilityFetcher.FetchInstances") ins, err := f.provider.DescribeInstances(ctx) if err != nil { - f.log.Errorf("VulnerabilityFetcher.FetchInstances DescribeInstances failed: 
%v", err) + f.log.Errorf(ctx, "VulnerabilityFetcher.FetchInstances DescribeInstances failed: %v", err) return err } f.log.Infof("VulnerabilityFetcher.FetchInstances found %d results", len(ins)) err = f.attachRootVolumes(ctx, ins) if err != nil { - f.log.Errorf("VulnerabilityFetcher.FetchInstances attachRootVolumes failed: %v", err) + f.log.Errorf(ctx, "VulnerabilityFetcher.FetchInstances attachRootVolumes failed: %v", err) } else { f.sortByRootVolumeSize(ins) } diff --git a/internal/vulnerability/replicator.go b/internal/vulnerability/replicator.go index 342a9f25..7a17a88e 100644 --- a/internal/vulnerability/replicator.go +++ b/internal/vulnerability/replicator.go @@ -55,7 +55,7 @@ func (f VulnerabilityReplicator) SnapshotInstance(ctx context.Context, insCh cha } sp, err := f.manager.CreateSnapshots(ctx, data) if err != nil { - f.log.Errorf("VulnerabilityReplicator.SnapshotInstance.CreateSnapshots failed: %v", err) + f.log.Errorf(ctx, "VulnerabilityReplicator.SnapshotInstance.CreateSnapshots failed: %v", err) continue } diff --git a/internal/vulnerability/runner.go b/internal/vulnerability/runner.go index 67740e15..834ef6f7 100644 --- a/internal/vulnerability/runner.go +++ b/internal/vulnerability/runner.go @@ -40,7 +40,7 @@ func NewVulnerabilityRunner(ctx context.Context, log *clog.Logger) (Vulnerabilit log.Debug("NewVulnerabilityRunner: New") if err := clearTrivyCache(ctx, log); err != nil { - log.Errorf("error during runner cache clearing %s", err.Error()) + log.Errorf(ctx, "error during runner cache clearing %s", err.Error()) } opts := flag.Options{ diff --git a/internal/vulnerability/scanner.go b/internal/vulnerability/scanner.go index 94540bcc..1537c3b5 100644 --- a/internal/vulnerability/scanner.go +++ b/internal/vulnerability/scanner.go @@ -99,11 +99,11 @@ func (f VulnerabilityScanner) ScanSnapshot(ctx context.Context, snapCh chan ec2. 
func (f VulnerabilityScanner) scan(ctx context.Context, snap ec2.EBSSnapshot) { f.log.Infof("Starting VulnerabilityScanner.scan, %s", snap.SnapshotId) - defer func() { + defer func(ctx context.Context) { if r := recover(); r != nil { - f.log.Errorf("vulnerability scanner recovered from panic: %v", r) + f.log.Errorf(ctx, "vulnerability scanner recovered from panic: %v", r) } - }() + }(ctx) o, err := os.CreateTemp("", "") if err != nil { @@ -158,7 +158,7 @@ func (f VulnerabilityScanner) scan(ctx context.Context, snap ec2.EBSSnapshot) { ) if err != nil { - f.log.Errorf("VulnerabilityScanner.scan.ScanVM, snapshotId: %s, instanceId: %s, error: %v", snap.SnapshotId, *snap.Instance.InstanceId, err) + f.log.Errorf(ctx, "VulnerabilityScanner.scan.ScanVM, snapshotId: %s, instanceId: %s, error: %v", snap.SnapshotId, *snap.Instance.InstanceId, err) return } diff --git a/internal/vulnerability/snapshot.go b/internal/vulnerability/snapshot.go index 01c78ec1..d1df2a04 100644 --- a/internal/vulnerability/snapshot.go +++ b/internal/vulnerability/snapshot.go @@ -127,7 +127,7 @@ func (s *SnapshotManager) delete(ctx context.Context, snapshot ec2.EBSSnapshot, s.logger.Infof("VulnerabilityScanner.manager.%s %s", message, snapshot.SnapshotId) err := s.provider.DeleteSnapshot(ctx, snapshot) if err != nil { - s.logger.Errorf("VulnerabilityScanner.manager.%s %s error: %s", message, snapshot.SnapshotId, err) + s.logger.Errorf(ctx, "VulnerabilityScanner.manager.%s %s error: %s", message, snapshot.SnapshotId, err) } } diff --git a/internal/vulnerability/verifier.go b/internal/vulnerability/verifier.go index 1c992633..d5b5f856 100644 --- a/internal/vulnerability/verifier.go +++ b/internal/vulnerability/verifier.go @@ -102,7 +102,7 @@ func (f VulnerabilityVerifier) verify(ctx context.Context, snap ec2.EBSSnapshot) case <-time.After(f.interval): sp, err := f.provider.DescribeSnapshots(ctx, snap) if err != nil { - f.log.Errorf("VulnerabilityVerifier.verify.DescribeSnapshots failed: %v", err) + 
f.log.Errorf(ctx, "VulnerabilityVerifier.verify.DescribeSnapshots failed: %v", err) continue } // TODO: Add a layer of "smart" cache to avoid checking and sending the same snapshot diff --git a/internal/vulnerability/worker.go b/internal/vulnerability/worker.go index 443ee699..84921d16 100644 --- a/internal/vulnerability/worker.go +++ b/internal/vulnerability/worker.go @@ -145,7 +145,7 @@ func (f *VulnerabilityWorker) Run(ctx context.Context) { defer wg.Done() err := job.fn(ctx) if err != nil { - f.log.Errorf("VulnerabilityWorker.work job %s failed: %s", job.name, err.Error()) + f.log.Errorf(ctx, "VulnerabilityWorker.work job %s failed: %s", job.name, err.Error()) } else { f.log.Infof("VulnerabilityWorker.work job %s finished", job.name) } --- internal/evaluator/logger.go | 4 +- .../flavors/assetinventory/strategy_aws.go | 2 +- internal/flavors/benchmark/aws_org.go | 4 +- internal/flavors/benchmark/eks.go | 2 +- internal/flavors/benchmark/k8s.go | 2 +- internal/flavors/benchmark/k8s_helper.go | 2 +- internal/infra/clog/clog.go | 7 ++-- internal/infra/clog/clog_test.go | 6 +-- .../awsfetcher/fetcher_ec2_instance.go | 2 +- internal/inventory/awsfetcher/fetcher_elb.go | 2 +- .../awsfetcher/fetcher_iam_policy.go | 4 +- .../inventory/awsfetcher/fetcher_iam_role.go | 2 +- .../inventory/awsfetcher/fetcher_iam_user.go | 4 +- .../inventory/awsfetcher/fetcher_lambda.go | 2 +- .../awsfetcher/fetcher_networking.go | 2 +- internal/inventory/awsfetcher/fetcher_rds.go | 2 +- .../inventory/awsfetcher/fetcher_s3_bucket.go | 2 +- internal/inventory/awsfetcher/fetcher_sns.go | 2 +- .../inventory/azurefetcher/fetcher_account.go | 2 +- .../azurefetcher/fetcher_activedirectory.go | 8 ++-- .../azurefetcher/fetcher_resource_graph.go | 2 +- .../inventory/azurefetcher/fetcher_storage.go | 4 +- internal/launcher/launcher.go | 7 ++-- internal/resources/fetching/cycle/cache.go | 2 +- .../fetching/fetchers/aws/ecr_fetcher.go | 4 +- .../fetching/fetchers/aws/iam_fetcher.go | 10 ++--- 
.../fetching/fetchers/aws/kms_fetcher.go | 2 +- .../fetching/fetchers/aws/logging_fetcher.go | 4 +- .../fetchers/aws/monitoring_fetcher.go | 4 +- .../fetching/fetchers/aws/network_fetcher.go | 8 ++-- .../fetching/fetchers/aws/rds_fetcher.go | 2 +- .../fetching/fetchers/aws/s3_fetcher.go | 2 +- .../fetching/fetchers/azure/assets_fetcher.go | 4 +- .../fetching/fetchers/azure/batch_fetcher.go | 2 +- .../fetchers/azure/security_fetcher.go | 2 +- .../fetchers/k8s/file_system_fetcher.go | 23 ++++++----- .../fetching/fetchers/k8s/kube_fetcher.go | 2 +- .../fetching/fetchers/k8s/kube_provider.go | 9 +++-- .../fetching/fetchers/k8s/process_fetcher.go | 38 +++++++++++-------- .../resources/fetching/manager/manager.go | 4 +- .../resources/fetching/registry/registry.go | 2 +- .../providers/aws_cis/logging/provider.go | 6 +-- .../aws_cis/monitoring/monitoring.go | 13 ++++--- .../providers/awslib/account_provider.go | 2 +- .../providers/awslib/all_region_selector.go | 2 +- .../awslib/cached_region_selector.go | 4 +- .../providers/awslib/cloudtrail/provider.go | 4 +- .../awslib/configservice/provider.go | 2 +- .../awslib/current_region_selector.go | 2 +- .../providers/awslib/ec2/provider.go | 6 +-- .../providers/awslib/elb_v2/provider_v2.go | 2 +- .../resources/providers/awslib/iam/policy.go | 2 +- .../providers/awslib/iam/role_policy.go | 2 +- .../providers/awslib/iam/root_account.go | 6 +-- .../resources/providers/awslib/iam/user.go | 10 ++--- .../providers/awslib/multi_region.go | 2 +- .../providers/awslib/rds/provider.go | 4 +- .../resources/providers/awslib/s3/provider.go | 14 +++---- .../providers/awslib/sns/provider.go | 6 +-- .../azurelib/inventory/storage_provider.go | 16 ++++---- .../gcplib/inventory/grpc_rate_limiter.go | 2 +- .../providers/gcplib/inventory/provider.go | 2 +- .../gcplib/inventory/resource_manager.go | 6 +-- .../resources/providers/msgraph/provider.go | 8 ++-- internal/uniqueness/leaderelection.go | 4 +- internal/vulnerability/events_creator.go | 14 
+++---- internal/vulnerability/fetcher.go | 4 +- internal/vulnerability/replicator.go | 2 +- internal/vulnerability/runner.go | 2 +- internal/vulnerability/scanner.go | 8 ++-- internal/vulnerability/snapshot.go | 2 +- internal/vulnerability/verifier.go | 2 +- internal/vulnerability/worker.go | 2 +- 73 files changed, 190 insertions(+), 173 deletions(-) diff --git a/internal/evaluator/logger.go b/internal/evaluator/logger.go index fd0ffa7200..d8b5a8929c 100644 --- a/internal/evaluator/logger.go +++ b/internal/evaluator/logger.go @@ -18,6 +18,8 @@ package evaluator import ( + "context" + "github.com/elastic/elastic-agent-libs/logp" "github.com/open-policy-agent/opa/v1/logging" "go.uber.org/zap" @@ -54,7 +56,7 @@ func (l *logger) Info(fmt string, a ...any) { } func (l *logger) Error(fmt string, a ...any) { - l.log.Errorf(fmt, a...) + l.log.Errorf(context.TODO(), fmt, a...) } func (l *logger) Warn(fmt string, a ...any) { diff --git a/internal/flavors/assetinventory/strategy_aws.go b/internal/flavors/assetinventory/strategy_aws.go index ed5b78f873..b3ce475030 100644 --- a/internal/flavors/assetinventory/strategy_aws.go +++ b/internal/flavors/assetinventory/strategy_aws.go @@ -103,7 +103,7 @@ func tryListingBuckets(ctx context.Context, log *clog.Logger, roleConfig awssdk. 
return true } if !strings.Contains(err.Error(), "not authorized to perform: sts:AssumeRole") { - log.Errorf("Expected a 403 autorization error, but got: %v", err) + log.Errorf(ctx, "Expected a 403 autorization error, but got: %v", err) } return false } diff --git a/internal/flavors/benchmark/aws_org.go b/internal/flavors/benchmark/aws_org.go index 5e47b88a40..a98d59b51e 100644 --- a/internal/flavors/benchmark/aws_org.go +++ b/internal/flavors/benchmark/aws_org.go @@ -148,7 +148,7 @@ func (a *AWSOrg) getAwsAccounts(ctx context.Context, log *clog.Logger, cfgCloudb if identity.Account == rootIdentity.Account { cfg, err := a.pickManagementAccountRole(ctx, log, stsClient, cfgCloudbeatRoot, identity) if err != nil { - log.Errorf("error picking roles for account %s: %s", identity.Account, err) + log.Errorf(ctx, "error picking roles for account %s: %s", identity.Account, err) continue } awsConfig = cfg @@ -205,7 +205,7 @@ func (a *AWSOrg) pickManagementAccountRole(ctx context.Context, log *clog.Logger if foundTagValue == scanSettingTagValue { _, err := a.IAMProvider.GetRole(ctx, memberRole) if err != nil { - log.Errorf("Management Account should be scanned (%s: %s), but %q role is missing: %s", scanSettingTagKey, foundTagValue, memberRole, err) + log.Errorf(ctx, "Management Account should be scanned (%s: %s), but %q role is missing: %s", scanSettingTagKey, foundTagValue, memberRole, err) } } diff --git a/internal/flavors/benchmark/eks.go b/internal/flavors/benchmark/eks.go index d8f3801eeb..ddece01130 100644 --- a/internal/flavors/benchmark/eks.go +++ b/internal/flavors/benchmark/eks.go @@ -73,7 +73,7 @@ func (k *EKS) initialize(ctx context.Context, log *clog.Logger, cfg *config.Conf } benchmarkHelper := NewK8sBenchmarkHelper(log, cfg, kubeClient) - k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) + k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) //nolint:contextcheck awsConfig, awsIdentity, err := k.getEksAwsConfig(ctx, cfg) if err != nil { 
diff --git a/internal/flavors/benchmark/k8s.go b/internal/flavors/benchmark/k8s.go index 4617bfc202..fc700f96dd 100644 --- a/internal/flavors/benchmark/k8s.go +++ b/internal/flavors/benchmark/k8s.go @@ -66,7 +66,7 @@ func (k *K8S) initialize(ctx context.Context, log *clog.Logger, cfg *config.Conf } benchmarkHelper := NewK8sBenchmarkHelper(log, cfg, kubeClient) - k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) + k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) //nolint:contextcheck dp, err := benchmarkHelper.GetK8sDataProvider(ctx, k8s.KubernetesClusterNameProvider{KubeClient: kubeClient}) if err != nil { diff --git a/internal/flavors/benchmark/k8s_helper.go b/internal/flavors/benchmark/k8s_helper.go index 13cbd88fcd..08f1e75152 100644 --- a/internal/flavors/benchmark/k8s_helper.go +++ b/internal/flavors/benchmark/k8s_helper.go @@ -48,7 +48,7 @@ func NewK8sBenchmarkHelper(log *clog.Logger, cfg *config.Config, client client_g func (h *K8SBenchmarkHelper) GetK8sDataProvider(ctx context.Context, clusterNameProvider k8s.ClusterNameProviderAPI) (dataprovider.CommonDataProvider, error) { clusterName, err := clusterNameProvider.GetClusterName(ctx, h.cfg) if err != nil { - h.log.Errorf("failed to get cluster name: %v", err) + h.log.Errorf(ctx, "failed to get cluster name: %v", err) } serverVersion, err := h.client.Discovery().ServerVersion() diff --git a/internal/infra/clog/clog.go b/internal/infra/clog/clog.go index d220a56d77..7c7fde886f 100644 --- a/internal/infra/clog/clog.go +++ b/internal/infra/clog/clog.go @@ -31,13 +31,14 @@ type Logger struct { *logp.Logger } -func (l *Logger) Errorf(template string, args ...any) { +func (l *Logger) Errorf(ctx context.Context, template string, args ...any) { + spanCtx := trace.SpanContextFromContext(ctx) // Downgrade context.Canceled errors to warning level if hasErrorType(context.Canceled, args...) { - l.Warnf(template, args...) + l.WithSpanContext(spanCtx).Warnf(template, args...) 
return } - l.Logger.Errorf(template, args...) + l.WithSpanContext(spanCtx).Logger.Errorf(template, args...) } func (l *Logger) Error(args ...any) { diff --git a/internal/infra/clog/clog_test.go b/internal/infra/clog/clog_test.go index 92469e86b3..8bd5a11f75 100644 --- a/internal/infra/clog/clog_test.go +++ b/internal/infra/clog/clog_test.go @@ -46,8 +46,8 @@ func (s *LoggerTestSuite) TestErrorfWithContextCanceled() { logger := NewLogger("test") err := context.Canceled - logger.Errorf("some error: %s", err) // error with context.Canceled - logger.Errorf("some error: %s", err.Error()) // error string with context Canceled + logger.Errorf(context.TODO(), "some error: %s", err) // error with context.Canceled + logger.Errorf(context.TODO(), "some error: %s", err.Error()) // error string with context Canceled logs := logp.ObserverLogs().TakeAll() if s.Len(logs, 2) { @@ -62,7 +62,7 @@ func (s *LoggerTestSuite) TestLogErrorfWithoutContextCanceled() { logger := NewLogger("test") err := errors.New("oops") - logger.Errorf("some error: %s", err) + logger.Errorf(context.TODO(), "some error: %s", err) logs := logp.ObserverLogs().TakeAll() if s.Len(logs, 1) { diff --git a/internal/inventory/awsfetcher/fetcher_ec2_instance.go b/internal/inventory/awsfetcher/fetcher_ec2_instance.go index 0c881b2e61..d75bd83e66 100644 --- a/internal/inventory/awsfetcher/fetcher_ec2_instance.go +++ b/internal/inventory/awsfetcher/fetcher_ec2_instance.go @@ -53,7 +53,7 @@ func (e *ec2InstanceFetcher) Fetch(ctx context.Context, assetChannel chan<- inve instances, err := e.provider.DescribeInstances(ctx) if err != nil { - e.logger.Errorf("Could not list ec2 instances: %v", err) + e.logger.Errorf(ctx, "Could not list ec2 instances: %v", err) return } diff --git a/internal/inventory/awsfetcher/fetcher_elb.go b/internal/inventory/awsfetcher/fetcher_elb.go index 5acc55b704..72fa994a50 100644 --- a/internal/inventory/awsfetcher/fetcher_elb.go +++ b/internal/inventory/awsfetcher/fetcher_elb.go @@ -73,7 +73,7 
@@ func (f *elbFetcher) fetch(ctx context.Context, resourceName string, function el awsResources, err := function(ctx) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/awsfetcher/fetcher_iam_policy.go b/internal/inventory/awsfetcher/fetcher_iam_policy.go index b04c1db6bf..36ad2f6447 100644 --- a/internal/inventory/awsfetcher/fetcher_iam_policy.go +++ b/internal/inventory/awsfetcher/fetcher_iam_policy.go @@ -54,7 +54,7 @@ func (i *iamPolicyFetcher) Fetch(ctx context.Context, assetChannel chan<- invent policies, err := i.provider.GetPolicies(ctx) if err != nil { - i.logger.Errorf("Could not list policies: %v", err) + i.logger.Errorf(ctx, "Could not list policies: %v", err) if len(policies) == 0 { return } @@ -67,7 +67,7 @@ func (i *iamPolicyFetcher) Fetch(ctx context.Context, assetChannel chan<- invent policy, ok := resource.(iam.Policy) if !ok { - i.logger.Errorf("Could not get info about policy: %s", resource.GetResourceArn()) + i.logger.Errorf(ctx, "Could not get info about policy: %s", resource.GetResourceArn()) continue } diff --git a/internal/inventory/awsfetcher/fetcher_iam_role.go b/internal/inventory/awsfetcher/fetcher_iam_role.go index e2d60459e9..e3ee419f37 100644 --- a/internal/inventory/awsfetcher/fetcher_iam_role.go +++ b/internal/inventory/awsfetcher/fetcher_iam_role.go @@ -54,7 +54,7 @@ func (i *iamRoleFetcher) Fetch(ctx context.Context, assetChannel chan<- inventor roles, err := i.provider.ListRoles(ctx) if err != nil { - i.logger.Errorf("Could not list roles: %v", err) + i.logger.Errorf(ctx, "Could not list roles: %v", err) if len(roles) == 0 { return } diff --git a/internal/inventory/awsfetcher/fetcher_iam_user.go b/internal/inventory/awsfetcher/fetcher_iam_user.go index 6221b76529..b6c4ac36e5 100644 --- a/internal/inventory/awsfetcher/fetcher_iam_user.go +++ b/internal/inventory/awsfetcher/fetcher_iam_user.go 
@@ -53,7 +53,7 @@ func (i *iamUserFetcher) Fetch(ctx context.Context, assetChannel chan<- inventor users, err := i.provider.GetUsers(ctx) if err != nil { - i.logger.Errorf("Could not list users: %v", err) + i.logger.Errorf(ctx, "Could not list users: %v", err) if len(users) == 0 { return } @@ -66,7 +66,7 @@ func (i *iamUserFetcher) Fetch(ctx context.Context, assetChannel chan<- inventor user, ok := resource.(iam.User) if !ok { - i.logger.Errorf("Could not get info about user: %s", resource.GetResourceArn()) + i.logger.Errorf(ctx, "Could not get info about user: %s", resource.GetResourceArn()) continue } diff --git a/internal/inventory/awsfetcher/fetcher_lambda.go b/internal/inventory/awsfetcher/fetcher_lambda.go index 54698553e2..10847d0374 100644 --- a/internal/inventory/awsfetcher/fetcher_lambda.go +++ b/internal/inventory/awsfetcher/fetcher_lambda.go @@ -73,7 +73,7 @@ func (s *lambdaFetcher) fetch(ctx context.Context, resourceName string, function awsResources, err := function(ctx) if err != nil { - s.logger.Errorf("Could not fetch %s: %v", resourceName, err) + s.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/awsfetcher/fetcher_networking.go b/internal/inventory/awsfetcher/fetcher_networking.go index 97220b90c1..ece19ca370 100644 --- a/internal/inventory/awsfetcher/fetcher_networking.go +++ b/internal/inventory/awsfetcher/fetcher_networking.go @@ -88,7 +88,7 @@ func (s *networkingFetcher) fetch(ctx context.Context, resourceName string, func awsResources, err := function(ctx) if err != nil { - s.logger.Errorf("Could not fetch %s: %v", resourceName, err) + s.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/awsfetcher/fetcher_rds.go b/internal/inventory/awsfetcher/fetcher_rds.go index 85e4f1b7ab..990cfc1b67 100644 --- a/internal/inventory/awsfetcher/fetcher_rds.go +++ b/internal/inventory/awsfetcher/fetcher_rds.go @@ -55,7 +55,7 @@ func (s *rdsFetcher) 
Fetch(ctx context.Context, assetChannel chan<- inventory.As awsResources, err := s.provider.DescribeDBInstances(ctx) if err != nil { - s.logger.Errorf("Could not list RDS Instances: %v", err) + s.logger.Errorf(ctx, "Could not list RDS Instances: %v", err) if len(awsResources) == 0 { return } diff --git a/internal/inventory/awsfetcher/fetcher_s3_bucket.go b/internal/inventory/awsfetcher/fetcher_s3_bucket.go index 1a0e4fa44c..4c349802a3 100644 --- a/internal/inventory/awsfetcher/fetcher_s3_bucket.go +++ b/internal/inventory/awsfetcher/fetcher_s3_bucket.go @@ -55,7 +55,7 @@ func (s *s3BucketFetcher) Fetch(ctx context.Context, assetChannel chan<- invento awsBuckets, err := s.provider.DescribeBuckets(ctx) if err != nil { - s.logger.Errorf("Could not list s3 buckets: %v", err) + s.logger.Errorf(ctx, "Could not list s3 buckets: %v", err) if len(awsBuckets) == 0 { return } diff --git a/internal/inventory/awsfetcher/fetcher_sns.go b/internal/inventory/awsfetcher/fetcher_sns.go index a6d8819f88..0e22d7bd81 100644 --- a/internal/inventory/awsfetcher/fetcher_sns.go +++ b/internal/inventory/awsfetcher/fetcher_sns.go @@ -52,7 +52,7 @@ func (s *snsFetcher) Fetch(ctx context.Context, assetChannel chan<- inventory.As awsResources, err := s.provider.ListTopicsWithSubscriptions(ctx) if err != nil { - s.logger.Errorf("Could not fetch SNS Topics: %v", err) + s.logger.Errorf(ctx, "Could not fetch SNS Topics: %v", err) return } diff --git a/internal/inventory/azurefetcher/fetcher_account.go b/internal/inventory/azurefetcher/fetcher_account.go index f3481caa50..263b75ce8b 100644 --- a/internal/inventory/azurefetcher/fetcher_account.go +++ b/internal/inventory/azurefetcher/fetcher_account.go @@ -67,7 +67,7 @@ func (f *accountFetcher) fetch(ctx context.Context, resourceName string, functio azureAssets, err := function(ctx) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git 
a/internal/inventory/azurefetcher/fetcher_activedirectory.go b/internal/inventory/azurefetcher/fetcher_activedirectory.go index bc78b6a310..9c8d521925 100644 --- a/internal/inventory/azurefetcher/fetcher_activedirectory.go +++ b/internal/inventory/azurefetcher/fetcher_activedirectory.go @@ -63,7 +63,7 @@ func (f *activedirectoryFetcher) fetchServicePrincipals(ctx context.Context, ass items, err := f.provider.ListServicePrincipals(ctx) if err != nil { - f.logger.Errorf("Could not fetch Service Principals: %v", err) + f.logger.Errorf(ctx, "Could not fetch Service Principals: %v", err) } for _, item := range items { @@ -94,7 +94,7 @@ func (f *activedirectoryFetcher) fetchDirectoryRoles(ctx context.Context, assetC items, err := f.provider.ListDirectoryRoles(ctx) if err != nil { - f.logger.Errorf("Could not fetch Directory Roles: %v", err) + f.logger.Errorf(ctx, "Could not fetch Directory Roles: %v", err) } for _, item := range items { @@ -124,7 +124,7 @@ func (f *activedirectoryFetcher) fetchGroups(ctx context.Context, assetChan chan items, err := f.provider.ListGroups(ctx) if err != nil { - f.logger.Errorf("Could not fetch Groups: %v", err) + f.logger.Errorf(ctx, "Could not fetch Groups: %v", err) } for _, item := range items { @@ -160,7 +160,7 @@ func (f *activedirectoryFetcher) fetchUsers(ctx context.Context, assetChan chan< items, err := f.provider.ListUsers(ctx) if err != nil { - f.logger.Errorf("Could not fetch Users: %v", err) + f.logger.Errorf(ctx, "Could not fetch Users: %v", err) } for _, item := range items { diff --git a/internal/inventory/azurefetcher/fetcher_resource_graph.go b/internal/inventory/azurefetcher/fetcher_resource_graph.go index f1a2af13de..af0c732b16 100644 --- a/internal/inventory/azurefetcher/fetcher_resource_graph.go +++ b/internal/inventory/azurefetcher/fetcher_resource_graph.go @@ -78,7 +78,7 @@ func (f *resourceGraphFetcher) fetch(ctx context.Context, resourceName, resource azureAssets, err := f.provider.ListAllAssetTypesByName(ctx, 
resourceGroup, []string{resourceType}) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/inventory/azurefetcher/fetcher_storage.go b/internal/inventory/azurefetcher/fetcher_storage.go index 263f1f23c6..fca2ae5af0 100644 --- a/internal/inventory/azurefetcher/fetcher_storage.go +++ b/internal/inventory/azurefetcher/fetcher_storage.go @@ -74,7 +74,7 @@ func (f *storageFetcher) Fetch(ctx context.Context, assetChan chan<- inventory.A storageAccounts, err := f.listStorageAccounts(ctx) if err != nil { - f.logger.Errorf("Could not fetch anything: %v", err) + f.logger.Errorf(ctx, "Could not fetch anything: %v", err) return } @@ -108,7 +108,7 @@ func (f *storageFetcher) fetch(ctx context.Context, storageAccounts []azurelib.A azureAssets, err := function(ctx, storageAccounts) if err != nil { - f.logger.Errorf("Could not fetch %s: %v", resourceName, err) + f.logger.Errorf(ctx, "Could not fetch %s: %v", resourceName, err) return } diff --git a/internal/launcher/launcher.go b/internal/launcher/launcher.go index 8670dbf357..8c38768b3b 100644 --- a/internal/launcher/launcher.go +++ b/internal/launcher/launcher.go @@ -21,6 +21,7 @@ package launcher import ( + "context" "errors" "fmt" "sync" @@ -99,7 +100,7 @@ func (l *launcher) Run(b *beat.Beat) error { l.log.Infof("Waiting for initial reconfiguration from Fleet server...") update, err := l.reconfigureWait(reconfigureWaitTimeout) if err != nil { - l.log.Errorf("Failed while waiting for the initial reconfiguration from Fleet server: %v", err) + l.log.Errorf(context.TODO(), "Failed while waiting for the initial reconfiguration from Fleet server: %v", err) return err } @@ -122,7 +123,7 @@ func (l *launcher) run() error { l.log.Info("Launcher stopped after timeout") case err == nil: // unexpected default: - l.log.Errorf("Launcher stopped by error: %v", err) + l.log.Errorf(context.TODO(), "Launcher stopped by 
error: %v", err) } l.reloader.Stop() @@ -298,7 +299,7 @@ func (l *launcher) reconfigureWait(timeout time.Duration) (*config.C, error) { if l.validator != nil { err := l.validator.Validate(update) if err != nil { - l.log.Errorf("Config update validation failed: %v", err) + l.log.Errorf(context.TODO(), "Config update validation failed: %v", err) healthErr := &BeaterUnhealthyError{} if errors.As(err, healthErr) { l.beat.Manager.UpdateStatus(status.Degraded, healthErr.Error()) diff --git a/internal/resources/fetching/cycle/cache.go b/internal/resources/fetching/cycle/cache.go index 3e2b13cd1b..040185e466 100644 --- a/internal/resources/fetching/cycle/cache.go +++ b/internal/resources/fetching/cycle/cache.go @@ -57,7 +57,7 @@ func (c *Cache[T]) GetValue(ctx context.Context, cycle Metadata, fetch func(cont if c.lastCycle.Sequence < 0 { return result, err } - c.log.Errorf("Failed to renew, using cached value: %v", err) + c.log.Errorf(ctx, "Failed to renew, using cached value: %v", err) } else { c.cachedValue = result c.lastCycle = cycle diff --git a/internal/resources/fetching/fetchers/aws/ecr_fetcher.go b/internal/resources/fetching/fetchers/aws/ecr_fetcher.go index d9af08df61..2dd775910d 100644 --- a/internal/resources/fetching/fetchers/aws/ecr_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/ecr_fetcher.go @@ -74,7 +74,7 @@ func (f *EcrFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) er podsList, err := f.kubeClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{}) if err != nil { - f.log.Errorf("failed to get pods - %v", err) + f.log.Errorf(ctx, "failed to get pods - %v", err) return err } @@ -96,7 +96,7 @@ func (f *EcrFetcher) describePodImagesRepositories(ctx context.Context, podsList // Add configuration describedRepo, err := describer.Provider.DescribeRepositories(ctx, repositories, region) if err != nil { - f.log.Errorf("could not retrieve pod's aws repositories for region %s: %v", region, err) + f.log.Errorf(ctx, "could not retrieve 
pod's aws repositories for region %s: %v", region, err) } else { awsRepositories = append(awsRepositories, describedRepo...) } diff --git a/internal/resources/fetching/fetchers/aws/iam_fetcher.go b/internal/resources/fetching/fetchers/aws/iam_fetcher.go index 2490afc888..3ee96bf0fa 100644 --- a/internal/resources/fetching/fetchers/aws/iam_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/iam_fetcher.go @@ -62,35 +62,35 @@ func (f IAMFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) err pwdPolicy, err := f.iamProvider.GetPasswordPolicy(ctx) if err != nil { - f.log.Errorf("Unable to fetch PasswordPolicy, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch PasswordPolicy, error: %v", err) } else { iamResources = append(iamResources, pwdPolicy) } users, err := f.iamProvider.GetUsers(ctx) if err != nil { - f.log.Errorf("Unable to fetch IAM users, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch IAM users, error: %v", err) } else { iamResources = append(iamResources, users...) } policies, err := f.iamProvider.GetPolicies(ctx) if err != nil { - f.log.Errorf("Unable to fetch IAM policies, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch IAM policies, error: %v", err) } else { iamResources = append(iamResources, policies...) 
} serverCertificates, err := f.iamProvider.ListServerCertificates(ctx) if err != nil { - f.log.Errorf("Unable to fetch IAM server certificates, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch IAM server certificates, error: %v", err) } else { iamResources = append(iamResources, serverCertificates) } accessAnalyzers, err := f.iamProvider.GetAccessAnalyzers(ctx) if err != nil { - f.log.Errorf("Unable to fetch access access analyzers, error: %v", err) + f.log.Errorf(ctx, "Unable to fetch access analyzers, error: %v", err) } else { iamResources = append(iamResources, accessAnalyzers) } diff --git a/internal/resources/fetching/fetchers/aws/kms_fetcher.go b/internal/resources/fetching/fetchers/aws/kms_fetcher.go index 7f6917b556..e829f0dec9 100644 --- a/internal/resources/fetching/fetchers/aws/kms_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/kms_fetcher.go @@ -52,7 +52,7 @@ func (f *KmsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) er keys, err := f.kms.DescribeSymmetricKeys(ctx) if err != nil { - f.log.Errorf("failed to describe keys from KMS: %v", err) + f.log.Errorf(ctx, "failed to describe keys from KMS: %v", err) return nil } diff --git a/internal/resources/fetching/fetchers/aws/logging_fetcher.go b/internal/resources/fetching/fetchers/aws/logging_fetcher.go index b010a8a792..d67ed5b7b2 100644 --- a/internal/resources/fetching/fetchers/aws/logging_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/logging_fetcher.go @@ -67,7 +67,7 @@ func (f LoggingFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) f.log.Debug("Starting LoggingFetcher.Fetch") trails, err := f.loggingProvider.DescribeTrails(ctx) if err != nil { - f.log.Errorf("failed to describe trails: %v", err) + f.log.Errorf(ctx, "failed to describe trails: %v", err) } for _, resource := range trails { @@ -81,7 +81,7 @@ func (f LoggingFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) configs, err := 
f.configserviceProvider.DescribeConfigRecorders(ctx) if err != nil { - f.log.Errorf("failed to describe config recorders: %v", err) + f.log.Errorf(ctx, "failed to describe config recorders: %v", err) return nil } diff --git a/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go b/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go index aa6677275f..10857f293a 100644 --- a/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/monitoring_fetcher.go @@ -61,7 +61,7 @@ func (m MonitoringFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metada m.log.Debug("Starting MonitoringFetcher.Fetch") out, err := m.provider.AggregateResources(ctx) if err != nil { - m.log.Errorf("failed to aggregate monitoring resources: %v", err) + m.log.Errorf(ctx, "failed to aggregate monitoring resources: %v", err) } if out != nil { m.resourceCh <- fetching.ResourceInfo{ @@ -71,7 +71,7 @@ func (m MonitoringFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metada } hubs, err := m.securityhub.Describe(ctx) if err != nil { - m.log.Errorf("failed to describe security hub: %v", err) + m.log.Errorf(ctx, "failed to describe security hub: %v", err) return nil } diff --git a/internal/resources/fetching/fetchers/aws/network_fetcher.go b/internal/resources/fetching/fetchers/aws/network_fetcher.go index 4dd42adceb..ed0ed2c136 100644 --- a/internal/resources/fetching/fetchers/aws/network_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/network_fetcher.go @@ -97,23 +97,23 @@ func (f NetworkFetcher) aggregateResources(ctx context.Context, client ec2.Elast var resources []awslib.AwsResource nacl, err := client.DescribeNetworkAcl(ctx) if err != nil { - f.log.Errorf("failed to describe network acl: %v", err) + f.log.Errorf(ctx, "failed to describe network acl: %v", err) } resources = append(resources, nacl...) 
securityGroups, err := client.DescribeSecurityGroups(ctx) if err != nil { - f.log.Errorf("failed to describe security groups: %v", err) + f.log.Errorf(ctx, "failed to describe security groups: %v", err) } resources = append(resources, securityGroups...) vpcs, err := client.DescribeVpcs(ctx) if err != nil { - f.log.Errorf("failed to describe vpcs: %v", err) + f.log.Errorf(ctx, "failed to describe vpcs: %v", err) } resources = append(resources, vpcs...) ebsEncryption, err := client.GetEbsEncryptionByDefault(ctx) if err != nil { - f.log.Errorf("failed to get ebs encryption by default: %v", err) + f.log.Errorf(ctx, "failed to get ebs encryption by default: %v", err) } if ebsEncryption != nil { diff --git a/internal/resources/fetching/fetchers/aws/rds_fetcher.go b/internal/resources/fetching/fetchers/aws/rds_fetcher.go index d2c3683708..99f0ab0114 100644 --- a/internal/resources/fetching/fetchers/aws/rds_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/rds_fetcher.go @@ -53,7 +53,7 @@ func (f *RdsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) er f.log.Info("Starting RdsFetcher.Fetch") dbInstances, err := f.provider.DescribeDBInstances(ctx) if err != nil { - f.log.Errorf("failed to load some DB instances from rds: %v", err) + f.log.Errorf(ctx, "failed to load some DB instances from rds: %v", err) } for _, dbInstance := range dbInstances { diff --git a/internal/resources/fetching/fetchers/aws/s3_fetcher.go b/internal/resources/fetching/fetchers/aws/s3_fetcher.go index c28a259904..8225956a01 100644 --- a/internal/resources/fetching/fetchers/aws/s3_fetcher.go +++ b/internal/resources/fetching/fetchers/aws/s3_fetcher.go @@ -49,7 +49,7 @@ func (f *S3Fetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) err f.log.Info("Starting S3Fetcher.Fetch") buckets, err := f.s3.DescribeBuckets(ctx) if err != nil { - f.log.Errorf("failed to load buckets from S3: %v", err) + f.log.Errorf(ctx, "failed to load buckets from S3: %v", err) return nil } 
diff --git a/internal/resources/fetching/fetchers/azure/assets_fetcher.go b/internal/resources/fetching/fetchers/azure/assets_fetcher.go index 768e7cb133..7a30164ecd 100644 --- a/internal/resources/fetching/fetchers/azure/assets_fetcher.go +++ b/internal/resources/fetching/fetchers/azure/assets_fetcher.go @@ -92,7 +92,7 @@ func (f *AzureAssetsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Meta // Fetching all types even if non-existent in asset group for simplicity r, err := f.provider.ListAllAssetTypesByName(ctx, assetGroup, slices.Collect(maps.Keys(AzureAssetTypeToTypePair))) if err != nil { - f.log.Errorf("AzureAssetsFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) + f.log.Errorf(ctx, "AzureAssetsFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) errAgg = errors.Join(errAgg, err) continue } @@ -101,7 +101,7 @@ func (f *AzureAssetsFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Meta subscriptions, err := f.provider.GetSubscriptions(ctx, cycleMetadata) if err != nil { - f.log.Errorf("Error fetching subscription information: %v", err) + f.log.Errorf(ctx, "Error fetching subscription information: %v", err) } for _, e := range f.enrichers { diff --git a/internal/resources/fetching/fetchers/azure/batch_fetcher.go b/internal/resources/fetching/fetchers/azure/batch_fetcher.go index 7496552b3c..182fdf09ce 100644 --- a/internal/resources/fetching/fetchers/azure/batch_fetcher.go +++ b/internal/resources/fetching/fetchers/azure/batch_fetcher.go @@ -70,7 +70,7 @@ func (f *AzureBatchAssetFetcher) Fetch(ctx context.Context, cycleMetadata cycle. 
for _, assetGroup := range AzureBatchAssetGroups { r, err := f.provider.ListAllAssetTypesByName(ctx, assetGroup, slices.Collect(maps.Keys(AzureBatchAssets))) if err != nil { - f.log.Errorf("AzureBatchAssetFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) + f.log.Errorf(ctx, "AzureBatchAssetFetcher.Fetch failed to fetch asset group %s: %s", assetGroup, err.Error()) errAgg = errors.Join(errAgg, err) continue } diff --git a/internal/resources/fetching/fetchers/azure/security_fetcher.go b/internal/resources/fetching/fetchers/azure/security_fetcher.go index 8e69ca5f2e..4c27efff34 100644 --- a/internal/resources/fetching/fetchers/azure/security_fetcher.go +++ b/internal/resources/fetching/fetchers/azure/security_fetcher.go @@ -66,7 +66,7 @@ func (f *AzureSecurityAssetFetcher) Fetch(ctx context.Context, cycleMetadata cyc for assetType, fn := range fetches { securityContacts, err := fn(ctx, sub.ShortID) if err != nil { - f.log.Errorf("AzureSecurityAssetFetcher.Fetch failed to fetch %s for subscription %s: %s", assetType, sub.ShortID, err.Error()) + f.log.Errorf(ctx, "AzureSecurityAssetFetcher.Fetch failed to fetch %s for subscription %s: %s", assetType, sub.ShortID, err.Error()) errs = append(errs, err) continue } diff --git a/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go b/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go index 0bbd9bf870..fd79d81370 100644 --- a/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/file_system_fetcher.go @@ -99,20 +99,22 @@ func NewFsFetcher(log *clog.Logger, ch chan fetching.ResourceInfo, patterns []st } } -func (f *FileSystemFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) error { +func (f *FileSystemFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) error { f.log.Debug("Starting FileSystemFetcher.Fetch") // Input files might contain glob pattern for _, filePattern := range f.patterns { 
matchedFiles, err := Glob(filePattern) if err != nil { - f.log.Errorf("Failed to find matched glob for %s, error: %+v", filePattern, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Failed to find matched glob for %s, error: %+v", filePattern, err) } for _, file := range matchedFiles { - resource, err := f.fetchSystemResource(file) + resource, err := f.fetchSystemResource(ctx, file) if err != nil { - f.log.Errorf("Unable to fetch fileSystemResource for file %v", file) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Unable to fetch fileSystemResource for file %v", file) continue } @@ -123,17 +125,17 @@ func (f *FileSystemFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadat return nil } -func (f *FileSystemFetcher) fetchSystemResource(filePath string) (*FSResource, error) { +func (f *FileSystemFetcher) fetchSystemResource(ctx context.Context, filePath string) (*FSResource, error) { info, err := os.Stat(filePath) if err != nil { return nil, fmt.Errorf("failed to fetch %s, error: %w", filePath, err) } - resourceInfo, _ := f.fromFileInfo(info, filePath) + resourceInfo, _ := f.fromFileInfo(ctx, info, filePath) return resourceInfo, nil } -func (f *FileSystemFetcher) fromFileInfo(info os.FileInfo, path string) (*FSResource, error) { +func (f *FileSystemFetcher) fromFileInfo(ctx context.Context, info os.FileInfo, path string) (*FSResource, error) { if info == nil { return nil, nil } @@ -172,7 +174,7 @@ func (f *FileSystemFetcher) fromFileInfo(info os.FileInfo, path string) (*FSReso return &FSResource{ EvalResource: data, - ElasticCommon: f.createFileCommonData(stat, data, path), + ElasticCommon: f.createFileCommonData(ctx, stat, data, path), }, nil } @@ -232,7 +234,7 @@ func getFSSubType(fileInfo os.FileInfo) string { return FileSubType } -func (f *FileSystemFetcher) createFileCommonData(stat *syscall.Stat_t, data EvalFSResource, path string) FileCommonData { +func (f 
*FileSystemFetcher) createFileCommonData(ctx context.Context, stat *syscall.Stat_t, data EvalFSResource, path string) FileCommonData { cd := FileCommonData{ Name: data.Name, Mode: data.Mode, @@ -250,7 +252,8 @@ func (f *FileSystemFetcher) createFileCommonData(stat *syscall.Stat_t, data Eval t, err := times.Stat(path) if err != nil { - f.log.Errorf("failed to get file time data (file %s), error - %s", path, err.Error()) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "failed to get file time data (file %s), error - %s", path, err.Error()) } else { cd.Accessed = t.AccessTime() cd.Mtime = t.ModTime() diff --git a/internal/resources/fetching/fetchers/k8s/kube_fetcher.go b/internal/resources/fetching/fetchers/k8s/kube_fetcher.go index ae43039e2b..f472aef408 100644 --- a/internal/resources/fetching/fetchers/k8s/kube_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/kube_fetcher.go @@ -163,7 +163,7 @@ func (f *KubeFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) err return fmt.Errorf("could not initate Kubernetes watchers: %w", err) } - getKubeData(f.log, f.watchers, f.resourceCh, cycleMetadata) + getKubeData(f.log, f.watchers, f.resourceCh, cycleMetadata) //nolint:contextcheck return nil } diff --git a/internal/resources/fetching/fetchers/k8s/kube_provider.go b/internal/resources/fetching/fetchers/k8s/kube_provider.go index c2ca3ad5da..b50109073d 100644 --- a/internal/resources/fetching/fetchers/k8s/kube_provider.go +++ b/internal/resources/fetching/fetchers/k8s/kube_provider.go @@ -18,6 +18,7 @@ package fetchers import ( + "context" "reflect" "github.com/elastic/elastic-agent-autodiscover/kubernetes" @@ -54,13 +55,13 @@ func getKubeData(log *clog.Logger, watchers []kubernetes.Watcher, resCh chan fet resource, ok := r.(kubernetes.Resource) if !ok { - log.Errorf("Bad resource: %#v does not implement kubernetes.Resource", r) + log.Errorf(context.TODO(), "Bad resource: %#v does not implement kubernetes.Resource", 
r) continue } err := addTypeInformationToKubeResource(resource) if err != nil { - log.Errorf("Bad resource: %v", err) + log.Errorf(context.TODO(), "Bad resource: %v", err) continue } // See https://github.com/kubernetes/kubernetes/issues/3030 resCh <- fetching.ResourceInfo{Resource: K8sResource{log, resource}, CycleMetadata: cycleMetadata} @@ -108,7 +109,7 @@ func (r K8sResource) GetElasticCommonData() (map[string]any, error) { func getK8sObjectMeta(log *clog.Logger, k8sObj reflect.Value) metav1.ObjectMeta { metadata, ok := k8sObj.FieldByName(k8sObjMetadataField).Interface().(metav1.ObjectMeta) if !ok { - log.Errorf("Failed to retrieve object metadata, Resource: %#v", k8sObj) + log.Errorf(context.TODO(), "Failed to retrieve object metadata, Resource: %#v", k8sObj) return metav1.ObjectMeta{} } @@ -118,7 +119,7 @@ func getK8sObjectMeta(log *clog.Logger, k8sObj reflect.Value) metav1.ObjectMeta func getK8sSubType(log *clog.Logger, k8sObj reflect.Value) string { typeMeta, ok := k8sObj.FieldByName(k8sTypeMetadataField).Interface().(metav1.TypeMeta) if !ok { - log.Errorf("Failed to retrieve type metadata, Resource: %#v", k8sObj) + log.Errorf(context.TODO(), "Failed to retrieve type metadata, Resource: %#v", k8sObj) return "" } diff --git a/internal/resources/fetching/fetchers/k8s/process_fetcher.go b/internal/resources/fetching/fetchers/k8s/process_fetcher.go index d1b931afcb..ff6f6d9d2c 100644 --- a/internal/resources/fetching/fetchers/k8s/process_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/process_fetcher.go @@ -129,7 +129,7 @@ func NewProcessFetcher(log *clog.Logger, ch chan fetching.ResourceInfo, processe } } -func (f *ProcessesFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) error { +func (f *ProcessesFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) error { f.log.Debug("Starting ProcessesFetcher.Fetch") pids, err := proc.ListFS(f.Fs) @@ -142,7 +142,8 @@ func (f *ProcessesFetcher) Fetch(_ context.Context, cycleMetadata 
cycle.Metadata for _, p := range pids { stat, err := proc.ReadStatFS(f.Fs, p) if err != nil { - f.log.Errorf("error while reading /proc//stat for process %s: %s", p, err.Error()) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "error while reading /proc//stat for process %s: %s", p, err.Error()) continue } @@ -159,39 +160,43 @@ func (f *ProcessesFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata continue } - fetchedResource := f.fetchProcessData(stat, processConfig, p, cmd) + fetchedResource := f.fetchProcessData(ctx, stat, processConfig, p, cmd) f.resourceCh <- fetching.ResourceInfo{Resource: fetchedResource, CycleMetadata: cycleMetadata} } return nil } -func (f *ProcessesFetcher) fetchProcessData(procStat proc.ProcStat, processConf ProcessInputConfiguration, processId string, cmd string) fetching.Resource { - configMap := f.getProcessConfigurationFile(processConf, cmd, procStat.Name) +func (f *ProcessesFetcher) fetchProcessData(ctx context.Context, procStat proc.ProcStat, processConf ProcessInputConfiguration, processId string, cmd string) fetching.Resource { + configMap := f.getProcessConfigurationFile(ctx, processConf, cmd, procStat.Name) evalRes := EvalProcResource{PID: processId, Cmd: cmd, Stat: procStat, ExternalData: configMap} - procCd := f.createProcCommonData(procStat, cmd, processId) + procCd := f.createProcCommonData(ctx, procStat, cmd, processId) return ProcResource{EvalResource: evalRes, ElasticCommon: procCd} } -func (f *ProcessesFetcher) createProcCommonData(stat proc.ProcStat, cmd string, pid string) ProcCommonData { +func (f *ProcessesFetcher) createProcCommonData(ctx context.Context, stat proc.ProcStat, cmd string, pid string) ProcCommonData { processID, err := strconv.ParseInt(pid, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse PID, pid: %s", pid) + // FIXME: This should be a context from the function signature. 
+ f.log.Errorf(ctx, "Couldn't parse PID, pid: %s", pid) } startTime, err := strconv.ParseUint(stat.StartTime, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse stat.StartTime, startTime: %s", stat.StartTime) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't parse stat.StartTime, startTime: %s", stat.StartTime) } pgid, err := strconv.ParseInt(stat.Group, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse stat.Group, Group: %s, Error: %v", stat.Group, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't parse stat.Group, Group: %s, Error: %v", stat.Group, err) } ppid, err := strconv.ParseInt(stat.Parent, 10, 64) if err != nil { - f.log.Errorf("Couldn't parse stat.Parent, Parent: %s, Error: %v", stat.Parent, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't parse stat.Parent, Parent: %s, Error: %v", stat.Parent, err) } sysUptime, err := proc.ReadUptimeFS(f.Fs) @@ -219,7 +224,7 @@ func (f *ProcessesFetcher) createProcCommonData(stat proc.ProcStat, cmd string, // getProcessConfigurationFile - reads the configuration file associated with a process. // As an input this function receives a ProcessInputConfiguration that contains ConfigFileArguments, a string array that represents some process flags // The function extracts the configuration file associated with each flag and returns it. 
-func (f *ProcessesFetcher) getProcessConfigurationFile(processConfig ProcessInputConfiguration, cmd string, processName string) map[string]any { +func (f *ProcessesFetcher) getProcessConfigurationFile(ctx context.Context, processConfig ProcessInputConfiguration, cmd string, processName string) map[string]any { configMap := make(map[string]any) for _, argument := range processConfig.ConfigFileArguments { // The regex extracts the cmd line flag(argument) value @@ -232,7 +237,8 @@ func (f *ProcessesFetcher) getProcessConfigurationFile(processConfig ProcessInpu groupMatches := matcher.FindStringSubmatch(cmd) if len(groupMatches) < 2 { - f.log.Errorf("Couldn't find a configuration file associated with flag %s for process %s", argument, processName) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Couldn't find a configuration file associated with flag %s for process %s", argument, processName) continue } argValue := matcher.FindStringSubmatch(cmd)[1] @@ -240,12 +246,14 @@ func (f *ProcessesFetcher) getProcessConfigurationFile(processConfig ProcessInpu data, err := fs.ReadFile(f.Fs, argValue) if err != nil { - f.log.Errorf("Failed to read file configuration for process %s, error - %+v", processName, err) + // FIXME: This should be a context from the function signature. + f.log.Errorf(ctx, "Failed to read file configuration for process %s, error - %+v", processName, err) continue } configFile, err := f.readConfigurationFile(argValue, data) if err != nil { - f.log.Errorf("Failed to parse file configuration for process %s, error - %+v", processName, err) + // FIXME: This should be a context from the function signature. 
+ f.log.Errorf(ctx, "Failed to parse file configuration for process %s, error - %+v", processName, err) continue } configMap[argument] = configFile diff --git a/internal/resources/fetching/manager/manager.go b/internal/resources/fetching/manager/manager.go index ea6ba2521c..a52dc4f2bb 100644 --- a/internal/resources/fetching/manager/manager.go +++ b/internal/resources/fetching/manager/manager.go @@ -75,7 +75,7 @@ func (m *Manager) Stop() { func (m *Manager) fetchAndSleep(ctx context.Context) { counter, err := observability.MeterFromContext(ctx, scopeName).Int64Counter("cloudbeat.fetcher.manager.cycles") if err != nil { - m.log.Errorf("Failed to create fetcher manager cycles counter: %v", err) + m.log.Errorf(ctx, "Failed to create fetcher manager cycles counter: %v", err) } // set immediate exec for first time run @@ -124,7 +124,7 @@ func (m *Manager) fetchIteration(ctx context.Context) { defer wg.Done() err := m.fetchSingle(ctx, k, cycle.Metadata{Sequence: seq}) if err != nil { - logger.Errorf("Error running fetcher for key %s: %v", k, err) + logger.Errorf(ctx, "Error running fetcher for key %s: %v", k, err) } }(key) } diff --git a/internal/resources/fetching/registry/registry.go b/internal/resources/fetching/registry/registry.go index d0257b287f..27e17df14e 100644 --- a/internal/resources/fetching/registry/registry.go +++ b/internal/resources/fetching/registry/registry.go @@ -119,7 +119,7 @@ func (r *registry) Update(ctx context.Context) { } fm, err := r.updater(ctx) if err != nil { - r.log.Errorf("Failed to update registry: %v", err) + r.log.Errorf(ctx, "Failed to update registry: %v", err) return } r.reg = fm diff --git a/internal/resources/providers/aws_cis/logging/provider.go b/internal/resources/providers/aws_cis/logging/provider.go index 79d365af72..5204729479 100644 --- a/internal/resources/providers/aws_cis/logging/provider.go +++ b/internal/resources/providers/aws_cis/logging/provider.go @@ -52,17 +52,17 @@ func (p *Provider) DescribeTrails(ctx 
context.Context) ([]awslib.AwsResource, er } bucketPolicy, policyErr := p.s3Provider.GetBucketPolicy(ctx, info.Trail.S3BucketName, *info.Trail.HomeRegion) if policyErr != nil { - p.log.Errorf("Error getting bucket policy for bucket %s: %v", *info.Trail.S3BucketName, policyErr) + p.log.Errorf(ctx, "Error getting bucket policy for bucket %s: %v", *info.Trail.S3BucketName, policyErr) } aclGrants, aclErr := p.s3Provider.GetBucketACL(ctx, info.Trail.S3BucketName, *info.Trail.HomeRegion) if aclErr != nil { - p.log.Errorf("Error getting bucket ACL for bucket %s: %v", *info.Trail.S3BucketName, aclErr) + p.log.Errorf(ctx, "Error getting bucket ACL for bucket %s: %v", *info.Trail.S3BucketName, aclErr) } bucketLogging, loggingErr := p.s3Provider.GetBucketLogging(ctx, info.Trail.S3BucketName, *info.Trail.HomeRegion) if loggingErr != nil { - p.log.Errorf("Error getting bucket logging for bucket %s: %v", *info.Trail.S3BucketName, loggingErr) + p.log.Errorf(ctx, "Error getting bucket logging for bucket %s: %v", *info.Trail.S3BucketName, loggingErr) } enrichedTrails = append(enrichedTrails, EnrichedTrail{ diff --git a/internal/resources/providers/aws_cis/monitoring/monitoring.go b/internal/resources/providers/aws_cis/monitoring/monitoring.go index 3a0076ab76..9bab32031a 100644 --- a/internal/resources/providers/aws_cis/monitoring/monitoring.go +++ b/internal/resources/providers/aws_cis/monitoring/monitoring.go @@ -98,11 +98,11 @@ func (p *Provider) AggregateResources(ctx context.Context) (*Resource, error) { } metrics, err := p.Cloudwatchlogs.DescribeMetricFilters(ctx, info.Trail.HomeRegion, logGroup) if err != nil { - p.Log.Errorf("failed to describe metric filters for cloudwatchlog log group arn %s: %v", *info.Trail.CloudWatchLogsLogGroupArn, err) + p.Log.Errorf(ctx, "failed to describe metric filters for cloudwatchlog log group arn %s: %v", *info.Trail.CloudWatchLogsLogGroupArn, err) continue } - parsedMetrics := p.parserMetrics(metrics) + parsedMetrics := p.parserMetrics(ctx, 
metrics) names := filterNamesFromMetrics(metrics) if len(names) == 0 { @@ -117,7 +117,7 @@ func (p *Provider) AggregateResources(ctx context.Context) (*Resource, error) { for _, name := range names { alarms, err := p.Cloudwatch.DescribeAlarms(ctx, info.Trail.HomeRegion, []string{name}) if err != nil { - p.Log.Errorf("failed to describe alarms for cloudwatch filter %v: %v", names, err) + p.Log.Errorf(ctx, "failed to describe alarms for cloudwatch filter %v: %v", names, err) continue } topics := p.getSubscriptionForAlarms(ctx, info.Trail.HomeRegion, alarms) @@ -133,7 +133,7 @@ func (p *Provider) AggregateResources(ctx context.Context) (*Resource, error) { return &Resource{Items: items}, nil } -func (p *Provider) parserMetrics(metrics []cloudwatchlogs_types.MetricFilter) []MetricFilter { +func (p *Provider) parserMetrics(ctx context.Context, metrics []cloudwatchlogs_types.MetricFilter) []MetricFilter { parsedMetrics := make([]MetricFilter, 0, len(metrics)) for _, m := range metrics { if m.FilterPattern == nil { @@ -145,7 +145,8 @@ func (p *Provider) parserMetrics(metrics []cloudwatchlogs_types.MetricFilter) [] exp, err := parseFilterPattern(*m.FilterPattern) if err != nil { - p.Log.Errorf("failed to parse metric filter pattern: %v (pattern: %s)", err, *m.FilterPattern) + // FIXME: This should be a context from the function signature. 
+ p.Log.Errorf(ctx, "failed to parse metric filter pattern: %v (pattern: %s)", err, *m.FilterPattern) parsedMetrics = append(parsedMetrics, MetricFilter{ MetricFilter: m, }) @@ -166,7 +167,7 @@ func (p *Provider) getSubscriptionForAlarms(ctx context.Context, region *string, for _, action := range alarm.AlarmActions { subscriptions, err := p.Sns.ListSubscriptionsByTopic(ctx, pointers.Deref(region), action) if err != nil { - p.Log.Errorf("failed to list subscriptions for topic %s: %v", action, err) + p.Log.Errorf(ctx, "failed to list subscriptions for topic %s: %v", action, err) continue } for _, topic := range subscriptions { diff --git a/internal/resources/providers/awslib/account_provider.go b/internal/resources/providers/awslib/account_provider.go index 8ccb8cd67d..815bfa2994 100644 --- a/internal/resources/providers/awslib/account_provider.go +++ b/internal/resources/providers/awslib/account_provider.go @@ -64,7 +64,7 @@ func listAccounts(ctx context.Context, log *clog.Logger, client organizationsAPI organization, err := getOUInfoForAccount(ctx, client, organizationIdToName, account.Id) if err != nil { - log.Errorf("failed to get organizational unit info for account %s: %v", *account.Id, err) + log.Errorf(ctx, "failed to get organizational unit info for account %s: %v", *account.Id, err) } accounts = append(accounts, cloud.Identity{ Provider: "aws", diff --git a/internal/resources/providers/awslib/all_region_selector.go b/internal/resources/providers/awslib/all_region_selector.go index 14bad6e1ed..5ef32be459 100644 --- a/internal/resources/providers/awslib/all_region_selector.go +++ b/internal/resources/providers/awslib/all_region_selector.go @@ -47,7 +47,7 @@ func (s *allRegionSelector) Regions(ctx context.Context, cfg aws.Config) ([]stri output, err := s.client.DescribeRegions(ctx, nil) if err != nil { - log.Errorf("Failed getting available regions: %v", err) + log.Errorf(ctx, "Failed getting available regions: %v", err) return nil, err } diff --git 
a/internal/resources/providers/awslib/cached_region_selector.go b/internal/resources/providers/awslib/cached_region_selector.go index 07a8feae7d..a7a870adc9 100644 --- a/internal/resources/providers/awslib/cached_region_selector.go +++ b/internal/resources/providers/awslib/cached_region_selector.go @@ -94,12 +94,12 @@ func (s *cachedRegionSelector) Regions(ctx context.Context, cfg aws.Config) ([]s var output []string output, err := s.client.Regions(ctx, cfg) if err != nil { - log.Errorf("Failed getting regions: %v", err) + log.Errorf(ctx, "Failed getting regions: %v", err) return nil, err } if !s.setCache(output) { - log.Errorf("Failed setting regions cache") + log.Errorf(ctx, "Failed setting regions cache") } return output, nil } diff --git a/internal/resources/providers/awslib/cloudtrail/provider.go b/internal/resources/providers/awslib/cloudtrail/provider.go index 02b2102a2b..6dc8d78dc9 100644 --- a/internal/resources/providers/awslib/cloudtrail/provider.go +++ b/internal/resources/providers/awslib/cloudtrail/provider.go @@ -57,12 +57,12 @@ func (p Provider) DescribeTrails(ctx context.Context) ([]TrailInfo, error) { } status, err := p.getTrailStatus(ctx, trail) if err != nil { - p.log.Errorf("failed to get trail status %s %v", *trail.TrailARN, err.Error()) + p.log.Errorf(ctx, "failed to get trail status %s %v", *trail.TrailARN, err.Error()) } selectors, err := p.getEventSelectors(ctx, trail) if err != nil { - p.log.Errorf("failed to get trail event selector %s %v", *trail.TrailARN, err.Error()) + p.log.Errorf(ctx, "failed to get trail event selector %s %v", *trail.TrailARN, err.Error()) } result = append(result, TrailInfo{ diff --git a/internal/resources/providers/awslib/configservice/provider.go b/internal/resources/providers/awslib/configservice/provider.go index 04a42f0e59..d773ac3a19 100644 --- a/internal/resources/providers/awslib/configservice/provider.go +++ b/internal/resources/providers/awslib/configservice/provider.go @@ -29,7 +29,7 @@ func (p 
*Provider) DescribeConfigRecorders(ctx context.Context) ([]awslib.AwsRes configs, err := awslib.MultiRegionFetch(ctx, p.clients, func(ctx context.Context, region string, c Client) (awslib.AwsResource, error) { recorderList, err := c.DescribeConfigurationRecorders(ctx, nil) if err != nil { - p.log.Errorf("Error fetching AWS Config recorders: %v", err) + p.log.Errorf(ctx, "Error fetching AWS Config recorders: %v", err) return nil, err } diff --git a/internal/resources/providers/awslib/current_region_selector.go b/internal/resources/providers/awslib/current_region_selector.go index d58ee76ed8..c3c3d29fb1 100644 --- a/internal/resources/providers/awslib/current_region_selector.go +++ b/internal/resources/providers/awslib/current_region_selector.go @@ -39,7 +39,7 @@ func (s *currentRegionSelector) Regions(ctx context.Context, cfg aws.Config) ([] metadata, err := s.client.GetMetadata(ctx, cfg) if err != nil { - log.Errorf("Failed getting current region: %v", err) + log.Errorf(ctx, "Failed getting current region: %v", err) return nil, err } diff --git a/internal/resources/providers/awslib/ec2/provider.go b/internal/resources/providers/awslib/ec2/provider.go index 31d0bb7d05..e7a07c0cc7 100644 --- a/internal/resources/providers/awslib/ec2/provider.go +++ b/internal/resources/providers/awslib/ec2/provider.go @@ -362,7 +362,7 @@ func (p *Provider) IterOwnedSnapshots(ctx context.Context, before time.Time) ite return nil, nil }) if err != nil { - p.log.Errorf("Error listing owned snapshots: %v", err) + p.log.Errorf(ctx, "Error listing owned snapshots: %v", err) } } } @@ -492,7 +492,7 @@ func (p *Provider) DescribeVolumes(ctx context.Context, instances []*Ec2Instance var result []*Volume for _, vol := range allVolumes { if len(vol.Attachments) != 1 { - p.log.Errorf("Volume %s has %d attachments", *vol.VolumeId, len(vol.Attachments)) + p.log.Errorf(ctx, "Volume %s has %d attachments", *vol.VolumeId, len(vol.Attachments)) continue } @@ -564,7 +564,7 @@ func (p *Provider) 
DescribeVpcs(ctx context.Context) ([]awslib.AwsResource, erro }, }}) if err != nil { - p.log.Errorf("Error fetching flow logs for VPC %s: %v", *vpc.VpcId, err.Error()) + p.log.Errorf(ctx, "Error fetching flow logs for VPC %s: %v", *vpc.VpcId, err.Error()) continue } diff --git a/internal/resources/providers/awslib/elb_v2/provider_v2.go b/internal/resources/providers/awslib/elb_v2/provider_v2.go index d7e8699df2..e719b85c6e 100644 --- a/internal/resources/providers/awslib/elb_v2/provider_v2.go +++ b/internal/resources/providers/awslib/elb_v2/provider_v2.go @@ -54,7 +54,7 @@ func (p *Provider) DescribeLoadBalancers(ctx context.Context) ([]awslib.AwsResou } listeners, err := p.describeListeners(ctx, region, loadBalancer.GetResourceArn()) if err != nil { - p.log.Errorf("Error fetching listeners for %s: %v", loadBalancer.GetResourceArn(), err) + p.log.Errorf(ctx, "Error fetching listeners for %s: %v", loadBalancer.GetResourceArn(), err) } else { loadBalancer.Listeners = listeners } diff --git a/internal/resources/providers/awslib/iam/policy.go b/internal/resources/providers/awslib/iam/policy.go index f406ea220a..9cec7eb17c 100644 --- a/internal/resources/providers/awslib/iam/policy.go +++ b/internal/resources/providers/awslib/iam/policy.go @@ -223,7 +223,7 @@ func (p Provider) listInlinePolicies(ctx context.Context, identity *string) ([]P UserName: identity, }) if err != nil { - p.log.Errorf("fail to get inline policy for user: %s, policy name: %s", *identity, policyNames[i]) + p.log.Errorf(ctx, "fail to get inline policy for user: %s, policy name: %s", *identity, policyNames[i]) policies = append(policies, PolicyDocument{PolicyName: policyNames[i]}) continue } diff --git a/internal/resources/providers/awslib/iam/role_policy.go b/internal/resources/providers/awslib/iam/role_policy.go index 65d683c92e..a5c0f4acfe 100644 --- a/internal/resources/providers/awslib/iam/role_policy.go +++ b/internal/resources/providers/awslib/iam/role_policy.go @@ -41,7 +41,7 @@ func (p 
Provider) GetIAMRolePermissions(ctx context.Context, roleName string) ([ policy, err := p.client.GetRolePolicy(ctx, input) if err != nil { - p.log.Errorf("Failed to get policy %s: %v", *policyId.PolicyName, err) + p.log.Errorf(ctx, "Failed to get policy %s: %v", *policyId.PolicyName, err) continue } diff --git a/internal/resources/providers/awslib/iam/root_account.go b/internal/resources/providers/awslib/iam/root_account.go index a2400bf23d..67483b3c7e 100644 --- a/internal/resources/providers/awslib/iam/root_account.go +++ b/internal/resources/providers/awslib/iam/root_account.go @@ -27,7 +27,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/iam/types" ) -func (p Provider) getRootAccountUser(rootAccount *CredentialReport) *types.User { +func (p Provider) getRootAccountUser(ctx context.Context, rootAccount *CredentialReport) *types.User { if rootAccount == nil { p.log.Error("no root account entry was provided") return nil @@ -35,7 +35,7 @@ func (p Provider) getRootAccountUser(rootAccount *CredentialReport) *types.User rootDate, err := time.Parse(time.RFC3339, rootAccount.UserCreation) if err != nil { - p.log.Errorf("fail to parse root account user creation, error: %v", err) + p.log.Errorf(ctx, "fail to parse root account user creation, error: %v", err) return nil } @@ -45,7 +45,7 @@ func (p Provider) getRootAccountUser(rootAccount *CredentialReport) *types.User if rootAccount.PasswordLastUsed != "no_information" && rootAccount.PasswordLastUsed != "N/A" { pwdLastUsed, err = time.Parse(time.RFC3339, rootAccount.PasswordLastUsed) if err != nil { - p.log.Errorf("fail to parse root account password last used, error: %v", err) + p.log.Errorf(ctx, "fail to parse root account password last used, error: %v", err) return nil } } diff --git a/internal/resources/providers/awslib/iam/user.go b/internal/resources/providers/awslib/iam/user.go index 3305bedfd8..34617a8635 100644 --- a/internal/resources/providers/awslib/iam/user.go +++ 
b/internal/resources/providers/awslib/iam/user.go @@ -54,7 +54,7 @@ func (p Provider) GetUsers(ctx context.Context) ([]awslib.AwsResource, error) { return nil, err } - rootUser := p.getRootAccountUser(credentialReport[rootAccount]) + rootUser := p.getRootAccountUser(ctx, credentialReport[rootAccount]) if rootUser != nil { apiUsers = append(apiUsers, *rootUser) } @@ -80,23 +80,23 @@ func (p Provider) GetUsers(ctx context.Context) ([]awslib.AwsResource, error) { mfaDevices, err := p.getMFADevices(ctx, apiUser, userAccount) if err != nil { - p.log.Errorf("fail to list mfa device for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to list mfa device for user: %s, error: %v", username, err) } pwdEnabled, err := isPasswordEnabled(userAccount) if err != nil { - p.log.Errorf("fail to parse PasswordEnabled for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to parse PasswordEnabled for user: %s, error: %v", username, err) pwdEnabled = false } inlinePolicies, err := p.listInlinePolicies(ctx, apiUser.UserName) if err != nil && !isRootUser(username) { - p.log.Errorf("fail to list inline policies for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to list inline policies for user: %s, error: %v", username, err) } attachedPolicies, err := p.listAttachedPolicies(ctx, apiUser.UserName) if err != nil && !isRootUser(username) { - p.log.Errorf("fail to list attached policies for user: %s, error: %v", username, err) + p.log.Errorf(ctx, "fail to list attached policies for user: %s, error: %v", username, err) } users = append(users, User{ diff --git a/internal/resources/providers/awslib/multi_region.go b/internal/resources/providers/awslib/multi_region.go index 4b2adfb3ba..07ef6c7b55 100644 --- a/internal/resources/providers/awslib/multi_region.go +++ b/internal/resources/providers/awslib/multi_region.go @@ -53,7 +53,7 @@ func (w *MultiRegionClientFactory[T]) NewMultiRegionClients(ctx context.Context, clientsMap := make(map[string]T, 0) 
regionList, err := selector.Regions(ctx, cfg) if err != nil { - log.Errorf("Region '%s' selected after failure to retrieve aws regions: %v", cfg.Region, err) + log.Errorf(ctx, "Region '%s' selected after failure to retrieve aws regions: %v", cfg.Region, err) regionList = []string{cfg.Region} } for _, region := range regionList { diff --git a/internal/resources/providers/awslib/rds/provider.go b/internal/resources/providers/awslib/rds/provider.go index f1d133d6e5..7c0c90caa8 100644 --- a/internal/resources/providers/awslib/rds/provider.go +++ b/internal/resources/providers/awslib/rds/provider.go @@ -52,7 +52,7 @@ func (p Provider) DescribeDBInstances(ctx context.Context) ([]awslib.AwsResource for { output, err := c.DescribeDBInstances(ctx, dbInstancesInput) if err != nil { - p.log.Errorf("Could not describe DB instances. Error: %v", err) + p.log.Errorf(ctx, "Could not describe DB instances. Error: %v", err) return result, err } @@ -89,7 +89,7 @@ func (p Provider) getDBInstanceSubnets(ctx context.Context, region string, dbIns resultSubnet := Subnet{ID: *subnet.SubnetIdentifier, RouteTable: nil} routeTableForSubnet, err := p.ec2.GetRouteTableForSubnet(ctx, region, *subnet.SubnetIdentifier, *dbInstance.DBSubnetGroup.VpcId) if err != nil { - p.log.Errorf("Could not get route table for subnet %s of DB %s. Error: %v", *subnet.SubnetIdentifier, *dbInstance.DBInstanceIdentifier, err) + p.log.Errorf(ctx, "Could not get route table for subnet %s of DB %s. 
Error: %v", *subnet.SubnetIdentifier, *dbInstance.DBInstanceIdentifier, err) } else { var routes []Route for _, route := range routeTableForSubnet.Routes { diff --git a/internal/resources/providers/awslib/s3/provider.go b/internal/resources/providers/awslib/s3/provider.go index 4d176bafd5..6f871a8c90 100644 --- a/internal/resources/providers/awslib/s3/provider.go +++ b/internal/resources/providers/awslib/s3/provider.go @@ -65,7 +65,7 @@ func (p Provider) DescribeBuckets(ctx context.Context) ([]awslib.AwsResource, er } clientBuckets, err := defaultClient.ListBuckets(ctx, &s3Client.ListBucketsInput{}) if err != nil { - p.log.Errorf("Could not list s3 buckets: %v", err) + p.log.Errorf(ctx, "Could not list s3 buckets: %v", err) return nil, err } @@ -77,7 +77,7 @@ func (p Provider) DescribeBuckets(ctx context.Context) ([]awslib.AwsResource, er accountPublicAccessBlockConfig, accountPublicAccessBlockErr := p.getAccountPublicAccessBlock(ctx) if accountPublicAccessBlockErr != nil { - p.log.Errorf("Could not get account public access block configuration. Err: %v", accountPublicAccessBlockErr) + p.log.Errorf(ctx, "Could not get account public access block configuration. Err: %v", accountPublicAccessBlockErr) } bucketsRegionsMapping := p.getBucketsRegionMapping(ctx, clientBuckets.Buckets) @@ -87,22 +87,22 @@ func (p Provider) DescribeBuckets(ctx context.Context) ([]awslib.AwsResource, er // of the flow, so we should keep describing the bucket even if getting these objects fails. sseAlgorithm, encryptionErr := p.getBucketEncryptionAlgorithm(ctx, bucket.Name, region) if encryptionErr != nil { - p.log.Errorf("Could not get encryption for bucket %s. Error: %v", *bucket.Name, encryptionErr) + p.log.Errorf(ctx, "Could not get encryption for bucket %s. Error: %v", *bucket.Name, encryptionErr) } bucketPolicy, policyErr := p.GetBucketPolicy(ctx, bucket.Name, region) if policyErr != nil { - p.log.Errorf("Could not get bucket policy for bucket %s. 
Error: %v", *bucket.Name, policyErr) + p.log.Errorf(ctx, "Could not get bucket policy for bucket %s. Error: %v", *bucket.Name, policyErr) } bucketVersioning, versioningErr := p.getBucketVersioning(ctx, bucket.Name, region) if versioningErr != nil { - p.log.Errorf("Could not get bucket versioning for bucket %s. Err: %v", *bucket.Name, versioningErr) + p.log.Errorf(ctx, "Could not get bucket versioning for bucket %s. Err: %v", *bucket.Name, versioningErr) } publicAccessBlockConfiguration, publicAccessBlockErr := p.getPublicAccessBlock(ctx, bucket.Name, region) if publicAccessBlockErr != nil { - p.log.Errorf("Could not get public access block configuration for bucket %s. Err: %v", *bucket.Name, publicAccessBlockErr) + p.log.Errorf(ctx, "Could not get public access block configuration for bucket %s. Err: %v", *bucket.Name, publicAccessBlockErr) } result = append(result, BucketDescription{ @@ -191,7 +191,7 @@ func (p Provider) getBucketsRegionMapping(ctx context.Context, buckets []types.B // If we could not get the Region for a bucket, additional API calls for resources will probably fail, we should // not describe this bucket. if regionErr != nil { - p.log.Errorf("Could not get bucket location for bucket %s. Not describing this bucket. Error: %v", *clientBucket.Name, regionErr) + p.log.Errorf(ctx, "Could not get bucket location for bucket %s. Not describing this bucket. Error: %v", *clientBucket.Name, regionErr) continue } diff --git a/internal/resources/providers/awslib/sns/provider.go b/internal/resources/providers/awslib/sns/provider.go index e6d1c5c2ad..f17b440794 100644 --- a/internal/resources/providers/awslib/sns/provider.go +++ b/internal/resources/providers/awslib/sns/provider.go @@ -48,7 +48,7 @@ func (p *Provider) ListTopics(ctx context.Context) ([]types.Topic, error) { for { output, err := c.ListTopics(ctx, input) if err != nil { - p.log.Errorf("Could not list SNS Topics. Error: %s", err) + p.log.Errorf(ctx, "Could not list SNS Topics. 
Error: %s", err) return nil, err } all = append(all, output.Topics...) @@ -93,7 +93,7 @@ func (p *Provider) ListTopicsWithSubscriptions(ctx context.Context) ([]awslib.Aw for { output, err := c.ListTopics(ctx, input) if err != nil { - p.log.Errorf("Could not list SNS Topics. Error: %s", err) + p.log.Errorf(ctx, "Could not list SNS Topics. Error: %s", err) return nil, err } @@ -104,7 +104,7 @@ func (p *Provider) ListTopicsWithSubscriptions(ctx context.Context) ([]awslib.Aw } subscriptions, err := p.ListSubscriptionsByTopic(ctx, region, topicInfo.GetResourceArn()) if err != nil { - p.log.Errorf("Could not list SNS Subscriptions for Topic %q. Error: %s", topicInfo.GetResourceArn(), err) + p.log.Errorf(ctx, "Could not list SNS Subscriptions for Topic %q. Error: %s", topicInfo.GetResourceArn(), err) } else { topicInfo.Subscriptions = subscriptions } diff --git a/internal/resources/providers/azurelib/inventory/storage_provider.go b/internal/resources/providers/azurelib/inventory/storage_provider.go index 2f9da7e6f2..d94279d8c2 100644 --- a/internal/resources/providers/azurelib/inventory/storage_provider.go +++ b/internal/resources/providers/azurelib/inventory/storage_provider.go @@ -155,12 +155,12 @@ func (p *storageAccountProvider) ListStorageAccounts(ctx context.Context, storag for _, saID := range storageAccountsSubscriptionsIds { res, err := p.client.AssetAccountStorage(ctx, saID, nil) if err != nil { - p.log.Errorf("error while fetching storage accounts for subscriptionId: %s, error: %v", saID, err) + p.log.Errorf(ctx, "error while fetching storage accounts for subscriptionId: %s, error: %v", saID, err) continue } storageAccountsAssets, err := transformStorageAccounts(res, saID) if err != nil { - p.log.Errorf("error while transforming storage for subscriptionId: %s, error: %v", saID, err) + p.log.Errorf(ctx, "error while transforming storage for subscriptionId: %s, error: %v", saID, err) continue } assets = append(assets, storageAccountsAssets...) 
@@ -256,7 +256,7 @@ func (p *storageAccountProvider) ListStorageAccountFileServices(ctx context.Cont for _, item := range response.Value { properties, err := maps.AsMapStringAny(item.FileServiceProperties) if err != nil { - p.log.Errorf("error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) } assets = append(assets, AzureAsset{ @@ -288,7 +288,7 @@ func (p *storageAccountProvider) ListStorageAccountFileShares(ctx context.Contex fileShares, err := transformFileShares(responses, sa) if err != nil { - p.log.Errorf("error while transforming azure file share for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure file share for storage accounts %s: %v", sa.Id, err) } assets = append(assets, fileShares...) @@ -307,7 +307,7 @@ func (p *storageAccountProvider) ListStorageAccountQueues(ctx context.Context, s queues, err := transformQueues(responses, sa) if err != nil { - p.log.Errorf("error while transforming azure queues for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure queues for storage accounts %s: %v", sa.Id, err) } assets = append(assets, queues...) 
@@ -327,7 +327,7 @@ func (p *storageAccountProvider) ListStorageAccountQueueServices(ctx context.Con for _, item := range response.Value { properties, err := maps.AsMapStringAny(item.QueueServiceProperties) if err != nil { - p.log.Errorf("error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure queue services for storage accounts %s: %v", sa.Id, err) } assets = append(assets, AzureAsset{ @@ -358,7 +358,7 @@ func (p *storageAccountProvider) ListStorageAccountTables(ctx context.Context, s tables, err := transformTables(responses, sa) if err != nil { - p.log.Errorf("error while transforming azure tables for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure tables for storage accounts %s: %v", sa.Id, err) } assets = append(assets, tables...) @@ -377,7 +377,7 @@ func (p *storageAccountProvider) ListStorageAccountTableServices(ctx context.Con for _, item := range response.Value { properties, err := maps.AsMapStringAny(item.TableServiceProperties) if err != nil { - p.log.Errorf("error while transforming azure table services for storage accounts %s: %v", sa.Id, err) + p.log.Errorf(ctx, "error while transforming azure table services for storage accounts %s: %v", sa.Id, err) } assets = append(assets, AzureAsset{ diff --git a/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go b/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go index 3bafd14d2f..e9e07a9df0 100644 --- a/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go +++ b/internal/resources/providers/gcplib/inventory/grpc_rate_limiter.go @@ -97,7 +97,7 @@ func (rl *AssetsInventoryRateLimiter) Wait(ctx context.Context, method string, r if limiter != nil { err := limiter.Wait(ctx) if err != nil { - rl.log.Errorf("Failed to wait for project quota on method: %s, request: %v, error: %v", method, req, err) + rl.log.Errorf(ctx, "Failed to wait for project quota 
on method: %s, request: %v, error: %v", method, req, err) } } } diff --git a/internal/resources/providers/gcplib/inventory/provider.go b/internal/resources/providers/gcplib/inventory/provider.go index 4422807019..21bcb44105 100644 --- a/internal/resources/providers/gcplib/inventory/provider.go +++ b/internal/resources/providers/gcplib/inventory/provider.go @@ -413,7 +413,7 @@ func (p *Provider) getAllAssets(ctx context.Context, out chan<- *ExtendedGcpAsse } if err != nil { - p.log.Errorf("Error fetching GCP %v of types: %v for %v: %v\n", req.ContentType, req.AssetTypes, req.Parent, err) + p.log.Errorf(ctx, "Error fetching GCP %v of types: %v for %v: %v\n", req.ContentType, req.AssetTypes, req.Parent, err) return } diff --git a/internal/resources/providers/gcplib/inventory/resource_manager.go b/internal/resources/providers/gcplib/inventory/resource_manager.go index 4dc0ae5569..de8159e6f1 100644 --- a/internal/resources/providers/gcplib/inventory/resource_manager.go +++ b/internal/resources/providers/gcplib/inventory/resource_manager.go @@ -55,7 +55,7 @@ func NewResourceManagerWrapper(ctx context.Context, log *clog.Logger, gcpConfig getProjectDisplayName: func(ctx context.Context, parent string) string { prj, err := crmService.Projects.Get(parent).Context(ctx).Do() if err != nil { - log.Errorf("error fetching GCP Project: %s, error: %s", parent, err) + log.Errorf(ctx, "error fetching GCP Project: %s, error: %s", parent, err) return "" } return prj.DisplayName @@ -63,7 +63,7 @@ func NewResourceManagerWrapper(ctx context.Context, log *clog.Logger, gcpConfig getOrganizationDisplayName: func(ctx context.Context, parent string) string { org, err := crmService.Organizations.Get(parent).Context(ctx).Do() if err != nil { - log.Errorf("error fetching GCP Org: %s, error: %s", parent, err) + log.Errorf(ctx, "error fetching GCP Org: %s, error: %s", parent, err) return "" } return org.DisplayName @@ -81,7 +81,7 @@ func (c *ResourceManagerWrapper) GetCloudMetadata(ctx 
context.Context, asset *as if valid { return cloudAccountMetadata } - c.log.Errorf("error casting cloud account metadata for key: %s", key) + c.log.Errorf(ctx, "error casting cloud account metadata for key: %s", key) } cloudAccountMetadata := c.getMetadata(ctx, orgId, projectId) c.accountMetadataCache.Store(key, cloudAccountMetadata) diff --git a/internal/resources/providers/msgraph/provider.go b/internal/resources/providers/msgraph/provider.go index e0d5f1fbd2..7a57282718 100644 --- a/internal/resources/providers/msgraph/provider.go +++ b/internal/resources/providers/msgraph/provider.go @@ -94,7 +94,7 @@ func (p *provider) ListServicePrincipals(ctx context.Context) ([]*models.Service return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Service Principals: %v", err) + p.log.Errorf(ctx, "error iterating over Service Principals: %v", err) } return items, nil } @@ -122,7 +122,7 @@ func (p *provider) ListDirectoryRoles(ctx context.Context) ([]*models.DirectoryR return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Directory Roles: %v", err) + p.log.Errorf(ctx, "error iterating over Directory Roles: %v", err) } return items, nil } @@ -150,7 +150,7 @@ func (p *provider) ListGroups(ctx context.Context) ([]*models.Group, error) { return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Groups: %v", err) + p.log.Errorf(ctx, "error iterating over Groups: %v", err) } return items, nil } @@ -178,7 +178,7 @@ func (p *provider) ListUsers(ctx context.Context) ([]*models.User, error) { return true // to continue the iteration }) if err != nil { - p.log.Errorf("error iterating over Users: %v", err) + p.log.Errorf(ctx, "error iterating over Users: %v", err) } return items, nil } diff --git a/internal/uniqueness/leaderelection.go b/internal/uniqueness/leaderelection.go index da0acf3b85..8b8fcc288c 100644 --- a/internal/uniqueness/leaderelection.go +++ 
b/internal/uniqueness/leaderelection.go @@ -74,13 +74,13 @@ func (m *LeaderelectionManager) Run(ctx context.Context) error { leConfig, err := m.buildConfig(newCtx) if err != nil { - m.log.Errorf("Fail building leader election config: %v", err) + m.log.Errorf(ctx, "Fail building leader election config: %v", err) return err } m.leader, err = le.NewLeaderElector(leConfig) if err != nil { - m.log.Errorf("Fail to create a new leader elector: %v", err) + m.log.Errorf(ctx, "Fail to create a new leader elector: %v", err) return err } diff --git a/internal/vulnerability/events_creator.go b/internal/vulnerability/events_creator.go index 1f57416e0c..d88bd755d1 100644 --- a/internal/vulnerability/events_creator.go +++ b/internal/vulnerability/events_creator.go @@ -203,7 +203,7 @@ func (e EventsCreator) CreateEvents(ctx context.Context, scanResults chan []Resu events := make([]beat.Event, 0, len(data)) for _, res := range data { - events = append(events, e.generateEvent(res.reportResult, res.vulnerability, res.snapshot.Instance, res.seq)) + events = append(events, e.generateEvent(ctx, res.reportResult, res.vulnerability, res.snapshot.Instance, res.seq)) } select { @@ -220,7 +220,7 @@ func (e EventsCreator) GetChan() chan []beat.Event { return e.ch } -func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTypes.DetectedVulnerability, instance ec2.Ec2Instance, seq time.Time) beat.Event { +func (e EventsCreator) generateEvent(ctx context.Context, reportResult trivyTypes.Result, vul trivyTypes.DetectedVulnerability, instance ec2.Ec2Instance, seq time.Time) beat.Event { timestamp := time.Now().UTC() sequence := seq.Unix() @@ -251,7 +251,7 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy // TODO: Should we fail the event if we can't enrich the cloud section? 
if err != nil { - e.log.Errorf("failed to enrich cloud section: %v", err) + e.log.Errorf(ctx, "failed to enrich cloud section: %v", err) } hostSec, err := convertStructToMapStr(HostSection{ @@ -269,7 +269,7 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy // TODO: Should we fail the event if we can't enrich the host section? if err != nil { - e.log.Errorf("failed to enrich host section: %v", err) + e.log.Errorf(ctx, "failed to enrich host section: %v", err) } networkSec, err := convertStructToMapStr(NetworkSection{ @@ -280,7 +280,7 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy // TODO: Should we fail the event if we can't enrich the network section? if err != nil { - e.log.Errorf("failed to enrich network section: %v", err) + e.log.Errorf(ctx, "failed to enrich network section: %v", err) } event := beat.Event{ @@ -344,12 +344,12 @@ func (e EventsCreator) generateEvent(reportResult trivyTypes.Result, vul trivyTy err = e.cloudDataProvider.EnrichEvent(&event, fetching.ResourceMetadata{Region: instance.Region}) if err != nil { - e.log.Errorf("failed to enrich event with benchmark data provider: %v", err) + e.log.Errorf(ctx, "failed to enrich event with benchmark data provider: %v", err) } err = e.commonDataProvider.EnrichEvent(&event) if err != nil { - e.log.Errorf("failed to enrich event with global data provider: %v", err) + e.log.Errorf(ctx, "failed to enrich event with global data provider: %v", err) } return event diff --git a/internal/vulnerability/fetcher.go b/internal/vulnerability/fetcher.go index cdd275dd78..dd66db8d2b 100644 --- a/internal/vulnerability/fetcher.go +++ b/internal/vulnerability/fetcher.go @@ -51,14 +51,14 @@ func (f VulnerabilityFetcher) FetchInstances(ctx context.Context) error { f.log.Info("Starting VulnerabilityFetcher.FetchInstances") ins, err := f.provider.DescribeInstances(ctx) if err != nil { - f.log.Errorf("VulnerabilityFetcher.FetchInstances DescribeInstances 
failed: %v", err) + f.log.Errorf(ctx, "VulnerabilityFetcher.FetchInstances DescribeInstances failed: %v", err) return err } f.log.Infof("VulnerabilityFetcher.FetchInstances found %d results", len(ins)) err = f.attachRootVolumes(ctx, ins) if err != nil { - f.log.Errorf("VulnerabilityFetcher.FetchInstances attachRootVolumes failed: %v", err) + f.log.Errorf(ctx, "VulnerabilityFetcher.FetchInstances attachRootVolumes failed: %v", err) } else { f.sortByRootVolumeSize(ins) } diff --git a/internal/vulnerability/replicator.go b/internal/vulnerability/replicator.go index 342a9f25e8..7a17a88ea1 100644 --- a/internal/vulnerability/replicator.go +++ b/internal/vulnerability/replicator.go @@ -55,7 +55,7 @@ func (f VulnerabilityReplicator) SnapshotInstance(ctx context.Context, insCh cha } sp, err := f.manager.CreateSnapshots(ctx, data) if err != nil { - f.log.Errorf("VulnerabilityReplicator.SnapshotInstance.CreateSnapshots failed: %v", err) + f.log.Errorf(ctx, "VulnerabilityReplicator.SnapshotInstance.CreateSnapshots failed: %v", err) continue } diff --git a/internal/vulnerability/runner.go b/internal/vulnerability/runner.go index 67740e154e..834ef6f733 100644 --- a/internal/vulnerability/runner.go +++ b/internal/vulnerability/runner.go @@ -40,7 +40,7 @@ func NewVulnerabilityRunner(ctx context.Context, log *clog.Logger) (Vulnerabilit log.Debug("NewVulnerabilityRunner: New") if err := clearTrivyCache(ctx, log); err != nil { - log.Errorf("error during runner cache clearing %s", err.Error()) + log.Errorf(ctx, "error during runner cache clearing %s", err.Error()) } opts := flag.Options{ diff --git a/internal/vulnerability/scanner.go b/internal/vulnerability/scanner.go index 94540bcc4a..1537c3b5c9 100644 --- a/internal/vulnerability/scanner.go +++ b/internal/vulnerability/scanner.go @@ -99,11 +99,11 @@ func (f VulnerabilityScanner) ScanSnapshot(ctx context.Context, snapCh chan ec2. 
func (f VulnerabilityScanner) scan(ctx context.Context, snap ec2.EBSSnapshot) { f.log.Infof("Starting VulnerabilityScanner.scan, %s", snap.SnapshotId) - defer func() { + defer func(ctx context.Context) { if r := recover(); r != nil { - f.log.Errorf("vulnerability scanner recovered from panic: %v", r) + f.log.Errorf(ctx, "vulnerability scanner recovered from panic: %v", r) } - }() + }(ctx) o, err := os.CreateTemp("", "") if err != nil { @@ -158,7 +158,7 @@ func (f VulnerabilityScanner) scan(ctx context.Context, snap ec2.EBSSnapshot) { ) if err != nil { - f.log.Errorf("VulnerabilityScanner.scan.ScanVM, snapshotId: %s, instanceId: %s, error: %v", snap.SnapshotId, *snap.Instance.InstanceId, err) + f.log.Errorf(ctx, "VulnerabilityScanner.scan.ScanVM, snapshotId: %s, instanceId: %s, error: %v", snap.SnapshotId, *snap.Instance.InstanceId, err) return } diff --git a/internal/vulnerability/snapshot.go b/internal/vulnerability/snapshot.go index 01c78ec114..d1df2a043f 100644 --- a/internal/vulnerability/snapshot.go +++ b/internal/vulnerability/snapshot.go @@ -127,7 +127,7 @@ func (s *SnapshotManager) delete(ctx context.Context, snapshot ec2.EBSSnapshot, s.logger.Infof("VulnerabilityScanner.manager.%s %s", message, snapshot.SnapshotId) err := s.provider.DeleteSnapshot(ctx, snapshot) if err != nil { - s.logger.Errorf("VulnerabilityScanner.manager.%s %s error: %s", message, snapshot.SnapshotId, err) + s.logger.Errorf(ctx, "VulnerabilityScanner.manager.%s %s error: %s", message, snapshot.SnapshotId, err) } } diff --git a/internal/vulnerability/verifier.go b/internal/vulnerability/verifier.go index 1c9926339a..d5b5f856e5 100644 --- a/internal/vulnerability/verifier.go +++ b/internal/vulnerability/verifier.go @@ -102,7 +102,7 @@ func (f VulnerabilityVerifier) verify(ctx context.Context, snap ec2.EBSSnapshot) case <-time.After(f.interval): sp, err := f.provider.DescribeSnapshots(ctx, snap) if err != nil { - f.log.Errorf("VulnerabilityVerifier.verify.DescribeSnapshots failed: %v", 
err) + f.log.Errorf(ctx, "VulnerabilityVerifier.verify.DescribeSnapshots failed: %v", err) continue } // TODO: Add a layer of "smart" cache to avoid checking and sending the same snapshot diff --git a/internal/vulnerability/worker.go b/internal/vulnerability/worker.go index 443ee6998a..84921d166d 100644 --- a/internal/vulnerability/worker.go +++ b/internal/vulnerability/worker.go @@ -145,7 +145,7 @@ func (f *VulnerabilityWorker) Run(ctx context.Context) { defer wg.Done() err := job.fn(ctx) if err != nil { - f.log.Errorf("VulnerabilityWorker.work job %s failed: %s", job.name, err.Error()) + f.log.Errorf(ctx, "VulnerabilityWorker.work job %s failed: %s", job.name, err.Error()) } else { f.log.Infof("VulnerabilityWorker.work job %s finished", job.name) } From 5a48c8b99daaab5cf690de6f61940d872e49db31 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 12:25:17 +0200 Subject: [PATCH 02/12] Error --- internal/flavors/vulnerability.go | 2 +- internal/infra/clog/clog.go | 7 ++++--- internal/pipeline/pipeline.go | 2 +- .../resources/fetching/fetchers/k8s/process_fetcher.go | 4 ++-- .../providers/awslib/configservice/provider.go | 2 +- .../resources/providers/awslib/iam/root_account.go | 2 +- internal/resources/providers/awslib/kms/provider.go | 4 ++-- internal/vulnerability/runner.go | 2 +- internal/vulnerability/scanner.go | 10 +++++----- 9 files changed, 18 insertions(+), 17 deletions(-) diff --git a/internal/flavors/vulnerability.go b/internal/flavors/vulnerability.go index 30fe0666f9..410f705047 100644 --- a/internal/flavors/vulnerability.go +++ b/internal/flavors/vulnerability.go @@ -129,7 +129,7 @@ func (bt *vulnerability) Run(*beat.Beat) error { func (bt *vulnerability) runIteration() error { worker, err := vuln.NewVulnerabilityWorker(bt.ctx, bt.log, bt.config, bt.bdp, bt.cdp) if err != nil { - bt.log.Error("vulnerability.runIteration worker creation failed") + bt.log.Error(context.TODO(), "vulnerability.runIteration worker creation failed") 
bt.cancel() return err } diff --git a/internal/infra/clog/clog.go b/internal/infra/clog/clog.go index 7c7fde886f..9b3dfd2c4c 100644 --- a/internal/infra/clog/clog.go +++ b/internal/infra/clog/clog.go @@ -41,13 +41,14 @@ func (l *Logger) Errorf(ctx context.Context, template string, args ...any) { l.WithSpanContext(spanCtx).Logger.Errorf(template, args...) } -func (l *Logger) Error(args ...any) { +func (l *Logger) Error(ctx context.Context, args ...any) { + spanCtx := trace.SpanContextFromContext(ctx) // Downgrade context.Canceled errors to warning level if hasErrorType(context.Canceled, args...) { - l.Warn(args...) + l.WithSpanContext(spanCtx).Warn(args...) return } - l.Logger.Error(args...) + l.WithSpanContext(spanCtx).Logger.Error(args...) } func (l *Logger) Named(name string) *Logger { diff --git a/internal/pipeline/pipeline.go b/internal/pipeline/pipeline.go index 531131c4f6..aaca295380 100644 --- a/internal/pipeline/pipeline.go +++ b/internal/pipeline/pipeline.go @@ -38,7 +38,7 @@ func Step[In any, Out any](ctx context.Context, log *clog.Logger, inputChannel c for s := range inputChannel { val, err := fn(ctx, s) if err != nil { - log.Error(err) + log.Error(ctx, err) continue } outputCh <- val diff --git a/internal/resources/fetching/fetchers/k8s/process_fetcher.go b/internal/resources/fetching/fetchers/k8s/process_fetcher.go index ff6f6d9d2c..2fdbcf59e5 100644 --- a/internal/resources/fetching/fetchers/k8s/process_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/process_fetcher.go @@ -150,7 +150,7 @@ func (f *ProcessesFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metada // Get the full command line name and not the /proc/pid/status one which might be silently truncated. 
cmd, err := proc.ReadCmdLineFS(f.Fs, p) if err != nil { - f.log.Error("error while reading /proc//cmdline for process %s: %s", p, err.Error()) + f.log.Error(ctx, "error while reading /proc//cmdline for process %s: %s", p, err.Error()) continue } name := extractCommandName(cmd) @@ -201,7 +201,7 @@ func (f *ProcessesFetcher) createProcCommonData(ctx context.Context, stat proc.P sysUptime, err := proc.ReadUptimeFS(f.Fs) if err != nil { - f.log.Error("couldn't read system boot time", err) + f.log.Error(ctx, "couldn't read system boot time", err) } uptimeDate := time.Now().Add(-time.Duration(sysUptime) * time.Second) diff --git a/internal/resources/providers/awslib/configservice/provider.go b/internal/resources/providers/awslib/configservice/provider.go index d773ac3a19..8f2fc2ad61 100644 --- a/internal/resources/providers/awslib/configservice/provider.go +++ b/internal/resources/providers/awslib/configservice/provider.go @@ -40,7 +40,7 @@ func (p *Provider) DescribeConfigRecorders(ctx context.Context) ([]awslib.AwsRes }) if err != nil { - p.log.Error("Error fetching recorder status, recorder: %v , Error: %v:", recorder, err) + p.log.Error(ctx, "Error fetching recorder status, recorder: %v , Error: %v:", recorder, err) return nil, err } result = append(result, Recorder{ diff --git a/internal/resources/providers/awslib/iam/root_account.go b/internal/resources/providers/awslib/iam/root_account.go index 67483b3c7e..05c907f0ec 100644 --- a/internal/resources/providers/awslib/iam/root_account.go +++ b/internal/resources/providers/awslib/iam/root_account.go @@ -29,7 +29,7 @@ import ( func (p Provider) getRootAccountUser(ctx context.Context, rootAccount *CredentialReport) *types.User { if rootAccount == nil { - p.log.Error("no root account entry was provided") + p.log.Error(ctx, "no root account entry was provided") return nil } diff --git a/internal/resources/providers/awslib/kms/provider.go b/internal/resources/providers/awslib/kms/provider.go index e9a9f1135c..f595084f61 
100644 --- a/internal/resources/providers/awslib/kms/provider.go +++ b/internal/resources/providers/awslib/kms/provider.go @@ -62,7 +62,7 @@ func (p *Provider) DescribeSymmetricKeys(ctx context.Context) ([]awslib.AwsResou KeyId: keyEntry.KeyId, }) if err != nil { - p.log.Error(err.Error()) + p.log.Error(ctx, err.Error()) continue } @@ -78,7 +78,7 @@ func (p *Provider) DescribeSymmetricKeys(ctx context.Context) ([]awslib.AwsResou KeyId: keyEntry.KeyId, }) if err != nil { - p.log.Error(err.Error()) + p.log.Error(ctx, err.Error()) continue } diff --git a/internal/vulnerability/runner.go b/internal/vulnerability/runner.go index 834ef6f733..b67b94d162 100644 --- a/internal/vulnerability/runner.go +++ b/internal/vulnerability/runner.go @@ -69,7 +69,7 @@ func NewVulnerabilityRunner(ctx context.Context, log *clog.Logger) (Vulnerabilit runner, err := artifact.NewRunner(ctx, opts) if err != nil { - log.Error("NewVulnerabilityRunner: NewRunner error: ", err) + log.Error(ctx, "NewVulnerabilityRunner: NewRunner error: ", err) return VulnerabilityRunner{}, err } diff --git a/internal/vulnerability/scanner.go b/internal/vulnerability/scanner.go index 1537c3b5c9..e734d920ba 100644 --- a/internal/vulnerability/scanner.go +++ b/internal/vulnerability/scanner.go @@ -107,7 +107,7 @@ func (f VulnerabilityScanner) scan(ctx context.Context, snap ec2.EBSSnapshot) { o, err := os.CreateTemp("", "") if err != nil { - f.log.Error("VulnerabilityScanner.scan.TempFile error: ", err) + f.log.Error(ctx, "VulnerabilityScanner.scan.TempFile error: ", err) return } defer func(name string) { @@ -165,21 +165,21 @@ func (f VulnerabilityScanner) scan(ctx context.Context, snap ec2.EBSSnapshot) { f.log.Info("VulnerabilityScanner.scan.Filter") report, err = f.runner.Filter(ctx, opts, report) if err != nil { - f.log.Error("VulnerabilityScanner.scan.Filter error: ", err) + f.log.Error(ctx, "VulnerabilityScanner.scan.Filter error: ", err) return } f.log.Info("VulnerabilityScanner.scan.Report") err = 
f.runner.Report(ctx, opts, report) if err != nil { - f.log.Error("VulnerabilityScanner.scan.Report error: ", err) + f.log.Error(ctx, "VulnerabilityScanner.scan.Report error: ", err) return } f.log.Info("VulnerabilityScanner.scan.jsonFile") jsonFile, err := os.Open(o.Name()) if err != nil { - f.log.Error("VulnerabilityScanner.scan.jsonFile error: ", err) + f.log.Error(ctx, "VulnerabilityScanner.scan.jsonFile error: ", err) return } @@ -189,7 +189,7 @@ func (f VulnerabilityScanner) scan(ctx context.Context, snap ec2.EBSSnapshot) { var unmarshalledReport trivy_types.Report err = json.Unmarshal(byteValue, &unmarshalledReport) if err != nil { - f.log.Error("VulnerabilityScanner.scan.Unmarshal error: ", err) + f.log.Error(ctx, "VulnerabilityScanner.scan.Unmarshal error: ", err) return } From ab1f5e7a327b9535c40e1263a62931458cebaff4 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 12:25:18 +0200 Subject: [PATCH 03/12] opa --- internal/evaluator/logger.go | 5 +++-- internal/evaluator/logger_test.go | 10 +++++----- internal/evaluator/opa.go | 6 +++--- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/internal/evaluator/logger.go b/internal/evaluator/logger.go index d8b5a8929c..c02764ee39 100644 --- a/internal/evaluator/logger.go +++ b/internal/evaluator/logger.go @@ -22,6 +22,7 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/open-policy-agent/opa/v1/logging" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -86,12 +87,12 @@ func mapToArray(m map[string]any) []any { return ret } -func newLogger() logging.Logger { +func newLogger(ctx context.Context) logging.Logger { lvl := zap.NewAtomicLevelAt(logp.GetLevel()) log := clog.NewLogger("opa").WithOptions( zap.IncreaseLevel(lvl), zap.AddCallerSkip(1), - ) + ).WithSpanContext(trace.SpanContextFromContext(ctx)) return &logger{ log: log, diff --git a/internal/evaluator/logger_test.go b/internal/evaluator/logger_test.go index 51368afe7e..d68f6e2702 
100644 --- a/internal/evaluator/logger_test.go +++ b/internal/evaluator/logger_test.go @@ -46,7 +46,7 @@ func (s *LoggerTestSuite) SetupSuite() { } func (s *LoggerTestSuite) TestLogFormat() { - logger := newLogger() + logger := newLogger(s.T().Context()) logger.SetLevel(logging.Warn) logger.Warn("warn %s", "warn") logs := logp.ObserverLogs().TakeAll() @@ -56,7 +56,7 @@ func (s *LoggerTestSuite) TestLogFormat() { } func (s *LoggerTestSuite) TestLogFields() { - logger := newLogger() + logger := newLogger(s.T().Context()) logger.SetLevel(logging.Debug) logger = logger.WithFields(map[string]any{ "key": "val", @@ -72,7 +72,7 @@ func (s *LoggerTestSuite) TestLogFields() { } func (s *LoggerTestSuite) TestLogMultipleFields() { - logger := newLogger() + logger := newLogger(s.T().Context()) logger.SetLevel(logging.Debug) logger = logger.WithFields(map[string]any{ "key1": "val1", @@ -93,7 +93,7 @@ func (s *LoggerTestSuite) TestLogMultipleFields() { } func (s *LoggerTestSuite) TestLoggerGetLevel() { - logger := newLogger() + logger := newLogger(s.T().Context()) tests := []logging.Level{ logging.Debug, logging.Info, @@ -108,7 +108,7 @@ func (s *LoggerTestSuite) TestLoggerGetLevel() { } func (s *LoggerTestSuite) TestLoggerSetLevel() { - logger := newLogger() + logger := newLogger(s.T().Context()) logger.SetLevel(logging.Debug) logger.Debug("debug") logs := logp.ObserverLogs().TakeAll() diff --git a/internal/evaluator/opa.go b/internal/evaluator/opa.go index b5854ca67d..7cd30bb589 100644 --- a/internal/evaluator/opa.go +++ b/internal/evaluator/opa.go @@ -72,14 +72,14 @@ func NewOpaEvaluator(ctx context.Context, log *clog.Logger, cfg *config.Config) plugin := fmt.Sprintf(logPlugin, dlogger.PluginName, dlogger.PluginName) opaCfg := fmt.Sprintf(opaConfig, cfg.BundlePath, plugin) - decisonLogger := newLogger() - stdLogger := newLogger() + decisionLogger := newLogger(ctx) + stdLogger := newLogger(ctx) // create an instance of the OPA object opa, err := sdk.New(ctx, sdk.Options{ 
Config: bytes.NewReader([]byte(opaCfg)), Logger: stdLogger, - ConsoleLogger: decisonLogger, + ConsoleLogger: decisionLogger, Plugins: map[string]plugins.Factory{ dlogger.PluginName: &dlogger.Factory{}, }, From 166d79c15c1fa64e33254e9e0a05ef43057d240c Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 12:25:19 +0200 Subject: [PATCH 04/12] refactor(clog): Use t.Context in tests --- internal/infra/clog/clog_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/infra/clog/clog_test.go b/internal/infra/clog/clog_test.go index 8bd5a11f75..32cbc417b1 100644 --- a/internal/infra/clog/clog_test.go +++ b/internal/infra/clog/clog_test.go @@ -46,8 +46,8 @@ func (s *LoggerTestSuite) TestErrorfWithContextCanceled() { logger := NewLogger("test") err := context.Canceled - logger.Errorf(context.TODO(), "some error: %s", err) // error with context.Canceled - logger.Errorf(context.TODO(), "some error: %s", err.Error()) // error string with context Canceled + logger.Errorf(s.T().Context(), "some error: %s", err) // error with context.Canceled + logger.Errorf(s.T().Context(), "some error: %s", err.Error()) // error string with context Canceled logs := logp.ObserverLogs().TakeAll() if s.Len(logs, 2) { @@ -62,7 +62,7 @@ func (s *LoggerTestSuite) TestLogErrorfWithoutContextCanceled() { logger := NewLogger("test") err := errors.New("oops") - logger.Errorf(context.TODO(), "some error: %s", err) + logger.Errorf(s.T().Context(), "some error: %s", err) logs := logp.ObserverLogs().TakeAll() if s.Len(logs, 1) { From 47208f49e36d0d3fc555691bc3a9220178321d8c Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 12:25:20 +0200 Subject: [PATCH 05/12] refactor(launcher): Propagate context for logging --- internal/launcher/launcher.go | 27 ++++++++++++++++----------- internal/launcher/launcher_test.go | 20 ++++++++++---------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/internal/launcher/launcher.go 
b/internal/launcher/launcher.go index 8c38768b3b..f2066b729d 100644 --- a/internal/launcher/launcher.go +++ b/internal/launcher/launcher.go @@ -94,13 +94,16 @@ func (l *launcher) Run(b *beat.Beat) error { return err } + // Create a root context for the launcher's execution. + ctx := context.Background() + // Wait for Fleet-side reconfiguration only if beater is running in Agent-managed mode. if b.Manager.Enabled() { defer b.Manager.Stop() l.log.Infof("Waiting for initial reconfiguration from Fleet server...") - update, err := l.reconfigureWait(reconfigureWaitTimeout) + update, err := l.reconfigureWait(ctx, reconfigureWaitTimeout) if err != nil { - l.log.Errorf(context.TODO(), "Failed while waiting for the initial reconfiguration from Fleet server: %v", err) + l.log.Errorf(ctx, "Failed while waiting for the initial reconfiguration from Fleet server: %v", err) return err } @@ -109,12 +112,12 @@ func (l *launcher) Run(b *beat.Beat) error { } } - err := l.run() + err := l.run(ctx) return err } -func (l *launcher) run() error { - err := l.runLoop() +func (l *launcher) run(ctx context.Context) error { + err := l.runLoop(ctx) switch { case errors.Is(err, ErrGracefulExit): @@ -123,7 +126,7 @@ func (l *launcher) run() error { l.log.Info("Launcher stopped after timeout") case err == nil: // unexpected default: - l.log.Errorf(context.TODO(), "Launcher stopped by error: %v", err) + l.log.Errorf(ctx, "Launcher stopped by error: %v", err) } l.reloader.Stop() @@ -131,7 +134,7 @@ func (l *launcher) run() error { } // runLoop is the loop that keeps the launcher alive -func (l *launcher) runLoop() error { +func (l *launcher) runLoop(ctx context.Context) error { l.log.Info("Launcher is running") for { // Run a new beater @@ -144,7 +147,7 @@ func (l *launcher) runLoop() error { // config update (val, nil) // stop signal (nil, ErrStopSignal) // beater error (nil, err) - cfg, err := l.waitForUpdates() + cfg, err := l.waitForUpdates(ctx) if isConfigUpdate(cfg, err) { l.stopBeater() @@ 
-238,8 +241,10 @@ func (l *launcher) stopBeaterWithTimeout(duration time.Duration) error { // 1. The Stop function got called (nil, ErrStopSignal) // 2. The beater run has returned (nil, err) // 3. A config update received (val, nil) -func (l *launcher) waitForUpdates() (*config.C, error) { +func (l *launcher) waitForUpdates(ctx context.Context) (*config.C, error) { select { + case <-ctx.Done(): + return nil, ctx.Err() case err, ok := <-l.beaterErr: if !ok { l.log.Infof("Launcher received a stop signal") @@ -279,7 +284,7 @@ func (l *launcher) configUpdate(update *config.C) error { // reconfigureWait will wait for and consume incoming reconfiguration from the Fleet server, and keep // discarding them until the incoming config contains the necessary information to start beater // properly, thereafter returning the valid config. -func (l *launcher) reconfigureWait(timeout time.Duration) (*config.C, error) { +func (l *launcher) reconfigureWait(ctx context.Context, timeout time.Duration) (*config.C, error) { start := time.Now() timer := time.After(timeout) @@ -299,7 +304,7 @@ func (l *launcher) reconfigureWait(timeout time.Duration) (*config.C, error) { if l.validator != nil { err := l.validator.Validate(update) if err != nil { - l.log.Errorf(context.TODO(), "Config update validation failed: %v", err) + l.log.Errorf(ctx, "Config update validation failed: %v", err) healthErr := &BeaterUnhealthyError{} if errors.As(err, healthErr) { l.beat.Manager.UpdateStatus(status.Degraded, healthErr.Error()) diff --git a/internal/launcher/launcher_test.go b/internal/launcher/launcher_test.go index 6731da4e0e..a58c8dabbc 100644 --- a/internal/launcher/launcher_test.go +++ b/internal/launcher/launcher_test.go @@ -302,7 +302,7 @@ func (s *LauncherTestSuite) TestWaitForUpdates() { sut.Stop() }(tt.configs) - err := sut.run() + err := sut.run(s.T().Context()) s.Require().ErrorIs(err, ErrGracefulExit) beater, ok := sut.beater.(*beaterMock) s.Require().True(ok) @@ -325,7 +325,7 @@ func (s 
*LauncherTestSuite) TestErrorWaitForUpdates() { mocks.reloader.ch <- configErr }() - err := sut.run() + err := sut.run(s.T().Context()) s.Require().Error(err) } @@ -420,7 +420,7 @@ func (s *LauncherTestSuite) TestLauncherValidator() { } }(tt.configs) - cfg, err := sut.reconfigureWait(tt.timeout) + cfg, err := sut.reconfigureWait(s.T().Context(), tt.timeout) if tt.expected == nil { s.Require().Error(err) } else { @@ -433,12 +433,12 @@ func (s *LauncherTestSuite) TestLauncherValidator() { // TestLauncherErrorBeater should not call sut.Stop as the launcher should stop without calling it func (s *LauncherTestSuite) TestLauncherErrorBeater() { - s.Require().Error(s.newLauncher(s.initMocks(), errorBeaterMockCreator).run()) + s.Require().Error(s.newLauncher(s.initMocks(), errorBeaterMockCreator).run(s.T().Context())) } // TestLauncherPanicBeater should not call sut.Stop as the launcher should stop without calling it func (s *LauncherTestSuite) TestLauncherPanicBeater() { - s.Require().ErrorContains(s.newLauncher(s.initMocks(), panicBeaterMockCreator).run(), "panicBeaterMock panics") + s.Require().ErrorContains(s.newLauncher(s.initMocks(), panicBeaterMockCreator).run(s.T().Context()), "panicBeaterMock panics") } func (s *LauncherTestSuite) TestLauncherUpdateAndStop() { @@ -448,7 +448,7 @@ func (s *LauncherTestSuite) TestLauncherUpdateAndStop() { mocks.reloader.ch <- config.NewConfig() sut.Stop() }() - err := sut.run() + err := sut.run(s.T().Context()) s.Require().ErrorIs(err, ErrGracefulExit) } @@ -459,7 +459,7 @@ func (s *LauncherTestSuite) TestLauncherStopTwicePanics() { mocks.reloader.ch <- config.NewConfig() sut.Stop() }() - err := sut.run() + err := sut.run(s.T().Context()) s.Require().ErrorIs(err, ErrGracefulExit) s.Panics(func() { @@ -469,7 +469,7 @@ func (s *LauncherTestSuite) TestLauncherStopTwicePanics() { // TestLauncherErrorBeaterCreation should not call sut.Stop as the launcher should stop without calling it func (s *LauncherTestSuite) 
TestLauncherErrorBeaterCreation() { - s.Require().Error(s.newLauncher(s.initMocks(), errorBeaterCreator).run()) + s.Require().Error(s.newLauncher(s.initMocks(), errorBeaterCreator).run(s.T().Context())) } func (s *LauncherTestSuite) TestLauncherStop() { @@ -479,7 +479,7 @@ func (s *LauncherTestSuite) TestLauncherStop() { sut.Stop() }() - err := sut.run() + err := sut.run(s.T().Context()) s.Require().ErrorIs(err, ErrGracefulExit) } @@ -494,7 +494,7 @@ func (s *LauncherTestSuite) TestLauncherStopTimeout() { time.Sleep(shutdownGracePeriod + 100*time.Millisecond) }() - err := sut.run() + err := sut.run(s.T().Context()) s.Require().ErrorIs(err, ErrTimeoutExit) } From a6ab1414114a7c336b1b22988653fe409145ee9f Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 12:25:20 +0200 Subject: [PATCH 06/12] refactor(k8s): Propagate context for logging in kube fetcher --- .../fetching/fetchers/k8s/kube_fetcher.go | 4 ++-- .../fetching/fetchers/k8s/kube_provider.go | 14 +++++++++----- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/internal/resources/fetching/fetchers/k8s/kube_fetcher.go b/internal/resources/fetching/fetchers/k8s/kube_fetcher.go index f472aef408..80938eedd4 100644 --- a/internal/resources/fetching/fetchers/k8s/kube_fetcher.go +++ b/internal/resources/fetching/fetchers/k8s/kube_fetcher.go @@ -150,7 +150,7 @@ func (f *KubeFetcher) initWatchers() error { return nil } -func (f *KubeFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) error { +func (f *KubeFetcher) Fetch(ctx context.Context, cycleMetadata cycle.Metadata) error { f.log.Debug("Starting KubeFetcher.Fetch") var err error @@ -163,7 +163,7 @@ func (f *KubeFetcher) Fetch(_ context.Context, cycleMetadata cycle.Metadata) err return fmt.Errorf("could not initate Kubernetes watchers: %w", err) } - getKubeData(f.log, f.watchers, f.resourceCh, cycleMetadata) //nolint:contextcheck + getKubeData(ctx, f.log, f.watchers, f.resourceCh, cycleMetadata) return nil } diff --git 
a/internal/resources/fetching/fetchers/k8s/kube_provider.go b/internal/resources/fetching/fetchers/k8s/kube_provider.go index b50109073d..0d70da417f 100644 --- a/internal/resources/fetching/fetchers/k8s/kube_provider.go +++ b/internal/resources/fetching/fetchers/k8s/kube_provider.go @@ -44,7 +44,7 @@ const ( ecsResourceNameField = "orchestrator.resource.name" ) -func getKubeData(log *clog.Logger, watchers []kubernetes.Watcher, resCh chan fetching.ResourceInfo, cycleMetadata cycle.Metadata) { +func getKubeData(ctx context.Context, log *clog.Logger, watchers []kubernetes.Watcher, resCh chan fetching.ResourceInfo, cycleMetadata cycle.Metadata) { log.Debug("Starting getKubeData") for _, watcher := range watchers { @@ -55,13 +55,13 @@ func getKubeData(log *clog.Logger, watchers []kubernetes.Watcher, resCh chan fet resource, ok := r.(kubernetes.Resource) if !ok { - log.Errorf(context.TODO(), "Bad resource: %#v does not implement kubernetes.Resource", r) + log.Errorf(ctx, "Bad resource: %#v does not implement kubernetes.Resource", r) continue } err := addTypeInformationToKubeResource(resource) if err != nil { - log.Errorf(context.TODO(), "Bad resource: %v", err) + log.Errorf(ctx, "Bad resource: %v", err) continue } // See https://github.com/kubernetes/kubernetes/issues/3030 resCh <- fetching.ResourceInfo{Resource: K8sResource{log, resource}, CycleMetadata: cycleMetadata} @@ -109,7 +109,9 @@ func (r K8sResource) GetElasticCommonData() (map[string]any, error) { func getK8sObjectMeta(log *clog.Logger, k8sObj reflect.Value) metav1.ObjectMeta { metadata, ok := k8sObj.FieldByName(k8sObjMetadataField).Interface().(metav1.ObjectMeta) if !ok { - log.Errorf(context.TODO(), "Failed to retrieve object metadata, Resource: %#v", k8sObj) + // Bypassing clog's context-aware Errorf to avoid using context.TODO(). + // This means these logs won't be enriched with tracing information. 
+ log.Logger.Errorf("Failed to retrieve object metadata, Resource: %#v", k8sObj) return metav1.ObjectMeta{} } @@ -119,7 +121,9 @@ func getK8sObjectMeta(log *clog.Logger, k8sObj reflect.Value) metav1.ObjectMeta func getK8sSubType(log *clog.Logger, k8sObj reflect.Value) string { typeMeta, ok := k8sObj.FieldByName(k8sTypeMetadataField).Interface().(metav1.TypeMeta) if !ok { - log.Errorf(context.TODO(), "Failed to retrieve type metadata, Resource: %#v", k8sObj) + // Bypassing clog's context-aware Errorf to avoid using context.TODO(). + // This means these logs won't be enriched with tracing information. + log.Logger.Errorf("Failed to retrieve type metadata, Resource: %#v", k8sObj) return "" } From fa34dfcaa298c5a5f0405ee0f95062a9dd102970 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 12:26:07 +0200 Subject: [PATCH 07/12] NewLeaderElector remove nolint --- internal/flavors/benchmark/eks.go | 2 +- internal/flavors/benchmark/k8s.go | 2 +- internal/uniqueness/leaderelection_test.go | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/internal/flavors/benchmark/eks.go b/internal/flavors/benchmark/eks.go index ddece01130..d8f3801eeb 100644 --- a/internal/flavors/benchmark/eks.go +++ b/internal/flavors/benchmark/eks.go @@ -73,7 +73,7 @@ func (k *EKS) initialize(ctx context.Context, log *clog.Logger, cfg *config.Conf } benchmarkHelper := NewK8sBenchmarkHelper(log, cfg, kubeClient) - k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) //nolint:contextcheck + k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) awsConfig, awsIdentity, err := k.getEksAwsConfig(ctx, cfg) if err != nil { diff --git a/internal/flavors/benchmark/k8s.go b/internal/flavors/benchmark/k8s.go index fc700f96dd..4617bfc202 100644 --- a/internal/flavors/benchmark/k8s.go +++ b/internal/flavors/benchmark/k8s.go @@ -66,7 +66,7 @@ func (k *K8S) initialize(ctx context.Context, log *clog.Logger, cfg *config.Conf } benchmarkHelper := 
NewK8sBenchmarkHelper(log, cfg, kubeClient) - k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) //nolint:contextcheck + k.leaderElector = uniqueness.NewLeaderElector(log, kubeClient) dp, err := benchmarkHelper.GetK8sDataProvider(ctx, k8s.KubernetesClusterNameProvider{KubeClient: kubeClient}) if err != nil { diff --git a/internal/uniqueness/leaderelection_test.go b/internal/uniqueness/leaderelection_test.go index 5bfa291156..7ed8f03b16 100644 --- a/internal/uniqueness/leaderelection_test.go +++ b/internal/uniqueness/leaderelection_test.go @@ -18,7 +18,6 @@ package uniqueness import ( - "context" "fmt" "os" "strings" @@ -169,7 +168,7 @@ func (s *LeaderElectionTestSuite) TestManager_buildConfig() { s.Require().NoError(os.Setenv(PodNameEnvar, podId)) } - got, err := s.manager.buildConfig(context.TODO()) + got, err := s.manager.buildConfig(s.T().Context()) if (err != nil) != tt.wantErr { s.FailNow("unexpected error", "error: %v", err) } From 8b6fad261431eee8e691d8be356fac9fcdc78daa Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 12:32:12 +0200 Subject: [PATCH 08/12] inline --- internal/evaluator/opa.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/internal/evaluator/opa.go b/internal/evaluator/opa.go index 7cd30bb589..e61300448f 100644 --- a/internal/evaluator/opa.go +++ b/internal/evaluator/opa.go @@ -72,14 +72,11 @@ func NewOpaEvaluator(ctx context.Context, log *clog.Logger, cfg *config.Config) plugin := fmt.Sprintf(logPlugin, dlogger.PluginName, dlogger.PluginName) opaCfg := fmt.Sprintf(opaConfig, cfg.BundlePath, plugin) - decisionLogger := newLogger(ctx) - stdLogger := newLogger(ctx) - // create an instance of the OPA object opa, err := sdk.New(ctx, sdk.Options{ Config: bytes.NewReader([]byte(opaCfg)), - Logger: stdLogger, - ConsoleLogger: decisionLogger, + Logger: newLogger(ctx), + ConsoleLogger: newLogger(ctx), Plugins: map[string]plugins.Factory{ dlogger.PluginName: &dlogger.Factory{}, }, From 
ad3521c242c26a745427b357f4d295b3bdc94cd4 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 14:43:36 +0200 Subject: [PATCH 09/12] OTel: Align span names (#3506) Span naming convention: `..` According to https://github.com/elastic/security-team/issues/13322 --- internal/flavors/benchmark/aws_org.go | 2 +- internal/resources/fetching/manager/manager.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/flavors/benchmark/aws_org.go b/internal/flavors/benchmark/aws_org.go index a98d59b51e..4478d8c39d 100644 --- a/internal/flavors/benchmark/aws_org.go +++ b/internal/flavors/benchmark/aws_org.go @@ -98,7 +98,7 @@ func (a *AWSOrg) initialize(ctx context.Context, log *clog.Logger, cfg *config.C cache := make(map[string]registry.FetchersMap) reg := registry.NewRegistry(log, registry.WithUpdater( func(ctx context.Context) (registry.FetchersMap, error) { - ctx, span := observability.StartSpan(ctx, scopeName, "Update AWS accounts") + ctx, span := observability.StartSpan(ctx, scopeName, "benchmark.AWSOrg.initialize") defer span.End() spannedLog := log.WithSpanContext(span.SpanContext()) diff --git a/internal/resources/fetching/manager/manager.go b/internal/resources/fetching/manager/manager.go index a52dc4f2bb..58024e8fd8 100644 --- a/internal/resources/fetching/manager/manager.go +++ b/internal/resources/fetching/manager/manager.go @@ -104,7 +104,7 @@ func (m *Manager) fetchIteration(ctx context.Context) { ctx, span := observability.StartSpan( ctx, scopeName, - "Fetch Iteration", + "manager.Manager.fetchIteration", trace.WithAttributes(attribute.String("transaction.type", "request")), ) defer span.End() From 03c2207d2a81409f42f008179b7dffcacb6151d9 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Mon, 11 Aug 2025 17:30:19 +0200 Subject: [PATCH 10/12] feat(OTel): Use global providers This commit refactors the OpenTelemetry (OTel) instrumentation to use global tracer and meter providers instead of passing them through the context. 
This simplifies the observability setup and removes the need for custom wrappers around OTel functions. This approach is idiomatic in OpenTelemetry-Go. OTel uses a delegation pattern where a default, no-op provider is initially present. When the application later configures and sets the actual provider, the default provider delegates all calls to the new, configured one. This design makes the initialization order flexible and robust, ensuring all components use the same fully-configured provider, regardless of when they were initialized. See: https://seth-shi.medium.com/why-can-otel-gettracerprovider-5bfbc73db828 Key changes: - The `internal/infra/observability/context.go` file has been removed, as providers are no longer stored in the context. - `internal/infra/observability/otel.go` now initializes and manages global `TracerProvider` and `MeterProvider` instances using a `sync.Once` to ensure they are set up only once. - The `observability.StartSpan` and `observability.MeterFromContext` helper functions have been removed. Callers now use the standard `otel.Tracer(name).Start()` and `otel.Meter(name)` to create spans and get meters directly. - All call sites have been updated to use the new approach, obtaining tracers and meters from the global OTel instance. - The `SetUpOtel` function no longer returns a context, as it now configures the global providers. 
--- internal/flavors/benchmark/aws_org.go | 5 +- internal/flavors/posture.go | 2 +- internal/infra/observability/context.go | 79 -------------- internal/infra/observability/otel.go | 103 ++++++++---------- internal/infra/observability/otel_test.go | 12 +- .../resources/fetching/manager/manager.go | 10 +- .../resources/fetching/registry/registry.go | 6 +- 7 files changed, 71 insertions(+), 146 deletions(-) delete mode 100644 internal/infra/observability/context.go diff --git a/internal/flavors/benchmark/aws_org.go b/internal/flavors/benchmark/aws_org.go index 4478d8c39d..de4fe64e4b 100644 --- a/internal/flavors/benchmark/aws_org.go +++ b/internal/flavors/benchmark/aws_org.go @@ -26,6 +26,7 @@ import ( awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/service/sts" + "go.opentelemetry.io/otel" "github.com/elastic/cloudbeat/internal/config" "github.com/elastic/cloudbeat/internal/dataprovider" @@ -49,6 +50,8 @@ const ( scopeName = "github.com/elastic/cloudbeat/internal/flavors/benchmark/aws_org" ) +var tracer = otel.Tracer(scopeName) + type AWSOrg struct { IAMProvider iam.RoleGetter IdentityProvider awslib.IdentityProviderGetter @@ -98,7 +101,7 @@ func (a *AWSOrg) initialize(ctx context.Context, log *clog.Logger, cfg *config.C cache := make(map[string]registry.FetchersMap) reg := registry.NewRegistry(log, registry.WithUpdater( func(ctx context.Context) (registry.FetchersMap, error) { - ctx, span := observability.StartSpan(ctx, scopeName, "benchmark.AWSOrg.initialize") + ctx, span := tracer.Start(ctx, "benchmark.AWSOrg.initialize") defer span.End() spannedLog := log.WithSpanContext(span.SpanContext()) diff --git a/internal/flavors/posture.go b/internal/flavors/posture.go index 1ece65c5a1..043a93e6ed 100644 --- a/internal/flavors/posture.go +++ b/internal/flavors/posture.go @@ -53,7 +53,7 @@ func newPostureFromCfg(b *beat.Beat, cfg *config.Config) (*posture, error) { log.Info("Config initiated with 
cycle period of ", cfg.Period) ctx, cancel := context.WithCancel(context.Background()) - ctx, err := observability.SetUpOtel(ctx, log.Logger) + err := observability.SetUpOtel(ctx, log.Logger) if err != nil { log.Errorw("failed to set up otel", logp.Error(err)) } diff --git a/internal/infra/observability/context.go b/internal/infra/observability/context.go deleted file mode 100644 index 6c3708a3a8..0000000000 --- a/internal/infra/observability/context.go +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package observability - -import ( - "context" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -type contextKeyType int - -const contextKey contextKeyType = iota - -func otelProvidersFromContext(ctx context.Context) otelProviders { - if ctx != nil { - if otl, ok := ctx.Value(contextKey).(otelProviders); ok { - return otl - } - if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { - return otelProviders{ - traceProvider: tracerNoShutdown{span.TracerProvider()}, - meterProvider: meterNoShutdown{otel.GetMeterProvider()}, - } - } - } - return otelProviders{ - traceProvider: tracerNoShutdown{otel.GetTracerProvider()}, - meterProvider: meterNoShutdown{otel.GetMeterProvider()}, - } -} - -func contextWithOTel(ctx context.Context, otl otelProviders) context.Context { - return context.WithValue(ctx, contextKey, otl) -} - -func TracerFromContext(ctx context.Context, name string, opts ...trace.TracerOption) trace.Tracer { - return otelProvidersFromContext(ctx).traceProvider.Tracer(name, opts...) -} - -func MeterFromContext(ctx context.Context, name string, opts ...metric.MeterOption) metric.Meter { - return otelProvidersFromContext(ctx).meterProvider.Meter(name, opts...) -} - -// meterNoShutdown and tracerNoShutdown patch the metric.MeterProvider and trace.TracerProvider interfaces with Shutdown -// and Flush operations. -// The trace.TracerProvider interface does not provide Shutdown or Flush but sdktrace.TracerProvider (returned by -// newTracerProvider) does. otel.GetTracerProvider() returns the first instead of the second. In real life, this will be -// the case when using a no-op tracer (e.g. on-prem with no APM server set up). 
-type ( - meterNoShutdown struct { - metric.MeterProvider - } - tracerNoShutdown struct { - trace.TracerProvider - } -) - -func (m meterNoShutdown) ForceFlush(context.Context) error { return nil } -func (m meterNoShutdown) Shutdown(context.Context) error { return nil } -func (t tracerNoShutdown) ForceFlush(context.Context) error { return nil } -func (t tracerNoShutdown) Shutdown(context.Context) error { return nil } diff --git a/internal/infra/observability/otel.go b/internal/infra/observability/otel.go index 0d460b8d16..338c37c348 100644 --- a/internal/infra/observability/otel.go +++ b/internal/infra/observability/otel.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "os" + "sync" "github.com/elastic/elastic-agent-libs/logp" "github.com/go-logr/logr" @@ -29,7 +30,6 @@ import ( "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/metric" sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" @@ -45,55 +45,50 @@ const ( endpointEnvVar = "OTEL_EXPORTER_OTLP_ENDPOINT" ) -type gracefulCloser interface { - ForceFlush(ctx context.Context) error - Shutdown(ctx context.Context) error -} - -type otelProviders struct { - traceProvider tracerProvider - meterProvider meterProvider -} +var ( + traceProvider *sdktrace.TracerProvider + meterProvider *sdkmetric.MeterProvider + once sync.Once +) // SetUpOtel initializes OpenTelemetry logging, tracing, and metrics providers. // It configures OTLP exporters that send data to an OTLP endpoint // (e.g., APM Server) configured via environment variables. 
-func SetUpOtel(ctx context.Context, logger *logp.Logger) (context.Context, error) { +func SetUpOtel(ctx context.Context, logger *logp.Logger) error { logger = logger.Named("otel") if os.Getenv(endpointEnvVar) == "" { logger.Infof("%s is not set, skipping OpenTelemetry setup", endpointEnvVar) - return ctx, nil + return nil } + var err error + once.Do(func() { + err = setUpOnce(ctx, logger) + }) + return err +} + +func setUpOnce(ctx context.Context, logger *logp.Logger) error { wrap := loggerWrapper{l: logger} otel.SetLogger(logr.New(&wrap)) otel.SetErrorHandler(&wrap) res, err := newResource(ctx) if err != nil { - return ctx, fmt.Errorf("failed to create OTel resource: %w", err) + return fmt.Errorf("failed to create OTel resource: %w", err) } - mp, err := newMetricsProvider(ctx, res) + err = newDefaultMeterProvider(ctx, res) if err != nil { - return ctx, fmt.Errorf("failed to create metrics provider: %w", err) + return fmt.Errorf("failed to create metrics provider: %w", err) } - tp, err := newTracerProvider(ctx, res) + err = newDefaultTracerProvider(ctx, res) if err != nil { - return ctx, fmt.Errorf("failed to create tracer provider: %w", err) + return fmt.Errorf("failed to create tracer provider: %w", err) } - return contextWithOTel(ctx, otelProviders{ - traceProvider: tp, - meterProvider: mp, - }), nil -} - -// StartSpan starts a new trace span. -// It's a convenience wrapper around tracer.Start(). -func StartSpan(ctx context.Context, tracerName, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - return TracerFromContext(ctx, tracerName).Start(ctx, spanName, opts...) + return nil } // FailSpan records an error in the span and sets its status to Error. @@ -106,42 +101,38 @@ func FailSpan(span trace.Span, msg string, err error) error { return err } -// tracerProvider is an extension of the trace.TracerProvider interface with shutdown and force flush operations. 
-type tracerProvider interface { - trace.TracerProvider - gracefulCloser -} - -// meterProvider is an extension of the metric.MeterProvider interface with shutdown and force flush operations. -type meterProvider interface { - metric.MeterProvider - gracefulCloser -} - // ShutdownOtel flushes and shuts down the registered OpenTelemetry providers. func ShutdownOtel(ctx context.Context) error { - otl := otelProvidersFromContext(ctx) - return errors.Join( - otl.meterProvider.ForceFlush(ctx), - otl.meterProvider.Shutdown(ctx), - otl.traceProvider.ForceFlush(ctx), - otl.traceProvider.Shutdown(ctx), - ) + var err error + if meterProvider != nil { + err = errors.Join( + meterProvider.ForceFlush(ctx), + meterProvider.Shutdown(ctx), + ) + } + if traceProvider != nil { + err = errors.Join( + err, + traceProvider.ForceFlush(ctx), + traceProvider.Shutdown(ctx), + ) + } + return err } -func newMetricsProvider(ctx context.Context, res *resource.Resource) (*sdkmetric.MeterProvider, error) { +func newDefaultMeterProvider(ctx context.Context, res *resource.Resource) error { // The OTLP gRPC exporter will be configured using environment variables (e.g., OTEL_EXPORTER_OTLP_ENDPOINT). 
metricExporter, err := otlpmetricgrpc.New(ctx) if err != nil { - return nil, fmt.Errorf("failed to create OTLP metric exporter: %w", err) + return fmt.Errorf("failed to create OTLP metric exporter: %w", err) } - mp := sdkmetric.NewMeterProvider( + meterProvider = sdkmetric.NewMeterProvider( sdkmetric.WithResource(res), sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter)), ) - otel.SetMeterProvider(mp) - return mp, nil + otel.SetMeterProvider(meterProvider) + return nil } func newResource(ctx context.Context) (*resource.Resource, error) { @@ -169,22 +160,22 @@ func newResource(ctx context.Context) (*resource.Resource, error) { return res, nil } -func newTracerProvider(ctx context.Context, res *resource.Resource) (*sdktrace.TracerProvider, error) { +func newDefaultTracerProvider(ctx context.Context, res *resource.Resource) error { // The APM server supports OTLP over gRPC, so we use the gRPC exporter. // The OTLP gRPC exporter uses environment variables for configuration (e.g., OTEL_EXPORTER_OTLP_ENDPOINT). exporter, err := otlptracegrpc.New(ctx) if err != nil { - return nil, fmt.Errorf("failed to create OTLP trace exporter: %w", err) + return fmt.Errorf("failed to create OTLP trace exporter: %w", err) } - tp := sdktrace.NewTracerProvider( + traceProvider = sdktrace.NewTracerProvider( sdktrace.WithResource(res), sdktrace.WithBatcher(exporter), // Batches spans for better performance. sdktrace.WithSpanProcessor(ensureSpanNameProcessor{}), ) // Set the global TracerProvider to allow instrumentation libraries to use it. - otel.SetTracerProvider(tp) - return tp, nil + otel.SetTracerProvider(traceProvider) + return nil } // loggerWrapper is a wrapper around logp.Logger that implements the logr.LogSink and otel.ErrorHandler interfaces. 
diff --git a/internal/infra/observability/otel_test.go b/internal/infra/observability/otel_test.go index 250d8b6fc3..de86aaf2ed 100644 --- a/internal/infra/observability/otel_test.go +++ b/internal/infra/observability/otel_test.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" metricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" tracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" otlpmetricpb "go.opentelemetry.io/proto/otlp/metrics/v1" @@ -39,12 +40,15 @@ import ( "github.com/elastic/cloudbeat/internal/resources/utils/testhelper" ) +var tracer = otel.Tracer("test-scope") +var meter = otel.Meter("test-scope") + func TestOtel(t *testing.T) { testhelper.SkipLong(t) ctx := t.Context() t.Run("No OTel Setup", func(t *testing.T) { - spanCtx, span := observability.StartSpan(ctx, "test-tracer", "test-span") + spanCtx, span := tracer.Start(ctx, "test-span") require.NotNil(t, spanCtx) assert.False(t, span.IsRecording()) }) @@ -55,12 +59,12 @@ func TestOtel(t *testing.T) { t.Cleanup(server.Stop) log := testhelper.NewObserverLogger(t) - ctx, err := observability.SetUpOtel(ctx, log.Logger) + err := observability.SetUpOtel(ctx, log.Logger) require.NoError(t, err) var spanID, traceID string t.Run("Start Span", func(t *testing.T) { - spanCtx, span := observability.StartSpan(ctx, "test-tracer", "test-span") + spanCtx, span := tracer.Start(ctx, "test-span") require.NotNil(t, spanCtx) assert.True(t, span.IsRecording()) assert.True(t, span.SpanContext().IsValid()) @@ -73,7 +77,7 @@ func TestOtel(t *testing.T) { span.End() // End the span to ensure it is recorded }) - counter, err := observability.MeterFromContext(ctx, "test-meter").Int64Counter("test-counter") + counter, err := meter.Int64Counter("test-counter") require.NoError(t, err) counter.Add(ctx, 10) diff --git a/internal/resources/fetching/manager/manager.go 
b/internal/resources/fetching/manager/manager.go index 58024e8fd8..d1985ab2da 100644 --- a/internal/resources/fetching/manager/manager.go +++ b/internal/resources/fetching/manager/manager.go @@ -23,17 +23,20 @@ import ( "sync" "time" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "github.com/elastic/cloudbeat/internal/infra/clog" - "github.com/elastic/cloudbeat/internal/infra/observability" "github.com/elastic/cloudbeat/internal/resources/fetching/cycle" "github.com/elastic/cloudbeat/internal/resources/fetching/registry" ) const scopeName = "github.com/elastic/cloudbeat/internal/resources/fetching/manager" +var tracer = otel.Tracer(scopeName) +var meter = otel.Meter(scopeName) + type Manager struct { log *clog.Logger @@ -73,7 +76,7 @@ func (m *Manager) Stop() { } func (m *Manager) fetchAndSleep(ctx context.Context) { - counter, err := observability.MeterFromContext(ctx, scopeName).Int64Counter("cloudbeat.fetcher.manager.cycles") + counter, err := meter.Int64Counter("cloudbeat.fetcher.manager.cycles") if err != nil { m.log.Errorf(ctx, "Failed to create fetcher manager cycles counter: %v", err) } @@ -101,9 +104,8 @@ func (m *Manager) fetchAndSleep(ctx context.Context) { // fetchIteration waits for all the registered fetchers and trigger them to fetch relevant resources. // The function must not get called in parallel. 
func (m *Manager) fetchIteration(ctx context.Context) { - ctx, span := observability.StartSpan( + ctx, span := tracer.Start( ctx, - scopeName, "manager.Manager.fetchIteration", trace.WithAttributes(attribute.String("transaction.type", "request")), ) diff --git a/internal/resources/fetching/registry/registry.go b/internal/resources/fetching/registry/registry.go index 27e17df14e..a8de1bfd75 100644 --- a/internal/resources/fetching/registry/registry.go +++ b/internal/resources/fetching/registry/registry.go @@ -22,6 +22,8 @@ import ( "fmt" "strings" + "go.opentelemetry.io/otel" + "github.com/elastic/cloudbeat/internal/infra/clog" "github.com/elastic/cloudbeat/internal/infra/observability" "github.com/elastic/cloudbeat/internal/resources/fetching/cycle" @@ -29,6 +31,8 @@ import ( const scopeName = "github.com/elastic/cloudbeat/internal/resources/fetching/registry" +var tracer = otel.Tracer(scopeName) + type Registry interface { Keys() []string ShouldRun(key string) bool @@ -100,7 +104,7 @@ func (r *registry) Run(ctx context.Context, key string, metadata cycle.Metadata) return fmt.Errorf("fetcher %v not found", key) } - ctx, span := observability.StartSpan(ctx, scopeName, fmt.Sprintf("%s.Fetch", cleanTypeOf(registered.Fetcher))) + ctx, span := tracer.Start(ctx, fmt.Sprintf("%s.Fetch", cleanTypeOf(registered.Fetcher))) defer span.End() err := registered.Fetcher.Fetch(ctx, metadata) if err != nil { From 57b672ca59418059af97726bac44dac496f9f271 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Tue, 12 Aug 2025 11:37:12 +0200 Subject: [PATCH 11/12] Validate mocks --- .../observability/mock_graceful_closer.go | 145 ----------- .../observability/mock_meter_provider.go | 241 ------------------ .../observability/mock_tracer_provider.go | 241 ------------------ 3 files changed, 627 deletions(-) delete mode 100644 internal/infra/observability/mock_graceful_closer.go delete mode 100644 internal/infra/observability/mock_meter_provider.go delete mode 100644 
internal/infra/observability/mock_tracer_provider.go diff --git a/internal/infra/observability/mock_graceful_closer.go b/internal/infra/observability/mock_graceful_closer.go deleted file mode 100644 index 43fe68e34f..0000000000 --- a/internal/infra/observability/mock_graceful_closer.go +++ /dev/null @@ -1,145 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated by mockery v2.53.3. DO NOT EDIT. 
- -package observability - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// mockGracefulCloser is an autogenerated mock type for the gracefulCloser type -type mockGracefulCloser struct { - mock.Mock -} - -type mockGracefulCloser_Expecter struct { - mock *mock.Mock -} - -func (_m *mockGracefulCloser) EXPECT() *mockGracefulCloser_Expecter { - return &mockGracefulCloser_Expecter{mock: &_m.Mock} -} - -// ForceFlush provides a mock function with given fields: ctx -func (_m *mockGracefulCloser) ForceFlush(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for ForceFlush") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// mockGracefulCloser_ForceFlush_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceFlush' -type mockGracefulCloser_ForceFlush_Call struct { - *mock.Call -} - -// ForceFlush is a helper method to define mock.On call -// - ctx context.Context -func (_e *mockGracefulCloser_Expecter) ForceFlush(ctx interface{}) *mockGracefulCloser_ForceFlush_Call { - return &mockGracefulCloser_ForceFlush_Call{Call: _e.mock.On("ForceFlush", ctx)} -} - -func (_c *mockGracefulCloser_ForceFlush_Call) Run(run func(ctx context.Context)) *mockGracefulCloser_ForceFlush_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockGracefulCloser_ForceFlush_Call) Return(_a0 error) *mockGracefulCloser_ForceFlush_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockGracefulCloser_ForceFlush_Call) RunAndReturn(run func(context.Context) error) *mockGracefulCloser_ForceFlush_Call { - _c.Call.Return(run) - return _c -} - -// Shutdown provides a mock function with given fields: ctx -func (_m *mockGracefulCloser) Shutdown(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) 
== 0 { - panic("no return value specified for Shutdown") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// mockGracefulCloser_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' -type mockGracefulCloser_Shutdown_Call struct { - *mock.Call -} - -// Shutdown is a helper method to define mock.On call -// - ctx context.Context -func (_e *mockGracefulCloser_Expecter) Shutdown(ctx interface{}) *mockGracefulCloser_Shutdown_Call { - return &mockGracefulCloser_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} -} - -func (_c *mockGracefulCloser_Shutdown_Call) Run(run func(ctx context.Context)) *mockGracefulCloser_Shutdown_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockGracefulCloser_Shutdown_Call) Return(_a0 error) *mockGracefulCloser_Shutdown_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockGracefulCloser_Shutdown_Call) RunAndReturn(run func(context.Context) error) *mockGracefulCloser_Shutdown_Call { - _c.Call.Return(run) - return _c -} - -// newMockGracefulCloser creates a new instance of mockGracefulCloser. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func newMockGracefulCloser(t interface { - mock.TestingT - Cleanup(func()) -}) *mockGracefulCloser { - mock := &mockGracefulCloser{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/internal/infra/observability/mock_meter_provider.go b/internal/infra/observability/mock_meter_provider.go deleted file mode 100644 index 5bbcf19676..0000000000 --- a/internal/infra/observability/mock_meter_provider.go +++ /dev/null @@ -1,241 +0,0 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated by mockery v2.53.3. DO NOT EDIT. - -package observability - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - metric "go.opentelemetry.io/otel/metric" -) - -// mockMeterProvider is an autogenerated mock type for the meterProvider type -type mockMeterProvider struct { - mock.Mock -} - -type mockMeterProvider_Expecter struct { - mock *mock.Mock -} - -func (_m *mockMeterProvider) EXPECT() *mockMeterProvider_Expecter { - return &mockMeterProvider_Expecter{mock: &_m.Mock} -} - -// ForceFlush provides a mock function with given fields: ctx -func (_m *mockMeterProvider) ForceFlush(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for ForceFlush") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// mockMeterProvider_ForceFlush_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceFlush' -type mockMeterProvider_ForceFlush_Call struct { - *mock.Call -} - -// ForceFlush is a helper method to define mock.On call -// - ctx context.Context -func (_e 
*mockMeterProvider_Expecter) ForceFlush(ctx interface{}) *mockMeterProvider_ForceFlush_Call { - return &mockMeterProvider_ForceFlush_Call{Call: _e.mock.On("ForceFlush", ctx)} -} - -func (_c *mockMeterProvider_ForceFlush_Call) Run(run func(ctx context.Context)) *mockMeterProvider_ForceFlush_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockMeterProvider_ForceFlush_Call) Return(_a0 error) *mockMeterProvider_ForceFlush_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockMeterProvider_ForceFlush_Call) RunAndReturn(run func(context.Context) error) *mockMeterProvider_ForceFlush_Call { - _c.Call.Return(run) - return _c -} - -// Meter provides a mock function with given fields: name, opts -func (_m *mockMeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, name) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Meter") - } - - var r0 metric.Meter - if rf, ok := ret.Get(0).(func(string, ...metric.MeterOption) metric.Meter); ok { - r0 = rf(name, opts...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(metric.Meter) - } - } - - return r0 -} - -// mockMeterProvider_Meter_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Meter' -type mockMeterProvider_Meter_Call struct { - *mock.Call -} - -// Meter is a helper method to define mock.On call -// - name string -// - opts ...metric.MeterOption -func (_e *mockMeterProvider_Expecter) Meter(name interface{}, opts ...interface{}) *mockMeterProvider_Meter_Call { - return &mockMeterProvider_Meter_Call{Call: _e.mock.On("Meter", - append([]interface{}{name}, opts...)...)} -} - -func (_c *mockMeterProvider_Meter_Call) Run(run func(name string, opts ...metric.MeterOption)) *mockMeterProvider_Meter_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]metric.MeterOption, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(metric.MeterOption) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *mockMeterProvider_Meter_Call) Return(_a0 metric.Meter) *mockMeterProvider_Meter_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockMeterProvider_Meter_Call) RunAndReturn(run func(string, ...metric.MeterOption) metric.Meter) *mockMeterProvider_Meter_Call { - _c.Call.Return(run) - return _c -} - -// Shutdown provides a mock function with given fields: ctx -func (_m *mockMeterProvider) Shutdown(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for Shutdown") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// mockMeterProvider_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' -type mockMeterProvider_Shutdown_Call struct { - *mock.Call -} - -// Shutdown is a helper method to define mock.On call -// - ctx context.Context -func (_e 
*mockMeterProvider_Expecter) Shutdown(ctx interface{}) *mockMeterProvider_Shutdown_Call { - return &mockMeterProvider_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} -} - -func (_c *mockMeterProvider_Shutdown_Call) Run(run func(ctx context.Context)) *mockMeterProvider_Shutdown_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockMeterProvider_Shutdown_Call) Return(_a0 error) *mockMeterProvider_Shutdown_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockMeterProvider_Shutdown_Call) RunAndReturn(run func(context.Context) error) *mockMeterProvider_Shutdown_Call { - _c.Call.Return(run) - return _c -} - -// meterProvider provides a mock function with no fields -func (_m *mockMeterProvider) meterProvider() { - _m.Called() -} - -// mockMeterProvider_meterProvider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'meterProvider' -type mockMeterProvider_meterProvider_Call struct { - *mock.Call -} - -// meterProvider is a helper method to define mock.On call -func (_e *mockMeterProvider_Expecter) meterProvider() *mockMeterProvider_meterProvider_Call { - return &mockMeterProvider_meterProvider_Call{Call: _e.mock.On("meterProvider")} -} - -func (_c *mockMeterProvider_meterProvider_Call) Run(run func()) *mockMeterProvider_meterProvider_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *mockMeterProvider_meterProvider_Call) Return() *mockMeterProvider_meterProvider_Call { - _c.Call.Return() - return _c -} - -func (_c *mockMeterProvider_meterProvider_Call) RunAndReturn(run func()) *mockMeterProvider_meterProvider_Call { - _c.Run(run) - return _c -} - -// newMockMeterProvider creates a new instance of mockMeterProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func newMockMeterProvider(t interface { - mock.TestingT - Cleanup(func()) -}) *mockMeterProvider { - mock := &mockMeterProvider{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/internal/infra/observability/mock_tracer_provider.go b/internal/infra/observability/mock_tracer_provider.go deleted file mode 100644 index c3ef008f9a..0000000000 --- a/internal/infra/observability/mock_tracer_provider.go +++ /dev/null @@ -1,241 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -// Code generated by mockery v2.53.3. DO NOT EDIT. 
- -package observability - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - trace "go.opentelemetry.io/otel/trace" -) - -// mockTracerProvider is an autogenerated mock type for the tracerProvider type -type mockTracerProvider struct { - mock.Mock -} - -type mockTracerProvider_Expecter struct { - mock *mock.Mock -} - -func (_m *mockTracerProvider) EXPECT() *mockTracerProvider_Expecter { - return &mockTracerProvider_Expecter{mock: &_m.Mock} -} - -// ForceFlush provides a mock function with given fields: ctx -func (_m *mockTracerProvider) ForceFlush(ctx context.Context) error { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for ForceFlush") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// mockTracerProvider_ForceFlush_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceFlush' -type mockTracerProvider_ForceFlush_Call struct { - *mock.Call -} - -// ForceFlush is a helper method to define mock.On call -// - ctx context.Context -func (_e *mockTracerProvider_Expecter) ForceFlush(ctx interface{}) *mockTracerProvider_ForceFlush_Call { - return &mockTracerProvider_ForceFlush_Call{Call: _e.mock.On("ForceFlush", ctx)} -} - -func (_c *mockTracerProvider_ForceFlush_Call) Run(run func(ctx context.Context)) *mockTracerProvider_ForceFlush_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockTracerProvider_ForceFlush_Call) Return(_a0 error) *mockTracerProvider_ForceFlush_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockTracerProvider_ForceFlush_Call) RunAndReturn(run func(context.Context) error) *mockTracerProvider_ForceFlush_Call { - _c.Call.Return(run) - return _c -} - -// Shutdown provides a mock function with given fields: ctx -func (_m *mockTracerProvider) Shutdown(ctx context.Context) error 
{ - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for Shutdown") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// mockTracerProvider_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown' -type mockTracerProvider_Shutdown_Call struct { - *mock.Call -} - -// Shutdown is a helper method to define mock.On call -// - ctx context.Context -func (_e *mockTracerProvider_Expecter) Shutdown(ctx interface{}) *mockTracerProvider_Shutdown_Call { - return &mockTracerProvider_Shutdown_Call{Call: _e.mock.On("Shutdown", ctx)} -} - -func (_c *mockTracerProvider_Shutdown_Call) Run(run func(ctx context.Context)) *mockTracerProvider_Shutdown_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *mockTracerProvider_Shutdown_Call) Return(_a0 error) *mockTracerProvider_Shutdown_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockTracerProvider_Shutdown_Call) RunAndReturn(run func(context.Context) error) *mockTracerProvider_Shutdown_Call { - _c.Call.Return(run) - return _c -} - -// Tracer provides a mock function with given fields: name, options -func (_m *mockTracerProvider) Tracer(name string, options ...trace.TracerOption) trace.Tracer { - _va := make([]interface{}, len(options)) - for _i := range options { - _va[_i] = options[_i] - } - var _ca []interface{} - _ca = append(_ca, name) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Tracer") - } - - var r0 trace.Tracer - if rf, ok := ret.Get(0).(func(string, ...trace.TracerOption) trace.Tracer); ok { - r0 = rf(name, options...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(trace.Tracer) - } - } - - return r0 -} - -// mockTracerProvider_Tracer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Tracer' -type mockTracerProvider_Tracer_Call struct { - *mock.Call -} - -// Tracer is a helper method to define mock.On call -// - name string -// - options ...trace.TracerOption -func (_e *mockTracerProvider_Expecter) Tracer(name interface{}, options ...interface{}) *mockTracerProvider_Tracer_Call { - return &mockTracerProvider_Tracer_Call{Call: _e.mock.On("Tracer", - append([]interface{}{name}, options...)...)} -} - -func (_c *mockTracerProvider_Tracer_Call) Run(run func(name string, options ...trace.TracerOption)) *mockTracerProvider_Tracer_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]trace.TracerOption, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(trace.TracerOption) - } - } - run(args[0].(string), variadicArgs...) 
- }) - return _c -} - -func (_c *mockTracerProvider_Tracer_Call) Return(_a0 trace.Tracer) *mockTracerProvider_Tracer_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *mockTracerProvider_Tracer_Call) RunAndReturn(run func(string, ...trace.TracerOption) trace.Tracer) *mockTracerProvider_Tracer_Call { - _c.Call.Return(run) - return _c -} - -// tracerProvider provides a mock function with no fields -func (_m *mockTracerProvider) tracerProvider() { - _m.Called() -} - -// mockTracerProvider_tracerProvider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'tracerProvider' -type mockTracerProvider_tracerProvider_Call struct { - *mock.Call -} - -// tracerProvider is a helper method to define mock.On call -func (_e *mockTracerProvider_Expecter) tracerProvider() *mockTracerProvider_tracerProvider_Call { - return &mockTracerProvider_tracerProvider_Call{Call: _e.mock.On("tracerProvider")} -} - -func (_c *mockTracerProvider_tracerProvider_Call) Run(run func()) *mockTracerProvider_tracerProvider_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *mockTracerProvider_tracerProvider_Call) Return() *mockTracerProvider_tracerProvider_Call { - _c.Call.Return() - return _c -} - -func (_c *mockTracerProvider_tracerProvider_Call) RunAndReturn(run func()) *mockTracerProvider_tracerProvider_Call { - _c.Run(run) - return _c -} - -// newMockTracerProvider creates a new instance of mockTracerProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func newMockTracerProvider(t interface { - mock.TestingT - Cleanup(func()) -}) *mockTracerProvider { - mock := &mockTracerProvider{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} From dd9d35b0263d8c66e0af52065dfac70d18081267 Mon Sep 17 00:00:00 2001 From: Orestis Floros Date: Tue, 12 Aug 2025 12:28:04 +0200 Subject: [PATCH 12/12] go mod tidy --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 28d72a7f5d..5ed45c2391 100644 --- a/go.mod +++ b/go.mod @@ -534,7 +534,7 @@ require ( go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/metric v1.37.0 + go.opentelemetry.io/otel/metric v1.37.0 // indirect go.opentelemetry.io/otel/sdk v1.37.0 go.opentelemetry.io/otel/trace v1.37.0 go.uber.org/multierr v1.11.0 // indirect