diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index cd6fe7ade112c..f9c09434f61c7 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -118,7 +118,7 @@
 /.gitlab/binary_build/system_probe.yml @DataDog/ebpf-platform @DataDog/agent-delivery
 /.gitlab/binary_build/windows.yml @DataDog/agent-delivery @DataDog/windows-agent
-/.gitlab/benchmarks/ @DataDog/agent-devx-infra @DataDog/apm-reliability-and-performance @DataDog/agent-apm
+/.gitlab/benchmarks/ @DataDog/agent-devx-infra @DataDog/apm-ecosystems-performance @DataDog/agent-apm
 /.gitlab/deploy_containers/ @DataDog/container-integrations @DataDog/agent-delivery
 /.gitlab/deploy_dca/ @DataDog/container-integrations @DataDog/agent-delivery
diff --git a/.github/workflows/assign_issue.yml b/.github/workflows/assign_issue.yml
index f752788cf64c8..0230f9d7c4d6f 100644
--- a/.github/workflows/assign_issue.yml
+++ b/.github/workflows/assign_issue.yml
@@ -26,5 +26,6 @@ jobs:
       - name: Assign issue
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          SLACK_API_TOKEN : ${{ secrets.SLACK_DATADOG_AGENT_BOT_TOKEN }}
         run: |
           inv -e issue.assign-owner -i ${{ github.event.issue.number }}
diff --git a/.github/workflows/report-merged-pr.yml b/.github/workflows/report-merged-pr.yml
index de4cfe0cb01b9..feefb3c5446bd 100644
--- a/.github/workflows/report-merged-pr.yml
+++ b/.github/workflows/report-merged-pr.yml
@@ -4,6 +4,8 @@ name: Report Merged PR

 on:
   pull_request:
+    branches:
+      - main
     types: [closed]

 permissions: {}
diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml
index 092ef165d6231..3f0b2c313389e 100644
--- a/.gitlab/e2e/e2e.yml
+++ b/.gitlab/e2e/e2e.yml
@@ -14,8 +14,16 @@
     - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E profile >> ~/.aws/config || exit $?
     - export AWS_PROFILE=agent-qa-ci  # Now all `aws` commands target the agent-qa profile
-    - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_public_key_rsa > $E2E_PUBLIC_KEY_PATH || exit $?
-    - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_key_rsa > $E2E_PRIVATE_KEY_PATH || exit $?
+    # TODO: ADXT-768: Create new secret with different ssh key for the different cloud providers
+    # SSH Key retrieval for AWS
+    - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_public_key_rsa > $E2E_AWS_PUBLIC_KEY_PATH || exit $?
+    - touch $E2E_AWS_PRIVATE_KEY_PATH && chmod 600 $E2E_AWS_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_key_rsa > $E2E_AWS_PRIVATE_KEY_PATH || exit $?
+    # SSH Key retrieval for Azure
+    - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_public_key_rsa > $E2E_AZURE_PUBLIC_KEY_PATH || exit $?
+    - touch $E2E_AZURE_PRIVATE_KEY_PATH && chmod 600 $E2E_AZURE_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_key_rsa > $E2E_AZURE_PRIVATE_KEY_PATH || exit $?
+    # SSH Key retrieval for GCP
+    - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_public_key_rsa > $E2E_GCP_PUBLIC_KEY_PATH || exit $?
+    - touch $E2E_GCP_PRIVATE_KEY_PATH && chmod 600 $E2E_GCP_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_E2E ssh_key_rsa > $E2E_GCP_PRIVATE_KEY_PATH || exit $?
     # Use S3 backend
     - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE"
     # Setup Azure credentials. https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-pulumi-config
@@ -35,15 +43,23 @@
     KUBERNETES_MEMORY_REQUEST: 12Gi
     KUBERNETES_MEMORY_LIMIT: 16Gi
     KUBERNETES_CPU_REQUEST: 6
-    E2E_PUBLIC_KEY_PATH: /tmp/agent-qa-ssh-key.pub
-    E2E_PRIVATE_KEY_PATH: /tmp/agent-qa-ssh-key
+    # AWS SSH Key configuration
+    E2E_AWS_PUBLIC_KEY_PATH: /tmp/agent-qa-aws-ssh-key.pub
+    E2E_AWS_PRIVATE_KEY_PATH: /tmp/agent-qa-aws-ssh-key
     E2E_KEY_PAIR_NAME: datadog-agent-ci-rsa
+    # Azure SSH Key configuration
+    E2E_AZURE_PUBLIC_KEY_PATH: /tmp/agent-qa-azure-ssh-key.pub
+    E2E_AZURE_PRIVATE_KEY_PATH: /tmp/agent-qa-azure-ssh-key
+    # GCP SSH Key configuration
+    E2E_GCP_PUBLIC_KEY_PATH: /tmp/agent-qa-gcp-ssh-key.pub
+    E2E_GCP_PRIVATE_KEY_PATH: /tmp/agent-qa-gcp-ssh-key
     E2E_PIPELINE_ID: $CI_PIPELINE_ID
     E2E_COMMIT_SHA: $CI_COMMIT_SHORT_SHA
     E2E_OUTPUT_DIR: $CI_PROJECT_DIR/e2e-output
     EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json
+    E2E_LOGS_PROCESSING_TEST_DEPTH: 1
   script:
-    - inv -e new-e2e-tests.run --targets $TARGETS -c ddagent:imagePullRegistry=669783387624.dkr.ecr.us-east-1.amazonaws.com -c ddagent:imagePullUsername=AWS -c ddagent:imagePullPassword=$(aws ecr get-login-password) --junit-tar junit-${CI_JOB_ID}.tgz ${EXTRA_PARAMS} --test-washer
+    - inv -e new-e2e-tests.run --targets $TARGETS -c ddagent:imagePullRegistry=669783387624.dkr.ecr.us-east-1.amazonaws.com -c ddagent:imagePullUsername=AWS -c ddagent:imagePullPassword=$(aws ecr get-login-password) --junit-tar junit-${CI_JOB_ID}.tgz ${EXTRA_PARAMS} --test-washer --logs-folder=$E2E_OUTPUT_DIR/logs --logs-post-processing --logs-post-processing-test-depth=$E2E_LOGS_PROCESSING_TEST_DEPTH
   after_script:
     - $CI_PROJECT_DIR/tools/ci/junit_upload.sh
   artifacts:
@@ -388,6 +404,7 @@ new-e2e-installer:
     TARGETS: ./tests/installer/unix
     TEAM: fleet
     FLEET_INSTALL_METHOD: "install_script"
+    E2E_LOGS_PROCESSING_TEST_DEPTH: 2

 new-e2e-installer-windows:
   extends: .new_e2e_template
diff --git a/.gitlab/e2e_install_packages/common.yml b/.gitlab/e2e_install_packages/common.yml
index 8985150b3ff14..965f5c67ce6f8 100644
--- a/.gitlab/e2e_install_packages/common.yml
+++ b/.gitlab/e2e_install_packages/common.yml
@@ -9,6 +9,7 @@
     TARGETS: ./tests/agent-platform/install-script
     TEAM: agent-delivery
    EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --arch $E2E_ARCH --flavor $FLAVOR --no-verbose
+    E2E_LOGS_PROCESSING_TEST_DEPTH: 2 # We use a single test suite and run all the platforms test as subtest

 .new-e2e_step_by_step:
   stage: e2e_install_packages
@@ -16,6 +17,7 @@
     TARGETS: ./tests/agent-platform/step-by-step
     TEAM: agent-delivery
     EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --cws-supported-osversion $E2E_CWS_SUPPORTED_OSVERS --arch $E2E_ARCH --flavor $FLAVOR
+    E2E_LOGS_PROCESSING_TEST_DEPTH: 2 # We use a single test suite and run all the platforms test as subtest

 .new-e2e_script_upgrade7:
   stage: e2e_install_packages
@@ -23,6 +25,7 @@
     TARGETS: ./tests/agent-platform/upgrade
     TEAM: agent-delivery
     EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH --flavor $FLAVOR
+    E2E_LOGS_PROCESSING_TEST_DEPTH: 2 # We use a single test suite and run all the platforms test as subtest
   parallel:
     matrix:
       - START_MAJOR_VERSION: [5, 6, 7]
@@ -37,6 +40,7 @@
     TARGETS: ./tests/agent-platform/persisting-integrations
     TEAM: agent-delivery
     EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH --flavor $FLAVOR
+    E2E_LOGS_PROCESSING_TEST_DEPTH: 2 # We use a single test suite and run all the platforms test as subtest
   script:
     - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY_ORG2 token) || exit $?; export DATADOG_AGENT_API_KEY
     - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version 7 --test-washer
@@ -47,6 +51,7 @@
     TARGETS: ./tests/agent-platform/rpm
     TEAM: agent-delivery
     EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH
+    E2E_LOGS_PROCESSING_TEST_DEPTH: 2 # We use a single test suite and run all the platforms test as subtest
   script:
     - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY_ORG2 token) || exit $?; export DATADOG_AGENT_API_KEY
     - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --test-washer
diff --git a/.gitlab/kernel_matrix_testing/security_agent.yml b/.gitlab/kernel_matrix_testing/security_agent.yml
index 26e9293844e9e..b7f4b80cc6ed1 100644
--- a/.gitlab/kernel_matrix_testing/security_agent.yml
+++ b/.gitlab/kernel_matrix_testing/security_agent.yml
@@ -147,6 +147,7 @@ kmt_run_secagent_tests_x64:
         - "oracle_9.3"
         - "rocky_8.5"
         - "rocky_9.3"
+        - "rocky_9.4"
        - "opensuse_15.3"
         - "opensuse_15.5"
         - "suse_12.5"
@@ -268,6 +269,7 @@ kmt_run_secagent_tests_x64_docker:
         - "oracle_9.3"
         - "rocky_8.5"
         - "rocky_9.3"
+        - "rocky_9.4"
       TEST_SET: [cws_docker]
   after_script:
     - !reference [.collect_outcomes_kmt]
@@ -301,6 +303,7 @@ kmt_run_secagent_tests_arm64:
         - "oracle_9.3"
         - "rocky_8.5"
         - "rocky_9.3"
+        - "rocky_9.4"
         - "opensuse_15.5"
       TEST_SET: [cws_host]
   after_script:
@@ -395,6 +398,7 @@ kmt_run_secagent_tests_arm64_docker:
         - "oracle_9.3"
         - "rocky_8.5"
         - "rocky_9.3"
+        - "rocky_9.4"
       TEST_SET: ["cws_docker"]
   after_script:
     - !reference [.collect_outcomes_kmt]
@@ -407,35 +411,54 @@ kmt_run_secagent_tests_arm64_docker:
   variables:
     TEST_COMPONENT: security-agent

-kmt_secagent_cleanup_arm64:
-  when: always
+.kmt_secagent_tests_join:
+  stage: kernel_matrix_testing_cleanup
+  rules: !reference [.on_security_agent_changes_or_manual]
+  image: registry.ddbuild.io/ci/datadog-agent-buildimages/system-probe_arm64$DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_SYSPROBE_BUILDIMAGES
+  tags: ["arch:arm64"]
+  script:
+    - echo "nothing to do here"
+
+kmt_secagent_tests_join_arm64:
   extends:
-    - .kmt_secagent_cleanup
+    - .kmt_secagent_tests_join
   needs:
-    - kmt_setup_env_secagent_arm64
     - kmt_run_secagent_tests_arm64
     - kmt_run_secagent_tests_arm64_ad
     - kmt_run_secagent_tests_arm64_ebpfless
     - kmt_run_secagent_tests_arm64_fentry
     - kmt_run_secagent_tests_arm64_docker
-    - upload_dependencies_secagent_arm64
+
+kmt_secagent_cleanup_arm64:
+  when: always
+  extends:
+    - .kmt_secagent_cleanup
+  needs:
+    - kmt_setup_env_secagent_arm64
+    - kmt_secagent_tests_join_arm64
     - upload_secagent_tests_arm64
   variables:
     ARCH: arm64
     INSTANCE_TYPE: "m6gd.metal"

-kmt_secagent_cleanup_x64:
-  when: always
+kmt_secagent_tests_join_x64:
   extends:
-    - .kmt_secagent_cleanup
+    - .kmt_secagent_tests_join
   needs:
-    - kmt_setup_env_secagent_x64
     - kmt_run_secagent_tests_x64
     - kmt_run_secagent_tests_x64_required
     - kmt_run_secagent_tests_x64_ad
     - kmt_run_secagent_tests_x64_ebpfless
     - kmt_run_secagent_tests_x64_fentry
     - kmt_run_secagent_tests_x64_docker
+
+kmt_secagent_cleanup_x64:
+  when: always
+  extends:
+    - .kmt_secagent_cleanup
+  needs:
+    - kmt_setup_env_secagent_x64
+    - kmt_secagent_tests_join_x64
     - upload_dependencies_secagent_x64
     - upload_secagent_tests_x64
   variables:
diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go
index 0b32529c60d36..08bbdcea4c787 100644
--- a/cmd/security-agent/subcommands/runtime/command.go
+++ b/cmd/security-agent/subcommands/runtime/command.go
@@ -472,7 +472,7 @@ func checkPoliciesLocal(args *checkPoliciesCliParams, writer io.Writer) error {
         },
     }

-    provider, err := rules.NewPoliciesDirProvider(args.dir, false)
+    provider, err := rules.NewPoliciesDirProvider(args.dir)
     if err != nil {
         return err
     }
@@ -611,7 +611,7 @@ func evalRule(_ log.Component, _ config.Component, _ secrets.Component, evalArgs
         },
     }

-    provider, err := rules.NewPoliciesDirProvider(policiesDir, false)
+    provider, err := rules.NewPoliciesDirProvider(policiesDir)
     if err != nil {
         return err
     }
diff --git a/cmd/system-probe/modules/eventmonitor.go b/cmd/system-probe/modules/eventmonitor.go
index f74b13bec36af..41707ad513dec 100644
--- a/cmd/system-probe/modules/eventmonitor.go
+++ b/cmd/system-probe/modules/eventmonitor.go
@@ -85,14 +85,9 @@ func createEventMonitorModule(_ *sysconfigtypes.Config, deps module.FactoryDepen
     netconfig := netconfig.New()
     if netconfig.EnableUSMEventStream {
-        procmonconsumer, err := createProcessMonitorConsumer(evm, netconfig)
-        if err != nil {
+        if err := createProcessMonitorConsumer(evm, netconfig); err != nil {
             return nil, err
         }
-        if procmonconsumer != nil {
-            evm.RegisterEventConsumer(procmonconsumer)
-            log.Info("USM process monitoring consumer initialized")
-        }
     }

     gpucfg := gpuconfig.New()
diff --git a/cmd/system-probe/modules/eventmonitor_linux.go b/cmd/system-probe/modules/eventmonitor_linux.go
index 498e3b9feb92d..a3ee2f33666da 100644
--- a/cmd/system-probe/modules/eventmonitor_linux.go
+++ b/cmd/system-probe/modules/eventmonitor_linux.go
@@ -11,11 +11,13 @@ import (
     "github.com/DataDog/datadog-agent/cmd/system-probe/api/module"
     "github.com/DataDog/datadog-agent/cmd/system-probe/config"
     "github.com/DataDog/datadog-agent/pkg/eventmonitor"
+    "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers"
     netconfig "github.com/DataDog/datadog-agent/pkg/network/config"
     usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config"
     usmstate "github.com/DataDog/datadog-agent/pkg/network/usm/state"
-    procmon "github.com/DataDog/datadog-agent/pkg/process/monitor"
+    "github.com/DataDog/datadog-agent/pkg/process/monitor"
     secconfig "github.com/DataDog/datadog-agent/pkg/security/config"
+    "github.com/DataDog/datadog-agent/pkg/util/log"
 )

 // EventMonitor - Event monitor Factory
@@ -28,10 +30,28 @@ var EventMonitor = module.Factory{
     },
 }

-func createProcessMonitorConsumer(evm *eventmonitor.EventMonitor, config *netconfig.Config) (eventmonitor.EventConsumer, error) {
+const (
+    eventMonitorID          = "PROCESS_MONITOR"
+    eventMonitorChannelSize = 500
+)
+
+var (
+    eventTypes = []consumers.ProcessConsumerEventTypes{
+        consumers.ExecEventType,
+        consumers.ExitEventType,
+    }
+)
+
+func createProcessMonitorConsumer(evm *eventmonitor.EventMonitor, config *netconfig.Config) error {
     if !usmconfig.IsUSMSupportedAndEnabled(config) || !usmconfig.NeedProcessMonitor(config) || usmstate.Get() != usmstate.Running {
-        return nil, nil
+        return nil
     }
-    return procmon.NewProcessMonitorEventConsumer(evm)
+    consumer, err := consumers.NewProcessConsumer(eventMonitorID, eventMonitorChannelSize, eventTypes, evm)
+    if err != nil {
+        return err
+    }
+    monitor.InitializeEventConsumer(consumer)
+    log.Info("USM process monitoring consumer initialized")
+    return nil
 }
diff --git a/cmd/system-probe/modules/eventmonitor_windows.go b/cmd/system-probe/modules/eventmonitor_windows.go
index 870c707f8f3e7..7ec627c076d67 100644
--- a/cmd/system-probe/modules/eventmonitor_windows.go
+++ b/cmd/system-probe/modules/eventmonitor_windows.go
@@ -21,8 +21,8 @@ var EventMonitor = module.Factory{
     Fn: createEventMonitorModule,
 }

-func createProcessMonitorConsumer(_ *eventmonitor.EventMonitor, _ *netconfig.Config) (eventmonitor.EventConsumer, error) {
-    return nil, nil
+func createProcessMonitorConsumer(_ *eventmonitor.EventMonitor, _ *netconfig.Config) error {
+    return nil
 }

 func createGPUProcessEventConsumer(_ *eventmonitor.EventMonitor) error {
diff --git a/cmd/system-probe/subcommands/runtime/command.go b/cmd/system-probe/subcommands/runtime/command.go
index e6030500a9b17..e0d17e16d0a2e 100644
--- a/cmd/system-probe/subcommands/runtime/command.go
+++ b/cmd/system-probe/subcommands/runtime/command.go
@@ -466,7 +466,7 @@ func checkPoliciesLocal(args *checkPoliciesCliParams, writer io.Writer) error {
         },
     }

-    provider, err := rules.NewPoliciesDirProvider(args.dir, false)
+    provider, err := rules.NewPoliciesDirProvider(args.dir)
     if err != nil {
         return err
     }
@@ -583,7 +583,7 @@ func evalRule(_ log.Component, _ config.Component, _ secrets.Component, evalArgs
         },
     }

-    provider, err := rules.NewPoliciesDirProvider(policiesDir, false)
+    provider, err := rules.NewPoliciesDirProvider(policiesDir)
     if err != nil {
         return err
     }
diff --git a/comp/api/authtoken/go.mod b/comp/api/authtoken/go.mod
index 532d1be88a5c4..233f326a297d1 100644
--- a/comp/api/authtoken/go.mod
+++ b/comp/api/authtoken/go.mod
@@ -68,13 +68,13 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/log/setup v0.58.0-devel // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.56.0 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
diff --git a/comp/core/config/go.mod b/comp/core/config/go.mod
index 2973aa62895a7..bed15530c21b9 100644
--- a/comp/core/config/go.mod
+++ b/comp/core/config/go.mod
@@ -42,7 +42,7 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.0.0-00010101000000-000000000000
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1
    github.com/DataDog/viper v1.13.5
    github.com/stretchr/testify v1.10.0
    go.uber.org/fx v1.23.0
@@ -60,9 +60,9 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
diff --git a/comp/core/log/impl-systemprobe/systemprobe_logger.go b/comp/core/log/impl-systemprobe/systemprobe_logger.go
index 8457442cc1c93..5cbf8dc21e5de 100644
--- a/comp/core/log/impl-systemprobe/systemprobe_logger.go
+++ b/comp/core/log/impl-systemprobe/systemprobe_logger.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-// Package logimpl implements a component to handle logging internal to the agent for system-probe.
-package logimpl
+// Package systemprobeimpl implements a component to handle logging internal to the agent for system-probe.
+package systemprobeimpl

 import (
     "context"
diff --git a/comp/core/log/impl-systemprobe/systemprobe_logger_test.go b/comp/core/log/impl-systemprobe/systemprobe_logger_test.go
index e6a2c6682a593..3ad4e5dad9198 100644
--- a/comp/core/log/impl-systemprobe/systemprobe_logger_test.go
+++ b/comp/core/log/impl-systemprobe/systemprobe_logger_test.go
@@ -3,7 +3,7 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-package logimpl
+package systemprobeimpl

 import (
     "testing"
diff --git a/comp/core/log/impl-trace/go.mod b/comp/core/log/impl-trace/go.mod
index 46dbc7aba09c2..2ee9571d6657a 100644
--- a/comp/core/log/impl-trace/go.mod
+++ b/comp/core/log/impl-trace/go.mod
@@ -45,7 +45,7 @@ require (
    github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect; v2.6
    github.com/stretchr/testify v1.10.0
    go.uber.org/fx v1.23.0 // indirect
@@ -72,10 +72,10 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
diff --git a/comp/core/log/impl-trace/trace_logger.go b/comp/core/log/impl-trace/trace_logger.go
index 71f4f32fdbf45..cbbc8fe5d8a0d 100644
--- a/comp/core/log/impl-trace/trace_logger.go
+++ b/comp/core/log/impl-trace/trace_logger.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-// Package logimpl provides a component that implements the log.Component for the trace-agent logger
-package logimpl
+// Package traceimpl provides a component that implements the log.Component for the trace-agent logger
+package traceimpl

 import (
     "context"
diff --git a/comp/core/log/impl-trace/trace_logger_test.go b/comp/core/log/impl-trace/trace_logger_test.go
index 8e074becb0772..83c5a18d14aa2 100644
--- a/comp/core/log/impl-trace/trace_logger_test.go
+++ b/comp/core/log/impl-trace/trace_logger_test.go
@@ -3,7 +3,7 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-package logimpl
+package traceimpl

 import (
     "testing"
diff --git a/comp/core/log/impl/go.mod b/comp/core/log/impl/go.mod
index 116cf25e6c584..9d9c721ce2ba8 100644
--- a/comp/core/log/impl/go.mod
+++ b/comp/core/log/impl/go.mod
@@ -39,7 +39,7 @@ require (
    github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000
    github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1
    github.com/DataDog/datadog-agent/pkg/util/log/setup v0.0.0-00010101000000-000000000000
    github.com/stretchr/testify v1.10.0
 )
@@ -61,10 +61,10 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/comp/core/log/mock/go.mod b/comp/core/log/mock/go.mod
index 96c709d9f2f32..f8515313b702d 100644
--- a/comp/core/log/mock/go.mod
+++ b/comp/core/log/mock/go.mod
@@ -31,7 +31,7 @@ replace (

 require (
    github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1
    github.com/DataDog/datadog-agent/pkg/util/log/setup v0.0.0-00010101000000-000000000000
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575
 )
@@ -40,7 +40,7 @@ require (
    github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect
    github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/fsnotify/fsnotify v1.8.0 // indirect
    github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
diff --git a/comp/core/status/statusimpl/go.mod b/comp/core/status/statusimpl/go.mod
index bc13e9cf4a943..600683ed2cceb 100644
--- a/comp/core/status/statusimpl/go.mod
+++ b/comp/core/status/statusimpl/go.mod
@@ -70,14 +70,14 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/comp/core/tagger/impl-remote/remote.go b/comp/core/tagger/impl-remote/remote.go
index c5a1fb6dad36c..b1a77165f18a9 100644
--- a/comp/core/tagger/impl-remote/remote.go
+++ b/comp/core/tagger/impl-remote/remote.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-// Package remotetaggerimpl implements a remote Tagger.
-package remotetaggerimpl
+// Package remoteimpl implements a remote Tagger.
+package remoteimpl

 import (
     "context"
diff --git a/comp/core/tagger/impl-remote/remote_test.go b/comp/core/tagger/impl-remote/remote_test.go
index 48c00d8006bf7..c19b8f5fe1336 100644
--- a/comp/core/tagger/impl-remote/remote_test.go
+++ b/comp/core/tagger/impl-remote/remote_test.go
@@ -3,7 +3,7 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-package remotetaggerimpl
+package remoteimpl

 import (
     "context"
diff --git a/comp/core/tagger/impl-remote/tagstore.go b/comp/core/tagger/impl-remote/tagstore.go
index de354cf070ad6..11f189ae6da90 100644
--- a/comp/core/tagger/impl-remote/tagstore.go
+++ b/comp/core/tagger/impl-remote/tagstore.go
@@ -3,7 +3,7 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-package remotetaggerimpl
+package remoteimpl

 import (
     "sync"
diff --git a/comp/core/tagger/impl-remote/tagstore_test.go b/comp/core/tagger/impl-remote/tagstore_test.go
index 8cd92107d56cc..ce2a226ed5dd8 100644
--- a/comp/core/tagger/impl-remote/tagstore_test.go
+++ b/comp/core/tagger/impl-remote/tagstore_test.go
@@ -3,7 +3,7 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.
-package remotetaggerimpl
+package remoteimpl

 import (
     "testing"
diff --git a/comp/dogstatsd/replay/impl-noop/noop.go b/comp/dogstatsd/replay/impl-noop/noop.go
index 100d295a8bd0b..06e4985e0ace0 100644
--- a/comp/dogstatsd/replay/impl-noop/noop.go
+++ b/comp/dogstatsd/replay/impl-noop/noop.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-2021 Datadog, Inc.

-// Package replayimpl implements a no-op version of the component
-package replayimpl
+// Package noopimpl implements a no-op version of the component
+package noopimpl

 import (
     "sync"
diff --git a/comp/dogstatsd/server/server_util_test.go b/comp/dogstatsd/server/server_util_test.go
index 33355d4cbc982..f2c542f45c0e0 100644
--- a/comp/dogstatsd/server/server_util_test.go
+++ b/comp/dogstatsd/server/server_util_test.go
@@ -8,14 +8,12 @@ package server

 import (
-    "runtime"
     "testing"

     "github.com/stretchr/testify/assert"

     "github.com/DataDog/datadog-agent/pkg/metrics/event"
     "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck"
-    "github.com/DataDog/datadog-agent/pkg/util/testutil/flake"

     "go.uber.org/fx"

@@ -77,10 +75,6 @@ func fulfillDeps(t testing.TB) serverDeps {
 }

 func fulfillDepsWithConfigOverride(t testing.TB, overrides map[string]interface{}) serverDeps {
-    // TODO: https://datadoghq.atlassian.net/browse/AMLII-1948
-    if runtime.GOOS == "darwin" {
-        flake.Mark(t)
-    }
     return fxutil.Test[serverDeps](t, fx.Options(
         core.MockBundle(),
         serverdebugimpl.MockModule(),
diff --git a/comp/forwarder/defaultforwarder/go.mod b/comp/forwarder/defaultforwarder/go.mod
index a6e6b5f2297da..55d901ebce30c 100644
--- a/comp/forwarder/defaultforwarder/go.mod
+++ b/comp/forwarder/defaultforwarder/go.mod
@@ -71,7 +71,7 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1
    github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1
    github.com/DataDog/datadog-agent/pkg/version v0.57.1
    github.com/golang/protobuf v1.5.4
    github.com/hashicorp/go-multierror v1.1.1
@@ -94,12 +94,12 @@ require (
    github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect
    github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.mod b/comp/forwarder/orchestrator/orchestratorinterface/go.mod
index d5f8d766d32f6..63ef4e9d8c4a8 100644
--- a/comp/forwarder/orchestrator/orchestratorinterface/go.mod
+++ b/comp/forwarder/orchestrator/orchestratorinterface/go.mod
@@ -94,13 +94,13 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.57.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
diff --git a/comp/logs/agent/agentimpl/agent_core_init.go b/comp/logs/agent/agentimpl/agent_core_init.go
index 37d5c029cf0f6..fae9804bae9b8 100644
--- a/comp/logs/agent/agentimpl/agent_core_init.go
+++ b/comp/logs/agent/agentimpl/agent_core_init.go
@@ -46,7 +46,7 @@ func (a *logAgent) SetupPipeline(processingRules []*config.ProcessingRule, wmeta
     diagnosticMessageReceiver := diagnostic.NewBufferedMessageReceiver(nil, a.hostname)

     // setup the pipeline provider that provides pairs of processor and sender
-    pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config)
+    pipelineProvider := pipeline.NewProvider(a.config.GetInt("logs_config.pipelines"), auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config)

     // setup the launchers
     lnchrs := launchers.NewLaunchers(a.sources, pipelineProvider, auditor, a.tracker)
diff --git a/comp/logs/agent/agentimpl/agent_serverless_init.go b/comp/logs/agent/agentimpl/agent_serverless_init.go
index 31dbf3e41d2dc..67711def14029 100644
--- a/comp/logs/agent/agentimpl/agent_serverless_init.go
+++ b/comp/logs/agent/agentimpl/agent_serverless_init.go
@@ -49,7 +49,7 @@ func (a *logAgent) SetupPipeline(
     destinationsCtx := client.NewDestinationsContext()

     // setup the pipeline provider that provides pairs of processor and sender
-    pipelineProvider := pipeline.NewServerlessProvider(config.NumberOfPipelines, a.auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config)
+    pipelineProvider := pipeline.NewServerlessProvider(a.config.GetInt("logs_config.pipelines"), a.auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config)

     lnchrs := launchers.NewLaunchers(a.sources, pipelineProvider, a.auditor, a.tracker)
     lnchrs.AddLauncher(channel.NewLauncher())
diff --git a/comp/logs/agent/config/constants.go b/comp/logs/agent/config/constants.go
index ae9a0d74680f0..a66b989366370 100644
--- a/comp/logs/agent/config/constants.go
+++ b/comp/logs/agent/config/constants.go
@@ -5,11 +5,6 @@

 package config

-// Pipeline constraints
-const (
-    NumberOfPipelines = 4
-)
-
 const (
     // DateFormat is the default date format.
     DateFormat = "2006-01-02T15:04:05.000000000Z"
diff --git a/comp/logs/agent/config/go.mod b/comp/logs/agent/config/go.mod
index 91137e7bcc76c..733a8bf8b17c7 100644
--- a/comp/logs/agent/config/go.mod
+++ b/comp/logs/agent/config/go.mod
@@ -43,7 +43,7 @@ require (
    github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0
    github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0
    github.com/DataDog/viper v1.13.5
    github.com/stretchr/testify v1.10.0
@@ -64,10 +64,10 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler.go b/comp/otelcol/collector/impl-pipeline/flare_filler.go
index 97378486d1a22..8c170152f98ca 100644
--- a/comp/otelcol/collector/impl-pipeline/flare_filler.go
+++ b/comp/otelcol/collector/impl-pipeline/flare_filler.go
@@ -5,8 +5,8 @@

 //go:build otlp

-// Package collectorimpl implements the collector component
-package collectorimpl
+// Package pipelineimpl implements the collector component
+package pipelineimpl

 import (
     "encoding/json"
diff --git a/comp/otelcol/collector/impl-pipeline/flare_filler_test.go b/comp/otelcol/collector/impl-pipeline/flare_filler_test.go
index ed98632b0f171..93075929746d4 100644
--- a/comp/otelcol/collector/impl-pipeline/flare_filler_test.go
+++ b/comp/otelcol/collector/impl-pipeline/flare_filler_test.go
@@ -6,8 +6,8 @@

 //go:build test && otlp
 // +build test,otlp

-// Package collectorimpl implements the collector component
-package collectorimpl
+// Package pipelineimpl implements the collector component
+package pipelineimpl

 import (
     "bytes"
diff --git a/comp/otelcol/collector/impl-pipeline/no_otlp.go b/comp/otelcol/collector/impl-pipeline/no_otlp.go
index c1e5ae7c03789..60f1295ee33aa 100644
--- a/comp/otelcol/collector/impl-pipeline/no_otlp.go
+++ b/comp/otelcol/collector/impl-pipeline/no_otlp.go
@@ -5,8 +5,8 @@

 //go:build !otlp

-// Package collectorimpl contains a no-op implementation of the collector
-package collectorimpl
+// Package pipelineimpl contains a no-op implementation of the collector
+package pipelineimpl

 import (
     collector "github.com/DataDog/datadog-agent/comp/otelcol/collector/def"
diff --git a/comp/otelcol/collector/impl-pipeline/pipeline.go b/comp/otelcol/collector/impl-pipeline/pipeline.go
index ab46ab875bcff..34cd92d133ed8 100644
--- a/comp/otelcol/collector/impl-pipeline/pipeline.go
+++ b/comp/otelcol/collector/impl-pipeline/pipeline.go
@@ -5,8 +5,8 @@

 //go:build otlp

-// Package collectorimpl implements the collector component
-package collectorimpl
+// Package pipelineimpl implements the collector component
+package pipelineimpl

 import (
     "context"
diff --git a/comp/otelcol/collector/impl-pipeline/status.go b/comp/otelcol/collector/impl-pipeline/status.go
index e3c0b01791bc1..a94bb76a0e5dd 100644
--- a/comp/otelcol/collector/impl-pipeline/status.go
+++ b/comp/otelcol/collector/impl-pipeline/status.go
@@ -5,7 +5,7 @@

 //go:build otlp

-package collectorimpl
+package pipelineimpl

 import (
     "embed"
diff --git a/comp/otelcol/collector/impl-pipeline/status_test.go b/comp/otelcol/collector/impl-pipeline/status_test.go
index 0e9b6a82e364b..8ae735534ed69 100644
--- a/comp/otelcol/collector/impl-pipeline/status_test.go
+++ b/comp/otelcol/collector/impl-pipeline/status_test.go
@@ -5,7 +5,7 @@

 //go:build otlp && test

-package collectorimpl
+package pipelineimpl

 import (
     "bytes"
diff --git a/comp/otelcol/converter/impl/go.mod b/comp/otelcol/converter/impl/go.mod
index 3e8cb6163b3e7..0910f004f852c 100644
--- a/comp/otelcol/converter/impl/go.mod
+++ b/comp/otelcol/converter/impl/go.mod
@@ -69,13 +69,13 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.2 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/comp/otelcol/ddflareextension/impl/go.mod b/comp/otelcol/ddflareextension/impl/go.mod
index f95f7661bc201..d25ace6070271 100644
--- a/comp/otelcol/ddflareextension/impl/go.mod
+++ b/comp/otelcol/ddflareextension/impl/go.mod
@@ -251,17 +251,17 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/http v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/log/setup v0.58.0-devel // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/startstop v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/statstracker v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-api-client-go/v2 v2.31.0 // indirect
    github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
    github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
diff --git a/comp/otelcol/logsagentpipeline/go.mod b/comp/otelcol/logsagentpipeline/go.mod
index 0b8f861795c6c..40a8e60d9efba 100644
--- a/comp/otelcol/logsagentpipeline/go.mod
+++ b/comp/otelcol/logsagentpipeline/go.mod
@@ -96,15 +96,15 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
    github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go
index d1910d28db034..b014d5b31689e 100644
--- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go
+++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go
@@ -210,7 +210,7 @@ func (a *Agent) SetupPipeline(
     destinationsCtx := client.NewDestinationsContext()

     // setup the pipeline provider that provides pairs of processor and sender
-    pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config)
+    pipelineProvider := pipeline.NewProvider(a.config.GetInt("logs_config.pipelines"), auditor, &diagnostic.NoopMessageReceiver{}, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config)

     a.auditor = auditor
     a.destinationsCtx = destinationsCtx
diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod
index ca80a71894cb3..25af1fd191eb1 100644
--- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod
+++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod
@@ -112,14 +112,14 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
    github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
index 8487b71cd8044..cab50f84895bc 100644
--- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
+++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod
@@ -186,16 +186,16 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.57.1 // indirect
    github.com/DataDog/datadog-api-client-go/v2 v2.31.0 // indirect
    github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod
index f8e3037b5fe5e..0e14870fd4f2a 100644
--- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod
+++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod
@@ -47,7 +47,7 @@ require (
    github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1
    github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0
    github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0
    github.com/stormcat24/protodep v0.1.8
@@ -82,13 +82,13 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-api-client-go/v2 v2.31.0 // indirect
    github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 // indirect
diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod
index 3986342521942..4ae9f643df3cf 100644
--- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod
+++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod
@@ -68,7 +68,7 @@ require (
    github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1
    github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0
    github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0
    github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0
@@ -129,11 +129,11 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/version v0.57.1 // indirect
    github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect
    github.com/DataDog/sketches-go v1.4.6 // indirect
diff --git a/comp/otelcol/otlp/testutil/go.mod b/comp/otelcol/otlp/testutil/go.mod
index e8f0c3eed3af3..e606714cf39f7 100644
--- a/comp/otelcol/otlp/testutil/go.mod
+++ b/comp/otelcol/otlp/testutil/go.mod
@@ -55,13 +55,13 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/comp/rdnsquerier/impl-none/none.go b/comp/rdnsquerier/impl-none/none.go
index 0b7f1022f1eb9..014306826c989 100644
--- a/comp/rdnsquerier/impl-none/none.go
+++ b/comp/rdnsquerier/impl-none/none.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2024-present Datadog, Inc.

-// Package rdnsquerierimpl provides the noop rdnsquerier component
-package rdnsquerierimpl
+// Package noneimpl provides the noop rdnsquerier component
+package noneimpl

 import (
     "context"
diff --git a/comp/serializer/compression/go.mod b/comp/serializer/compression/go.mod
index 9e8574fe77f13..7575544de493a 100644
--- a/comp/serializer/compression/go.mod
+++ b/comp/serializer/compression/go.mod
@@ -36,7 +36,7 @@ replace (

 require (
    github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3
    github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3
-   github.com/DataDog/datadog-agent/pkg/util/log v0.59.0
+   github.com/DataDog/datadog-agent/pkg/util/log v0.59.1
    github.com/DataDog/zstd v1.5.6
 )
@@ -58,10 +58,10 @@ require (
    github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect
    github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
-   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect
+   github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect
    github.com/DataDog/viper v1.13.5 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
diff --git a/comp/serializer/compression/impl-noop/no_strategy.go b/comp/serializer/compression/impl-noop/no_strategy.go
index 45523952b4ca2..3c092a8a536d4 100644
--- a/comp/serializer/compression/impl-noop/no_strategy.go
+++ b/comp/serializer/compression/impl-noop/no_strategy.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-// Package compressionimpl provides a set of functions for compressing with zlib / zstd
-package compressionimpl
+// Package noopimpl provides a set of functions for compressing with zlib / zstd
+package noopimpl

 import (
     "bytes"
diff --git a/comp/serializer/compression/impl-zlib/zlib_strategy.go b/comp/serializer/compression/impl-zlib/zlib_strategy.go
index 2bbe1a99d1a09..0bd47af4f91fc 100644
--- a/comp/serializer/compression/impl-zlib/zlib_strategy.go
+++ b/comp/serializer/compression/impl-zlib/zlib_strategy.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-// Package compressionimpl provides a set of functions for compressing with zlib
-package compressionimpl
+// Package zlibimpl provides a set of functions for compressing with zlib
+package zlibimpl

 import (
     "bytes"
diff --git a/comp/serializer/compression/impl-zstd/zstd_strategy.go b/comp/serializer/compression/impl-zstd/zstd_strategy.go
index 994135a0889cd..3b90737bbfce8 100644
--- a/comp/serializer/compression/impl-zstd/zstd_strategy.go
+++ b/comp/serializer/compression/impl-zstd/zstd_strategy.go
@@ -3,14 +3,15 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2016-present Datadog, Inc.

-// Package compressionimpl provides a set of functions for compressing with zstd
-package compressionimpl
+// Package zstdimpl provides a set of functions for compressing with zstd
+package zstdimpl

 import (
     "bytes"

-    compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def"
     "github.com/DataDog/zstd"
+
+    compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def"
 )

 // Requires contains the compression level for zstd compression
diff --git a/comp/trace/compression/impl-gzip/gzip.go b/comp/trace/compression/impl-gzip/gzip.go
index eada290dae873..4a7943bde1f03 100644
--- a/comp/trace/compression/impl-gzip/gzip.go
+++ b/comp/trace/compression/impl-gzip/gzip.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2024-present Datadog, Inc.

-// Package compressionimpl implements the compression component interface
-package compressionimpl
+// Package gzipimpl implements the compression component interface
+package gzipimpl

 import (
     "compress/gzip"
diff --git a/comp/trace/compression/impl-zstd/zstd.go b/comp/trace/compression/impl-zstd/zstd.go
index d418d4362f8b2..fc43c47cb0886 100644
--- a/comp/trace/compression/impl-zstd/zstd.go
+++ b/comp/trace/compression/impl-zstd/zstd.go
@@ -3,8 +3,8 @@
 // This product includes software developed at Datadog (https://www.datadoghq.com/).
 // Copyright 2024-present Datadog, Inc.

-// Package compressionimpl implements the compression component interface
-package compressionimpl
+// Package zstdimpl implements the compression component interface
+package zstdimpl

 import (
     "io"
diff --git a/docs/public/components/creating-components.md b/docs/public/components/creating-components.md
index aa54f63cb3590..2fe844b7175d4 100644
--- a/docs/public/components/creating-components.md
+++ b/docs/public/components/creating-components.md
@@ -114,11 +114,11 @@ internally by each component (more on this [here TODO]()). -->

 ### The impl folders

 The `impl` folder is where the component implementation is written. The details of component implementation are up to the developer.
-The only requirements are that the package name follows the pattern `impl` and that there is a public instantiation function called `NewComponent`. +The only requirements are that the package name follows the pattern `<component>impl` for the regular implementation or `<variant>impl` for the alternative implementation, and that there is a public instantiation function called `NewComponent`. === ":octicons-file-code-16: comp/compression/impl-zstd/compressor.go" ```go - package compressionimpl + package zstdimpl // NewComponent returns a new ZSTD implementation for the compression component func NewComponent(reqs Requires) Provides { @@ -134,7 +134,7 @@ In this example, the compression component must access the configuration compone === ":octicons-file-code-16: comp/compression/impl-zstd/compressor.go" ```go - package compressionimpl + package zstdimpl import ( "fmt" @@ -160,7 +160,7 @@ In this example, the compression component must access the configuration compone For the output of the component, populate the `Provides` struct with the return values. === ":octicons-file-code-16: comp/compression/impl-zstd/compressor.go" ```go - package compressionimpl + package zstdimpl import ( // Always import the component def folder, so that you can return a 'compression.Component' type. @@ -180,7 +180,7 @@ All together, the component code looks like the following: === ":octicons-file-code-16: comp/compression/impl-zstd/compressor.go" ```go - package compressionimpl + package zstdimpl import ( "fmt" diff --git a/flakes.yaml b/flakes.yaml index ab2af6b55f3ea..49c403b802517 100644 --- a/flakes.yaml +++ b/flakes.yaml @@ -11,3 +11,5 @@ test/new-e2e/tests/containers: - TestECSSuite/TestCPU/metric___container.cpu.usage{^ecs_container_name:stress-ng$} - TestEKSSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - TestKindSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} + - TestKindSuite/TestAdmissionControllerWithAutoDetectedLanguage + - TestKindSuite diff --git a/go.mod b/go.mod index 8a950dd93772f..fd42d1719f1f8 100644 --- a/go.mod +++ b/go.mod @@ -154,9 +154,9 @@ require ( github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0 github.com/DataDog/datadog-agent/pkg/trace v0.59.0 github.com/DataDog/datadog-agent/pkg/util/cgroups v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 github.com/DataDog/datadog-go/v5 v5.5.0 github.com/DataDog/datadog-operator v0.7.1-0.20241024104907-734366f3c0d1 github.com/DataDog/ebpf-manager v0.7.4 @@ -729,7 +729,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 github.com/DataDog/datadog-agent/pkg/util/testutil v0.59.0 github.com/DataDog/datadog-agent/pkg/util/uuid v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 github.com/DataDog/datadog-agent/pkg/version v0.59.0 github.com/DataDog/go-libddwaf/v3 v3.5.1 github.com/DataDog/go-sqllexer v0.0.17 diff --git a/omnibus/config/projects/agent.rb b/omnibus/config/projects/agent.rb index b3ab9c10ae1ad..51bacc89a45d0 100644 --- a/omnibus/config/projects/agent.rb +++ b/omnibus/config/projects/agent.rb @@ -221,9 +221,6 @@ # ------------------------------------ if do_build - # Include traps db file
in snmp.d/traps_db/ - dependency 'snmp-traps' - # Datadog agent dependency 'datadog-agent' diff --git a/omnibus/config/software/datadog-agent-dependencies.rb b/omnibus/config/software/datadog-agent-dependencies.rb index fd6712983b10b..fc34796e2fa4b 100644 --- a/omnibus/config/software/datadog-agent-dependencies.rb +++ b/omnibus/config/software/datadog-agent-dependencies.rb @@ -33,6 +33,9 @@ dependency 'libpcap' if linux_target? and !heroku_target? # system-probe dependency +# Include traps db file in snmp.d/traps_db/ +dependency 'snmp-traps' + # Additional software if windows_target? if ENV['WINDOWS_DDNPM_DRIVER'] and not ENV['WINDOWS_DDNPM_DRIVER'].empty? diff --git a/omnibus/config/software/datadog-agent.rb b/omnibus/config/software/datadog-agent.rb index 8f24178d2c07e..a05e3b71bd877 100644 --- a/omnibus/config/software/datadog-agent.rb +++ b/omnibus/config/software/datadog-agent.rb @@ -120,7 +120,8 @@ # move around bin and config files move 'bin/agent/dist/datadog.yaml', "#{conf_dir}/datadog.yaml.example" - move 'bin/agent/dist/conf.d', "#{conf_dir}/" + copy 'bin/agent/dist/conf.d/.', "#{conf_dir}" + delete 'bin/agent/dist/conf.d' unless windows_target? copy 'bin/agent', "#{install_dir}/bin/" diff --git a/omnibus/config/software/snmp-traps.rb b/omnibus/config/software/snmp-traps.rb index 5a021b01ed8d4..bb13fc542fd3b 100644 --- a/omnibus/config/software/snmp-traps.rb +++ b/omnibus/config/software/snmp-traps.rb @@ -1,9 +1,6 @@ name "snmp-traps" default_version "0.4.0" -# Needs the configuration folder as created in datadog-agent -dependency 'datadog-agent' - source :url => "https://s3.amazonaws.com/dd-agent-omnibus/snmp_traps_db/dd_traps_db-#{version}.json.gz", :sha256 => "04fb9d43754c2656edf35f08fbad11ba8dc20d52654962933f3dd8f4d463b42c", :target_filename => "dd_traps_db.json.gz" diff --git a/pkg/api/go.mod b/pkg/api/go.mod index b30cc5d081999..28a2032095417 100644 --- a/pkg/api/go.mod +++ b/pkg/api/go.mod @@ -43,7 +43,7 @@ require ( github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 github.com/stretchr/testify v1.10.0 ) @@ -64,9 +64,9 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/collector/corechecks/servicediscovery/module/comm.go b/pkg/collector/corechecks/servicediscovery/module/comm.go index 7b513080aef18..675eac0d2f246 100644 --- a/pkg/collector/corechecks/servicediscovery/module/comm.go +++ b/pkg/collector/corechecks/servicediscovery/module/comm.go @@ -13,8 +13,6 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/process" - 
"github.com/DataDog/datadog-agent/pkg/util/kernel" ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) @@ -41,11 +39,11 @@ var ( ) // shouldIgnoreComm returns true if process should be ignored -func (s *discovery) shouldIgnoreComm(proc *process.Process) bool { +func (s *discovery) shouldIgnoreComm(pid int32) bool { if s.config.ignoreComms == nil { return false } - commPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "comm") + commPath := kernel.HostProc(strconv.Itoa(int(pid)), "comm") file, err := os.Open(commPath) if err != nil { return true diff --git a/pkg/collector/corechecks/servicediscovery/module/comm_test.go b/pkg/collector/corechecks/servicediscovery/module/comm_test.go index 1bc296913ecf3..23568d19ecd2c 100644 --- a/pkg/collector/corechecks/servicediscovery/module/comm_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/comm_test.go @@ -137,16 +137,10 @@ func TestShouldIgnoreComm(t *testing.T) { _ = cmd.Process.Kill() }) - var proc *process.Process require.EventuallyWithT(t, func(collect *assert.CollectT) { - proc, err = customNewProcess(int32(cmd.Process.Pid)) - assert.NoError(collect, err) - }, 2*time.Second, 100*time.Millisecond) - - require.EventuallyWithT(t, func(collect *assert.CollectT) { - ignore := discovery.shouldIgnoreComm(proc) + ignore := discovery.shouldIgnoreComm(int32(cmd.Process.Pid)) assert.Equal(collect, test.ignore, ignore) - }, 500*time.Millisecond, 100*time.Millisecond) + }, 2*time.Second, 100*time.Millisecond) }) } } @@ -193,9 +187,8 @@ func BenchmarkProcName(b *testing.B) { for i := 0; i < b.N; i++ { // create a new process on each iteration to eliminate name caching from the calculation - proc, err := customNewProcess(int32(cmd.Process.Pid)) - if err != nil { - b.Fatal(err) + proc := &process.Process{ + Pid: int32(cmd.Process.Pid), } comm, err := proc.Name() if err != nil { @@ -216,11 +209,7 @@ func BenchmarkShouldIgnoreComm(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - proc, err := customNewProcess(int32(cmd.Process.Pid)) - if err != nil { - b.Fatal(err) - } - ok := discovery.shouldIgnoreComm(proc) + ok := discovery.shouldIgnoreComm(int32(cmd.Process.Pid)) if ok { b.Fatalf("process should not have been ignored") } diff --git a/pkg/collector/corechecks/servicediscovery/module/envs_test.go b/pkg/collector/corechecks/servicediscovery/module/envs_test.go index 31cb3c8816fe7..3d2987797ff0a 100644 --- a/pkg/collector/corechecks/servicediscovery/module/envs_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/envs_test.go @@ -151,15 +151,12 @@ func TestTargetEnvs(t *testing.T) { // BenchmarkGetEnvs benchmarks reading of all environment variables from /proc//environ. func BenchmarkGetEnvs(b *testing.B) { - proc, err := customNewProcess(int32(os.Getpid())) - if err != nil { - return - } + proc := &process.Process{Pid: int32(os.Getpid())} + b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - _, err = getEnvs(proc) - if err != nil { + if _, err := getEnvs(proc); err != nil { return } } @@ -167,16 +164,12 @@ func BenchmarkGetEnvs(b *testing.B) { // BenchmarkGetEnvsTarget benchmarks reading of target environment variables only from /proc//environ. 
func BenchmarkGetEnvsTarget(b *testing.B) { - proc, err := customNewProcess(int32(os.Getpid())) - if err != nil { - return - } + proc := &process.Process{Pid: int32(os.Getpid())} b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - _, err = getTargetEnvs(proc) - if err != nil { + if _, err := getTargetEnvs(proc); err != nil { return } } diff --git a/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go b/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go index 90d0dc9d8dcb3..317455f84fbbe 100644 --- a/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/ignore_proc_test.go @@ -69,12 +69,9 @@ func TestShouldIgnorePid(t *testing.T) { discovery := newDiscovery(nil) require.NotEmpty(t, discovery) - proc, err := customNewProcess(int32(cmd.Process.Pid)) - require.NoError(t, err) - require.EventuallyWithT(t, func(collect *assert.CollectT) { // wait until the service name becomes available - info, err := discovery.getServiceInfo(proc) + info, err := discovery.getServiceInfo(int32(cmd.Process.Pid)) assert.NoError(collect, err) assert.Equal(collect, test.service, info.ddServiceName) }, 3*time.Second, 100*time.Millisecond) @@ -92,7 +89,7 @@ func TestShouldIgnorePid(t *testing.T) { } // check saved pid to ignore - ignore := discovery.shouldIgnorePid(proc.Pid) + ignore := discovery.shouldIgnorePid(int32(cmd.Process.Pid)) require.Equal(t, test.ignore, ignore) }) } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index 8c2b276d8f56b..c70f64efeae37 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -383,7 +383,11 @@ func (s *discovery) cleanIgnoredPids(alivePids map[int32]struct{}) { // getServiceInfo gets the service information for a process using the // servicedetector module. -func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) { +func (s *discovery) getServiceInfo(pid int32) (*serviceInfo, error) { + proc := &process.Process{ + Pid: pid, + } + cmdline, err := proc.CmdlineSlice() if err != nil { return nil, err @@ -432,39 +436,17 @@ func (s *discovery) getServiceInfo(proc *process.Process) (*serviceInfo, error) }, nil } -// customNewProcess is the same implementation as process.NewProcess but without calling CreateTimeWithContext, which -// is not needed and costly for the discovery module. -func customNewProcess(pid int32) (*process.Process, error) { - p := &process.Process{ - Pid: pid, - } - - exists, err := process.PidExists(pid) - if err != nil { - return p, err - } - if !exists { - return p, process.ErrorProcessNotRunning - } - return p, nil -} - // maxNumberOfPorts is the maximum number of listening ports which we report per // service. const maxNumberOfPorts = 50 // getService gets information for a single service. 
func (s *discovery) getService(context parsingContext, pid int32) *model.Service { - proc, err := customNewProcess(pid) - if err != nil { - return nil - } - - if s.shouldIgnorePid(proc.Pid) { + if s.shouldIgnorePid(pid) { return nil } - if s.shouldIgnoreComm(proc) { - s.addIgnoredPid(proc.Pid) + if s.shouldIgnoreComm(pid) { + s.addIgnoredPid(pid) return nil } @@ -523,7 +505,7 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service ports = ports[:maxNumberOfPorts] } - rss, err := getRSS(proc) + rss, err := getRSS(pid) if err != nil { return nil } @@ -535,7 +517,7 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service if ok { info = cached } else { - info, err = s.getServiceInfo(proc) + info, err = s.getServiceInfo(pid) if err != nil { return nil } @@ -550,7 +532,7 @@ func (s *discovery) getService(context parsingContext, pid int32) *model.Service name = info.generatedName } if s.shouldIgnoreService(name) { - s.addIgnoredPid(proc.Pid) + s.addIgnoredPid(pid) return nil } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index e584b9816c474..6970f36651c8f 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -1025,22 +1025,6 @@ func TestTagsPriority(t *testing.T) { } } -func BenchmarkOldProcess(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - process.NewProcess(int32(os.Getpid())) - } -} - -func BenchmarkNewProcess(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - customNewProcess(int32(os.Getpid())) - } -} - func getSocketsOld(p *process.Process) ([]uint64, error) { FDs, err := p.OpenFiles() if err != nil { diff --git a/pkg/collector/corechecks/servicediscovery/module/stat.go b/pkg/collector/corechecks/servicediscovery/module/stat.go index ca894aaf0e727..9ea526f7f829d 100644 --- a/pkg/collector/corechecks/servicediscovery/module/stat.go +++ b/pkg/collector/corechecks/servicediscovery/module/stat.go @@ -16,8 +16,6 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/process" - "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -28,8 +26,8 @@ var pageSize = uint64(os.Getpagesize()) // getRSS returns the RSS for the process, in bytes. Compare MemoryInfo() in // gopsutil which does the same thing but which parses several other fields // which we're not interested in. -func getRSS(proc *process.Process) (uint64, error) { - statmPath := kernel.HostProc(strconv.Itoa(int(proc.Pid)), "statm") +func getRSS(pid int32) (uint64, error) { + statmPath := kernel.HostProc(strconv.Itoa(int(pid)), "statm") // This file is very small so just read it fully. 
contents, err := os.ReadFile(statmPath) diff --git a/pkg/compliance/reporter.go b/pkg/compliance/reporter.go index 501ca6f10bc43..cf59ee9043489 100644 --- a/pkg/compliance/reporter.go +++ b/pkg/compliance/reporter.go @@ -44,7 +44,7 @@ func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *c auditor.Start() // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) + pipelineProvider := pipeline.NewProvider(4, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) pipelineProvider.Start() logSource := sources.NewLogSource( diff --git a/pkg/config/env/go.mod b/pkg/config/env/go.mod index 7188be93ca23d..2d26e478fd4e7 100644 --- a/pkg/config/env/go.mod +++ b/pkg/config/env/go.mod @@ -8,18 +8,20 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log/ github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket/ + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../util/winutil ) require ( github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 github.com/stretchr/testify v1.10.0 ) require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect diff --git a/pkg/config/mock/go.mod b/pkg/config/mock/go.mod index 041266bf69dff..9fc526fc2c5de 100644 --- a/pkg/config/mock/go.mod +++ b/pkg/config/mock/go.mod @@ -45,13 +45,13 @@ require ( github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect diff --git a/pkg/config/remote/go.mod b/pkg/config/remote/go.mod index 5a00e0b18ab42..ab537bc1eb918 
100644 --- a/pkg/config/remote/go.mod +++ b/pkg/config/remote/go.mod @@ -50,7 +50,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/grpc v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/uuid v0.56.0-rc.3 github.com/Masterminds/semver v1.5.0 github.com/benbjohnson/clock v1.3.5 @@ -83,7 +83,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-go/v5 v5.5.0 // indirect github.com/DataDog/go-libddwaf/v3 v3.5.1 // indirect @@ -121,7 +121,7 @@ require ( ) require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 github.com/DataDog/viper v1.13.5 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index a9c74bb2765fa..a19de1ebed171 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -490,7 +490,7 @@ func InitConfig(config pkgconfigmodel.Setup) { config.BindEnvAndSetDefault("leader_election_default_resource", "configmap") config.BindEnvAndSetDefault("leader_election_release_on_shutdown", true) config.BindEnvAndSetDefault("kube_resources_namespace", "") - config.BindEnvAndSetDefault("kube_cache_sync_timeout_seconds", 5) + config.BindEnvAndSetDefault("kube_cache_sync_timeout_seconds", 10) // Datadog cluster agent config.BindEnvAndSetDefault("cluster_agent.enabled", false) @@ -1572,6 +1572,10 @@ func logsagent(config pkgconfigmodel.Setup) { // Add a tag to logs that are truncated by the agent config.BindEnvAndSetDefault("logs_config.tag_truncated_logs", false) + // Number of logs pipeline instances. Defaults to number of logical CPU cores as defined by GOMAXPROCS or 4, whichever is lower. + logsPipelines := min(4, runtime.GOMAXPROCS(0)) + config.BindEnvAndSetDefault("logs_config.pipelines", logsPipelines) + // If true, the agent looks for container logs in the location used by podman, rather // than docker. This is a temporary configuration parameter to support podman logs until // a more substantial refactor of autodiscovery is made to determine this automatically. 
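For context on the `logs_config.pipelines` default introduced in pkg/config/setup/config.go above: the value is simply the lower of 4 and the logical CPU count reported by `runtime.GOMAXPROCS(0)`. A minimal standalone Go sketch of that computation (illustration only, not code from this patch; the built-in `min` requires Go 1.21+):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Default number of logs pipeline instances: the lower of 4 and the
	// number of logical CPUs visible to the Go runtime (GOMAXPROCS).
	logsPipelines := min(4, runtime.GOMAXPROCS(0))
	fmt.Println("logs_config.pipelines default:", logsPipelines)
}
```

On a host with 8 logical CPUs this prints 4; with 2 logical CPUs it prints 2, matching the comment added alongside the `BindEnvAndSetDefault` call.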
diff --git a/pkg/config/setup/go.mod b/pkg/config/setup/go.mod index c6855f5379f0f..772c6b0ee8e53 100644 --- a/pkg/config/setup/go.mod +++ b/pkg/config/setup/go.mod @@ -44,11 +44,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go index b72d72df3c971..d2cc6276907d3 100644 --- a/pkg/config/setup/system_probe_cws.go +++ b/pkg/config/setup/system_probe_cws.go @@ -20,7 +20,6 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { // CWS - general config cfg.BindEnvAndSetDefault("runtime_security_config.enabled", false) cfg.BindEnv("runtime_security_config.fim_enabled") - cfg.BindEnvAndSetDefault("runtime_security_config.policies.watch_dir", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.per_rule_enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.report_internal_policies", false) diff --git a/pkg/config/utils/go.mod b/pkg/config/utils/go.mod index 4c39a2d8c2f78..0f4f9537ccb9f 100644 --- a/pkg/config/utils/go.mod +++ b/pkg/config/utils/go.mod @@ -38,7 +38,7 @@ require ( github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/stretchr/testify v1.10.0 ) @@ -54,10 +54,10 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect diff --git a/pkg/ebpf/uprobes/attacher_test.go b/pkg/ebpf/uprobes/attacher_test.go index 26428868c1d70..be848bb5c6afb 100644 --- a/pkg/ebpf/uprobes/attacher_test.go +++ b/pkg/ebpf/uprobes/attacher_test.go @@ -27,14 +27,12 @@ import ( "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" 
"github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" - eventmonitortestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/testutil" + "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/process/monitor" - procmontestutil "github.com/DataDog/datadog-agent/pkg/process/monitor/testutil" - secutils "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -800,8 +798,7 @@ func launchProcessMonitor(t *testing.T, useEventStream bool) *monitor.ProcessMon t.Cleanup(pm.Stop) require.NoError(t, pm.Initialize(useEventStream)) if useEventStream { - secutils.SetCachedHostname("test-hostname") - eventmonitortestutil.StartEventMonitor(t, procmontestutil.RegisterProcessMonitorEventConsumer) + monitor.InitializeEventConsumer(testutil.NewTestProcessConsumer(t)) } return pm diff --git a/pkg/fleet/installer/setup/install.sh b/pkg/fleet/installer/setup/install.sh index 440e8b71acf1d..d76e790de910f 100644 --- a/pkg/fleet/installer/setup/install.sh +++ b/pkg/fleet/installer/setup/install.sh @@ -22,10 +22,10 @@ install() { $sudo_cmd mkdir -p "${tmp_dir}" case "$(uname -m)" in x86_64) - echo "${downloader_bin_linux_amd64}" | base64 -d | $sudo_cmd tee "${downloader_path}" >/dev/null + write_installer_amd64 "$sudo_cmd" "$downloader_path" ;; aarch64) - echo "${downloader_bin_linux_arm64}" | base64 -d | $sudo_cmd tee "${downloader_path}" >/dev/null + write_installer_arm64 "$sudo_cmd" "$downloader_path" ;; esac $sudo_cmd chmod +x "${downloader_path}" @@ -37,16 +37,16 @@ install() { # Embedded binaries used to install Datadog. # Source: https://github.com/DataDog/datadog-agent/tree/INSTALLER_COMMIT/pkg/fleet/installer # DO NOT EDIT THIS SECTION MANUALLY. 
-downloader_bin_linux_amd64=$( - cat </dev/null +} +write_installer_arm64() { + local sudo_cmd=$1 + local path=$2 + base64 -d <<<"DOWNLOADER_BIN_LINUX_ARM64" | $sudo_cmd tee "${path}" >/dev/null +} install "$@" exit 0 diff --git a/pkg/logs/auditor/go.mod b/pkg/logs/auditor/go.mod index 9f000ce69c516..0ea48c9e7895a 100644 --- a/pkg/logs/auditor/go.mod +++ b/pkg/logs/auditor/go.mod @@ -47,7 +47,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/stretchr/testify v1.10.0 ) @@ -66,11 +66,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/logs/client/go.mod b/pkg/logs/client/go.mod index 9f0ec05367e17..cac2a5ce40f32 100644 --- a/pkg/logs/client/go.mod +++ b/pkg/logs/client/go.mod @@ -60,7 +60,7 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/stretchr/testify v1.10.0 golang.org/x/net v0.31.0 @@ -84,11 +84,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock v1.3.5 // indirect diff --git a/pkg/logs/diagnostic/go.mod b/pkg/logs/diagnostic/go.mod index e5350d717dc74..7f3357f60d406 100644 --- a/pkg/logs/diagnostic/go.mod +++ b/pkg/logs/diagnostic/go.mod @@ -67,14 +67,14 @@ require ( github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/logs/message/go.mod b/pkg/logs/message/go.mod index a7acc1d3319f8..b4b0ab2e615ed 100644 --- a/pkg/logs/message/go.mod +++ b/pkg/logs/message/go.mod @@ -42,7 +42,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/stretchr/testify v1.10.0 ) @@ -62,11 +62,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/logs/pipeline/go.mod b/pkg/logs/pipeline/go.mod index aa3225348e8e9..06f20a79a4199 100644 --- a/pkg/logs/pipeline/go.mod +++ b/pkg/logs/pipeline/go.mod @@ -71,7 +71,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 github.com/hashicorp/go-multierror v1.1.1 github.com/stretchr/testify v1.10.0 @@ -100,11 +100,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker 
v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect diff --git a/pkg/logs/processor/go.mod b/pkg/logs/processor/go.mod index 42339476936cc..858475c3a8a54 100644 --- a/pkg/logs/processor/go.mod +++ b/pkg/logs/processor/go.mod @@ -57,7 +57,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/stretchr/testify v1.10.0 ) @@ -80,11 +80,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect diff --git a/pkg/logs/sds/go.mod b/pkg/logs/sds/go.mod index b9c7c9706a849..c6f00cb1cf5e7 100644 --- a/pkg/logs/sds/go.mod +++ b/pkg/logs/sds/go.mod @@ -52,7 +52,7 @@ require ( github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 github.com/stretchr/testify v1.10.0 ) @@ -77,11 +77,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect 
github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/logs/sender/go.mod b/pkg/logs/sender/go.mod index 085c081f2774e..2f438b2f0cc86 100644 --- a/pkg/logs/sender/go.mod +++ b/pkg/logs/sender/go.mod @@ -59,7 +59,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/benbjohnson/clock v1.3.5 github.com/stretchr/testify v1.10.0 ) @@ -84,11 +84,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/logs/sources/go.mod b/pkg/logs/sources/go.mod index b81f5d609a906..a73c178af0286 100644 --- a/pkg/logs/sources/go.mod +++ b/pkg/logs/sources/go.mod @@ -41,7 +41,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 github.com/stretchr/testify v1.10.0 ) @@ -61,10 +61,10 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/logs/util/testutils/go.mod b/pkg/logs/util/testutils/go.mod index d7bbeef948cbb..d2aa1b59f3e82 100644 --- a/pkg/logs/util/testutils/go.mod +++ b/pkg/logs/util/testutils/go.mod @@ -58,14 +58,14 @@ require ( github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect 
github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/metrics/go.mod b/pkg/metrics/go.mod index 2129bb8455c09..198885497f0b9 100644 --- a/pkg/metrics/go.mod +++ b/pkg/metrics/go.mod @@ -46,7 +46,7 @@ require ( github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 @@ -67,11 +67,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/network/dns/snooper_test.go b/pkg/network/dns/snooper_test.go index 3a0d334c80488..a6c1383a9ddcc 100644 --- a/pkg/network/dns/snooper_test.go +++ b/pkg/network/dns/snooper_test.go @@ -230,7 +230,7 @@ func TestDNSFailedResponseCount(t *testing.T) { "nonexistenent.net.com", "missingdomain.com", } - queryIP, queryPort, reps, _ := testdns.SendDNSQueries(t, domains, testdns.GetServerIP(t), "tcp") + queryIP, queryPort, reps, _ := testdns.SendDNSQueries(domains, testdns.GetServerIP(t), "tcp") for _, rep := range reps { require.NotNil(t, rep) require.Equal(t, rep.Rcode, mdns.RcodeNameError) // All the queries should have failed @@ -281,7 +281,7 @@ func TestDNSOverNonPort53(t *testing.T) { shutdown, port := newTestServer(t, localhost, "udp") defer shutdown() - queryIP, queryPort, reps, err := testdns.SendDNSQueriesOnPort(t, domains, net.ParseIP(localhost), strconv.Itoa(int(port)), "udp") + queryIP, queryPort, reps, err := testdns.SendDNSQueriesOnPort(domains, net.ParseIP(localhost), strconv.Itoa(int(port)), "udp") require.NoError(t, err) require.NotNil(t, reps[0]) @@ -334,7 +334,7 @@ func TestDNSOverUDPTimeoutCount(t *testing.T) { invalidServerIP := "8.8.8.90" domainQueried := "agafsdfsdasdfsd" - queryIP, queryPort, 
reps, err := testdns.SendDNSQueries(t, []string{domainQueried}, net.ParseIP(invalidServerIP), "udp") + queryIP, queryPort, reps, err := testdns.SendDNSQueries([]string{domainQueried}, net.ParseIP(invalidServerIP), "udp") require.ErrorIs(t, err, os.ErrDeadlineExceeded, "error should be i/o timeout") require.Len(t, reps, 1) require.Nil(t, reps[0]) @@ -358,7 +358,7 @@ func TestDNSOverUDPTimeoutCountWithoutDomain(t *testing.T) { invalidServerIP := "8.8.8.90" domainQueried := "agafsdfsdasdfsd" - queryIP, queryPort, reps, err := testdns.SendDNSQueries(t, []string{domainQueried}, net.ParseIP(invalidServerIP), "udp") + queryIP, queryPort, reps, err := testdns.SendDNSQueries([]string{domainQueried}, net.ParseIP(invalidServerIP), "udp") require.ErrorIs(t, err, os.ErrDeadlineExceeded, "error should be i/o timeout") require.Len(t, reps, 1) require.Nil(t, reps[0]) diff --git a/pkg/network/ebpf/c/protocols/postgres/decoding-maps.h b/pkg/network/ebpf/c/protocols/postgres/decoding-maps.h index 67288e1243f9e..3ccbd67e52466 100644 --- a/pkg/network/ebpf/c/protocols/postgres/decoding-maps.h +++ b/pkg/network/ebpf/c/protocols/postgres/decoding-maps.h @@ -15,4 +15,8 @@ BPF_PERCPU_ARRAY_MAP(postgres_scratch_buffer, postgres_event_t, 1) // Maintains the current state of tail calls for each Postgres message. BPF_PERCPU_ARRAY_MAP(postgres_iterations, postgres_tail_call_state_t, 1) +// Postgres telemetry maps in kernel provide empirical statistics on the number of processed Postgres messages. +// Key 0 for plaintext traffic, key 1 for encrypted traffic. +BPF_ARRAY_MAP(postgres_telemetry, postgres_kernel_msg_count_t, 2) + #endif diff --git a/pkg/network/ebpf/c/protocols/postgres/decoding.h b/pkg/network/ebpf/c/protocols/postgres/decoding.h index 300ca6ba949a9..3f5e6d489376e 100644 --- a/pkg/network/ebpf/c/protocols/postgres/decoding.h +++ b/pkg/network/ebpf/c/protocols/postgres/decoding.h @@ -108,6 +108,48 @@ static int __always_inline skip_string(pktbuf_t pkt, int message_len) { return SKIP_STRING_FAILED; } +// Return a pointer to the postgres telemetry record in the corresponding map. +static __always_inline void* get_pg_msg_counts_map(pktbuf_t pkt) { + const __u32 plain_key = 0; + const __u32 tls_key = 1; + + pktbuf_map_lookup_option_t pg_telemetry_lookup_opt[] = { + [PKTBUF_SKB] = { + .map = &postgres_telemetry, + .key = (void*)&plain_key, + }, + [PKTBUF_TLS] = { + .map = &postgres_telemetry, + .key = (void*)&tls_key, + }, + }; + return pktbuf_map_lookup(pkt, pg_telemetry_lookup_opt); +} + +// update_msg_count_telemetry increases the corresponding counter of the telemetry bucket. +static __always_inline void update_msg_count_telemetry(postgres_kernel_msg_count_t* pg_msg_counts, __u8 count) { + // This line can be interpreted as a step function of the difference, multiplied by the difference itself. + // The step function of the difference returns 0 if the difference is negative and 1 if it is positive. + // As a result, if the difference is negative, the output will be 0; if the difference is positive, + // the output will equal the difference. + count = count < PG_KERNEL_MSG_COUNT_FIRST_BUCKET ? 0 : count - PG_KERNEL_MSG_COUNT_FIRST_BUCKET; + + // This line functions as a ceiling operation, ensuring that if the count is not a multiple of the bucket size, + // it is rounded up to the next bucket. Since eBPF does not support floating-point numbers, the implementation + // adds (bucket size - 1) to the count and then divides the result by the bucket size. + // This effectively simulates the ceiling function. 
+ __u8 bucket_idx = (count + PG_KERNEL_MSG_COUNT_BUCKET_SIZE - 1) / PG_KERNEL_MSG_COUNT_BUCKET_SIZE; + + // This line ensures that the bucket index stays within the range of 0 to PG_KERNEL_MSG_COUNT_NUM_BUCKETS. + // While not strictly necessary, we include this check to satisfy the verifier and to explicitly define a lower bound. + bucket_idx = bucket_idx < 0 ? 0 : bucket_idx; + + // This line ensures that the bucket index remains within the range of 0 to PG_KERNEL_MSG_COUNT_NUM_BUCKETS, + // preventing any possibility of exceeding the upper bound. + bucket_idx = bucket_idx >= PG_KERNEL_MSG_COUNT_NUM_BUCKETS ? PG_KERNEL_MSG_COUNT_NUM_BUCKETS-1 : bucket_idx; + __sync_fetch_and_add(&pg_msg_counts->msg_count_buckets[bucket_idx], 1); +} + // Reads the first message header and decides what to do based on the // message tag. If the message is a new query, it stores the query in the in-flight map. // If the message is a parse message, we tail call to the dedicated process_parse_message program. @@ -148,7 +190,7 @@ static __always_inline void postgres_handle_message(pktbuf_t pkt, conn_tuple_t * return; } - iteration_value->iteration = 0; + iteration_value->total_msg_count = 0; iteration_value->data_off = 0; pktbuf_tail_call_option_t handle_response_tail_call_array[] = { [PKTBUF_SKB] = { @@ -197,10 +239,10 @@ static __always_inline void postgres_handle_parse_message(pktbuf_t pkt, conn_tup // Handles Postgres command complete messages by examining packet data for both plaintext and TLS traffic. // This function handles multiple messages within a single packet, processing up to POSTGRES_MAX_MESSAGES_PER_TAIL_CALL -// messages per call. When more messages exist beyond this limit, it uses tail call chaining (up to -// POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES) to continue processing. -static __always_inline bool handle_response(pktbuf_t pkt, conn_tuple_t conn_tuple) { +// messages per call. When more messages exist beyond this limit, it uses tail call chaining to continue processing. +static __always_inline bool handle_response(pktbuf_t pkt, conn_tuple_t conn_tuple, postgres_kernel_msg_count_t* pg_msg_counts) { const __u32 zero = 0; + bool read_result = false; bool found_command_complete = false; struct pg_message_header header; @@ -210,7 +252,7 @@ static __always_inline bool handle_response(pktbuf_t pkt, conn_tuple_t conn_tupl return 0; } - if (iteration_value->iteration >= POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES) { + if (iteration_value->total_msg_count >= (POSTGRES_MAX_TOTAL_MESSAGES - 1)) { return 0; } @@ -225,13 +267,14 @@ static __always_inline bool handle_response(pktbuf_t pkt, conn_tuple_t conn_tupl return 0; } + __u8 messages_count = 0; #pragma unroll(POSTGRES_MAX_MESSAGES_PER_TAIL_CALL) - for (__u32 iteration = 0; iteration < POSTGRES_MAX_MESSAGES_PER_TAIL_CALL; ++iteration) { - if (!read_message_header(pkt, &header)) { + for (; messages_count < POSTGRES_MAX_MESSAGES_PER_TAIL_CALL; ++messages_count) { + read_result = read_message_header(pkt, &header); + if (read_result != true) { break; } if (header.message_tag == POSTGRES_COMMAND_COMPLETE_MAGIC_BYTE) { - handle_command_complete(&conn_tuple, transaction); found_command_complete = true; break; } @@ -240,20 +283,35 @@ static __always_inline bool handle_response(pktbuf_t pkt, conn_tuple_t conn_tupl // the message tag. So we need to add 1 to the message length to jump over the entire message. 
pktbuf_advance(pkt, header.message_len + 1); } + iteration_value->total_msg_count += messages_count; if (found_command_complete) { + handle_command_complete(&conn_tuple, transaction); + update_msg_count_telemetry(pg_msg_counts, iteration_value->total_msg_count); + return 0; } - // We didn't find a command complete message, so we need to continue processing the packet. - // We save the current data offset and increment the iteration counter. - iteration_value->iteration += 1; - iteration_value->data_off = pktbuf_data_offset(pkt); - // If the maximum number of tail calls has been reached, we can skip invoking the next tail call. - if (iteration_value->iteration >= POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES) { + if (iteration_value->total_msg_count >= (POSTGRES_MAX_TOTAL_MESSAGES - 1)) { + // reached max messages, add counter and stop iterating. + __sync_fetch_and_add(&pg_msg_counts->reached_max_messages, 1); + return 0; + } + if (pktbuf_data_offset(pkt) == pktbuf_data_end(pkt)) { + // stop the iterator if the end of the TCP packet is reached. + update_msg_count_telemetry(pg_msg_counts, iteration_value->total_msg_count); + return 0; + } + if (read_result == false) { + // the packet was fragmented, add counter stop iterating. + __sync_fetch_and_add(&pg_msg_counts->fragmented_packets, 1); return 0; } + // We didn't find a command complete message, so we need to continue processing the packet. + // We save the current data offset. + iteration_value->data_off = pktbuf_data_offset(pkt); + pktbuf_tail_call_option_t handle_response_tail_call_array[] = { [PKTBUF_SKB] = { .prog_array_map = &protocols_progs, @@ -315,7 +373,11 @@ int socket__postgres_handle_response(struct __sk_buff* skb) { normalize_tuple(&conn_tuple); pktbuf_t pkt = pktbuf_from_skb(skb, &skb_info); - handle_response(pkt, conn_tuple); + postgres_kernel_msg_count_t* pg_msg_counts = get_pg_msg_counts_map(pkt); + if (pg_msg_counts == NULL) { + return 0; + } + handle_response(pkt, conn_tuple, pg_msg_counts); return 0; } @@ -406,10 +468,15 @@ int uprobe__postgres_tls_handle_response(struct pt_regs *ctx) { return 0; } + pktbuf_t pkt = pktbuf_from_tls(ctx, args); + postgres_kernel_msg_count_t* pg_msg_counts = get_pg_msg_counts_map(pkt); + if (pg_msg_counts == NULL) { + return 0; + } + // Copying the tuple to the stack to handle verifier issues on kernel 4.14. conn_tuple_t tup = args->tup; - pktbuf_t pkt = pktbuf_from_tls(ctx, args); - handle_response(pkt, tup); + handle_response(pkt, tup, pg_msg_counts); return 0; } diff --git a/pkg/network/ebpf/c/protocols/postgres/types.h b/pkg/network/ebpf/c/protocols/postgres/types.h index 0e88332dc78a7..6f0abf951ddaf 100644 --- a/pkg/network/ebpf/c/protocols/postgres/types.h +++ b/pkg/network/ebpf/c/protocols/postgres/types.h @@ -7,10 +7,13 @@ #define POSTGRES_BUFFER_SIZE 160 // Represents the maximum number of tail calls we can use to process a single message. -#define POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES 1 +#define POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES 3 // Represents the maximum number of messages we process in a single tail call. -#define POSTGRES_MAX_MESSAGES_PER_TAIL_CALL 80 +#define POSTGRES_MAX_MESSAGES_PER_TAIL_CALL 60 + +// maximum number of messages to fetch +#define POSTGRES_MAX_TOTAL_MESSAGES (POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES * POSTGRES_MAX_MESSAGES_PER_TAIL_CALL) // Postgres transaction information we store in the kernel. 
typedef struct { @@ -30,10 +33,26 @@ typedef struct { } postgres_event_t; typedef struct { - __u8 iteration; + __u8 total_msg_count; // Saving the packet data offset is crucial for maintaining the current read position and ensuring proper utilization // of tail calls. __u32 data_off; } postgres_tail_call_state_t; +// Postgres communication operates via a continuous message stream. +// Gather empirical statistics on the number of messages processed by the program. +// These statistics enable optimization of the monitoring code. +// Collect counters for each subsequent bucket. +// bucket 0: count 0 to 100, bucket 1: 100 to 119, bucket 2: 120 to 139 etc. +#define PG_KERNEL_MSG_COUNT_NUM_BUCKETS 5 +#define PG_KERNEL_MSG_COUNT_BUCKET_SIZE 20 +#define PG_KERNEL_MSG_COUNT_FIRST_BUCKET 100 + +// This structure stores statistics about the number of Postgres messages in a TCP packet. +typedef struct { + __u64 reached_max_messages; + __u64 fragmented_packets; + __u64 msg_count_buckets[PG_KERNEL_MSG_COUNT_NUM_BUCKETS]; +} postgres_kernel_msg_count_t; + #endif diff --git a/pkg/network/ebpf/c/shared-libraries/probes.h b/pkg/network/ebpf/c/shared-libraries/probes.h index 7a71650cdba5e..a33e228a63b26 100644 --- a/pkg/network/ebpf/c/shared-libraries/probes.h +++ b/pkg/network/ebpf/c/shared-libraries/probes.h @@ -23,10 +23,9 @@ static __always_inline void do_sys_open_helper_enter(const char *filename) { // Find the null character and clean up the garbage following it #pragma unroll for (int i = 0; i < LIB_PATH_MAX_SIZE; i++) { - if (path.len) { - path.buf[i] = 0; - } else if (path.buf[i] == 0) { + if (path.buf[i] == 0) { path.len = i; + break; } } } else { diff --git a/pkg/network/protocols/ebpf_types.go b/pkg/network/protocols/ebpf_types.go index 57bdb4539bf5e..c5b90864ee74e 100644 --- a/pkg/network/protocols/ebpf_types.go +++ b/pkg/network/protocols/ebpf_types.go @@ -19,10 +19,8 @@ const ( ) const ( - // PostgresMaxMessagesPerTailCall is the maximum number of messages that can be processed in a single tail call in our Postgres decoding solution - PostgresMaxMessagesPerTailCall = C.POSTGRES_MAX_MESSAGES_PER_TAIL_CALL - // PostgresMaxTailCalls is the maximum number of tail calls that can be made in our Postgres decoding solution - PostgresMaxTailCalls = C.POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES + // PostgresMaxTotalMessages is the maximum number of Postgres messages processed by the eBPF program. + PostgresMaxTotalMessages = C.POSTGRES_MAX_TOTAL_MESSAGES ) // DispatcherProgramType is a C type to represent the eBPF programs used for tail calls. 
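To make the kernel telemetry bucketing added in decoding.h and types.h easier to follow: counts up to PG_KERNEL_MSG_COUNT_FIRST_BUCKET (100) land in bucket 0, each further PG_KERNEL_MSG_COUNT_BUCKET_SIZE (20) messages move the sample one bucket up, and the index is clamped to the last of the PG_KERNEL_MSG_COUNT_NUM_BUCKETS (5) buckets; the cap of POSTGRES_MAX_TOTAL_MESSAGES works out to 3 × 60 = 180 (0xb4, as in the regenerated ebpf_types_linux.go). The Go sketch below is a hypothetical user-space mirror of `update_msg_count_telemetry`, not code from this patch, and only reproduces the integer arithmetic:

```go
package main

import "fmt"

// Constants mirroring PG_KERNEL_MSG_COUNT_* and POSTGRES_MAX_TOTAL_MESSAGES
// from pkg/network/ebpf/c/protocols/postgres/types.h.
const (
	msgCountFirstBucket = 100    // PG_KERNEL_MSG_COUNT_FIRST_BUCKET
	msgCountBucketSize  = 20     // PG_KERNEL_MSG_COUNT_BUCKET_SIZE
	msgCountNumBuckets  = 5      // PG_KERNEL_MSG_COUNT_NUM_BUCKETS
	maxTotalMessages    = 3 * 60 // POSTGRES_MAX_TOTAL_MESSAGES (0xb4)
)

// bucketIndex mirrors the eBPF arithmetic: subtract the first-bucket
// threshold, take an integer ceiling over the bucket size, and clamp the
// result to the last bucket.
func bucketIndex(count int) int {
	if count < msgCountFirstBucket {
		count = 0
	} else {
		count -= msgCountFirstBucket
	}
	idx := (count + msgCountBucketSize - 1) / msgCountBucketSize // ceiling division
	if idx >= msgCountNumBuckets {
		idx = msgCountNumBuckets - 1
	}
	return idx
}

func main() {
	for _, c := range []int{10, 100, 110, 150, maxTotalMessages} {
		fmt.Printf("count=%3d -> bucket %d\n", c, bucketIndex(c))
	}
}
```

Running it yields buckets 0, 0, 1, 3 and 4 for those counts, which is the distribution the `msg_count_buckets` array in `postgres_kernel_msg_count_t` accumulates.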
diff --git a/pkg/network/protocols/ebpf_types_linux.go b/pkg/network/protocols/ebpf_types_linux.go index ae855cfe48b77..8cc9c663dc3d5 100644 --- a/pkg/network/protocols/ebpf_types_linux.go +++ b/pkg/network/protocols/ebpf_types_linux.go @@ -10,9 +10,7 @@ const ( ) const ( - PostgresMaxMessagesPerTailCall = 0x50 - - PostgresMaxTailCalls = 0x1 + PostgresMaxTotalMessages = 0xb4 ) type DispatcherProgramType uint32 diff --git a/pkg/network/protocols/postgres/ebpf/types.go b/pkg/network/protocols/postgres/ebpf/types.go index 7c615186958e4..63071b914e5a7 100644 --- a/pkg/network/protocols/postgres/ebpf/types.go +++ b/pkg/network/protocols/postgres/ebpf/types.go @@ -9,6 +9,7 @@ package ebpf /* #include "../../ebpf/c/protocols/postgres/types.h" +#include "../../ebpf/c/protocols/postgres/defs.h" #include "../../ebpf/c/protocols/classification/defs.h" */ import "C" @@ -19,7 +20,12 @@ type ConnTuple = C.conn_tuple_t type EbpfEvent C.postgres_event_t type EbpfTx C.postgres_transaction_t +type PostgresKernelMsgCount C.postgres_kernel_msg_count_t const ( - BufferSize = C.POSTGRES_BUFFER_SIZE + BufferSize = C.POSTGRES_BUFFER_SIZE + MsgCountBucketSize = C.PG_KERNEL_MSG_COUNT_BUCKET_SIZE + MsgCountNumBuckets = C.PG_KERNEL_MSG_COUNT_NUM_BUCKETS + MsgCountFirstBucket = C.PG_KERNEL_MSG_COUNT_FIRST_BUCKET + MsgCountMaxTotal = C.POSTGRES_MAX_TOTAL_MESSAGES ) diff --git a/pkg/network/protocols/postgres/ebpf/types_linux.go b/pkg/network/protocols/postgres/ebpf/types_linux.go index 9b0097a2b5f9d..9413ee269ac20 100644 --- a/pkg/network/protocols/postgres/ebpf/types_linux.go +++ b/pkg/network/protocols/postgres/ebpf/types_linux.go @@ -27,7 +27,16 @@ type EbpfTx struct { Tags uint8 Pad_cgo_0 [3]byte } +type PostgresKernelMsgCount struct { + Reached_max_messages uint64 + Fragmented_packets uint64 + Msg_count_buckets [5]uint64 +} const ( - BufferSize = 0xa0 + BufferSize = 0xa0 + MsgCountBucketSize = 0x14 + MsgCountNumBuckets = 0x5 + MsgCountFirstBucket = 0x64 + MsgCountMaxTotal = 0xb4 ) diff --git a/pkg/network/protocols/postgres/protocol.go b/pkg/network/protocols/postgres/protocol.go index 6bc245896a91a..c0d7aa37a6747 100644 --- a/pkg/network/protocols/postgres/protocol.go +++ b/pkg/network/protocols/postgres/protocol.go @@ -9,6 +9,7 @@ package postgres import ( "io" + "time" "unsafe" "github.com/cilium/ebpf" @@ -28,6 +29,8 @@ import ( ) const ( + // KernelTelemetryMap is the map for getting kernel metrics + KernelTelemetryMap = "postgres_telemetry" // InFlightMap is the name of the in-flight map. InFlightMap = "postgres_in_flight" scratchBufferMap = "postgres_scratch_buffer" @@ -44,11 +47,13 @@ const ( // protocol holds the state of the postgres protocol monitoring. type protocol struct { - cfg *config.Config - telemetry *Telemetry - eventsConsumer *events.Consumer[postgresebpf.EbpfEvent] - mapCleaner *ddebpf.MapCleaner[netebpf.ConnTuple, postgresebpf.EbpfTx] - statskeeper *StatKeeper + cfg *config.Config + telemetry *Telemetry + eventsConsumer *events.Consumer[postgresebpf.EbpfEvent] + mapCleaner *ddebpf.MapCleaner[netebpf.ConnTuple, postgresebpf.EbpfTx] + statskeeper *StatKeeper + kernelTelemetry *kernelTelemetry // retrieves Postgres metrics from kernel + kernelTelemetryStopCh chan struct{} } // Spec is the protocol spec for the postgres protocol. 
@@ -133,9 +138,11 @@ func newPostgresProtocol(cfg *config.Config) (protocols.Protocol, error) { } return &protocol{ - cfg: cfg, - telemetry: NewTelemetry(cfg), - statskeeper: NewStatkeeper(cfg), + cfg: cfg, + telemetry: NewTelemetry(cfg), + statskeeper: NewStatkeeper(cfg), + kernelTelemetry: newKernelTelemetry(), + kernelTelemetryStopCh: make(chan struct{}), }, nil } @@ -175,6 +182,7 @@ func (p *protocol) PreStart(mgr *manager.Manager) (err error) { func (p *protocol) PostStart(mgr *manager.Manager) error { // Setup map cleaner after manager start. p.setupMapCleaner(mgr) + p.startKernelTelemetry(mgr) return nil } @@ -186,11 +194,16 @@ func (p *protocol) Stop(*manager.Manager) { if p.eventsConsumer != nil { p.eventsConsumer.Stop() } + if p.kernelTelemetryStopCh != nil { + close(p.kernelTelemetryStopCh) + } } // DumpMaps dumps map contents for debugging. func (p *protocol) DumpMaps(w io.Writer, mapName string, currentMap *ebpf.Map) { - if mapName == InFlightMap { // maps/postgres_in_flight (BPF_MAP_TYPE_HASH), key ConnTuple, value EbpfTx + switch mapName { + case InFlightMap: + // maps/postgres_in_flight (BPF_MAP_TYPE_HASH), key ConnTuple, value EbpfTx var key netebpf.ConnTuple var value postgresebpf.EbpfTx protocols.WriteMapDumpHeader(w, currentMap, mapName, key, value) @@ -198,12 +211,27 @@ func (p *protocol) DumpMaps(w io.Writer, mapName string, currentMap *ebpf.Map) { for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { spew.Fdump(w, key, value) } + case KernelTelemetryMap: + // postgres_msg_count (BPF_ARRAY_MAP), key 0 and 1, value PostgresKernelMsgCount + plainKey := uint32(0) + tlsKey := uint32(1) + + var value postgresebpf.PostgresKernelMsgCount + protocols.WriteMapDumpHeader(w, currentMap, mapName, plainKey, value) + if err := currentMap.Lookup(unsafe.Pointer(&plainKey), unsafe.Pointer(&value)); err == nil { + spew.Fdump(w, plainKey, value) + } + protocols.WriteMapDumpHeader(w, currentMap, mapName, tlsKey, value) + if err := currentMap.Lookup(unsafe.Pointer(&tlsKey), unsafe.Pointer(&value)); err == nil { + spew.Fdump(w, tlsKey, value) + } } } // GetStats returns a map of Postgres stats. 
func (p *protocol) GetStats() *protocols.ProtocolStats { p.eventsConsumer.Sync() + p.kernelTelemetry.Log() return &protocols.ProtocolStats{ Type: protocols.Postgres, @@ -250,3 +278,40 @@ func (p *protocol) setupMapCleaner(mgr *manager.Manager) { p.mapCleaner = mapCleaner } + +func (p *protocol) startKernelTelemetry(mgr *manager.Manager) { + telemetryMap, err := protocols.GetMap(mgr, KernelTelemetryMap) + if err != nil { + log.Errorf("couldn't find kernel telemetry map %q: %v", KernelTelemetryMap, err) + return + } + + plainKey := uint32(0) + tlsKey := uint32(1) + pgKernelMsgCount := &postgresebpf.PostgresKernelMsgCount{} + ticker := time.NewTicker(30 * time.Second) + + go func() { + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := telemetryMap.Lookup(unsafe.Pointer(&plainKey), unsafe.Pointer(pgKernelMsgCount)); err != nil { + log.Errorf("unable to lookup %q map: %s", KernelTelemetryMap, err) + return + } + p.kernelTelemetry.update(pgKernelMsgCount, false) + + if err := telemetryMap.Lookup(unsafe.Pointer(&tlsKey), unsafe.Pointer(pgKernelMsgCount)); err != nil { + log.Errorf("unable to lookup %q map: %s", KernelTelemetryMap, err) + return + } + p.kernelTelemetry.update(pgKernelMsgCount, true) + + case <-p.kernelTelemetryStopCh: + return + } + } + }() +} diff --git a/pkg/network/protocols/postgres/telemetry.go b/pkg/network/protocols/postgres/telemetry.go index cc17b902c24ea..d8cb08725a80e 100644 --- a/pkg/network/protocols/postgres/telemetry.go +++ b/pkg/network/protocols/postgres/telemetry.go @@ -9,6 +9,7 @@ package postgres import ( "fmt" + "strconv" "github.com/cihub/seelog" @@ -178,3 +179,49 @@ func (t *Telemetry) Log() { log.Debugf("postgres stats summary: %s", t.metricGroup.Summary()) } } + +// kernelTelemetry provides empirical kernel statistics about the number of messages in each TCP packet +type kernelTelemetry struct { + metricGroup *libtelemetry.MetricGroup + reachedMaxMessages *libtelemetry.TLSAwareCounter + fragmentedPackets *libtelemetry.TLSAwareCounter + msgCountBuckets [ebpf.MsgCountNumBuckets]*libtelemetry.TLSAwareCounter // Postgres message counters divided into buckets +} + +// newKernelTelemetry creates the Postgres message counter store. +func newKernelTelemetry() *kernelTelemetry { + metricGroup := libtelemetry.NewMetricGroup("usm.postgres", libtelemetry.OptStatsd) + kernelTel := &kernelTelemetry{ + metricGroup: metricGroup, + } + kernelTel.reachedMaxMessages = libtelemetry.NewTLSAwareCounter(metricGroup, "max_messages") + kernelTel.fragmentedPackets = libtelemetry.NewTLSAwareCounter(metricGroup, "incomplete_messages") + + for i := range kernelTel.msgCountBuckets { + kernelTel.msgCountBuckets[i] = libtelemetry.NewTLSAwareCounter(metricGroup, "messages_count_bucket_"+strconv.Itoa(i+1)) + } + return kernelTel +} + +// update refreshes the Postgres message counter store with new counters from the kernel; it returns immediately if there is nothing to add.
+func (t *kernelTelemetry) update(kernCounts *ebpf.PostgresKernelMsgCount, isTLS bool) { + if kernCounts == nil { + return + } + + t.reachedMaxMessages.Set(int64(kernCounts.Reached_max_messages), isTLS) + t.fragmentedPackets.Set(int64(kernCounts.Fragmented_packets), isTLS) + + for i := range t.msgCountBuckets { + v := kernCounts.Msg_count_buckets[i] + t.msgCountBuckets[i].Set(int64(v), isTLS) + } +} + +// Log logs summary of telemetry +func (t *kernelTelemetry) Log() { + if log.ShouldLog(seelog.DebugLvl) { + s := t.metricGroup.Summary() + log.Debugf("postgres kernel telemetry, summary: %s", s) + } +} diff --git a/pkg/network/protocols/telemetry/metric.go b/pkg/network/protocols/telemetry/metric.go index 46aa226351278..56ce64a1b00b8 100644 --- a/pkg/network/protocols/telemetry/metric.go +++ b/pkg/network/protocols/telemetry/metric.go @@ -26,6 +26,11 @@ func NewCounter(name string, tagsAndOptions ...string) *Counter { return globalRegistry.FindOrCreate(c).(*Counter) } +// Set value atomically +func (c *Counter) Set(v int64) { + c.value.Store(v) +} + // Add value atomically func (c *Counter) Add(v int64) { if v > 0 { @@ -118,6 +123,15 @@ func NewTLSAwareCounter(metricGroup *MetricGroup, metricName string, tags ...str } } +// Set Sets the given value to the counter based on the encryption. +func (c *TLSAwareCounter) Set(v int64, isTLS bool) { + if isTLS { + c.counterTLS.Set(v) + return + } + c.counterPlain.Set(v) +} + // Add adds the given delta to the counter based on the encryption. func (c *TLSAwareCounter) Add(delta int64, isTLS bool) { if isTLS { diff --git a/pkg/network/tracer/testutil/proxy/external_unix_proxy_server/external_unix_proxy_server.go b/pkg/network/tracer/testutil/proxy/external_unix_proxy_server/external_unix_proxy_server.go index 8ef7f164ae5b2..1f077d0717746 100644 --- a/pkg/network/tracer/testutil/proxy/external_unix_proxy_server/external_unix_proxy_server.go +++ b/pkg/network/tracer/testutil/proxy/external_unix_proxy_server/external_unix_proxy_server.go @@ -22,11 +22,13 @@ func main() { var unixPath string var useTLS bool var useControl bool + var useSplice bool flag.StringVar(&remoteAddr, "remote", "", "Remote server address to forward connections to") flag.StringVar(&unixPath, "unix", "/tmp/transparent.sock", "A local unix socket to listen on") flag.BoolVar(&useTLS, "tls", false, "Use TLS to connect to the remote server") flag.BoolVar(&useControl, "control", false, "Use control messages") + flag.BoolVar(&useSplice, "splice", false, "Use splice(2) to transfer data") // Parse command-line flags flag.Parse() @@ -34,7 +36,7 @@ func main() { done := make(chan os.Signal, 1) signal.Notify(done, syscall.SIGINT) - srv := proxy.NewUnixTransparentProxyServer(unixPath, remoteAddr, useTLS, useControl) + srv := proxy.NewUnixTransparentProxyServer(unixPath, remoteAddr, useTLS, useControl, useSplice) defer srv.Stop() if err := srv.Run(); err != nil { diff --git a/pkg/network/tracer/testutil/proxy/unix_transparent_proxy.go b/pkg/network/tracer/testutil/proxy/unix_transparent_proxy.go index 0d19319837b2b..9109b9e0d0756 100644 --- a/pkg/network/tracer/testutil/proxy/unix_transparent_proxy.go +++ b/pkg/network/tracer/testutil/proxy/unix_transparent_proxy.go @@ -43,6 +43,8 @@ type UnixTransparentProxyServer struct { useTLS bool // useControl indicates whether the proxy should expect control messages on the client socket useControl bool + // useSplice indicates whether splice(2) should be used to transfer data between the sockets + useSplice bool // isReady is a flag indicating whether the 
server is ready to accept connections. isReady atomic.Bool // wg is a wait group used to wait for the server to stop. @@ -52,12 +54,13 @@ type UnixTransparentProxyServer struct { } // NewUnixTransparentProxyServer returns a new instance of a UnixTransparentProxyServer. -func NewUnixTransparentProxyServer(unixPath, remoteAddr string, useTLS, useControl bool) *UnixTransparentProxyServer { +func NewUnixTransparentProxyServer(unixPath, remoteAddr string, useTLS, useControl bool, useSplice bool) *UnixTransparentProxyServer { return &UnixTransparentProxyServer{ unixPath: unixPath, remoteAddr: remoteAddr, useTLS: useTLS, useControl: useControl, + useSplice: useSplice, } } @@ -124,6 +127,51 @@ func WaitForConnectionReady(unixSocket string) error { return fmt.Errorf("could not connect %q after %d retries (after %v)", unixSocket, connectionRetries, connectionRetryInterval*connectionRetries) } +// copyWithoutSplice is based on io.copyBuffer() in the standard library with +// the WriteTo/ReadFrom usage removed (to remove the use of splice(2)) and the +// internal errors replaced (since they are inaccessible from here). +func copyWithoutSplice(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + if buf == nil { + size := 32 * 1024 + if l, ok := src.(*io.LimitedReader); ok && int64(size) > l.N { + if l.N < 1 { + size = 1 + } else { + size = int(l.N) + } + } + buf = make([]byte, size) + } + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw < 0 || nr < nw { + nw = 0 + if ew == nil { + ew = errors.New("invalid write result") + } + } + written += int64(nw) + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + if er != io.EOF { + err = er + } + break + } + } + return written, err +} + // handleConnection handles a new connection, by forwarding all traffic to the remote address. func (p *UnixTransparentProxyServer) handleConnection(unixSocketConn net.Conn) { defer unixSocketConn.Close() @@ -219,7 +267,12 @@ func (p *UnixTransparentProxyServer) handleConnection(unixSocketConn net.Conn) { if cleanup != nil { defer cleanup() } - _, _ = io.Copy(dst, src) + + if p.useSplice { + _, _ = io.Copy(dst, src) + } else { + _, _ = copyWithoutSplice(dst, src, nil) + } } // If the unix socket is closed, we can close the remote as well. diff --git a/pkg/network/tracer/testutil/testdns/test_dns_server.go b/pkg/network/tracer/testutil/testdns/test_dns_server.go index 48c4e2fe670fe..05745479b4073 100644 --- a/pkg/network/tracer/testutil/testdns/test_dns_server.go +++ b/pkg/network/tracer/testutil/testdns/test_dns_server.go @@ -131,8 +131,7 @@ func respond(req *dns.Msg, writer dns.ResponseWriter, record string) { } // SendDNSQueriesOnPort makes a DNS query for every domain provided, on the given serverIP, port, and protocol. 
-func SendDNSQueriesOnPort(t *testing.T, domains []string, serverIP net.IP, port string, protocol string) (string, int, []*dns.Msg, error) { - t.Helper() +func SendDNSQueriesOnPort(domains []string, serverIP net.IP, port string, protocol string) (string, int, []*dns.Msg, error) { dnsClient := dns.Client{Net: protocol, Timeout: 3 * time.Second} dnsHost := net.JoinHostPort(serverIP.String(), port) conn, err := dnsClient.Dial(dnsHost) @@ -168,24 +167,21 @@ func SendDNSQueriesOnPort(t *testing.T, domains []string, serverIP net.IP, port // SendDNSQueriesAndCheckError is a simple helper that requires no errors to be present when calling SendDNSQueries func SendDNSQueriesAndCheckError( - t *testing.T, + t require.TestingT, domains []string, serverIP net.IP, protocol string, ) (string, int, []*dns.Msg) { - t.Helper() - ip, port, resp, err := SendDNSQueries(t, domains, serverIP, protocol) + ip, port, resp, err := SendDNSQueries(domains, serverIP, protocol) require.NoError(t, err) return ip, port, resp } // SendDNSQueries is a simple helper that calls SendDNSQueriesOnPort with port 53 func SendDNSQueries( - t *testing.T, domains []string, serverIP net.IP, protocol string, ) (string, int, []*dns.Msg, error) { - t.Helper() - return SendDNSQueriesOnPort(t, domains, serverIP, "53", protocol) + return SendDNSQueriesOnPort(domains, serverIP, "53", protocol) } diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index 5897839fb2c41..d90928d560da8 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -174,7 +174,7 @@ func (s *TracerSuite) TestTCPRetransmit() { var conn *network.ConnectionStats require.EventuallyWithT(t, func(ct *assert.CollectT) { // Iterate through active connections until we find connection created above, and confirm send + recv counts - connections := getConnections(t, tr) + connections := getConnections(ct, tr) ok := false conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) @@ -185,7 +185,7 @@ func (s *TracerSuite) TestTCPRetransmit() { assert.Equal(ct, 100*clientMessageSize, int(conn.Monotonic.SentBytes)) assert.Equal(ct, serverMessageSize, int(conn.Monotonic.RecvBytes)) if !tr.config.EnableEbpfless { - assert.Equal(t, os.Getpid(), int(conn.Pid)) + assert.Equal(ct, os.Getpid(), int(conn.Pid)) } assert.Equal(ct, addrPort(server.Address()), int(conn.DPort)) @@ -308,7 +308,7 @@ func (s *TracerSuite) TestTCPRTT() { require.EventuallyWithT(t, func(ct *assert.CollectT) { // Fetch connection matching source and target address - allConnections := getConnections(t, tr) + allConnections := getConnections(ct, tr) conn, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), allConnections) if !assert.True(ct, ok) { return @@ -478,7 +478,7 @@ func (s *TracerSuite) TestConntrackExpiration() { return } - connections := getConnections(t, tr) + connections := getConnections(collect, tr) t.Log(connections) // for debugging failures var ok bool conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) @@ -541,10 +541,11 @@ func (s *TracerSuite) TestConntrackDelays() { _, err = c.Write([]byte("ping")) require.NoError(t, err) - require.Eventually(t, func() bool { - connections := getConnections(t, tr) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + connections := getConnections(collect, tr) conn, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), connections) - return ok && tr.conntracker.GetTranslationForConn(&conn.ConnectionTuple) != nil + require.True(collect, ok) + 
require.NotNil(collect, tr.conntracker.GetTranslationForConn(&conn.ConnectionTuple)) }, 3*time.Second, 100*time.Millisecond, "failed to find connection with translation") // write newline so server connections will exit @@ -629,7 +630,7 @@ func (s *TracerSuite) TestUnconnectedUDPSendIPv6() { require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - connections := getConnections(t, tr) + connections := getConnections(ct, tr) outgoing := network.FilterConnections(connections, func(cs network.ConnectionStats) bool { if cs.Type != network.UDP { return false @@ -726,7 +727,7 @@ func (s *TracerSuite) TestGatewayLookupEnabled() { var clientIP string var clientPort int require.EventuallyWithT(t, func(c *assert.CollectT) { - clientIP, clientPort, _, err = testdns.SendDNSQueries(t, []string{"google.com"}, dnsAddr, "udp") + clientIP, clientPort, _, err = testdns.SendDNSQueries([]string{"google.com"}, dnsAddr, "udp") assert.NoError(c, err) }, 6*time.Second, 100*time.Millisecond, "failed to send dns query") @@ -783,7 +784,7 @@ func (s *TracerSuite) TestGatewayLookupSubnetLookupError() { var clientIP string var clientPort int require.EventuallyWithT(t, func(c *assert.CollectT) { - clientIP, clientPort, _, err = testdns.SendDNSQueries(t, []string{destDomain}, destAddr, "udp") + clientIP, clientPort, _, err = testdns.SendDNSQueries([]string{destDomain}, destAddr, "udp") assert.NoError(c, err) }, 6*time.Second, 100*time.Millisecond, "failed to send dns query") @@ -798,7 +799,7 @@ func (s *TracerSuite) TestGatewayLookupSubnetLookupError() { require.Nil(t, c.Via) require.EventuallyWithT(t, func(c *assert.CollectT) { - clientIP, clientPort, _, err = testdns.SendDNSQueries(t, []string{destDomain}, destAddr, "udp") + clientIP, clientPort, _, err = testdns.SendDNSQueries([]string{destDomain}, destAddr, "udp") assert.NoError(c, err) }, 6*time.Second, 100*time.Millisecond, "failed to send dns query") @@ -906,12 +907,13 @@ func (s *TracerSuite) TestGatewayLookupCrossNamespace() { _, err = c.Write([]byte("foo")) require.NoError(t, err) - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { var ok bool - conns := getConnections(t, tr) + conns := getConnections(collect, tr) t.Log(conns) conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) - return ok && conn.Direction == network.OUTGOING + require.True(collect, ok) + require.Equal(collect, network.OUTGOING, conn.Direction) }, 3*time.Second, 100*time.Millisecond) // conn.Via should be nil, since traffic is local @@ -939,12 +941,13 @@ func (s *TracerSuite) TestGatewayLookupCrossNamespace() { require.NoError(t, err) var conn *network.ConnectionStats - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { var ok bool - conns := getConnections(t, tr) + conns := getConnections(collect, tr) t.Log(conns) conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) - return ok && conn.Direction == network.OUTGOING + require.True(collect, ok) + require.Equal(collect, network.OUTGOING, conn.Direction) }, 3*time.Second, 100*time.Millisecond) // traffic is local, so Via field should not be set @@ -957,7 +960,7 @@ func (s *TracerSuite) TestGatewayLookupCrossNamespace() { var clientPort int require.EventuallyWithT(t, func(c *assert.CollectT) { kernel.WithNS(test2Ns, func() error { - clientIP, clientPort, _, err = testdns.SendDNSQueries(t, []string{"google.com"}, dnsAddr, "udp") + clientIP, clientPort, _, err = testdns.SendDNSQueries([]string{"google.com"}, 
dnsAddr, "udp") return nil }) assert.NoError(c, err) @@ -969,10 +972,11 @@ func (s *TracerSuite) TestGatewayLookupCrossNamespace() { iif := ipRouteGet(t, "", dnsClientAddr.IP.String(), nil) ifi := ipRouteGet(t, dnsClientAddr.IP.String(), dnsAddr.String(), iif) - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { var ok bool - conn, ok = findConnection(dnsClientAddr, dnsServerAddr, getConnections(t, tr)) - return ok && conn.Direction == network.OUTGOING + conn, ok = findConnection(dnsClientAddr, dnsServerAddr, getConnections(collect, tr)) + require.True(collect, ok) + require.Equal(collect, network.OUTGOING, conn.Direction) }, 3*time.Second, 100*time.Millisecond) require.NotNil(t, conn.Via) @@ -1012,16 +1016,15 @@ func (s *TracerSuite) TestConnectionAssured() { require.NoError(t, err) } - var conn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) - var ok bool - conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) - return ok && conn.Monotonic.SentBytes > 0 && conn.Monotonic.RecvBytes > 0 + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) + conn, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + require.True(collect, ok) + require.Positive(collect, conn.Monotonic.SentBytes) + require.Positive(collect, conn.Monotonic.RecvBytes) + // verify the connection is marked as assured + require.True(collect, conn.IsAssured) }, 3*time.Second, 100*time.Millisecond, "could not find udp connection") - - // verify the connection is marked as assured - require.True(t, conn.IsAssured) } func (s *TracerSuite) TestConnectionNotAssured() { @@ -1047,16 +1050,15 @@ func (s *TracerSuite) TestConnectionNotAssured() { _, err = c.Write(genPayload(clientMessageSize)) require.NoError(t, err) - var conn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) - var ok bool - conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) - return ok && conn.Monotonic.SentBytes > 0 && conn.Monotonic.RecvBytes == 0 + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) + conn, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + require.True(collect, ok) + require.Positive(collect, conn.Monotonic.SentBytes) + require.Zero(collect, conn.Monotonic.RecvBytes) + // verify the connection is marked as not assured + require.False(collect, conn.IsAssured) }, 3*time.Second, 100*time.Millisecond, "could not find udp connection") - - // verify the connection is marked as not assured - require.False(t, conn.IsAssured) } func (s *TracerSuite) TestUDPConnExpiryTimeout() { @@ -1111,19 +1113,15 @@ func (s *TracerSuite) TestDNATIntraHostIntegration() { }) var incoming, outgoing *network.ConnectionStats - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { _, err = conn.Write([]byte("ping")) - if !assert.NoError(t, err, "error writing in client") { - return false - } + require.NoError(collect, err) bs := make([]byte, 4) _, err = conn.Read(bs) - if !assert.NoError(t, err) { - return false - } + require.NoError(collect, err) - conns := getConnections(t, tr) + conns := getConnections(collect, tr) t.Log(conns) outgoing, _ = findConnection(conn.LocalAddr(), conn.RemoteAddr(), conns) @@ -1131,7 +1129,9 @@ func (s *TracerSuite) TestDNATIntraHostIntegration() { t.Logf("incoming: %+v, outgoing: %+v", incoming, outgoing) - return outgoing != nil && 
incoming != nil && outgoing.IPTranslation != nil + require.NotNil(collect, outgoing) + require.NotNil(collect, incoming) + require.NotNil(collect, outgoing.IPTranslation) }, 3*time.Second, 100*time.Millisecond, "failed to get both incoming and outgoing connection") assert.True(t, outgoing.IntraHost, "did not find outgoing connection classified as local: %v", outgoing) @@ -1175,13 +1175,13 @@ func (s *TracerSuite) TestSelfConnect() { t.Logf("port is %d", port) - require.Eventually(t, func() bool { - conns := network.FilterConnections(getConnections(t, tr), func(cs network.ConnectionStats) bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := network.FilterConnections(getConnections(collect, tr), func(cs network.ConnectionStats) bool { return cs.SPort == uint16(port) && cs.DPort == uint16(port) && cs.Source.IsLoopback() && cs.Dest.IsLoopback() }) t.Logf("connections: %v", conns) - return len(conns) == 2 + require.Len(collect, conns, 2) }, 5*time.Second, 100*time.Millisecond, "could not find expected number of tcp connections, expected: 2") } @@ -1277,8 +1277,8 @@ func testUDPPeekCount(t *testing.T, udpnet, ip string) { var incoming *network.ConnectionStats var outgoing *network.ConnectionStats - require.Eventuallyf(t, func() bool { - conns := getConnections(t, tr) + require.EventuallyWithTf(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) if outgoing == nil { outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) } @@ -1286,7 +1286,8 @@ func testUDPPeekCount(t *testing.T, udpnet, ip string) { incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), conns) } - return outgoing != nil && incoming != nil + require.NotNil(collect, outgoing) + require.NotNil(collect, incoming) }, 3*time.Second, 100*time.Millisecond, "couldn't find incoming and outgoing connections matching") m := outgoing.Monotonic @@ -1343,8 +1344,8 @@ func testUDPPacketSumming(t *testing.T, udpnet, ip string) { var incoming *network.ConnectionStats var outgoing *network.ConnectionStats - require.Eventuallyf(t, func() bool { - conns := getConnections(t, tr) + require.EventuallyWithTf(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) if outgoing == nil { outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) } @@ -1352,7 +1353,8 @@ func testUDPPacketSumming(t *testing.T, udpnet, ip string) { incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), conns) } - return outgoing != nil && incoming != nil + require.NotNil(collect, outgoing) + require.NotNil(collect, incoming) }, 3*time.Second, 100*time.Millisecond, "couldn't find incoming and outgoing connections matching") m := outgoing.Monotonic @@ -1407,8 +1409,8 @@ func (s *TracerSuite) TestUDPPythonReusePort() { t.Logf("port is %d", port) conns := map[network.ConnectionTuple]network.ConnectionStats{} - require.Eventually(t, func() bool { - _conns := network.FilterConnections(getConnections(t, tr), func(cs network.ConnectionStats) bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { + _conns := network.FilterConnections(getConnections(collect, tr), func(cs network.ConnectionStats) bool { return cs.Type == network.UDP && cs.Source.IsLoopback() && cs.Dest.IsLoopback() && @@ -1421,7 +1423,7 @@ func (s *TracerSuite) TestUDPPythonReusePort() { t.Log(conns) - return len(conns) == 4 + require.Len(collect, conns, 4) }, 3*time.Second, 100*time.Millisecond, "could not find expected number of udp connections, expected: 4") var incoming, outgoing []network.ConnectionStats 
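All of the tracer_linux_test.go hunks in this file follow the same migration: a bool-returning require.Eventually closure becomes a require.EventuallyWithT closure, and the *assert.CollectT it receives is passed to getConnections and to the assertions, so a failed check only fails that polling attempt instead of aborting the test. A minimal, self-contained sketch of the pattern, using only testify (the package name, test name, and timing condition are illustrative, not taken from the patch):

package tracertest

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestEventuallyWithTPattern shows the polling-assertion style used above:
// assertions inside the closure report into the *assert.CollectT, and the
// closure is retried every tick until it passes or the timeout expires.
func TestEventuallyWithTPattern(t *testing.T) {
	start := time.Now()
	require.EventuallyWithT(t, func(collect *assert.CollectT) {
		// Stand-in for getConnections(collect, tr) + findConnection(...):
		// this check only starts passing after ~300ms of polling.
		assert.Greater(collect, time.Since(start), 300*time.Millisecond)
	}, 3*time.Second, 100*time.Millisecond, "condition never became true")
}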
@@ -1781,15 +1783,12 @@ func (s *TracerSuite) TestSendfileError() { c.Close() - var conn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) - var ok bool - conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) - return ok + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) + conn, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + require.True(collect, ok) + require.Equalf(collect, int64(0), int64(conn.Monotonic.SentBytes), "sendfile data wasn't properly traced") }, 3*time.Second, 100*time.Millisecond, "couldn't find connection used by sendfile(2)") - - assert.Equalf(t, int64(0), int64(conn.Monotonic.SentBytes), "sendfile data wasn't properly traced") } func sendFile(t *testing.T, c SyscallConn, f *os.File, offset *int64, count int) (int, error) { @@ -1881,15 +1880,12 @@ func (s *TracerSuite) TestShortWrite() { c, err := net.FileConn(f) require.NoError(t, err) - var conn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) - var ok bool - conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), conns) - return ok + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) + conn, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), conns) + require.True(collect, ok) + require.Equal(collect, sent, conn.Monotonic.SentBytes) }, 3*time.Second, 100*time.Millisecond, "couldn't find connection used by short write") - - assert.Equal(t, sent, conn.Monotonic.SentBytes) } func (s *TracerSuite) TestKprobeAttachWithKprobeEvents() { @@ -1972,14 +1968,11 @@ func (s *TracerSuite) TestBlockingReadCounts() { assert.Equal(collect, 6, read) }, 10*time.Second, 100*time.Millisecond, "failed to get required bytes") - var conn *network.ConnectionStats - require.Eventually(t, func() bool { - var found bool - conn, found = findConnection(c.(*net.TCPConn).LocalAddr(), c.(*net.TCPConn).RemoteAddr(), getConnections(t, tr)) - return found + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conn, found := findConnection(c.(*net.TCPConn).LocalAddr(), c.(*net.TCPConn).RemoteAddr(), getConnections(collect, tr)) + require.True(collect, found) + require.Equal(collect, uint64(read), conn.Monotonic.RecvBytes) }, 3*time.Second, 100*time.Millisecond) - - assert.Equal(t, uint64(read), conn.Monotonic.RecvBytes) } func (s *TracerSuite) TestPreexistingConnectionDirection() { @@ -2020,15 +2013,16 @@ func (s *TracerSuite) TestPreexistingConnectionDirection() { c.Close() var incoming, outgoing *network.ConnectionStats - require.Eventually(t, func() bool { - connections := getConnections(t, tr) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + connections := getConnections(collect, tr) if outgoing == nil { outgoing, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) } if incoming == nil { incoming, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), connections) } - return incoming != nil && outgoing != nil + require.NotNil(collect, outgoing) + require.NotNil(collect, incoming) }, 3*time.Second, 100*time.Millisecond, "could not find connection incoming and outgoing connections") m := outgoing.Monotonic @@ -2136,14 +2130,12 @@ func (s *TracerSuite) TestUDPIncomingDirectionFix() { raddr, err := net.ResolveUDPAddr("udp", server.address) require.NoError(t, err) - var conn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) - conn, _ = 
findConnection(net.UDPAddrFromAddrPort(ap), raddr, conns) - return conn != nil + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) + conn, _ := findConnection(net.UDPAddrFromAddrPort(ap), raddr, conns) + require.NotNil(collect, conn) + require.Equal(collect, network.OUTGOING, conn.Direction) }, 3*time.Second, 100*time.Millisecond) - - assert.Equal(t, network.OUTGOING, conn.Direction) } func TestEbpfConntrackerFallback(t *testing.T) { @@ -2413,31 +2405,24 @@ LOOP: // get connections, the client connection will still // not be in the closed state, so duration will the // timestamp of when it was created - var conn *network.ConnectionStats require.EventuallyWithT(t, func(collect *assert.CollectT) { - conns := getConnections(t, tr) - var found bool - conn, found = findConnection(c.LocalAddr(), srv.Addr(), conns) - assert.True(collect, found, "could not find connection") - + conns := getConnections(collect, tr) + conn, found := findConnection(c.LocalAddr(), srv.Addr(), conns) + require.True(collect, found, "could not find connection") + // all we can do is verify it is > 0 + require.Greater(collect, conn.Duration, time.Duration(0)) }, 3*time.Second, 100*time.Millisecond, "could not find connection") - // all we can do is verify it is > 0 - assert.Greater(t, conn.Duration, time.Duration(0)) require.NoError(t, c.Close(), "error closing client connection") require.EventuallyWithT(t, func(collect *assert.CollectT) { - var found bool - conn, found = findConnection(c.LocalAddr(), srv.Addr(), getConnections(t, tr)) - if !assert.True(collect, found, "could not find connection") { - return - } - assert.True(collect, conn.IsClosed, "connection should be closed") + conn, found := findConnection(c.LocalAddr(), srv.Addr(), getConnections(t, tr)) + require.True(collect, found, "could not find connection") + require.True(collect, conn.IsClosed, "connection should be closed") + // after closing the client connection, the duration should be + // updated to a value between 1s and 2s + require.Greater(collect, conn.Duration, time.Second, "connection duration should be between 1 and 2 seconds") + require.Less(collect, conn.Duration, 2*time.Second, "connection duration should be between 1 and 2 seconds") }, 3*time.Second, 100*time.Millisecond, "could not find closed connection") - - // after closing the client connection, the duration should be - // updated to a value between 1s and 2s - assert.Greater(t, conn.Duration, time.Second, "connection duration should be between 1 and 2 seconds") - assert.Less(t, conn.Duration, 2*time.Second, "connection duration should be between 1 and 2 seconds") } var failedConnectionsBuildModes = map[ebpftest.BuildMode]struct{}{ @@ -2503,19 +2488,17 @@ func (s *TracerSuite) TestTCPFailureConnectionTimeout() { localAddr := fmt.Sprintf("127.0.0.1:%d", port) // Check if the connection was recorded as failed due to timeout - var conn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) // 110 is the errno for ETIMEDOUT - conn = findFailedConnection(t, localAddr, srvAddr, conns, 110) - return conn != nil + conn := findFailedConnection(t, localAddr, srvAddr, conns, 110) + require.NotNil(collect, conn) + assert.Equal(collect, uint32(0), conn.TCPFailures[104], "expected 0 connection reset") + assert.Equal(collect, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(collect, 
uint32(1), conn.TCPFailures[110], "expected 1 connection timeout") + assert.Equal(collect, uint64(0), conn.Monotonic.SentBytes, "expected 0 bytes sent") + assert.Equal(collect, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") - - assert.Equal(t, uint32(0), conn.TCPFailures[104], "expected 0 connection reset") - assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") - assert.Equal(t, uint32(1), conn.TCPFailures[110], "expected 1 connection timeout") - assert.Equal(t, uint64(0), conn.Monotonic.SentBytes, "expected 0 bytes sent") - assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") } func (s *TracerSuite) TestTCPFailureConnectionResetWithDNAT() { @@ -2561,10 +2544,11 @@ func (s *TracerSuite) TestTCPFailureConnectionResetWithDNAT() { // Check if the connection was recorded as reset var conn *network.ConnectionStats - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { // 104 is the errno for ECONNRESET - conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(t, tr), 104) - return conn != nil + // findFailedConnection gets `t` as it needs to log, it does not assert so no conversion is needed. + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(collect, tr), 104) + require.NotNil(collect, conn) }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") require.NoError(t, c.Close(), "error closing client connection") diff --git a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go index b032aabe44c5f..a108e11363fef 100644 --- a/pkg/network/tracer/tracer_test.go +++ b/pkg/network/tracer/tracer_test.go @@ -198,12 +198,13 @@ func (s *TracerSuite) TestTCPSendAndReceive() { require.NoError(t, err) var conn *network.ConnectionStats - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { // Iterate through active connections until we find connection created above, and confirm send + recv counts - connections := getConnections(t, tr) + connections := getConnections(collect, tr) var ok bool conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) - return conn != nil && ok + require.True(collect, ok) + require.NotNil(collect, conn) }, 3*time.Second, 100*time.Millisecond, "failed to find connection") m := conn.Monotonic @@ -246,10 +247,10 @@ func (s *TracerSuite) TestTCPShortLived() { c.Close() var conn *network.ConnectionStats - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { var ok bool - conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), getConnections(t, tr)) - return ok + conn, ok = findConnection(c.LocalAddr(), c.RemoteAddr(), getConnections(collect, tr)) + require.True(collect, ok) }, 3*time.Second, 100*time.Millisecond, "connection not found") m := conn.Monotonic @@ -399,14 +400,15 @@ func (s *TracerSuite) TestTCPConnsReported() { var reverse *network.ConnectionStats var okForward, okReverse bool // for ebpfless, it takes time for the packet capture to arrive, so poll - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { // Test - connections := getConnections(t, tr) + connections := getConnections(collect, tr) // Server-side forward, okForward = findConnection(c.RemoteAddr(), c.LocalAddr(), connections) + require.True(collect, okForward) // 
Client-side reverse, okReverse = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) - return okForward && okReverse + require.True(collect, okReverse) }, 3*time.Second, 100*time.Millisecond, "connection not found") assert.Equal(t, network.INCOMING, forward.Direction) @@ -470,7 +472,7 @@ func testUDPSendAndReceive(t *testing.T, tr *Tracer, addr string) { // Iterate through active connections until we find connection created above, and confirm send + recv counts require.EventuallyWithT(t, func(ct *assert.CollectT) { // use t instead of ct because getConnections uses require (not assert), and we get a better error message - connections := getConnections(t, tr) + connections := getConnections(ct, tr) incoming, ok := findConnection(c.RemoteAddr(), c.LocalAddr(), connections) if assert.True(ct, ok, "unable to find incoming connection") { assert.Equal(ct, network.INCOMING, incoming.Direction) @@ -482,12 +484,12 @@ func testUDPSendAndReceive(t *testing.T, tr *Tracer, addr string) { } outgoing, ok := findConnection(c.LocalAddr(), c.RemoteAddr(), connections) - if assert.True(t, ok, "unable to find outgoing connection") { - assert.Equal(t, network.OUTGOING, outgoing.Direction) + if assert.True(ct, ok, "unable to find outgoing connection") { + assert.Equal(ct, network.OUTGOING, outgoing.Direction) - assert.Equal(t, clientMessageSize, int(outgoing.Monotonic.SentBytes), "outgoing sent") - assert.Equal(t, serverMessageSize, int(outgoing.Monotonic.RecvBytes), "outgoing recv") - assert.True(t, outgoing.IntraHost, "outgoing intrahost") + assert.Equal(ct, clientMessageSize, int(outgoing.Monotonic.SentBytes), "outgoing sent") + assert.Equal(ct, serverMessageSize, int(outgoing.Monotonic.RecvBytes), "outgoing recv") + assert.True(ct, outgoing.IntraHost, "outgoing intrahost") } }, 3*time.Second, 100*time.Millisecond) @@ -579,14 +581,14 @@ func (s *TracerSuite) TestLocalDNSCollectionEnabled() { assert.NoError(t, err) // Iterate through active connections making sure theres at least one connection - require.Eventually(t, func() bool { - for _, c := range getConnections(t, tr).Conns { + require.EventuallyWithT(t, func(collect *assert.CollectT) { + for _, c := range getConnections(collect, tr).Conns { if isLocalDNS(c) { - return true + return } } - return false + require.Fail(collect, "could not find connection") }, 3*time.Second, 100*time.Millisecond, "could not find connection") } @@ -651,15 +653,15 @@ func (s *TracerSuite) TestShouldExcludeEmptyStatsConnection() { assert.NoError(t, err) var zeroConn network.ConnectionStats - require.Eventually(t, func() bool { - cxs := getConnections(t, tr) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + cxs := getConnections(collect, tr) for _, c := range cxs.Conns { if c.Dest.String() == "127.0.0.1" && c.DPort == 80 { zeroConn = c - return true + return } } - return false + require.Fail(collect, "could not find connection") }, 2*time.Second, 100*time.Millisecond) // next call should not have the same connection @@ -1094,9 +1096,9 @@ func (s *TracerSuite) TestTCPEstablished() { var ok bool // for ebpfless, wait for the packet capture to appear - require.Eventually(t, func() bool { - conn, ok = findConnection(laddr, raddr, getConnections(t, tr)) - return ok + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conn, ok = findConnection(laddr, raddr, getConnections(collect, tr)) + require.True(collect, ok) }, 3*time.Second, 100*time.Millisecond, "couldn't find connection") require.True(t, ok) @@ -1106,10 +1108,10 @@ func (s *TracerSuite) 
TestTCPEstablished() { c.Close() // Wait for the connection to be sent from the perf buffer - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { var ok bool - conn, ok = findConnection(laddr, raddr, getConnections(t, tr)) - return ok + conn, ok = findConnection(laddr, raddr, getConnections(collect, tr)) + require.True(collect, ok) }, 3*time.Second, 100*time.Millisecond, "couldn't find connection") require.True(t, ok) @@ -1140,10 +1142,10 @@ func (s *TracerSuite) TestTCPEstablishedPreExistingConn() { // Wait for the connection to be sent from the perf buffer var conn *network.ConnectionStats - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { var ok bool - conn, ok = findConnection(laddr, raddr, getConnections(t, tr)) - return ok + conn, ok = findConnection(laddr, raddr, getConnections(collect, tr)) + require.True(collect, ok) }, 3*time.Second, 100*time.Millisecond, "couldn't find connection") m := conn.Monotonic @@ -1167,7 +1169,7 @@ func (s *TracerSuite) TestUnconnectedUDPSendIPv4() { require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { - connections := getConnections(t, tr) + connections := getConnections(ct, tr) outgoing := network.FilterConnections(connections, func(cs network.ConnectionStats) bool { return cs.DPort == uint16(remotePort) }) @@ -1198,7 +1200,7 @@ func (s *TracerSuite) TestConnectedUDPSendIPv6() { var outgoing []network.ConnectionStats require.EventuallyWithT(t, func(ct *assert.CollectT) { - connections := getConnections(t, tr) + connections := getConnections(ct, tr) outgoing = network.FilterConnections(connections, func(cs network.ConnectionStats) bool { return cs.DPort == uint16(remotePort) }) @@ -1247,8 +1249,8 @@ func (s *TracerSuite) TestTCPDirection() { // Iterate through active connections until we find connection created above var outgoingConns []network.ConnectionStats var incomingConns []network.ConnectionStats - require.Eventuallyf(t, func() bool { - conns := getConnections(t, tr) + require.EventuallyWithTf(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) if len(outgoingConns) == 0 { outgoingConns = network.FilterConnections(conns, func(cs network.ConnectionStats) bool { return fmt.Sprintf("%s:%d", cs.Dest, cs.DPort) == serverAddr @@ -1260,7 +1262,8 @@ func (s *TracerSuite) TestTCPDirection() { }) } - return len(outgoingConns) == 1 && len(incomingConns) == 1 + require.Len(collect, outgoingConns, 1) + require.Len(collect, incomingConns, 1) }, 3*time.Second, 10*time.Millisecond, "couldn't find incoming and outgoing http connections matching: %s", serverAddr) // Verify connection directions @@ -1290,11 +1293,11 @@ func (s *TracerSuite) TestTCPFailureConnectionRefused() { // Check if the connection was recorded as refused var foundConn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) // Check for the refusal record foundConn = findFailedConnectionByRemoteAddr(srvAddr, conns, 111) - return foundConn != nil + require.NotNil(collect, foundConn) }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") assert.Equal(t, uint32(1), foundConn.TCPFailures[111], "expected 1 connection refused") @@ -1342,10 +1345,11 @@ func (s *TracerSuite) TestTCPFailureConnectionResetWithData() { // Check if the connection was recorded as reset var conn 
*network.ConnectionStats - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { // 104 is the errno for ECONNRESET - conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(t, tr), 104) - return conn != nil + // findFailedConnection needs `t` for logging, hence no need to pass `collect`. + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(collect, tr), 104) + require.NotNil(collect, conn) }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") require.NoError(t, c.Close(), "error closing client connection") @@ -1391,11 +1395,12 @@ func (s *TracerSuite) TestTCPFailureConnectionResetNoData() { // Check if the connection was recorded as reset var conn *network.ConnectionStats - require.Eventually(t, func() bool { - conns := getConnections(t, tr) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + conns := getConnections(collect, tr) // 104 is the errno for ECONNRESET + // findFailedConnection needs `t` for logging, hence no need to pass `collect`. conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, conns, 104) - return conn != nil + require.NotNil(collect, conn) }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") require.NoError(t, c.Close(), "error closing client connection") diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index 645db1e3bc064..c4f3e30c08ad4 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -521,14 +521,14 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b require.NoError(t, client.Client.ProduceSync(ctxTimeout, record).FirstErr(), "record had a produce error while synchronously producing") var telemetryMap *kafka.RawKernelTelemetry - require.Eventually(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { telemetryMap, err = kafka.GetKernelTelemetryMap(monitor.ebpfProgram.Manager.Manager) - require.NoError(t, err) + require.NoError(collect, err) // Ensure that the other buckets remain unchanged before verifying the expected bucket. for idx := 0; idx < kafka.TopicNameBuckets; idx++ { if idx != tt.expectedBucketIndex { - require.Equal(t, currentRawKernelTelemetry.Topic_name_size_buckets[idx], + require.Equal(collect, currentRawKernelTelemetry.Topic_name_size_buckets[idx], telemetryMap.Topic_name_size_buckets[idx], "Expected bucket (%d) to remain unchanged", idx) } @@ -536,7 +536,7 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b // Verify that the expected bucket contains the correct number of occurrences. 
expectedNumberOfOccurrences := fixCount(2) // (1 produce request + 1 fetch request) - return uint64(expectedNumberOfOccurrences)+currentRawKernelTelemetry.Topic_name_size_buckets[tt.expectedBucketIndex] == telemetryMap.Topic_name_size_buckets[tt.expectedBucketIndex] + require.Equal(collect, uint64(expectedNumberOfOccurrences)+currentRawKernelTelemetry.Topic_name_size_buckets[tt.expectedBucketIndex], telemetryMap.Topic_name_size_buckets[tt.expectedBucketIndex]) }, time.Second*3, time.Millisecond*100) // Update the current raw kernel telemetry for the next iteration diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index 322f9be425da4..44ca4244b07a1 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -31,7 +31,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" - eventmonitortestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/testutil" + consumerstestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols" @@ -44,8 +44,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/usm/consts" usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" - procmontestutil "github.com/DataDog/datadog-agent/pkg/process/monitor/testutil" - secutils "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/process/monitor" globalutils "github.com/DataDog/datadog-agent/pkg/util/testutil" dockerutils "github.com/DataDog/datadog-agent/pkg/util/testutil/docker" ) @@ -870,8 +869,7 @@ func setupUSMTLSMonitor(t *testing.T, cfg *config.Config) *Monitor { require.NoError(t, err) require.NoError(t, usmMonitor.Start()) if cfg.EnableUSMEventStream && usmconfig.NeedProcessMonitor(cfg) { - secutils.SetCachedHostname("test-hostname") - eventmonitortestutil.StartEventMonitor(t, procmontestutil.RegisterProcessMonitorEventConsumer) + monitor.InitializeEventConsumer(consumerstestutil.NewTestProcessConsumer(t)) } t.Cleanup(usmMonitor.Stop) t.Cleanup(utils.ResetDebugger) diff --git a/pkg/network/usm/postgres_monitor_test.go b/pkg/network/usm/postgres_monitor_test.go index 76c616beff5bd..9198448f99468 100644 --- a/pkg/network/usm/postgres_monitor_test.go +++ b/pkg/network/usm/postgres_monitor_test.go @@ -16,8 +16,10 @@ import ( "sync" "testing" "time" + "unsafe" "github.com/jackc/pgx/v5/pgproto3" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -47,7 +49,6 @@ const ( alterTableQuery = "ALTER TABLE dummy ADD test VARCHAR(255);" truncateTableQuery = "TRUNCATE TABLE dummy" showQuery = "SHOW search_path" - maxSupportedMessages = protocols.PostgresMaxMessagesPerTailCall * protocols.PostgresMaxTailCalls ) var ( @@ -591,7 +592,7 @@ func testDecoding(t *testing.T, isTLS bool) { }, // The purpose of this test is to validate the POSTGRES_MAX_MESSAGES_PER_TAIL_CALL * POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES limit. 
{ - name: "validate supporting max supported messages limit", + name: "validate max supported messages limit", preMonitorSetup: func(t *testing.T, ctx pgTestContext) { pg, err := postgres.NewPGXClient(postgres.ConnectionOptions{ ServerAddress: ctx.serverAddress, @@ -607,7 +608,7 @@ func testDecoding(t *testing.T, isTLS bool) { ctx.extras["pg"] = pg require.NoError(t, pg.RunQuery(createTableQuery)) // We reduce the limit by 2 messages because the protocol adds messages at the beginning of the maximum message response. - require.NoError(t, pg.RunQuery(createInsertQuery(generateTestValues(1, maxSupportedMessages-3)...))) + require.NoError(t, pg.RunQuery(createInsertQuery(generateTestValues(1, protocols.PostgresMaxTotalMessages-3)...))) require.NoError(t, pg.RunQuery(selectAllQuery)) }, validation: func(t *testing.T, _ pgTestContext, monitor *Monitor) { @@ -623,7 +624,7 @@ func testDecoding(t *testing.T, isTLS bool) { // This test validates that when we exceed the POSTGRES_MAX_MESSAGES_PER_TAIL_CALL * POSTGRES_MAX_TAIL_CALLS_FOR_MAX_MESSAGES limit, // the request is not captured as we will miss the response.In this case, it applies to the SELECT query. { - name: "validate exceeding max supported messages limit is not supported", + name: "exceeding max supported messages limit", preMonitorSetup: func(t *testing.T, ctx pgTestContext) { pg, err := postgres.NewPGXClient(postgres.ConnectionOptions{ ServerAddress: ctx.serverAddress, @@ -638,7 +639,7 @@ func testDecoding(t *testing.T, isTLS bool) { require.NoError(t, pg.Ping()) ctx.extras["pg"] = pg require.NoError(t, pg.RunQuery(createTableQuery)) - require.NoError(t, pg.RunQuery(createInsertQuery(generateTestValues(1, maxSupportedMessages+1)...))) + require.NoError(t, pg.RunQuery(createInsertQuery(generateTestValues(1, protocols.PostgresMaxTotalMessages+1)...))) require.NoError(t, pg.RunQuery(selectAllQuery)) }, validation: func(t *testing.T, _ pgTestContext, monitor *Monitor) { @@ -890,3 +891,169 @@ func createFragment(fragment []byte) [ebpf.BufferSize]byte { copy(b[:], fragment) return b } + +func (s *postgresProtocolParsingSuite) TestKernelTelemetry() { + t := s.T() + tests := []struct { + name string + isTLS bool + }{ + { + name: "without TLS", + isTLS: false, + }, + { + name: "with TLS", + isTLS: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.isTLS && !gotlstestutil.GoTLSSupported(t, config.New()) { + t.Skip("GoTLS not supported for this setup") + } + testKernelMessagesCount(t, tt.isTLS) + }) + } +} + +// testKernelMessagesCount check postgres kernel messages count. 
+func testKernelMessagesCount(t *testing.T, isTLS bool) { + serverHost := "127.0.0.1" + serverAddress := net.JoinHostPort(serverHost, postgresPort) + require.NoError(t, postgres.RunServer(t, serverHost, postgresPort, isTLS)) + waitForPostgresServer(t, serverAddress, isTLS) + + monitor := setupUSMTLSMonitor(t, getPostgresDefaultTestConfiguration(isTLS)) + if isTLS { + utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, os.Getpid(), utils.ManualTracingFallbackEnabled) + } + pgClient := setupPGClient(t, serverAddress, isTLS) + t.Cleanup(func() { + if pgClient != nil { + _ = pgClient.RunQuery(dropTableQuery) + pgClient.Close() + pgClient = nil + } + }) + + createLargeTable(t, pgClient, ebpf.MsgCountFirstBucket+ebpf.MsgCountBucketSize*ebpf.MsgCountNumBuckets) + expectedBuckets := [ebpf.MsgCountNumBuckets]bool{} + + for i := 0; i < ebpf.MsgCountNumBuckets; i++ { + testName := fmt.Sprintf("kernel messages count bucket[%d]", i) + t.Run(testName, func(t *testing.T) { + + expectedBuckets[i] = true + cleanProtocolMaps(t, "postgres", monitor.ebpfProgram.Manager.Manager) + + require.NoError(t, monitor.Resume()) + if i == 0 { + // first bucket: it counts up to ebpf.MsgCountFirstBucket messages + // subtract three messages ('bind', 'row description' and 'ready') + require.NoError(t, pgClient.RunQuery(generateSelectLimitQuery(ebpf.MsgCountFirstBucket-3))) + } else { + limitCount := ebpf.MsgCountFirstBucket + i*ebpf.MsgCountBucketSize - 3 + require.NoError(t, pgClient.RunQuery(generateSelectLimitQuery(limitCount))) + } + require.NoError(t, monitor.Pause()) + + validateKernelBuckets(t, monitor, isTLS, expectedBuckets) + if i > 0 { + // clear current bucket except the first one, which is always non-empty. + expectedBuckets[i] = false + } + }) + } + + t.Run("exceed max buckets", func(t *testing.T) { + + cleanProtocolMaps(t, "postgres", monitor.ebpfProgram.Manager.Manager) + require.NoError(t, monitor.Resume()) + + require.NoError(t, pgClient.RunQuery(generateSelectLimitQuery(ebpf.MsgCountMaxTotal))) + require.NoError(t, monitor.Pause()) + + validateKernelExceedingMax(t, monitor, isTLS) + }) +} + +func setupPGClient(t *testing.T, serverAddress string, isTLS bool) *postgres.PGXClient { + pg, err := postgres.NewPGXClient(postgres.ConnectionOptions{ + ServerAddress: serverAddress, + EnableTLS: isTLS, + }) + require.NoError(t, err) + require.NoError(t, pg.Ping()) + return pg +} + +// createLargeTable runs a postgres query to create a table large enough to retrieve long responses later.
+func createLargeTable(t *testing.T, pg *postgres.PGXClient, tableValuesCount int) { + require.NoError(t, pg.RunQuery(createTableQuery)) + require.NoError(t, pg.RunQuery(createInsertQuery(generateTestValues(0, tableValuesCount)...))) +} + +// validateKernel Checking telemetry data received for a postgres query +func validateKernelBuckets(t *testing.T, monitor *Monitor, tls bool, expected [ebpf.MsgCountNumBuckets]bool) { + var actual *ebpf.PostgresKernelMsgCount + assert.Eventually(t, func() bool { + found, err := getKernelTelemetry(monitor, tls) + if err != nil { + return false + } + actual = found + return compareMessagesCount(found, expected) + }, time.Second*2, time.Millisecond*100) + if t.Failed() { + t.Logf("expected telemetry:\n %+v;\nactual telemetry:\n %+v", expected, actual) + ebpftest.DumpMapsTestHelper(t, monitor.DumpMaps, postgres.KernelTelemetryMap) + } +} + +// getKernelTelemetry returns statistics obtained from the kernel +func getKernelTelemetry(monitor *Monitor, isTLS bool) (*ebpf.PostgresKernelMsgCount, error) { + pgKernelTelemetry := &ebpf.PostgresKernelMsgCount{} + mapName := postgres.KernelTelemetryMap + key := uint32(0) + if isTLS { + key = uint32(1) + } + mp, _, err := monitor.ebpfProgram.GetMap(mapName) + if err != nil { + return nil, fmt.Errorf("unable to get %q map: %s", mapName, err) + } + if err := mp.Lookup(unsafe.Pointer(&key), unsafe.Pointer(pgKernelTelemetry)); err != nil { + return nil, fmt.Errorf("unable to lookup %q map: %s", mapName, err) + } + return pgKernelTelemetry, nil +} + +// compareMessagesCount returns true if the expected bucket is non-empty +func compareMessagesCount(found *ebpf.PostgresKernelMsgCount, expected [ebpf.MsgCountNumBuckets]bool) bool { + for i := range expected { + if expected[i] && found.Msg_count_buckets[i] == 0 { + return false + } + if !expected[i] && found.Msg_count_buckets[i] > 0 { + return false + } + } + return true +} + +// validateKernelExceedingMax check for exceeding the maximum number of buckets +func validateKernelExceedingMax(t *testing.T, monitor *Monitor, tls bool) { + var actual *ebpf.PostgresKernelMsgCount + assert.Eventually(t, func() bool { + found, err := getKernelTelemetry(monitor, tls) + if err != nil { + return false + } + actual = found + return found.Reached_max_messages > 0 + }, time.Second*2, time.Millisecond*100) + if t.Failed() { + t.Logf("expected non-zero max messages, actual telemetry:\n %+v", actual) + } +} diff --git a/pkg/network/usm/sharedlibraries/watcher_test.go b/pkg/network/usm/sharedlibraries/watcher_test.go index ca401e180fc50..7af5b82782fe3 100644 --- a/pkg/network/usm/sharedlibraries/watcher_test.go +++ b/pkg/network/usm/sharedlibraries/watcher_test.go @@ -26,13 +26,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" - eventmonitortestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/testutil" + "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/process/monitor" - procmontestutil "github.com/DataDog/datadog-agent/pkg/process/monitor/testutil" - secutils "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -42,8 +40,7 @@ func 
launchProcessMonitor(t *testing.T, useEventStream bool) { t.Cleanup(pm.Stop) require.NoError(t, pm.Initialize(useEventStream)) if useEventStream { - secutils.SetCachedHostname("test-hostname") - eventmonitortestutil.StartEventMonitor(t, procmontestutil.RegisterProcessMonitorEventConsumer) + monitor.InitializeEventConsumer(testutil.NewTestProcessConsumer(t)) } } diff --git a/pkg/network/usm/tests/tracer_usm_linux_test.go b/pkg/network/usm/tests/tracer_usm_linux_test.go index cb7cf47e30e4a..5cbbef2763777 100644 --- a/pkg/network/usm/tests/tracer_usm_linux_test.go +++ b/pkg/network/usm/tests/tracer_usm_linux_test.go @@ -429,15 +429,15 @@ func (s *USMSuite) TestIgnoreTLSClassificationIfApplicationProtocolWasDetected() // Perform the TLS handshake require.NoError(t, tlsConn.Handshake()) - - require.Eventually(t, func() bool { - payload := getConnections(t, tr) + require.EventuallyWithT(t, func(collect *assert.CollectT) { + payload := getConnections(collect, tr) for _, c := range payload.Conns { if c.DPort == srvPortU16 || c.SPort == srvPortU16 { - return c.ProtocolStack.Contains(protocols.TLS) == tt.shouldBeTLS + require.Equal(collect, c.ProtocolStack.Contains(protocols.TLS), tt.shouldBeTLS) + return } } - return false + require.Fail(collect, "") }, 10*time.Second, 100*time.Millisecond) }) } @@ -502,14 +502,14 @@ func (s *USMSuite) TestTLSClassification() { }, validation: func(t *testing.T, tr *tracer.Tracer) { // Iterate through active connections until we find connection created above - require.Eventuallyf(t, func() bool { - payload := getConnections(t, tr) + require.EventuallyWithTf(t, func(collect *assert.CollectT) { + payload := getConnections(collect, tr) for _, c := range payload.Conns { if c.DPort == port && c.ProtocolStack.Contains(protocols.TLS) { - return true + return } } - return false + require.Fail(collect, "") }, 4*time.Second, 100*time.Millisecond, "couldn't find TLS connection matching: dst port %v", portAsString) }, }) @@ -574,8 +574,8 @@ func (s *USMSuite) TestTLSClassificationAlreadyRunning() { // Iterate through active connections until we find connection created above var foundIncoming, foundOutgoing bool - require.Eventuallyf(t, func() bool { - payload := getConnections(t, tr) + require.EventuallyWithTf(t, func(collect *assert.CollectT) { + payload := getConnections(collect, tr) for _, c := range payload.Conns { if !foundIncoming && c.DPort == uint16(portAsValue) && c.ProtocolStack.Contains(protocols.TLS) { @@ -586,7 +586,8 @@ func (s *USMSuite) TestTLSClassificationAlreadyRunning() { foundOutgoing = true } } - return foundIncoming && foundOutgoing + require.True(collect, foundIncoming) + require.True(collect, foundOutgoing) }, 4*time.Second, 100*time.Millisecond, "couldn't find matching TLS connection") } diff --git a/pkg/networkpath/traceroute/common/common.go b/pkg/networkpath/traceroute/common/common.go new file mode 100644 index 0000000000000..c7622fa391891 --- /dev/null +++ b/pkg/networkpath/traceroute/common/common.go @@ -0,0 +1,160 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
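The getConnections changes above all follow the same testify pattern: require.EventuallyWithT retries the closure and only passes once an iteration records no failures on the *assert.CollectT, so helpers can take the collector instead of *testing.T and their assertions become retryable rather than fatal. A minimal sketch of the shape, with checkCondition as a hypothetical stand-in:

require.EventuallyWithT(t, func(collect *assert.CollectT) {
	// failures recorded on collect only fail this iteration; the surrounding
	// EventuallyWithT keeps retrying until the timeout elapses
	ok, err := checkCondition()
	require.NoError(collect, err)
	require.True(collect, ok, "condition not reached yet")
}, 10*time.Second, 100*time.Millisecond)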
+ +// Package common contains common functionality for both TCP and UDP +// traceroute implementations +package common + +import ( + "fmt" + "net" + "strconv" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" +) + +const ( + // IPProtoICMP is the IP protocol number for ICMP + // we create our own constant here because there are + // different imports for the constant in different + // operating systems + IPProtoICMP = 1 +) + +type ( + // Results encapsulates a response from the + // traceroute + Results struct { + Source net.IP + SourcePort uint16 + Target net.IP + DstPort uint16 + Hops []*Hop + } + + // Hop encapsulates information about a single + // hop in a traceroute + Hop struct { + IP net.IP + Port uint16 + ICMPType layers.ICMPv4TypeCode + RTT time.Duration + IsDest bool + } + + // CanceledError is sent when a listener + // is canceled + CanceledError string + + // ICMPResponse encapsulates the data from + // an ICMP response packet needed for matching + ICMPResponse struct { + SrcIP net.IP + DstIP net.IP + TypeCode layers.ICMPv4TypeCode + InnerSrcIP net.IP + InnerDstIP net.IP + InnerSrcPort uint16 + InnerDstPort uint16 + InnerSeqNum uint32 + } +) + +func (c CanceledError) Error() string { + return string(c) +} + +// LocalAddrForHost takes in a destionation IP and port and returns the local +// address that should be used to connect to the destination +func LocalAddrForHost(destIP net.IP, destPort uint16) (*net.UDPAddr, error) { + // this is a quick way to get the local address for connecting to the host + // using UDP as the network type to avoid actually creating a connection to + // the host, just get the OS to give us a local IP and local ephemeral port + conn, err := net.Dial("udp4", net.JoinHostPort(destIP.String(), strconv.Itoa(int(destPort)))) + if err != nil { + return nil, err + } + defer conn.Close() + localAddr := conn.LocalAddr() + + localUDPAddr, ok := localAddr.(*net.UDPAddr) + if !ok { + return nil, fmt.Errorf("invalid address type for %s: want %T, got %T", localAddr, localUDPAddr, localAddr) + } + + return localUDPAddr, nil +} + +// ParseICMP takes in an IPv4 header and payload and tries to convert to an ICMP +// message, it returns all the fields from the packet we need to validate it's the response +// we're looking for +func ParseICMP(header *ipv4.Header, payload []byte) (*ICMPResponse, error) { + // in addition to parsing, it is probably not a bad idea to do some validation + // so we can ignore the ICMP packets we don't care about + icmpResponse := ICMPResponse{} + + if header.Protocol != IPProtoICMP || header.Version != 4 || + header.Src == nil || header.Dst == nil { + return nil, fmt.Errorf("invalid IP header for ICMP packet: %+v", header) + } + icmpResponse.SrcIP = header.Src + icmpResponse.DstIP = header.Dst + + var icmpv4Layer layers.ICMPv4 + decoded := []gopacket.LayerType{} + icmpParser := gopacket.NewDecodingLayerParser(layers.LayerTypeICMPv4, &icmpv4Layer) + icmpParser.IgnoreUnsupported = true // ignore unsupported layers, we will decode them in the next step + if err := icmpParser.DecodeLayers(payload, &decoded); err != nil { + return nil, fmt.Errorf("failed to decode ICMP packet: %w", err) + } + // since we ignore unsupported layers, we need to check if we actually decoded + // anything + if len(decoded) < 1 { + return nil, fmt.Errorf("failed to decode ICMP packet, no layers decoded") + } + icmpResponse.TypeCode = icmpv4Layer.TypeCode + + var 
icmpPayload []byte + if len(icmpv4Layer.Payload) < 40 { + log.Tracef("Payload length %d is less than 40, extending...\n", len(icmpv4Layer.Payload)) + icmpPayload = make([]byte, 40) + copy(icmpPayload, icmpv4Layer.Payload) + // we have to set this in order for the TCP + // parser to work + icmpPayload[32] = 5 << 4 // set data offset + } else { + icmpPayload = icmpv4Layer.Payload + } + + // a separate parser is needed to decode the inner IP and TCP headers because + // gopacket doesn't support this type of nesting in a single decoder + var innerIPLayer layers.IPv4 + var innerTCPLayer layers.TCP + innerIPParser := gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4, &innerIPLayer, &innerTCPLayer) + if err := innerIPParser.DecodeLayers(icmpPayload, &decoded); err != nil { + return nil, fmt.Errorf("failed to decode inner ICMP payload: %w", err) + } + icmpResponse.InnerSrcIP = innerIPLayer.SrcIP + icmpResponse.InnerDstIP = innerIPLayer.DstIP + icmpResponse.InnerSrcPort = uint16(innerTCPLayer.SrcPort) + icmpResponse.InnerDstPort = uint16(innerTCPLayer.DstPort) + icmpResponse.InnerSeqNum = innerTCPLayer.Seq + + return &icmpResponse, nil +} + +// ICMPMatch checks if an ICMP response matches the expected response +// based on the local and remote IP, port, and sequence number +func ICMPMatch(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32, response *ICMPResponse) bool { + return localIP.Equal(response.InnerSrcIP) && + remoteIP.Equal(response.InnerDstIP) && + localPort == response.InnerSrcPort && + remotePort == response.InnerDstPort && + seqNum == response.InnerSeqNum +} diff --git a/pkg/networkpath/traceroute/common/common_test.go b/pkg/networkpath/traceroute/common/common_test.go new file mode 100644 index 0000000000000..aa6f6cfdf24cb --- /dev/null +++ b/pkg/networkpath/traceroute/common/common_test.go @@ -0,0 +1,117 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
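The padding above compensates for ICMP errors that quote only the IP header plus the first 8 bytes of the offending datagram (per RFC 792), so the embedded TCP header can arrive truncated. The patched index follows from fixed header sizes: a 20-byte inner IPv4 header without options, then the TCP data-offset byte at offset 12 within the TCP header, giving byte 32; writing 5 into the upper nibble declares a minimal 20-byte TCP header so gopacket's decoder accepts the padded 40-byte buffer. The constants below are purely illustrative and not part of the patch.

const (
	innerIPv4HeaderLen   = 20                                      // inner IPv4 header without options
	tcpDataOffsetIndex   = 12                                      // data-offset/reserved byte within the TCP header
	patchedPayloadIndex  = innerIPv4HeaderLen + tcpDataOffsetIndex // = 32, the byte set above
	minimalTCPDataOffset = 5 << 4                                  // 5 x 32-bit words = a 20-byte TCP header
)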
+ +//go:build test + +package common + +import ( + "net" + "testing" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" + "github.com/google/gopacket/layers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" +) + +var ( + srcIP = net.ParseIP("1.2.3.4") + dstIP = net.ParseIP("5.6.7.8") + + innerSrcIP = net.ParseIP("10.0.0.1") + innerDstIP = net.ParseIP("192.168.1.1") +) + +func Test_parseICMP(t *testing.T) { + ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 1) + icmpLayer := testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded) + innerIPv4Layer := testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP) + innerTCPLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) + + tt := []struct { + description string + inHeader *ipv4.Header + inPayload []byte + expected *ICMPResponse + errMsg string + }{ + { + description: "empty IPv4 layer should return an error", + inHeader: &ipv4.Header{}, + inPayload: []byte{}, + expected: nil, + errMsg: "invalid IP header for ICMP packet", + }, + { + description: "missing ICMP layer should return an error", + inHeader: ipv4Header, + inPayload: []byte{}, + expected: nil, + errMsg: "failed to decode ICMP packet", + }, + { + description: "missing inner layers should return an error", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPPacket(nil, icmpLayer, nil, nil, false), + expected: nil, + errMsg: "failed to decode inner ICMP payload", + }, + { + description: "ICMP packet with partial TCP header should create icmpResponse", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), + expected: &ICMPResponse{ + SrcIP: srcIP, + DstIP: dstIP, + InnerSrcIP: innerSrcIP, + InnerDstIP: innerDstIP, + InnerSrcPort: 12345, + InnerDstPort: 443, + InnerSeqNum: 28394, + }, + errMsg: "", + }, + { + description: "full ICMP packet should create icmpResponse", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), + expected: &ICMPResponse{ + SrcIP: srcIP, + DstIP: dstIP, + InnerSrcIP: innerSrcIP, + InnerDstIP: innerDstIP, + InnerSrcPort: 12345, + InnerDstPort: 443, + InnerSeqNum: 28394, + }, + errMsg: "", + }, + } + + for _, test := range tt { + t.Run(test.description, func(t *testing.T) { + actual, err := ParseICMP(test.inHeader, test.inPayload) + if test.errMsg != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.errMsg) + assert.Nil(t, actual) + return + } + require.Nil(t, err) + require.NotNil(t, actual) + // assert.Equal doesn't handle net.IP well + assert.Equal(t, testutils.StructFieldCount(test.expected), testutils.StructFieldCount(actual)) + assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) + assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) + assert.Truef(t, test.expected.InnerSrcIP.Equal(actual.InnerSrcIP), "mismatch inner source IPs: expected %s, got %s", test.expected.InnerSrcIP.String(), actual.InnerSrcIP.String()) + assert.Truef(t, test.expected.InnerDstIP.Equal(actual.InnerDstIP), "mismatch inner dest IPs: expected %s, got %s", test.expected.InnerDstIP.String(), actual.InnerDstIP.String()) + assert.Equal(t, test.expected.InnerSrcPort, actual.InnerSrcPort) + 
assert.Equal(t, test.expected.InnerDstPort, actual.InnerDstPort) + assert.Equal(t, test.expected.InnerSeqNum, actual.InnerSeqNum) + }) + } +} diff --git a/pkg/networkpath/traceroute/runner/runner.go b/pkg/networkpath/traceroute/runner/runner.go index 9087b66b4eb8a..b8240ee6db3cd 100644 --- a/pkg/networkpath/traceroute/runner/runner.go +++ b/pkg/networkpath/traceroute/runner/runner.go @@ -24,6 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config" "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/tcp" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -222,7 +223,7 @@ func (r *Runner) runTCP(cfg config.Config, hname string, target net.IP, maxTTL u return pathResult, nil } -func (r *Runner) processTCPResults(res *tcp.Results, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { +func (r *Runner) processTCPResults(res *common.Results, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { traceroutePath := payload.NetworkPath{ AgentVersion: version.AgentVersion, PathtraceID: payload.NewPathtraceID(), diff --git a/pkg/networkpath/traceroute/tcp/tcpv4.go b/pkg/networkpath/traceroute/tcp/tcpv4.go index 64484f9c0ad60..ca38edf21cf4c 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4.go @@ -9,8 +9,6 @@ package tcp import ( "net" "time" - - "github.com/google/gopacket/layers" ) type ( @@ -27,26 +25,6 @@ type ( Delay time.Duration // delay between sending packets (not applicable if we go the serial send/receive route) Timeout time.Duration // full timeout for all packets } - - // Results encapsulates a response from the TCP - // traceroute - Results struct { - Source net.IP - SourcePort uint16 - Target net.IP - DstPort uint16 - Hops []*Hop - } - - // Hop encapsulates information about a single - // hop in a TCP traceroute - Hop struct { - IP net.IP - Port uint16 - ICMPType layers.ICMPv4TypeCode - RTT time.Duration - IsDest bool - } ) // Close doesn't to anything yet, but we should diff --git a/pkg/networkpath/traceroute/tcp/tcpv4_unix.go b/pkg/networkpath/traceroute/tcp/tcpv4_unix.go index 32cf7e19ee11e..f5859d056e00e 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4_unix.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4_unix.go @@ -16,15 +16,16 @@ import ( "golang.org/x/net/ipv4" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" "github.com/DataDog/datadog-agent/pkg/util/log" ) // TracerouteSequential runs a traceroute sequentially where a packet is // sent and we wait for a response before sending the next packet -func (t *TCPv4) TracerouteSequential() (*Results, error) { +func (t *TCPv4) TracerouteSequential() (*common.Results, error) { // Get local address for the interface that connects to this // host and store in in the probe - addr, err := localAddrForHost(t.Target, t.DestPort) + addr, err := common.LocalAddrForHost(t.Target, t.DestPort) if err != nil { return nil, fmt.Errorf("failed to get local address for target: %w", err) } @@ -71,7 +72,7 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { } // hops should be of length # of hops - hops := make([]*Hop, 0, t.MaxTTL-t.MinTTL) + hops := make([]*common.Hop, 0, t.MaxTTL-t.MinTTL) for 
i := int(t.MinTTL); i <= int(t.MaxTTL); i++ { seqNumber := rand.Uint32() @@ -88,7 +89,7 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { } } - return &Results{ + return &common.Results{ Source: t.srcIP, SourcePort: t.srcPort, Target: t.Target, @@ -97,7 +98,7 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { }, nil } -func (t *TCPv4) sendAndReceive(rawIcmpConn *ipv4.RawConn, rawTCPConn *ipv4.RawConn, ttl int, seqNum uint32, timeout time.Duration) (*Hop, error) { +func (t *TCPv4) sendAndReceive(rawIcmpConn *ipv4.RawConn, rawTCPConn *ipv4.RawConn, ttl int, seqNum uint32, timeout time.Duration) (*common.Hop, error) { tcpHeader, tcpPacket, err := createRawTCPSyn(t.srcIP, t.srcPort, t.Target, t.DestPort, seqNum, ttl) if err != nil { log.Errorf("failed to create TCP packet with TTL: %d, error: %s", ttl, err.Error()) @@ -122,7 +123,7 @@ func (t *TCPv4) sendAndReceive(rawIcmpConn *ipv4.RawConn, rawTCPConn *ipv4.RawCo rtt = end.Sub(start) } - return &Hop{ + return &common.Hop{ IP: hopIP, Port: hopPort, ICMPType: icmpType, diff --git a/pkg/networkpath/traceroute/tcp/tcpv4_windows.go b/pkg/networkpath/traceroute/tcp/tcpv4_windows.go index 3067695b0e559..e9eb74cae702f 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4_windows.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4_windows.go @@ -14,6 +14,7 @@ import ( "golang.org/x/sys/windows" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -67,14 +68,14 @@ func createRawSocket() (*winrawsocket, error) { // TracerouteSequential runs a traceroute sequentially where a packet is // sent and we wait for a response before sending the next packet -func (t *TCPv4) TracerouteSequential() (*Results, error) { +func (t *TCPv4) TracerouteSequential() (*common.Results, error) { log.Debugf("Running traceroute to %+v", t) // Get local address for the interface that connects to this // host and store in in the probe // // TODO: do this once for the probe and hang on to the // listener until we decide to close the probe - addr, err := localAddrForHost(t.Target, t.DestPort) + addr, err := common.LocalAddrForHost(t.Target, t.DestPort) if err != nil { return nil, fmt.Errorf("failed to get local address for target: %w", err) } @@ -87,7 +88,7 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { } defer rs.close() - hops := make([]*Hop, 0, int(t.MaxTTL-t.MinTTL)+1) + hops := make([]*common.Hop, 0, int(t.MaxTTL-t.MinTTL)+1) for i := int(t.MinTTL); i <= int(t.MaxTTL); i++ { seqNumber := rand.Uint32() @@ -104,7 +105,7 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { } } - return &Results{ + return &common.Results{ Source: t.srcIP, SourcePort: t.srcPort, Target: t.Target, @@ -113,7 +114,7 @@ func (t *TCPv4) TracerouteSequential() (*Results, error) { }, nil } -func (t *TCPv4) sendAndReceive(rs *winrawsocket, ttl int, seqNum uint32, timeout time.Duration) (*Hop, error) { +func (t *TCPv4) sendAndReceive(rs *winrawsocket, ttl int, seqNum uint32, timeout time.Duration) (*common.Hop, error) { _, buffer, _, err := createRawTCPSynBuffer(t.srcIP, t.srcPort, t.Target, t.DestPort, seqNum, ttl) if err != nil { log.Errorf("failed to create TCP packet with TTL: %d, error: %s", ttl, err.Error()) @@ -138,7 +139,7 @@ func (t *TCPv4) sendAndReceive(rs *winrawsocket, ttl int, seqNum uint32, timeout rtt = end.Sub(start) } - return &Hop{ + return &common.Hop{ IP: hopIP, Port: hopPort, ICMPType: icmpType, diff --git a/pkg/networkpath/traceroute/tcp/utils.go 
b/pkg/networkpath/traceroute/tcp/utils.go index ae7d507f23940..3bd9e437e90c7 100644 --- a/pkg/networkpath/traceroute/tcp/utils.go +++ b/pkg/networkpath/traceroute/tcp/utils.go @@ -8,46 +8,14 @@ package tcp import ( "fmt" "net" - "strconv" + "syscall" - "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/google/gopacket" "github.com/google/gopacket/layers" "golang.org/x/net/ipv4" ) -const ( - // ACK is the acknowledge TCP flag - ACK = 1 << 4 - // RST is the reset TCP flag - RST = 1 << 2 - // SYN is the synchronization TCP flag - SYN = 1 << 1 - - // IPProtoICMP is the ICMP protocol number - IPProtoICMP = 1 - // IPProtoTCP is the TCP protocol number - IPProtoTCP = 6 -) - type ( - // canceledError is sent when a listener - // is canceled - canceledError string - - // icmpResponse encapsulates the data from - // an ICMP response packet needed for matching - icmpResponse struct { - SrcIP net.IP - DstIP net.IP - TypeCode layers.ICMPv4TypeCode - InnerSrcIP net.IP - InnerDstIP net.IP - InnerSrcPort uint16 - InnerDstPort uint16 - InnerSeqNum uint32 - } - // tcpResponse encapsulates the data from a // TCP response needed for matching tcpResponse struct { @@ -57,25 +25,6 @@ type ( } ) -func localAddrForHost(destIP net.IP, destPort uint16) (*net.UDPAddr, error) { - // this is a quick way to get the local address for connecting to the host - // using UDP as the network type to avoid actually creating a connection to - // the host, just get the OS to give us a local IP and local ephemeral port - conn, err := net.Dial("udp4", net.JoinHostPort(destIP.String(), strconv.Itoa(int(destPort)))) - if err != nil { - return nil, err - } - defer conn.Close() - localAddr := conn.LocalAddr() - - localUDPAddr, ok := localAddr.(*net.UDPAddr) - if !ok { - return nil, fmt.Errorf("invalid address type for %s: want %T, got %T", localAddr, localUDPAddr, localAddr) - } - - return localUDPAddr, nil -} - // reserveLocalPort reserves an ephemeral TCP port // and returns both the listener and port because the // listener should be held until the port is no longer @@ -145,64 +94,6 @@ func createRawTCPSynBuffer(sourceIP net.IP, sourcePort uint16, destIP net.IP, de return &ipHdr, packet, 20, nil } -// parseICMP takes in an IPv4 header and payload and tries to convert to an ICMP -// message, it returns all the fields from the packet we need to validate it's the response -// we're looking for -func parseICMP(header *ipv4.Header, payload []byte) (*icmpResponse, error) { - // in addition to parsing, it is probably not a bad idea to do some validation - // so we can ignore the ICMP packets we don't care about - icmpResponse := icmpResponse{} - - if header.Protocol != IPProtoICMP || header.Version != 4 || - header.Src == nil || header.Dst == nil { - return nil, fmt.Errorf("invalid IP header for ICMP packet: %+v", header) - } - icmpResponse.SrcIP = header.Src - icmpResponse.DstIP = header.Dst - - var icmpv4Layer layers.ICMPv4 - decoded := []gopacket.LayerType{} - icmpParser := gopacket.NewDecodingLayerParser(layers.LayerTypeICMPv4, &icmpv4Layer) - icmpParser.IgnoreUnsupported = true // ignore unsupported layers, we will decode them in the next step - if err := icmpParser.DecodeLayers(payload, &decoded); err != nil { - return nil, fmt.Errorf("failed to decode ICMP packet: %w", err) - } - // since we ignore unsupported layers, we need to check if we actually decoded - // anything - if len(decoded) < 1 { - return nil, fmt.Errorf("failed to decode ICMP packet, no layers decoded") - } - icmpResponse.TypeCode = icmpv4Layer.TypeCode - - 
var icmpPayload []byte - if len(icmpv4Layer.Payload) < 40 { - log.Tracef("Payload length %d is less than 40, extending...\n", len(icmpv4Layer.Payload)) - icmpPayload = make([]byte, 40) - copy(icmpPayload, icmpv4Layer.Payload) - // we have to set this in order for the TCP - // parser to work - icmpPayload[32] = 5 << 4 // set data offset - } else { - icmpPayload = icmpv4Layer.Payload - } - - // a separate parser is needed to decode the inner IP and TCP headers because - // gopacket doesn't support this type of nesting in a single decoder - var innerIPLayer layers.IPv4 - var innerTCPLayer layers.TCP - innerIPParser := gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4, &innerIPLayer, &innerTCPLayer) - if err := innerIPParser.DecodeLayers(icmpPayload, &decoded); err != nil { - return nil, fmt.Errorf("failed to decode inner ICMP payload: %w", err) - } - icmpResponse.InnerSrcIP = innerIPLayer.SrcIP - icmpResponse.InnerDstIP = innerIPLayer.DstIP - icmpResponse.InnerSrcPort = uint16(innerTCPLayer.SrcPort) - icmpResponse.InnerDstPort = uint16(innerTCPLayer.DstPort) - icmpResponse.InnerSeqNum = innerTCPLayer.Seq - - return &icmpResponse, nil -} - type tcpParser struct { layer layers.TCP decoded []gopacket.LayerType @@ -216,7 +107,7 @@ func newTCPParser() *tcpParser { } func (tp *tcpParser) parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse, error) { - if header.Protocol != IPProtoTCP || header.Version != 4 || + if header.Protocol != syscall.IPPROTO_TCP || header.Version != 4 || header.Src == nil || header.Dst == nil { return nil, fmt.Errorf("invalid IP header for TCP packet: %+v", header) } @@ -236,14 +127,6 @@ func (tp *tcpParser) parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse return resp, nil } -func icmpMatch(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32, response *icmpResponse) bool { - return localIP.Equal(response.InnerSrcIP) && - remoteIP.Equal(response.InnerDstIP) && - localPort == response.InnerSrcPort && - remotePort == response.InnerDstPort && - seqNum == response.InnerSeqNum -} - func tcpMatch(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32, response *tcpResponse) bool { flagsCheck := (response.TCPResponse.SYN && response.TCPResponse.ACK) || response.TCPResponse.RST sourcePort := uint16(response.TCPResponse.SrcPort) @@ -256,7 +139,3 @@ func tcpMatch(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint seqNum == response.TCPResponse.Ack-1 && flagsCheck } - -func (c canceledError) Error() string { - return string(c) -} diff --git a/pkg/networkpath/traceroute/tcp/utils_test.go b/pkg/networkpath/traceroute/tcp/utils_test.go index d79cda12ac5da..5b344df71ec16 100644 --- a/pkg/networkpath/traceroute/tcp/utils_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_test.go @@ -10,15 +10,14 @@ package tcp import ( "fmt" "net" - "reflect" "runtime" "testing" - "github.com/google/gopacket" - "github.com/google/gopacket/layers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/ipv4" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" ) var ( @@ -113,102 +112,12 @@ func Test_createRawTCPSynBuffer(t *testing.T) { assert.Equal(t, expectedPktBytes, pktBytes) } -func Test_parseICMP(t *testing.T) { - ipv4Header := createMockIPv4Header(srcIP, dstIP, 1) - icmpLayer := createMockICMPLayer(layers.ICMPv4CodeTTLExceeded) - innerIPv4Layer := createMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP) - innerTCPLayer := 
createMockTCPLayer(12345, 443, 28394, 12737, true, true, true) - - tt := []struct { - description string - inHeader *ipv4.Header - inPayload []byte - expected *icmpResponse - errMsg string - }{ - { - description: "empty IPv4 layer should return an error", - inHeader: &ipv4.Header{}, - inPayload: []byte{}, - expected: nil, - errMsg: "invalid IP header for ICMP packet", - }, - { - description: "missing ICMP layer should return an error", - inHeader: ipv4Header, - inPayload: []byte{}, - expected: nil, - errMsg: "failed to decode ICMP packet", - }, - { - description: "missing inner layers should return an error", - inHeader: ipv4Header, - inPayload: createMockICMPPacket(nil, icmpLayer, nil, nil, false), - expected: nil, - errMsg: "failed to decode inner ICMP payload", - }, - { - description: "ICMP packet with partial TCP header should create icmpResponse", - inHeader: ipv4Header, - inPayload: createMockICMPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), - expected: &icmpResponse{ - SrcIP: srcIP, - DstIP: dstIP, - InnerSrcIP: innerSrcIP, - InnerDstIP: innerDstIP, - InnerSrcPort: 12345, - InnerDstPort: 443, - InnerSeqNum: 28394, - }, - errMsg: "", - }, - { - description: "full ICMP packet should create icmpResponse", - inHeader: ipv4Header, - inPayload: createMockICMPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), - expected: &icmpResponse{ - SrcIP: srcIP, - DstIP: dstIP, - InnerSrcIP: innerSrcIP, - InnerDstIP: innerDstIP, - InnerSrcPort: 12345, - InnerDstPort: 443, - InnerSeqNum: 28394, - }, - errMsg: "", - }, - } - - for _, test := range tt { - t.Run(test.description, func(t *testing.T) { - actual, err := parseICMP(test.inHeader, test.inPayload) - if test.errMsg != "" { - require.Error(t, err) - assert.Contains(t, err.Error(), test.errMsg) - assert.Nil(t, actual) - return - } - require.Nil(t, err) - require.NotNil(t, actual) - // assert.Equal doesn't handle net.IP well - assert.Equal(t, structFieldCount(test.expected), structFieldCount(actual)) - assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) - assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) - assert.Truef(t, test.expected.InnerSrcIP.Equal(actual.InnerSrcIP), "mismatch inner source IPs: expected %s, got %s", test.expected.InnerSrcIP.String(), actual.InnerSrcIP.String()) - assert.Truef(t, test.expected.InnerDstIP.Equal(actual.InnerDstIP), "mismatch inner dest IPs: expected %s, got %s", test.expected.InnerDstIP.String(), actual.InnerDstIP.String()) - assert.Equal(t, test.expected.InnerSrcPort, actual.InnerSrcPort) - assert.Equal(t, test.expected.InnerDstPort, actual.InnerDstPort) - assert.Equal(t, test.expected.InnerSeqNum, actual.InnerSeqNum) - }) - } -} - func Test_parseTCP(t *testing.T) { - ipv4Header := createMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP - tcpLayer := createMockTCPLayer(12345, 443, 28394, 12737, true, true, true) + ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP + tcpLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) // full packet - encodedTCPLayer, fullTCPPacket := createMockTCPPacket(ipv4Header, tcpLayer, false) + encodedTCPLayer, fullTCPPacket := testutils.CreateMockTCPPacket(ipv4Header, tcpLayer, false) tt := []struct { description string @@ -257,7 +166,7 @@ func Test_parseTCP(t *testing.T) { require.Nil(t, err) require.NotNil(t, 
actual) // assert.Equal doesn't handle net.IP well - assert.Equal(t, structFieldCount(test.expected), structFieldCount(actual)) + assert.Equal(t, testutils.StructFieldCount(test.expected), testutils.StructFieldCount(actual)) assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) assert.Equal(t, test.expected.TCPResponse, actual.TCPResponse) @@ -266,11 +175,11 @@ func Test_parseTCP(t *testing.T) { } func BenchmarkParseTCP(b *testing.B) { - ipv4Header := createMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP - tcpLayer := createMockTCPLayer(12345, 443, 28394, 12737, true, true, true) + ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP + tcpLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) // full packet - _, fullTCPPacket := createMockTCPPacket(ipv4Header, tcpLayer, false) + _, fullTCPPacket := testutils.CreateMockTCPPacket(ipv4Header, tcpLayer, false) tp := newTCPParser() @@ -282,129 +191,3 @@ func BenchmarkParseTCP(b *testing.B) { } } } - -func createMockIPv4Header(srcIP, dstIP net.IP, protocol int) *ipv4.Header { - return &ipv4.Header{ - Version: 4, - Src: srcIP, - Dst: dstIP, - Protocol: protocol, - TTL: 64, - Len: 8, - } -} - -func createMockICMPPacket(ipLayer *layers.IPv4, icmpLayer *layers.ICMPv4, innerIP *layers.IPv4, innerTCP *layers.TCP, partialTCPHeader bool) []byte { - innerBuf := gopacket.NewSerializeBuffer() - opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} - - innerLayers := make([]gopacket.SerializableLayer, 0, 2) - if innerIP != nil { - innerLayers = append(innerLayers, innerIP) - } - if innerTCP != nil { - innerLayers = append(innerLayers, innerTCP) - if innerIP != nil { - innerTCP.SetNetworkLayerForChecksum(innerIP) - } - } - - gopacket.SerializeLayers(innerBuf, opts, - innerLayers..., - ) - payload := innerBuf.Bytes() - - // if partialTCP is set, truncate - // the payload to include only the - // first 8 bytes of the TCP header - if partialTCPHeader { - payload = payload[:32] - } - - buf := gopacket.NewSerializeBuffer() - gopacket.SerializeLayers(buf, opts, - icmpLayer, - gopacket.Payload(payload), - ) - - icmpBytes := buf.Bytes() - if ipLayer == nil { - return icmpBytes - } - - buf = gopacket.NewSerializeBuffer() - gopacket.SerializeLayers(buf, opts, - ipLayer, - gopacket.Payload(icmpBytes), - ) - - return buf.Bytes() -} - -func createMockTCPPacket(ipHeader *ipv4.Header, tcpLayer *layers.TCP, includeHeader bool) (*layers.TCP, []byte) { - ipLayer := &layers.IPv4{ - Version: 4, - SrcIP: ipHeader.Src, - DstIP: ipHeader.Dst, - Protocol: layers.IPProtocol(ipHeader.Protocol), - TTL: 64, - Length: 8, - } - tcpLayer.SetNetworkLayerForChecksum(ipLayer) - buf := gopacket.NewSerializeBuffer() - opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} - if includeHeader { - gopacket.SerializeLayers(buf, opts, - ipLayer, - tcpLayer, - ) - } else { - gopacket.SerializeLayers(buf, opts, - tcpLayer, - ) - } - - pkt := gopacket.NewPacket(buf.Bytes(), layers.LayerTypeTCP, gopacket.Default) - - // return encoded TCP layer here - return pkt.Layer(layers.LayerTypeTCP).(*layers.TCP), buf.Bytes() -} - -func createMockIPv4Layer(srcIP, dstIP net.IP, protocol layers.IPProtocol) *layers.IPv4 { - return &layers.IPv4{ - SrcIP: srcIP, - DstIP: dstIP, - 
Version: 4, - Protocol: protocol, - } -} - -func createMockICMPLayer(typeCode layers.ICMPv4TypeCode) *layers.ICMPv4 { - return &layers.ICMPv4{ - TypeCode: typeCode, - } -} - -func createMockTCPLayer(srcPort uint16, dstPort uint16, seqNum uint32, ackNum uint32, syn bool, ack bool, rst bool) *layers.TCP { - return &layers.TCP{ - SrcPort: layers.TCPPort(srcPort), - DstPort: layers.TCPPort(dstPort), - Seq: seqNum, - Ack: ackNum, - SYN: syn, - ACK: ack, - RST: rst, - } -} - -func structFieldCount(v interface{}) int { - val := reflect.ValueOf(v) - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - if val.Kind() != reflect.Struct { - return -1 - } - - return val.NumField() -} diff --git a/pkg/networkpath/traceroute/tcp/utils_unix.go b/pkg/networkpath/traceroute/tcp/utils_unix.go index 2a52e5f8bea88..4fd1b2e4b251d 100644 --- a/pkg/networkpath/traceroute/tcp/utils_unix.go +++ b/pkg/networkpath/traceroute/tcp/utils_unix.go @@ -14,6 +14,7 @@ import ( "sync" "time" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/google/gopacket/layers" "go.uber.org/multierr" @@ -68,8 +69,8 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time wg.Wait() if tcpErr != nil && icmpErr != nil { - _, tcpCanceled := tcpErr.(canceledError) - _, icmpCanceled := icmpErr.(canceledError) + _, tcpCanceled := tcpErr.(common.CanceledError) + _, icmpCanceled := icmpErr.(common.CanceledError) if icmpCanceled && tcpCanceled { log.Trace("timed out waiting for responses") return net.IP{}, 0, 0, time.Time{}, nil @@ -103,7 +104,7 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo for { select { case <-ctx.Done(): - return net.IP{}, 0, 0, time.Time{}, canceledError("listener canceled") + return net.IP{}, 0, 0, time.Time{}, common.CanceledError("listener canceled") default: } now := time.Now() @@ -127,12 +128,12 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo // TODO: remove listener constraint and parse all packets // in the same function return a succinct struct here if listener == "icmp" { - icmpResponse, err := parseICMP(header, packet) + icmpResponse, err := common.ParseICMP(header, packet) if err != nil { log.Tracef("failed to parse ICMP packet: %s", err) continue } - if icmpMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { + if common.ICMPMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, received, nil } } else if listener == "tcp" { diff --git a/pkg/networkpath/traceroute/tcp/utils_unix_test.go b/pkg/networkpath/traceroute/tcp/utils_unix_test.go index 731f5affe1380..db78310723d28 100644 --- a/pkg/networkpath/traceroute/tcp/utils_unix_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_unix_test.go @@ -19,6 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/ipv4" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" ) type ( @@ -40,7 +42,7 @@ type ( ) func Test_handlePackets(t *testing.T) { - _, tcpBytes := createMockTCPPacket(createMockIPv4Header(dstIP, srcIP, 6), createMockTCPLayer(443, 12345, 28394, 28395, true, true, true), false) + _, tcpBytes := testutils.CreateMockTCPPacket(testutils.CreateMockIPv4Header(dstIP, srcIP, 6), testutils.CreateMockTCPLayer(443, 12345, 28394, 28395, true, true, true), false) tt := []struct { description string @@ -120,8 +122,8 @@ func 
Test_handlePackets(t *testing.T) { description: "successful ICMP parsing returns IP, port, and type code", ctxTimeout: 500 * time.Millisecond, conn: &mockRawConn{ - header: createMockIPv4Header(srcIP, dstIP, 1), - payload: createMockICMPPacket(nil, createMockICMPLayer(layers.ICMPv4CodeTTLExceeded), createMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), createMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), + header: testutils.CreateMockIPv4Header(srcIP, dstIP, 1), + payload: testutils.CreateMockICMPPacket(nil, testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded), testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), }, localIP: innerSrcIP, localPort: 12345, @@ -137,7 +139,7 @@ func Test_handlePackets(t *testing.T) { description: "successful TCP parsing returns IP, port, and type code", ctxTimeout: 500 * time.Millisecond, conn: &mockRawConn{ - header: createMockIPv4Header(dstIP, srcIP, 6), + header: testutils.CreateMockIPv4Header(dstIP, srcIP, 6), payload: tcpBytes, }, localIP: srcIP, diff --git a/pkg/networkpath/traceroute/tcp/utils_windows.go b/pkg/networkpath/traceroute/tcp/utils_windows.go index 077495f43203e..483167109b6e4 100644 --- a/pkg/networkpath/traceroute/tcp/utils_windows.go +++ b/pkg/networkpath/traceroute/tcp/utils_windows.go @@ -16,6 +16,7 @@ import ( "golang.org/x/net/ipv4" "golang.org/x/sys/windows" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/google/gopacket/layers" ) @@ -49,7 +50,7 @@ func (w *winrawsocket) listenPackets(timeout time.Duration, localIP net.IP, loca wg.Wait() if icmpErr != nil { - _, icmpCanceled := icmpErr.(canceledError) + _, icmpCanceled := icmpErr.(common.CanceledError) if icmpCanceled { log.Trace("timed out waiting for responses") return net.IP{}, 0, 0, time.Time{}, nil @@ -74,7 +75,7 @@ func (w *winrawsocket) handlePackets(ctx context.Context, localIP net.IP, localP for { select { case <-ctx.Done(): - return net.IP{}, 0, 0, time.Time{}, canceledError("listener canceled") + return net.IP{}, 0, 0, time.Time{}, common.CanceledError("listener canceled") default: } @@ -110,12 +111,12 @@ func (w *winrawsocket) handlePackets(ctx context.Context, localIP net.IP, localP // TODO: remove listener constraint and parse all packets // in the same function return a succinct struct here if header.Protocol == windows.IPPROTO_ICMP { - icmpResponse, err := parseICMP(header, packet) + icmpResponse, err := common.ParseICMP(header, packet) if err != nil { log.Tracef("failed to parse ICMP packet: %s", err.Error()) continue } - if icmpMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { + if common.ICMPMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, received, nil } } else if header.Protocol == windows.IPPROTO_TCP { diff --git a/pkg/networkpath/traceroute/tcp/utils_windows_test.go b/pkg/networkpath/traceroute/tcp/utils_windows_test.go index 6e5b2a1c81ba4..6fbd7d0cc860b 100644 --- a/pkg/networkpath/traceroute/tcp/utils_windows_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_windows_test.go @@ -18,6 +18,7 @@ import ( "golang.org/x/sys/windows" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" "github.com/google/gopacket/layers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,7 +35,7 @@ type ( ) func 
Test_handlePackets(t *testing.T) { - _, tcpBytes := createMockTCPPacket(createMockIPv4Header(dstIP, srcIP, 6), createMockTCPLayer(443, 12345, 28394, 28395, true, true, true), true) + _, tcpBytes := testutils.CreateMockTCPPacket(testutils.CreateMockIPv4Header(dstIP, srcIP, 6), testutils.CreateMockTCPLayer(443, 12345, 28394, 28395, true, true, true), true) tt := []struct { description string @@ -92,7 +93,7 @@ func Test_handlePackets(t *testing.T) { description: "successful ICMP parsing returns IP, port, and type code", ctxTimeout: 500 * time.Millisecond, conn: &mockRawConn{ - payload: createMockICMPPacket(createMockIPv4Layer(srcIP, dstIP, layers.IPProtocolICMPv4), createMockICMPLayer(layers.ICMPv4CodeTTLExceeded), createMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), createMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), + payload: testutils.CreateMockICMPPacket(testutils.CreateMockIPv4Layer(srcIP, dstIP, layers.IPProtocolICMPv4), testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded), testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), }, localIP: innerSrcIP, localPort: 12345, diff --git a/pkg/networkpath/traceroute/testutils/doc.go b/pkg/networkpath/traceroute/testutils/doc.go new file mode 100644 index 0000000000000..2a31e324c585c --- /dev/null +++ b/pkg/networkpath/traceroute/testutils/doc.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package testutils contains utilities for testing traceroute code +package testutils diff --git a/pkg/networkpath/traceroute/testutils/testutils.go b/pkg/networkpath/traceroute/testutils/testutils.go new file mode 100644 index 0000000000000..e412d8971372b --- /dev/null +++ b/pkg/networkpath/traceroute/testutils/testutils.go @@ -0,0 +1,150 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build test + +package testutils + +import ( + "net" + "reflect" + + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" +) + +// CreateMockIPv4Header creates a mock IPv4 header for testing +func CreateMockIPv4Header(srcIP, dstIP net.IP, protocol int) *ipv4.Header { + return &ipv4.Header{ + Version: 4, + Src: srcIP, + Dst: dstIP, + Protocol: protocol, + TTL: 64, + Len: 8, + } +} + +// CreateMockICMPPacket creates a mock ICMP packet for testing +func CreateMockICMPPacket(ipLayer *layers.IPv4, icmpLayer *layers.ICMPv4, innerIP *layers.IPv4, innerTCP *layers.TCP, partialTCPHeader bool) []byte { + innerBuf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + + innerLayers := make([]gopacket.SerializableLayer, 0, 2) + if innerIP != nil { + innerLayers = append(innerLayers, innerIP) + } + if innerTCP != nil { + innerLayers = append(innerLayers, innerTCP) + if innerIP != nil { + innerTCP.SetNetworkLayerForChecksum(innerIP) // nolint: errcheck + } + } + + gopacket.SerializeLayers(innerBuf, opts, // nolint: errcheck + innerLayers..., + ) + payload := innerBuf.Bytes() + + // if partialTCP is set, truncate + // the payload to include only the + // first 8 bytes of the TCP header + if partialTCPHeader { + payload = payload[:32] + } + + buf := gopacket.NewSerializeBuffer() + gopacket.SerializeLayers(buf, opts, // nolint: errcheck + icmpLayer, + gopacket.Payload(payload), + ) + + icmpBytes := buf.Bytes() + if ipLayer == nil { + return icmpBytes + } + + buf = gopacket.NewSerializeBuffer() + gopacket.SerializeLayers(buf, opts, // nolint: errcheck + ipLayer, + gopacket.Payload(icmpBytes), + ) + + return buf.Bytes() +} + +// CreateMockTCPPacket creates a mock TCP packet for testing +func CreateMockTCPPacket(ipHeader *ipv4.Header, tcpLayer *layers.TCP, includeHeader bool) (*layers.TCP, []byte) { + ipLayer := &layers.IPv4{ + Version: 4, + SrcIP: ipHeader.Src, + DstIP: ipHeader.Dst, + Protocol: layers.IPProtocol(ipHeader.Protocol), + TTL: 64, + Length: 8, + } + tcpLayer.SetNetworkLayerForChecksum(ipLayer) // nolint: errcheck + buf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + if includeHeader { + gopacket.SerializeLayers(buf, opts, // nolint: errcheck + ipLayer, + tcpLayer, + ) + } else { + gopacket.SerializeLayers(buf, opts, // nolint: errcheck + tcpLayer, + ) + } + + pkt := gopacket.NewPacket(buf.Bytes(), layers.LayerTypeTCP, gopacket.Default) + + // return encoded TCP layer here + return pkt.Layer(layers.LayerTypeTCP).(*layers.TCP), buf.Bytes() +} + +// CreateMockIPv4Layer creates a mock IPv4 layer for testing +func CreateMockIPv4Layer(srcIP, dstIP net.IP, protocol layers.IPProtocol) *layers.IPv4 { + return &layers.IPv4{ + SrcIP: srcIP, + DstIP: dstIP, + Version: 4, + Protocol: protocol, + } +} + +// CreateMockICMPLayer creates a mock ICMP layer for testing +func CreateMockICMPLayer(typeCode layers.ICMPv4TypeCode) *layers.ICMPv4 { + return &layers.ICMPv4{ + TypeCode: typeCode, + } +} + +// CreateMockTCPLayer creates a mock TCP layer for testing +func CreateMockTCPLayer(srcPort uint16, dstPort uint16, seqNum uint32, ackNum uint32, syn bool, ack bool, rst bool) *layers.TCP { + return &layers.TCP{ + SrcPort: layers.TCPPort(srcPort), + DstPort: layers.TCPPort(dstPort), + Seq: seqNum, + Ack: ackNum, + SYN: syn, + ACK: ack, + RST: rst, + } +} + +// StructFieldCount returns the number of fields in a struct +func StructFieldCount(v 
interface{}) int { + val := reflect.ValueOf(v) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + if val.Kind() != reflect.Struct { + return -1 + } + + return val.NumField() +} diff --git a/pkg/process/monitor/process_monitor.go b/pkg/process/monitor/process_monitor.go index 8710aa63e324f..8dbdefc99a822 100644 --- a/pkg/process/monitor/process_monitor.go +++ b/pkg/process/monitor/process_monitor.go @@ -18,10 +18,9 @@ import ( "github.com/vishvananda/netlink" "go.uber.org/atomic" - "github.com/DataDog/datadog-agent/pkg/eventmonitor" + "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/runtime" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -221,7 +220,10 @@ func (pm *ProcessMonitor) initNetlinkProcessEventMonitor() error { // initCallbackRunner runs multiple workers that run tasks sent over a queue. func (pm *ProcessMonitor) initCallbackRunner() { - cpuNum := runtime.NumVCPU() + cpuNum, err := kernel.PossibleCPUs() + if err != nil { + cpuNum = runtime.NumVCPU() + } pm.callbackRunner = make(chan func(), pendingCallbacksQueueSize) pm.callbackRunnerStopChannel = make(chan struct{}) pm.callbackRunnersWG.Add(cpuNum) @@ -487,76 +489,20 @@ func (pm *ProcessMonitor) Stop() { pm.processExitCallbacksMutex.Unlock() } -// Event defines the event used by the process monitor -type Event struct { - Type model.EventType - Pid uint32 -} - -// EventConsumer defines an event consumer to handle event monitor events in the -// process monitor -type EventConsumer struct{} - -// NewProcessMonitorEventConsumer returns a new process monitor event consumer -func NewProcessMonitorEventConsumer(em *eventmonitor.EventMonitor) (*EventConsumer, error) { - consumer := &EventConsumer{} - err := em.AddEventConsumerHandler(consumer) - return consumer, err -} - -// ChanSize returns the channel size used by this consumer -func (ec *EventConsumer) ChanSize() int { - return 500 -} - -// ID returns the ID of this consumer -func (ec *EventConsumer) ID() string { - return "PROCESS_MONITOR" -} - -// Start the consumer -func (ec *EventConsumer) Start() error { - return nil -} - -// Stop the consumer -func (ec *EventConsumer) Stop() { -} - -// EventTypes returns the event types handled by this consumer -func (ec *EventConsumer) EventTypes() []model.EventType { - return []model.EventType{ - model.ExecEventType, - model.ExitEventType, - } -} - -// HandleEvent handles events received from the event monitor -func (ec *EventConsumer) HandleEvent(event any) { - sevent, ok := event.(*Event) - if !ok { - return - } - - processMonitor.tel.events.Add(1) - switch sevent.Type { - case model.ExecEventType: +// InitializeEventConsumer initializes the event consumer with the event handling. 
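With the mock builders now exported from the shared testutils package, the round trip exercised by the traceroute tests above can be sketched in a few lines. The fixture IPs are the same package-level variables those tests declare, and the expected ports simply mirror the existing assertions; nothing here is new behavior.

// build a mock ICMP TTL-exceeded packet wrapping an inner IPv4+TCP header, then parse it
hdr := testutils.CreateMockIPv4Header(srcIP, dstIP, 1) // protocol 1 = ICMP
payload := testutils.CreateMockICMPPacket(
	nil, // no outer IP layer; ParseICMP receives the header separately
	testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded),
	testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP),
	testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true),
	false, // keep the full inner TCP header
)
resp, err := common.ParseICMP(hdr, payload)
// on success, resp.InnerSrcPort == 12345, resp.InnerDstPort == 443, resp.InnerSeqNum == 28394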
+func InitializeEventConsumer(consumer *consumers.ProcessConsumer) { + consumer.SubscribeExec(func(pid uint32) { + processMonitor.tel.events.Add(1) processMonitor.tel.exec.Add(1) if processMonitor.hasExecCallbacks.Load() { - processMonitor.handleProcessExec(sevent.Pid) + processMonitor.handleProcessExec(pid) } - case model.ExitEventType: + }) + consumer.SubscribeExit(func(pid uint32) { + processMonitor.tel.events.Add(1) processMonitor.tel.exit.Add(1) if processMonitor.hasExitCallbacks.Load() { - processMonitor.handleProcessExit(sevent.Pid) + processMonitor.handleProcessExit(pid) } - } -} - -// Copy should copy the given event or return nil to discard it -func (ec *EventConsumer) Copy(event *model.Event) any { - return &Event{ - Type: event.GetEventType(), - Pid: event.GetProcessPid(), - } + }) } diff --git a/pkg/process/monitor/process_monitor_test.go b/pkg/process/monitor/process_monitor_test.go index 0fe9e80bd21cb..c03cf721aaafc 100644 --- a/pkg/process/monitor/process_monitor_test.go +++ b/pkg/process/monitor/process_monitor_test.go @@ -19,13 +19,10 @@ import ( "github.com/vishvananda/netns" "go.uber.org/atomic" - "github.com/DataDog/datadog-agent/pkg/eventmonitor" - eventmonitortestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/testutil" + "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" - "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/kernel" - "github.com/DataDog/datadog-agent/pkg/util/log" ) func getProcessMonitor(t *testing.T) *ProcessMonitor { @@ -40,12 +37,12 @@ func getProcessMonitor(t *testing.T) *ProcessMonitor { func waitForProcessMonitor(t *testing.T, pm *ProcessMonitor) { execCounter := atomic.NewInt32(0) execCallback := func(_ uint32) { execCounter.Inc() } - registerCallback(t, pm, true, (*ProcessCallback)(&execCallback)) + registerCallback(t, pm, true, &execCallback) exitCounter := atomic.NewInt32(0) // Sanity subscribing a callback. 
exitCallback := func(_ uint32) { exitCounter.Inc() } - registerCallback(t, pm, false, (*ProcessCallback)(&exitCallback)) + registerCallback(t, pm, false, &exitCallback) require.Eventually(t, func() bool { _ = exec.Command("/bin/echo").Run() @@ -56,14 +53,7 @@ func waitForProcessMonitor(t *testing.T, pm *ProcessMonitor) { func initializePM(t *testing.T, pm *ProcessMonitor, useEventStream bool) { require.NoError(t, pm.Initialize(useEventStream)) if useEventStream { - utils.SetCachedHostname("test-hostname") - eventmonitortestutil.StartEventMonitor(t, func(t *testing.T, evm *eventmonitor.EventMonitor) { - // Can't use the implementation in procmontestutil due to import cycles - procmonconsumer, err := NewProcessMonitorEventConsumer(evm) - require.NoError(t, err) - evm.RegisterEventConsumer(procmonconsumer) - log.Info("process monitoring test consumer initialized") - }) + InitializeEventConsumer(testutil.NewTestProcessConsumer(t)) } waitForProcessMonitor(t, pm) } @@ -113,7 +103,7 @@ func (s *processMonitorSuite) TestProcessMonitorSanity() { defer execsMutex.Unlock() execs[pid] = struct{}{} } - registerCallback(t, pm, true, (*ProcessCallback)(&callback)) + registerCallback(t, pm, true, &callback) exitMutex := sync.RWMutex{} exits := make(map[uint32]struct{}) @@ -122,7 +112,7 @@ func (s *processMonitorSuite) TestProcessMonitorSanity() { defer exitMutex.Unlock() exits[pid] = struct{}{} } - registerCallback(t, pm, false, (*ProcessCallback)(&exitCallback)) + registerCallback(t, pm, false, &exitCallback) initializePM(t, pm, s.useEventStream) cmd := exec.Command(testBinaryPath, "test") @@ -182,7 +172,7 @@ func (s *processMonitorSuite) TestProcessRegisterMultipleCallbacks() { defer execCountersMutexes[i].Unlock() c[pid] = struct{}{} } - registerCallback(t, pm, true, (*ProcessCallback)(&callback)) + registerCallback(t, pm, true, &callback) exitCountersMutexes[i] = sync.RWMutex{} exitCounters[i] = make(map[uint32]struct{}) @@ -193,7 +183,7 @@ func (s *processMonitorSuite) TestProcessRegisterMultipleCallbacks() { defer exitCountersMutexes[i].Unlock() exitc[pid] = struct{}{} } - registerCallback(t, pm, false, (*ProcessCallback)(&exitCallback)) + registerCallback(t, pm, false, &exitCallback) } initializePM(t, pm, s.useEventStream) @@ -252,10 +242,10 @@ func (s *processMonitorSuite) TestProcessMonitorInNamespace() { pm := getProcessMonitor(t) callback := func(pid uint32) { execSet.Store(pid, struct{}{}) } - registerCallback(t, pm, true, (*ProcessCallback)(&callback)) + registerCallback(t, pm, true, &callback) exitCallback := func(pid uint32) { exitSet.Store(pid, struct{}{}) } - registerCallback(t, pm, false, (*ProcessCallback)(&exitCallback)) + registerCallback(t, pm, false, &exitCallback) monNs, err := netns.New() require.NoError(t, err, "could not create network namespace for process monitor") diff --git a/pkg/process/monitor/testutil/testutil.go b/pkg/process/monitor/testutil/testutil.go deleted file mode 100644 index d270faec3968f..0000000000000 --- a/pkg/process/monitor/testutil/testutil.go +++ /dev/null @@ -1,27 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build linux && test - -// Package testutil provides utilities for testing the process monitor -package testutil - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/DataDog/datadog-agent/pkg/eventmonitor" - procmon "github.com/DataDog/datadog-agent/pkg/process/monitor" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -// RegisterProcessMonitorEventConsumer registers the process monitor consumer to an EventMonitor -func RegisterProcessMonitorEventConsumer(t *testing.T, evm *eventmonitor.EventMonitor) { - procmonconsumer, err := procmon.NewProcessMonitorEventConsumer(evm) - require.NoError(t, err) - evm.RegisterEventConsumer(procmonconsumer) - log.Info("process monitoring test consumer initialized") -} diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index fc9cbcecc6a4b..ab6b0b6b591e1 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -45,8 +45,6 @@ type RuntimeSecurityConfig struct { RuntimeEnabled bool // PoliciesDir defines the folder in which the policy files are located PoliciesDir string - // WatchPoliciesDir activate policy dir inotify - WatchPoliciesDir bool // PolicyMonitorEnabled enable policy monitoring PolicyMonitorEnabled bool // PolicyMonitorPerRuleEnabled enabled per-rule policy monitoring @@ -353,7 +351,6 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { // policy & ruleset PoliciesDir: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.policies.dir"), - WatchPoliciesDir: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.watch_dir"), PolicyMonitorEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.enabled"), PolicyMonitorPerRuleEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.per_rule_enabled"), PolicyMonitorReportInternalPolicies: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.report_internal_policies"), diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index c51e920adf851..1bb574938bfdd 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -1097,7 +1097,7 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { } else { pid, err := utils.TryToResolveTraceePid(event.ProcessContext.Process.Pid, event.PTrace.NSPID) if err != nil { - seclog.Errorf("PTrace err: %v", err) + seclog.Infof("PTrace err: %v", err) return } pidToResolve = pid diff --git a/pkg/security/reporter/reporter.go b/pkg/security/reporter/reporter.go index 1c8a4f71bf5a0..227f94ac456ed 100644 --- a/pkg/security/reporter/reporter.go +++ b/pkg/security/reporter/reporter.go @@ -52,7 +52,7 @@ func newReporter(hostname string, stopper startstop.Stopper, sourceName, sourceT stopper.Add(auditor) // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) + pipelineProvider := pipeline.NewProvider(4, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) pipelineProvider.Start() stopper.Add(pipelineProvider) diff --git a/pkg/security/resolvers/cgroup/model/model.go b/pkg/security/resolvers/cgroup/model/model.go index c916e45477876..1bde78216970f 100644 
--- a/pkg/security/resolvers/cgroup/model/model.go +++ b/pkg/security/resolvers/cgroup/model/model.go @@ -15,7 +15,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "github.com/DataDog/datadog-agent/pkg/security/utils" ) // CacheEntry cgroup resolver cache entry @@ -23,9 +22,8 @@ type CacheEntry struct { model.CGroupContext model.ContainerContext sync.RWMutex - Deleted *atomic.Bool - WorkloadSelector WorkloadSelector - PIDs map[uint32]bool + Deleted *atomic.Bool + PIDs map[uint32]bool } // NewCacheEntry returns a new instance of a CacheEntry @@ -78,27 +76,3 @@ func (cgce *CacheEntry) AddPID(pid uint32) { cgce.PIDs[pid] = true } - -// SetTags sets the tags for the provided workload -func (cgce *CacheEntry) SetTags(tags []string) { - cgce.Lock() - defer cgce.Unlock() - - cgce.Tags = tags - cgce.WorkloadSelector.Image = utils.GetTagValue("image_name", tags) - cgce.WorkloadSelector.Tag = utils.GetTagValue("image_tag", tags) - if len(cgce.WorkloadSelector.Image) != 0 && len(cgce.WorkloadSelector.Tag) == 0 { - cgce.WorkloadSelector.Tag = "latest" - } -} - -// GetWorkloadSelectorCopy returns a copy of the workload selector of this cgroup -func (cgce *CacheEntry) GetWorkloadSelectorCopy() *WorkloadSelector { - cgce.Lock() - defer cgce.Unlock() - - return &WorkloadSelector{ - Image: cgce.WorkloadSelector.Image, - Tag: cgce.WorkloadSelector.Tag, - } -} diff --git a/pkg/security/resolvers/cgroup/model/types.go b/pkg/security/resolvers/cgroup/model/types.go index d07f94be04121..09a9a66473aa6 100644 --- a/pkg/security/resolvers/cgroup/model/types.go +++ b/pkg/security/resolvers/cgroup/model/types.go @@ -61,3 +61,8 @@ func (ws WorkloadSelector) ToTags() []string { "image_tag:" + ws.Tag, } } + +// Copy returns a copy of the selector +func (ws WorkloadSelector) Copy() *WorkloadSelector { + return &ws +} diff --git a/pkg/security/resolvers/sbom/resolver.go b/pkg/security/resolvers/sbom/resolver.go index d0a290975d6df..e282dbd6f23d9 100644 --- a/pkg/security/resolvers/sbom/resolver.go +++ b/pkg/security/resolvers/sbom/resolver.go @@ -33,6 +33,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -486,15 +487,15 @@ func (r *Resolver) triggerScan(sbom *SBOM) { } // OnWorkloadSelectorResolvedEvent is used to handle the creation of a new cgroup with its resolved tags -func (r *Resolver) OnWorkloadSelectorResolvedEvent(cgroup *cgroupModel.CacheEntry) { +func (r *Resolver) OnWorkloadSelectorResolvedEvent(workload *tags.Workload) { r.sbomsLock.Lock() defer r.sbomsLock.Unlock() - if cgroup == nil { + if workload == nil { return } - id := cgroup.ContainerID + id := workload.ContainerID // We don't scan hosts for now if len(id) == 0 { return @@ -502,8 +503,8 @@ func (r *Resolver) OnWorkloadSelectorResolvedEvent(cgroup *cgroupModel.CacheEntr _, ok := r.sboms[id] if !ok { - workloadKey := getWorkloadKey(cgroup.GetWorkloadSelectorCopy()) - sbom, err := r.newWorkloadEntry(id, cgroup, workloadKey) + workloadKey := getWorkloadKey(workload.Selector.Copy()) + sbom, err := r.newWorkloadEntry(id, workload.CacheEntry, workloadKey) if err != 
nil { seclog.Errorf("couldn't create new SBOM entry for sbom '%s': %v", id, err) } diff --git a/pkg/security/resolvers/sbom/resolver_unsupported.go b/pkg/security/resolvers/sbom/resolver_unsupported.go index 9e073e03e552c..614682af506fa 100644 --- a/pkg/security/resolvers/sbom/resolver_unsupported.go +++ b/pkg/security/resolvers/sbom/resolver_unsupported.go @@ -15,6 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" ) @@ -33,7 +34,7 @@ func (r *Resolver) OnCGroupDeletedEvent(_ *cgroupModel.CacheEntry) { } // OnWorkloadSelectorResolvedEvent is used to handle the creation of a new cgroup with its resolved tags -func (r *Resolver) OnWorkloadSelectorResolvedEvent(_ *cgroupModel.CacheEntry) { +func (r *Resolver) OnWorkloadSelectorResolvedEvent(_ *tags.Workload) { } // ResolvePackage returns the Package that owns the provided file diff --git a/pkg/security/resolvers/tags/resolver.go b/pkg/security/resolvers/tags/resolver.go index 051122e4436ec..a5f0a4b956775 100644 --- a/pkg/security/resolvers/tags/resolver.go +++ b/pkg/security/resolvers/tags/resolver.go @@ -21,6 +21,8 @@ type Event int const ( // WorkloadSelectorResolved is used to notify that a new cgroup with a resolved workload selector is ready WorkloadSelectorResolved Event = iota + // WorkloadSelectorDeleted is used to notify that a cgroup with a resolved workload selector is deleted + WorkloadSelectorDeleted ) // Tagger defines a Tagger for the Tags Resolver diff --git a/pkg/security/resolvers/tags/resolver_linux.go b/pkg/security/resolvers/tags/resolver_linux.go index e11b2001dba4a..e029f003696b6 100644 --- a/pkg/security/resolvers/tags/resolver_linux.go +++ b/pkg/security/resolvers/tags/resolver_linux.go @@ -13,21 +13,26 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/utils" ) -type pendingWorkload struct { +// Workload represents a workload along with its tags +type Workload struct { *cgroupModel.CacheEntry - retries int + Tags []string + Selector cgroupModel.WorkloadSelector + retries int } // LinuxResolver represents a default resolver based directly on the underlying tagger type LinuxResolver struct { *DefaultResolver - *utils.Notifier[Event, *cgroupModel.CacheEntry] - workloadsWithoutTags chan *pendingWorkload + *utils.Notifier[Event, *Workload] + workloadsWithoutTags chan *Workload cgroupResolver *cgroup.Resolver + workloads map[containerutils.CGroupID]*Workload } // Start the resolver @@ -37,12 +42,19 @@ func (t *LinuxResolver) Start(ctx context.Context) error { } if err := t.cgroupResolver.RegisterListener(cgroup.CGroupCreated, func(cgce *cgroupModel.CacheEntry) { - workload := &pendingWorkload{CacheEntry: cgce, retries: 3} + workload := &Workload{CacheEntry: cgce, retries: 3} + t.workloads[cgce.CGroupID] = workload t.checkTags(workload) }); err != nil { return err } + if err := t.cgroupResolver.RegisterListener(cgroup.CGroupDeleted, func(cgce *cgroupModel.CacheEntry) { + delete(t.workloads, cgce.CGroupID) + }); err != nil { + return err + } + go func() { 
ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -73,17 +85,18 @@ func (t *LinuxResolver) Start(ctx context.Context) error { return nil } -func needsTagsResolution(cgce *cgroupModel.CacheEntry) bool { - return len(cgce.ContainerID) != 0 && !cgce.WorkloadSelector.IsReady() +func needsTagsResolution(workload *Workload) bool { + return len(workload.ContainerID) != 0 && !workload.Selector.IsReady() } // checkTags checks if the tags of a workload were properly set -func (t *LinuxResolver) checkTags(pendingWorkload *pendingWorkload) { - workload := pendingWorkload.CacheEntry +func (t *LinuxResolver) checkTags(pendingWorkload *Workload) { + workload := pendingWorkload // check if the workload tags were found or if it was deleted if !workload.Deleted.Load() && needsTagsResolution(workload) { // this is an alive cgroup, try to resolve its tags now - if err := t.fetchTags(workload); err != nil || needsTagsResolution(workload) { + err := t.fetchTags(workload) + if err != nil || needsTagsResolution(workload) { if pendingWorkload.retries--; pendingWorkload.retries >= 0 { // push to the workloadsWithoutTags chan so that its tags can be resolved later select { @@ -102,22 +115,29 @@ func (t *LinuxResolver) checkTags(pendingWorkload *pendingWorkload) { } // fetchTags fetches tags for the provided workload -func (t *LinuxResolver) fetchTags(container *cgroupModel.CacheEntry) error { - newTags, err := t.ResolveWithErr(container.ContainerID) +func (t *LinuxResolver) fetchTags(workload *Workload) error { + newTags, err := t.ResolveWithErr(workload.ContainerID) if err != nil { - return fmt.Errorf("failed to resolve %s: %w", container.ContainerID, err) + return fmt.Errorf("failed to resolve %s: %w", workload.ContainerID, err) } - container.SetTags(newTags) + + workload.Selector.Image = utils.GetTagValue("image_name", newTags) + workload.Selector.Tag = utils.GetTagValue("image_tag", newTags) + if len(workload.Selector.Image) != 0 && len(workload.Selector.Tag) == 0 { + workload.Selector.Tag = "latest" + } + return nil } // NewResolver returns a new tags resolver func NewResolver(tagger Tagger, cgroupsResolver *cgroup.Resolver) *LinuxResolver { resolver := &LinuxResolver{ - Notifier: utils.NewNotifier[Event, *cgroupModel.CacheEntry](), + Notifier: utils.NewNotifier[Event, *Workload](), DefaultResolver: NewDefaultResolver(tagger), - workloadsWithoutTags: make(chan *pendingWorkload, 100), + workloadsWithoutTags: make(chan *Workload, 100), cgroupResolver: cgroupsResolver, + workloads: make(map[containerutils.CGroupID]*Workload), } return resolver } diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index 6b0f98fd15666..ffb73244b6c2d 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -377,7 +377,7 @@ func (e *RuleEngine) gatherDefaultPolicyProviders() []rules.PolicyProvider { } // directory policy provider - if provider, err := rules.NewPoliciesDirProvider(e.config.PoliciesDir, e.config.WatchPoliciesDir); err != nil { + if provider, err := rules.NewPoliciesDirProvider(e.config.PoliciesDir); err != nil { seclog.Errorf("failed to load local policies: %s", err) } else { policyProviders = append(policyProviders, provider) diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index 9c3d225dcdad1..e5f0a3c820483 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -8,7 +8,6 @@ require ( github.com/alecthomas/participle v0.7.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/fatih/structtag v1.2.0 - 
github.com/fsnotify/fsnotify v1.8.0 github.com/google/go-cmp v0.6.0 github.com/google/gopacket v1.1.19 github.com/hashicorp/go-multierror v1.1.1 diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index 18391dc2e1b6e..0c694b56983cf 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -17,8 +17,6 @@ github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4 github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= diff --git a/pkg/security/secl/rules/policy_dir.go b/pkg/security/secl/rules/policy_dir.go index 119f31a40f461..f1c31ca699b46 100644 --- a/pkg/security/secl/rules/policy_dir.go +++ b/pkg/security/secl/rules/policy_dir.go @@ -7,12 +7,10 @@ package rules import ( - "context" "os" "path/filepath" "sort" - "github.com/fsnotify/fsnotify" "github.com/hashicorp/go-multierror" ) @@ -25,17 +23,10 @@ var _ PolicyProvider = (*PoliciesDirProvider)(nil) // PoliciesDirProvider defines a new policy dir provider type PoliciesDirProvider struct { PoliciesDir string - - onNewPoliciesReadyCb func() - cancelFnc func() - watcher *fsnotify.Watcher - watchedFiles []string } // SetOnNewPoliciesReadyCb implements the policy provider interface -func (p *PoliciesDirProvider) SetOnNewPoliciesReadyCb(cb func()) { - p.onNewPoliciesReadyCb = cb -} +func (p *PoliciesDirProvider) SetOnNewPoliciesReadyCb(_ func()) {} // Start starts the policy dir provider func (p *PoliciesDirProvider) Start() {} @@ -92,14 +83,6 @@ func (p *PoliciesDirProvider) LoadPolicies(macroFilters []MacroFilter, ruleFilte errs = multierror.Append(errs, err) } - // remove oldest watched files - if p.watcher != nil { - for _, watched := range p.watchedFiles { - _ = p.watcher.Remove(watched) - } - p.watchedFiles = p.watchedFiles[0:0] - } - // Load and parse policies for _, filename := range policyFiles { policy, err := p.loadPolicy(filename, macroFilters, ruleFilters) @@ -112,14 +95,6 @@ func (p *PoliciesDirProvider) LoadPolicies(macroFilters []MacroFilter, ruleFilte } policies = append(policies, policy) - - if p.watcher != nil { - if err := p.watcher.Add(filename); err != nil { - errs = multierror.Append(errs, err) - } else { - p.watchedFiles = append(p.watchedFiles, filename) - } - } } return policies, errs @@ -127,79 +102,14 @@ func (p *PoliciesDirProvider) LoadPolicies(macroFilters []MacroFilter, ruleFilte // Close stops policy provider interface func (p *PoliciesDirProvider) Close() error { - if p.cancelFnc != nil { - p.cancelFnc() - } - - if p.watcher != nil { - p.watcher.Close() - } return nil } -func filesEqual(a []string, b []string) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} - -func (p *PoliciesDirProvider) watch(ctx context.Context) { - go func() { - for { - select { - case <-ctx.Done(): - return - case event, ok := <-p.watcher.Events: - if !ok { - return - } - - if 
event.Op&(fsnotify.Create|fsnotify.Remove) > 0 { - files, _ := p.getPolicyFiles() - if !filesEqual(files, p.watchedFiles) { - p.onNewPoliciesReadyCb() - } - } else if event.Op&fsnotify.Write > 0 && filepath.Ext(event.Name) == policyExtension { - p.onNewPoliciesReadyCb() - } - case _, ok := <-p.watcher.Errors: - if !ok { - return - } - } - } - }() -} - // NewPoliciesDirProvider returns providers for the given policies dir -func NewPoliciesDirProvider(policiesDir string, watch bool) (*PoliciesDirProvider, error) { - p := &PoliciesDirProvider{ +func NewPoliciesDirProvider(policiesDir string) (*PoliciesDirProvider, error) { + return &PoliciesDirProvider{ PoliciesDir: policiesDir, - } - - if watch { - var err error - if p.watcher, err = fsnotify.NewWatcher(); err != nil { - return nil, err - } - - if err := p.watcher.Add(policiesDir); err != nil { - p.watcher.Close() - return nil, err - } - - var ctx context.Context - ctx, p.cancelFnc = context.WithCancel(context.Background()) - go p.watch(ctx) - } - - return p, nil + }, nil } // Type returns the type of policy dir provider diff --git a/pkg/security/secl/rules/policy_test.go b/pkg/security/secl/rules/policy_test.go index e6252c10593f1..da1fda9740860 100644 --- a/pkg/security/secl/rules/policy_test.go +++ b/pkg/security/secl/rules/policy_test.go @@ -75,7 +75,7 @@ func TestMacroMerge(t *testing.T) { event.SetFieldValue("open.file.path", "/tmp/test") event.SetFieldValue("process.comm", "/usr/bin/vi") - provider, err := NewPoliciesDirProvider(tmpDir, false) + provider, err := NewPoliciesDirProvider(tmpDir) if err != nil { t.Fatal(err) } @@ -150,7 +150,7 @@ func TestRuleMerge(t *testing.T) { t.Fatal(err) } - provider, err := NewPoliciesDirProvider(tmpDir, false) + provider, err := NewPoliciesDirProvider(tmpDir) if err != nil { t.Fatal(err) } @@ -284,7 +284,7 @@ func TestActionSetVariable(t *testing.T) { t.Fatal(err) } - provider, err := NewPoliciesDirProvider(tmpDir, false) + provider, err := NewPoliciesDirProvider(tmpDir) if err != nil { t.Fatal(err) } @@ -354,7 +354,7 @@ func TestActionSetVariableTTL(t *testing.T) { t.Fatal(err) } - provider, err := NewPoliciesDirProvider(tmpDir, false) + provider, err := NewPoliciesDirProvider(tmpDir) if err != nil { t.Fatal(err) } @@ -420,7 +420,7 @@ func TestActionSetVariableConflict(t *testing.T) { t.Fatal(err) } - provider, err := NewPoliciesDirProvider(tmpDir, false) + provider, err := NewPoliciesDirProvider(tmpDir) if err != nil { t.Fatal(err) } @@ -441,7 +441,7 @@ func loadPolicy(t *testing.T, testPolicy *PolicyDef, policyOpts PolicyLoaderOpts t.Fatal(err) } - provider, err := NewPoliciesDirProvider(tmpDir, false) + provider, err := NewPoliciesDirProvider(tmpDir) if err != nil { t.Fatal(err) } diff --git a/pkg/security/security_profile/dump/manager.go b/pkg/security/security_profile/dump/manager.go index a0b8c490b965e..b37a2c7264f1a 100644 --- a/pkg/security/security_profile/dump/manager.go +++ b/pkg/security/security_profile/dump/manager.go @@ -34,6 +34,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/resolvers" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/seclog" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" @@ -47,7 +48,7 @@ type ActivityDumpHandler interface { // 
SecurityProfileManager is a generic interface used to communicate with the Security Profile manager type SecurityProfileManager interface { - FetchSilentWorkloads() map[cgroupModel.WorkloadSelector][]*cgroupModel.CacheEntry + FetchSilentWorkloads() map[cgroupModel.WorkloadSelector][]*tags.Workload OnLocalStorageCleanup(files []string) } diff --git a/pkg/security/security_profile/profile/manager.go b/pkg/security/security_profile/profile/manager.go index aadd9b94cd88b..38fd111fbfe5a 100644 --- a/pkg/security/security_profile/profile/manager.go +++ b/pkg/security/security_profile/profile/manager.go @@ -29,7 +29,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/resolvers" - "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" @@ -228,7 +227,7 @@ func (m *SecurityProfileManager) Start(ctx context.Context) { // register the manager to the CGroup resolver _ = m.resolvers.TagsResolver.RegisterListener(tags.WorkloadSelectorResolved, m.OnWorkloadSelectorResolvedEvent) - _ = m.resolvers.CGroupResolver.RegisterListener(cgroup.CGroupDeleted, m.OnCGroupDeletedEvent) + _ = m.resolvers.TagsResolver.RegisterListener(tags.WorkloadSelectorDeleted, m.OnWorkloadDeletedEvent) seclog.Infof("security profile manager started") @@ -250,7 +249,7 @@ func (m *SecurityProfileManager) propagateWorkloadSelectorsToProviders() { } // OnWorkloadSelectorResolvedEvent is used to handle the creation of a new cgroup with its resolved tags -func (m *SecurityProfileManager) OnWorkloadSelectorResolvedEvent(workload *cgroupModel.CacheEntry) { +func (m *SecurityProfileManager) OnWorkloadSelectorResolvedEvent(workload *tags.Workload) { m.profilesLock.Lock() defer m.profilesLock.Unlock() workload.Lock() @@ -261,7 +260,7 @@ func (m *SecurityProfileManager) OnWorkloadSelectorResolvedEvent(workload *cgrou return } - selector := workload.WorkloadSelector + selector := workload.Selector selector.Tag = "*" // check if the workload of this selector already exists @@ -307,7 +306,7 @@ func (m *SecurityProfileManager) OnWorkloadSelectorResolvedEvent(workload *cgrou } // LinkProfile applies a profile to the provided workload -func (m *SecurityProfileManager) LinkProfile(profile *SecurityProfile, workload *cgroupModel.CacheEntry) { +func (m *SecurityProfileManager) LinkProfile(profile *SecurityProfile, workload *tags.Workload) { profile.Lock() defer profile.Unlock() @@ -329,7 +328,7 @@ func (m *SecurityProfileManager) LinkProfile(profile *SecurityProfile, workload } // UnlinkProfile removes the link between a workload and a profile -func (m *SecurityProfileManager) UnlinkProfile(profile *SecurityProfile, workload *cgroupModel.CacheEntry) { +func (m *SecurityProfileManager) UnlinkProfile(profile *SecurityProfile, workload *tags.Workload) { profile.Lock() defer profile.Unlock() @@ -394,11 +393,11 @@ func FillProfileContextFromProfile(ctx *model.SecurityProfileContext, profile *S } } -// OnCGroupDeletedEvent is used to handle a CGroupDeleted event -func (m *SecurityProfileManager) OnCGroupDeletedEvent(workload *cgroupModel.CacheEntry) { +// OnWorkloadDeletedEvent is used to handle a WorkloadDeleted event +func (m *SecurityProfileManager) OnWorkloadDeletedEvent(workload *tags.Workload) { // lookup the 
profile selector := cgroupModel.WorkloadSelector{ - Image: workload.WorkloadSelector.Image, + Image: workload.Selector.Image, Tag: "*", } profile := m.GetProfile(selector) @@ -640,24 +639,24 @@ func (m *SecurityProfileManager) unloadProfile(profile *SecurityProfile) { } // linkProfile (thread unsafe) updates the kernel space mapping between a workload and its profile -func (m *SecurityProfileManager) linkProfile(profile *SecurityProfile, workload *cgroupModel.CacheEntry) { +func (m *SecurityProfileManager) linkProfile(profile *SecurityProfile, workload *tags.Workload) { if err := m.securityProfileMap.Put([]byte(workload.ContainerID), profile.profileCookie); err != nil { - seclog.Errorf("couldn't link workload %s (selector: %s) with profile %s (check map size limit ?): %v", workload.ContainerID, workload.WorkloadSelector.String(), profile.Metadata.Name, err) + seclog.Errorf("couldn't link workload %s (selector: %s) with profile %s (check map size limit ?): %v", workload.ContainerID, workload.Selector.String(), profile.Metadata.Name, err) return } - seclog.Infof("workload %s (selector: %s) successfully linked to profile %s", workload.ContainerID, workload.WorkloadSelector.String(), profile.Metadata.Name) + seclog.Infof("workload %s (selector: %s) successfully linked to profile %s", workload.ContainerID, workload.Selector.String(), profile.Metadata.Name) } // unlinkProfile (thread unsafe) updates the kernel space mapping between a workload and its profile -func (m *SecurityProfileManager) unlinkProfile(profile *SecurityProfile, workload *cgroupModel.CacheEntry) { +func (m *SecurityProfileManager) unlinkProfile(profile *SecurityProfile, workload *tags.Workload) { if !profile.loadedInKernel { return } if err := m.securityProfileMap.Delete([]byte(workload.ContainerID)); err != nil { - seclog.Errorf("couldn't unlink workload %s (selector: %s) with profile %s: %v", workload.ContainerID, workload.WorkloadSelector.String(), profile.Metadata.Name, err) + seclog.Errorf("couldn't unlink workload %s (selector: %s) with profile %s: %v", workload.ContainerID, workload.Selector.String(), profile.Metadata.Name, err) } - seclog.Infof("workload %s (selector: %s) successfully unlinked from profile %s", workload.ContainerID, workload.WorkloadSelector.String(), profile.Metadata.Name) + seclog.Infof("workload %s (selector: %s) successfully unlinked from profile %s", workload.ContainerID, workload.Selector.String(), profile.Metadata.Name) } func (m *SecurityProfileManager) canGenerateAnomaliesFor(e *model.Event) bool { @@ -960,11 +959,11 @@ func (m *SecurityProfileManager) SaveSecurityProfile(params *api.SecurityProfile } // FetchSilentWorkloads returns the list of workloads for which we haven't received any profile -func (m *SecurityProfileManager) FetchSilentWorkloads() map[cgroupModel.WorkloadSelector][]*cgroupModel.CacheEntry { +func (m *SecurityProfileManager) FetchSilentWorkloads() map[cgroupModel.WorkloadSelector][]*tags.Workload { m.profilesLock.Lock() defer m.profilesLock.Unlock() - out := make(map[cgroupModel.WorkloadSelector][]*cgroupModel.CacheEntry) + out := make(map[cgroupModel.WorkloadSelector][]*tags.Workload) for selector, profile := range m.profiles { profile.Lock() diff --git a/pkg/security/security_profile/profile/manager_test.go b/pkg/security/security_profile/profile/manager_test.go index dc41ce396c286..b25390fbad9d0 100644 --- a/pkg/security/security_profile/profile/manager_test.go +++ b/pkg/security/security_profile/profile/manager_test.go @@ -21,6 +21,7 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/security/config" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" @@ -841,14 +842,15 @@ func TestSecurityProfileManager_tryAutolearn(t *testing.T) { if ti.newProfile || profile == nil { profile = NewSecurityProfile(cgroupModel.WorkloadSelector{Image: "image", Tag: "tag"}, []model.EventType{model.ExecEventType, model.DNSEventType}, nil) profile.ActivityTree = activity_tree.NewActivityTree(profile, nil, "security_profile") - profile.Instances = append(profile.Instances, &cgroupModel.CacheEntry{ - ContainerContext: model.ContainerContext{ + profile.Instances = append(profile.Instances, &tags.Workload{ + CacheEntry: &cgroupModel.CacheEntry{ContainerContext: model.ContainerContext{ ContainerID: containerutils.ContainerID(defaultContainerID), }, - CGroupContext: model.CGroupContext{ - CGroupID: containerutils.CGroupID(defaultContainerID), + CGroupContext: model.CGroupContext{ + CGroupID: containerutils.CGroupID(defaultContainerID), + }, }, - WorkloadSelector: cgroupModel.WorkloadSelector{Image: "image", Tag: "tag"}, + Selector: cgroupModel.WorkloadSelector{Image: "image", Tag: "tag"}, }) profile.loadedNano = uint64(t0.UnixNano()) } diff --git a/pkg/security/security_profile/profile/profile.go b/pkg/security/security_profile/profile/profile.go index c70c28f9712b2..b7118732d1a9d 100644 --- a/pkg/security/security_profile/profile/profile.go +++ b/pkg/security/security_profile/profile/profile.go @@ -22,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/proto/api" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" "github.com/DataDog/datadog-agent/pkg/security/secl/model" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" mtdt "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree/metadata" @@ -69,7 +70,7 @@ type SecurityProfile struct { pathsReducer *activity_tree.PathsReducer // Instances is the list of workload instances to witch the profile should apply - Instances []*cgroupModel.CacheEntry + Instances []*tags.Workload // Metadata contains metadata for the current profile Metadata mtdt.Metadata diff --git a/pkg/security/security_profile/tests/activity_tree_test.go b/pkg/security/security_profile/tests/activity_tree_test.go index 5f9afbb1c9e9a..a814932f385fd 100644 --- a/pkg/security/security_profile/tests/activity_tree_test.go +++ b/pkg/security/security_profile/tests/activity_tree_test.go @@ -20,6 +20,7 @@ import ( "github.com/stretchr/testify/assert" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" @@ -692,12 +693,14 @@ func TestActivityTree_CreateProcessNode(t *testing.T) { profile := profile.NewSecurityProfile(cgroupModel.WorkloadSelector{Image: "image", Tag: "tag"}, []model.EventType{model.ExecEventType, model.DNSEventType}, nil) at = 
activity_tree.NewActivityTree(profile, nil, "profile") profile.ActivityTree = at - profile.Instances = append(profile.Instances, &cgroupModel.CacheEntry{ - ContainerContext: model.ContainerContext{ - ContainerID: containerutils.ContainerID(contID), + profile.Instances = append(profile.Instances, &tags.Workload{ + CacheEntry: &cgroupModel.CacheEntry{ + ContainerContext: model.ContainerContext{ + ContainerID: containerutils.ContainerID(contID), + }, + CGroupContext: model.CGroupContext{CGroupID: containerutils.CGroupID(contID)}, }, - CGroupContext: model.CGroupContext{CGroupID: containerutils.CGroupID(contID)}, - WorkloadSelector: cgroupModel.WorkloadSelector{Image: "image", Tag: "tag"}, + Selector: cgroupModel.WorkloadSelector{Image: "image", Tag: "tag"}, }) } } else { // retrieve last saved tree state diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index 22711615cc70f..0b01cbcd72d49 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -118,7 +118,7 @@ func (tm *testModule) reloadPolicies() error { log.Debugf("reload policies with cfgDir: %s", commonCfgDir) bundledPolicyProvider := bundled.NewPolicyProvider(tm.eventMonitor.Probe.Config.RuntimeSecurity) - policyDirProvider, err := rules.NewPoliciesDirProvider(commonCfgDir, false) + policyDirProvider, err := rules.NewPoliciesDirProvider(commonCfgDir) if err != nil { return err } diff --git a/pkg/serializer/go.mod b/pkg/serializer/go.mod index 5e0bd3938c766..4b6353199d174 100644 --- a/pkg/serializer/go.mod +++ b/pkg/serializer/go.mod @@ -77,7 +77,7 @@ require ( github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/version v0.57.1 github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 github.com/gogo/protobuf v1.3.2 @@ -116,11 +116,11 @@ require ( github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect diff --git a/pkg/util/filesystem/go.mod b/pkg/util/filesystem/go.mod index 1753d7310cddd..d1fbed28429ae 100644 --- a/pkg/util/filesystem/go.mod +++ b/pkg/util/filesystem/go.mod @@ -5,11 +5,13 @@ go 1.22.0 replace ( github.com/DataDog/datadog-agent/pkg/util/log => ../log/ github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber/ + github.com/DataDog/datadog-agent/pkg/util/winutil => ../winutil/ ) require ( - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + 
github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 github.com/shirou/gopsutil/v3 v3.24.5 github.com/stretchr/testify v1.10.0 @@ -17,11 +19,10 @@ require ( ) require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect diff --git a/pkg/util/filesystem/go.sum b/pkg/util/filesystem/go.sum index e3cc0b7e589e0..7bde33aa41ad5 100644 --- a/pkg/util/filesystem/go.sum +++ b/pkg/util/filesystem/go.sum @@ -1,6 +1,5 @@ github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= diff --git a/pkg/util/filesystem/permission_windows.go b/pkg/util/filesystem/permission_windows.go index 2ae2929ce1809..43819ee0be97c 100644 --- a/pkg/util/filesystem/permission_windows.go +++ b/pkg/util/filesystem/permission_windows.go @@ -8,15 +8,16 @@ package filesystem import ( "fmt" - "syscall" "github.com/hectane/go-acl" "golang.org/x/sys/windows" + + "github.com/DataDog/datadog-agent/pkg/util/winutil" ) // Permission handles permissions for Unix and Windows type Permission struct { - currentUserSid *windows.SID + ddUserSid *windows.SID administratorSid *windows.SID systemSid *windows.SID } @@ -32,32 +33,24 @@ func NewPermission() (*Permission, error) { return nil, err } - currentUserSid, err := getCurrentUserSid() + ddUserSid, err := getDatadogUserSid() if err != nil { - return nil, fmt.Errorf("Unable to get current user sid %v", err) + return nil, fmt.Errorf("unable to get datadog user sid %v", err) } return &Permission{ - currentUserSid: currentUserSid, + ddUserSid: ddUserSid, administratorSid: administratorSid, systemSid: systemSid, }, nil } -func getCurrentUserSid() (*windows.SID, error) { - token, err := syscall.OpenCurrentProcessToken() - if err != nil { - return nil, fmt.Errorf("Couldn't get process token %v", err) - } - defer token.Close() - user, err := token.GetTokenUser() - if err != nil { - return nil, fmt.Errorf("Couldn't get token user %v", err) - } - sidString, err := user.User.Sid.String() +func getDatadogUserSid() (*windows.SID, error) { + ddUserSid, err := winutil.GetDDAgentUserSID() if err != nil { - return nil, fmt.Errorf("Couldn't get user sid string %v", err) + // falls back to current user on failure + return winutil.GetSidFromUser() } - return windows.StringToSid(sidString) + return ddUserSid, nil } // RestrictAccessToUser update the ACL of a file so only the current user and ADMIN/SYSTEM can access it @@ -68,7 +61,7 @@ func (p *Permission) 
RestrictAccessToUser(path string) error { false, // don't inherit acl.GrantSid(windows.GENERIC_ALL, p.administratorSid), acl.GrantSid(windows.GENERIC_ALL, p.systemSid), - acl.GrantSid(windows.GENERIC_ALL, p.currentUserSid)) + acl.GrantSid(windows.GENERIC_ALL, p.ddUserSid)) } // RemoveAccessToOtherUsers on Windows this function calls RestrictAccessToUser diff --git a/pkg/util/filesystem/permission_windows_test.go b/pkg/util/filesystem/permission_windows_test.go new file mode 100644 index 0000000000000..61921718cd37f --- /dev/null +++ b/pkg/util/filesystem/permission_windows_test.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build windows + +package filesystem + +import ( + "os" + "path/filepath" + "testing" + "unsafe" + + "github.com/stretchr/testify/require" + "golang.org/x/sys/windows" +) + +func TestRemoveAccessToOtherUsers(t *testing.T) { + + p, err := NewPermission() + require.NoError(t, err) + + root := t.TempDir() + + t.Log(root) + + testFile := filepath.Join(root, "file") + testDir := filepath.Join(root, "dir") + + err = os.WriteFile(testFile, []byte("test"), 0777) + require.NoError(t, err) + err = os.Mkdir(testDir, 0777) + require.NoError(t, err) + + err = p.RemoveAccessToOtherUsers(testFile) + require.NoError(t, err) + + // Assert the permissions for the file + assertPermissions(t, testFile, p) + + err = p.RemoveAccessToOtherUsers(testDir) + require.NoError(t, err) + + // Assert the permissions for the directory + assertPermissions(t, testDir, p) +} + +func assertPermissions(t *testing.T, path string, p *Permission) { + sd, err := windows.GetNamedSecurityInfo( + path, + windows.SE_FILE_OBJECT, + windows.DACL_SECURITY_INFORMATION, + ) + require.NoError(t, err) + + dacl, _, err := sd.DACL() + require.NoError(t, err) + + aceCount := int(dacl.AceCount) + for i := 0; i < aceCount; i++ { + var ace *windows.ACCESS_ALLOWED_ACE + err := windows.GetAce(dacl, uint32(i), &ace) + require.NoError(t, err) + + var sid *windows.SID = (*windows.SID)(unsafe.Pointer(&ace.SidStart)) + + if !sid.Equals(p.ddUserSid) && !sid.Equals(p.administratorSid) && !sid.Equals(p.systemSid) { + t.Errorf("Unexpected SID with access: %v", sid) + } + } +} diff --git a/pkg/util/flavor/go.mod b/pkg/util/flavor/go.mod index bb7190a4d241c..dabdfd9bcee13 100644 --- a/pkg/util/flavor/go.mod +++ b/pkg/util/flavor/go.mod @@ -46,13 +46,13 @@ require ( github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/viper 
v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect diff --git a/pkg/util/grpc/go.mod b/pkg/util/grpc/go.mod index b058dd317dd34..bdbb97d385501 100644 --- a/pkg/util/grpc/go.mod +++ b/pkg/util/grpc/go.mod @@ -37,7 +37,7 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/api v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/stretchr/testify v1.10.0 @@ -61,10 +61,10 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect diff --git a/pkg/util/http/go.mod b/pkg/util/http/go.mod index 0d248d6101c9f..e4699e99baf5e 100644 --- a/pkg/util/http/go.mod +++ b/pkg/util/http/go.mod @@ -33,7 +33,7 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/stretchr/testify v1.10.0 golang.org/x/net v0.31.0 ) @@ -51,10 +51,10 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect diff --git a/pkg/util/log/setup/go.mod b/pkg/util/log/setup/go.mod index 8eb352e5a4570..1d7dfdbbb7c8b 100644 --- a/pkg/util/log/setup/go.mod +++ b/pkg/util/log/setup/go.mod @@ -33,7 +33,7 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/stretchr/testify v1.10.0 ) @@ -51,10 +51,10 @@ require ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect 
github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect diff --git a/pkg/util/system/go.mod b/pkg/util/system/go.mod index db28f3923c003..8ec61475312c9 100644 --- a/pkg/util/system/go.mod +++ b/pkg/util/system/go.mod @@ -13,10 +13,10 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 github.com/shirou/gopsutil/v3 v3.24.5 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 @@ -24,7 +24,7 @@ require ( ) require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/go-ole/go-ole v1.3.0 // indirect diff --git a/pkg/util/winutil/service.go b/pkg/util/winutil/service.go index 11bc407bb0f43..22387fd89ea28 100644 --- a/pkg/util/winutil/service.go +++ b/pkg/util/winutil/service.go @@ -419,3 +419,20 @@ func IsServiceRunning(serviceName string) (running bool, err error) { } return (status.State == windows.SERVICE_RUNNING), nil } + +// GetServiceUser returns the service user for the given service +func GetServiceUser(serviceName string) (string, error) { + manager, service, err := openManagerService(serviceName, windows.SERVICE_QUERY_CONFIG) + if err != nil { + return "", err + } + defer closeManagerService(manager, service) + + serviceConfig, err := service.Config() + if err != nil { + return "", fmt.Errorf("could not retrieve config for %s: %w", serviceName, err) + } + + return serviceConfig.ServiceStartName, nil + +} diff --git a/pkg/util/winutil/users.go b/pkg/util/winutil/users.go index 7026b76507ebb..be17fe6a7cdb2 100644 --- a/pkg/util/winutil/users.go +++ b/pkg/util/winutil/users.go @@ -8,6 +8,7 @@ package winutil import ( "fmt" + "strings" "syscall" "golang.org/x/sys/windows" @@ -75,3 +76,53 @@ func GetLocalSystemSID() (*windows.SID, error) { return localSystem, err } + +// GetServiceUserSID returns the SID of the specified service account +func GetServiceUserSID(service string) (*windows.SID, error) { + // get config for datadogagent service + user, err := GetServiceUser(service) + if err != nil { + return nil, fmt.Errorf("could not get datadogagent service user: %s", err) + } + + username, err := getUserFromServiceUser(user) + if err != nil { + return nil, err + } + + // Manually map some aliases that SCM uses and are not recognized by the + // security 
subsystem (`LookupAccountName()` will fail) + // https://learn.microsoft.com/en-us/windows/win32/services/service-user-accounts + if username == "LocalSystem" { + return windows.StringToSid("S-1-5-18") + } + + // get the SID for the user account + sid, _, _, err := windows.LookupSID("", username) + return sid, err +} + +func getUserFromServiceUser(user string) (string, error) { + var domain, username string + parts := strings.SplitN(user, "\\", 2) + if len(parts) == 1 { + username = user + } else if len(parts) == 2 { + domain = parts[0] + if domain == "." { + username = parts[1] + } else { + username = user + } + } else { + return "", fmt.Errorf("could not parse user: %s", user) + } + + return username, nil + +} + +// GetDDAgentUserSID returns the SID of the DataDog Agent account +func GetDDAgentUserSID() (*windows.SID, error) { + return GetServiceUserSID("datadogagent") +} diff --git a/pkg/util/winutil/users_test.go b/pkg/util/winutil/users_test.go index d541eeb4e1ff8..d1941003bfda4 100644 --- a/pkg/util/winutil/users_test.go +++ b/pkg/util/winutil/users_test.go @@ -11,6 +11,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sys/windows" ) func TestGetSidFromUser(t *testing.T) { @@ -19,3 +21,27 @@ func TestGetSidFromUser(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, sid) } + +func TestGetServiceUserSID(t *testing.T) { + // create LocalService SID + serviceSid, err := windows.StringToSid("S-1-5-19") + require.NoError(t, err) + + // get the SID for the EventLog service (has LocalService as its user) + sid, err := GetServiceUserSID("EventLog") + require.NoError(t, err) + assert.NotNil(t, sid) + assert.True(t, windows.EqualSid(sid, serviceSid)) + t.Logf("The SID found was: %v", sid) + + // create LocalSystem SID + systemSid, err := windows.StringToSid("S-1-5-18") + require.NoError(t, err) + + // get the SID for the BITS service (has LocalSystem as its user) + sid, err = GetServiceUserSID("BITS") + require.NoError(t, err) + assert.NotNil(t, sid) + assert.True(t, windows.EqualSid(sid, systemSid)) + t.Logf("The SID found was: %v", sid) +} diff --git a/release.json b/release.json index 6c42ba0c2fe30..24cb211dbbdc3 100644 --- a/release.json +++ b/release.json @@ -3,7 +3,7 @@ "current_milestone": "7.62.0", "last_stable": { "6": "6.53.0", - "7": "7.59.0" + "7": "7.59.1" }, "nightly": { "INTEGRATIONS_CORE_VERSION": "master", diff --git a/releasenotes/notes/cws-remove-watch-dir-option-f8faee4937353316.yaml b/releasenotes/notes/cws-remove-watch-dir-option-f8faee4937353316.yaml new file mode 100644 index 0000000000000..320f7e164f786 --- /dev/null +++ b/releasenotes/notes/cws-remove-watch-dir-option-f8faee4937353316.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +deprecations: + - | + CWS: the `runtime_security_config.policies.watch_dir` option has been removed. + Use remote configuration for dynamically updating policies, or send + the `SIGHUP` signal to the `system-probe` process to reload the policies. 
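To make the guidance in the release note above concrete, here is a small sketch of the `SIGHUP` reload path. It is illustrative rather than Agent code: discovering the `system-probe` PID is left to the caller, and the helper name is an assumption.

```go
package example

import (
	"os"
	"syscall"
)

// reloadRuntimeSecurityPolicies asks a running system-probe process to re-read
// its policies directory by sending it SIGHUP, as described in the release
// note above. Finding the system-probe PID is up to the caller.
func reloadRuntimeSecurityPolicies(systemProbePID int) error {
	proc, err := os.FindProcess(systemProbePID)
	if err != nil {
		return err
	}
	return proc.Signal(syscall.SIGHUP)
}
```

Remote Configuration remains the other documented path for updating policies dynamically now that `watch_dir` is gone.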
diff --git a/releasenotes/notes/file-permissions-to-dduser-windows-6e9940175f9130ff.yaml b/releasenotes/notes/file-permissions-to-dduser-windows-6e9940175f9130ff.yaml new file mode 100644 index 0000000000000..45e2515092583 --- /dev/null +++ b/releasenotes/notes/file-permissions-to-dduser-windows-6e9940175f9130ff.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix Windows file permissions on authToken to give access to the Datadog user even when privilege processes create it. + diff --git a/tasks/components.py b/tasks/components.py index c534d63c56b83..fc5b04806ce04 100644 --- a/tasks/components.py +++ b/tasks/components.py @@ -96,8 +96,6 @@ def has_type_component(content) -> bool: 'comp/core/configsync/configsyncimpl', 'comp/core/gui/guiimpl', 'comp/core/hostname/hostnameimpl', - 'comp/core/log/logimpl', - 'comp/core/log/tracelogimpl', 'comp/core/pid/pidimpl', 'comp/core/secrets/secretsimpl', 'comp/core/settings/settingsimpl', @@ -165,7 +163,6 @@ def has_type_component(content) -> bool: # Please do not add a new component to this list. components_missing_implementation_folder = [ "comp/dogstatsd/statsd", - "comp/core/tagger", "comp/forwarder/orchestrator/orchestratorinterface", "comp/core/hostname/hostnameinterface", ] @@ -233,6 +230,12 @@ def check_component_contents_and_file_hiearchy(comp): for src_file in locate_nontest_source_files(impl_folders): pkgname = parse_package_name(src_file) expectname = comp.name + 'impl' + + for part in src_file.parts: + if "impl-" in part: + parts = part.split("-") + expectname = parts[1] + 'impl' + if pkgname != expectname: return f"** {src_file} has wrong package name '{pkgname}', must be '{expectname}'" if comp.path in ignore_fx_import: diff --git a/tasks/installer.py b/tasks/installer.py index 7429a1eaf3148..9e758a96a3bac 100644 --- a/tasks/installer.py +++ b/tasks/installer.py @@ -111,7 +111,7 @@ def build_linux_script( build_downloader(ctx, flavor=flavor, version=version, os='linux', arch=arch) with open(DOWNLOADER_BIN, 'rb') as f: encoded_bin = base64.encodebytes(f.read()).decode('utf-8') - install_script = install_script.replace(f'DOWNLOADER_BIN_{arch.upper()}', encoded_bin) + install_script = install_script.replace(f'DOWNLOADER_BIN_LINUX_{arch.upper()}', encoded_bin) commit_sha = ctx.run('git rev-parse HEAD', hide=True).stdout.strip() install_script = install_script.replace('INSTALLER_COMMIT', commit_sha) diff --git a/tasks/issue.py b/tasks/issue.py index 3d3f80161fdd4..4cd05c97abcdc 100644 --- a/tasks/issue.py +++ b/tasks/issue.py @@ -25,9 +25,16 @@ def assign_owner(_, issue_id, dry_run=False): from slack_sdk import WebClient client = WebClient(os.environ['SLACK_API_TOKEN']) - channel = GITHUB_SLACK_MAP.get(owner.lower(), '#agent-ask-anything') - message = f':githubstatus_partial_outage: *New Community Issue*\n{issue.title} <{issue.html_url}|{gh.repo.name}#{issue_id}>' - message += "\nThe assignation to your team was done automatically, using issue content and title. Please redirect if needed." 
+ channel = next( + (chan for team, chan in GITHUB_SLACK_MAP.items() if owner.lower() in team), '#agent-ask-anything' + ) + message = f':githubstatus_partial_outage: *New Community Issue*\n{issue.title} <{issue.html_url}|{gh.repo.name}#{issue_id}>\n' + if channel == '#agent-ask-anything': + message += "The CI bot failed to assign this issue to a team.\nPlease assign it manually." + else: + message += ( + "Your team was assigned automatically, using the issue content and title.\nPlease redirect if needed." + ) client.chat_postMessage(channel=channel, text=message) return owner diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py index 175b6d848d4d7..56e96ba23403f 100644 --- a/tasks/libs/common/utils.py +++ b/tasks/libs/common/utils.py @@ -661,3 +661,15 @@ def agent_working_directory(): from tasks.libs.common.worktree import LOCAL_DIRECTORY, WORKTREE_DIRECTORY, is_worktree return WORKTREE_DIRECTORY if is_worktree() else LOCAL_DIRECTORY + + +def is_macos(): + return sys.platform == 'darwin' + + +def is_linux(): + return sys.platform.startswith('linux') + + +def is_windows(): + return sys.platform == 'win32' diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml index fd021cf169528..1eebefe479b21 100644 --- a/tasks/libs/pipeline/github_slack_map.yaml +++ b/tasks/libs/pipeline/github_slack_map.yaml @@ -44,5 +44,5 @@ '@datadog/agent-devx-infra': '#agent-devx-ops' '@datadog/agent-devx-loops': '#agent-devx-ops' '@datadog/apm-onboarding': '#apm-onboarding' -'@datadog/apm-reliability-and-performance': '#apm-ecosystems-reliability' +'@datadog/apm-ecosystems-performance': '#apm-benchmarking-platform' '@DataDog/container-ecosystems': '#container-ecosystems-ops' diff --git a/tasks/new_e2e_tests.py b/tasks/new_e2e_tests.py index 4b0aef5a214a4..74f027afd1582 100644 --- a/tasks/new_e2e_tests.py +++ b/tasks/new_e2e_tests.py @@ -23,7 +23,7 @@ from tasks.libs.common.git import get_commit_sha from tasks.libs.common.go import download_go_dependencies from tasks.libs.common.gomodules import get_default_modules -from tasks.libs.common.utils import REPO_PATH, color_message, running_in_ci +from tasks.libs.common.utils import REPO_PATH, color_message, gitlab_section, running_in_ci from tasks.tools.e2e_stacks import destroy_remote_stack @@ -66,6 +66,9 @@ def run( test_washer=False, agent_image="", cluster_agent_image="", + logs_post_processing=False, + logs_post_processing_test_depth=1, + logs_folder="e2e_logs", ): """ Run E2E Tests based on test-infra-definitions infrastructure provisioning. 
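The logs post-processing added below reads the JSON-lines test report written by gotestsum (the raw `go test -json` event stream) and keys on the `Package`, `Test`, and `Output` fields of each record. For reference, a minimal Go model of that event shape; the struct itself is illustrative and not part of this change.

```go
package example

import "time"

// TestEvent models a single `go test -json` record. The post-processing in
// tasks/new_e2e_tests.py only relies on Package, Test, and Output; the other
// fields are listed for completeness.
type TestEvent struct {
	Time    time.Time `json:"Time"`
	Action  string    `json:"Action"`  // "run", "output", "pass", "fail", ...
	Package string    `json:"Package"` // Go package under test
	Test    string    `json:"Test"`    // e.g. "TestPackages/run_ubuntu"; subtests are slash-separated
	Elapsed float64   `json:"Elapsed"` // seconds, present on "pass"/"fail"
	Output  string    `json:"Output"`  // one line of captured output
}
```

The slash-separated `Test` names are what the `test_depth` parameter splits on when grouping logs per test.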
@@ -108,7 +111,14 @@ def run( test_run_arg = f"-run {test_run_name}" cmd = f'gotestsum --format {gotestsum_format} ' - cmd += '{junit_file_flag} {json_flag} --packages="{packages}" -- -ldflags="-X {REPO_PATH}/test/new-e2e/tests/containers.GitCommit={commit}" {verbose} -mod={go_mod} -vet=off -timeout {timeout} -tags "{go_build_tags}" {nocache} {run} {skip} {test_run_arg} -args {osversion} {platform} {major_version} {arch} {flavor} {cws_supported_osversion} {src_agent_version} {dest_agent_version} {keep_stacks} {extra_flags}' + scrubber_raw_command = "" + # Scrub the test output to avoid leaking API or APP keys when running in the CI + if running_in_ci(): + scrubber_raw_command = ( + # Using custom go command piped with scrubber sed instructions https://github.com/gotestyourself/gotestsum#custom-go-test-command + f"--raw-command {os.path.join(os.path.dirname(__file__), 'tools', 'gotest-scrubbed.sh')} {{packages}}" + ) + cmd += f'{{junit_file_flag}} {{json_flag}} --packages="{{packages}}" {scrubber_raw_command} -- -ldflags="-X {{REPO_PATH}}/test/new-e2e/tests/containers.GitCommit={{commit}}" {{verbose}} -mod={{go_mod}} -vet=off -timeout {{timeout}} -tags "{{go_build_tags}}" {{nocache}} {{run}} {{skip}} {{test_run_arg}} -args {{osversion}} {{platform}} {{major_version}} {{arch}} {{flavor}} {{cws_supported_osversion}} {{src_agent_version}} {{dest_agent_version}} {{keep_stacks}} {{extra_flags}}' args = { "go_mod": "readonly", @@ -164,6 +174,27 @@ def run( 'You can also add `E2E_DEV_MODE="true"` to run in dev mode which will leave the environment up after the tests.' ) + if logs_post_processing: + if len(test_res) == 1: + post_processed_output = post_process_output( + test_res[0].result_json_path, test_depth=logs_post_processing_test_depth + ) + + os.makedirs(logs_folder, exist_ok=True) + write_result_to_log_files(post_processed_output, logs_folder) + try: + pretty_print_logs(post_processed_output) + except TooManyLogsError: + print( + color_message("WARNING", "yellow") + + f": Too many logs to print, skipping logs printing to avoid Gitlab collapse. You can find your logs properly organized in the job artifacts: https://gitlab.ddbuild.io/DataDog/datadog-agent/-/jobs/{os.getenv('CI_JOB_ID')}/artifacts/browse/e2e-output/logs/" + ) + else: + print( + color_message("WARNING", "yellow") + + f": Logs post processing expects a single test result (for the test/new-e2e module). Skipping because the result contains tests for {len(test_res)} modules." + ) + if not success: raise Exit(code=1) @@ -253,6 +284,94 @@ def cleanup_remote_stacks(ctx, stack_regex, pulumi_backend): print(f"Failed to destroy stack {stack}") +def post_process_output(path: str, test_depth: int = 1): + """ + Post process the test results to add the test run name + path: path to the test result json file + test_depth: depth of the test name to consider + + By default the test_depth is set to 1, which means that the logs will be split depending on the test suite name. + If we use a single test suite to run multiple tests, we can increase the test_depth to split the logs per test.
+ For example with: + TestPackages/run_ubuntu + TestPackages/run_centos + TestPackages/run_debian + We should set test_depth to 2 to avoid mixing all the logs of the different tested platforms + """ + + def is_parent(parent: list[str], child: list[str]) -> bool: + for i in range(len(parent)): + if parent[i] != child[i]: + return False + return True + + logs_per_test = {} + with open(path) as f: + all_lines = f.readlines() + + # Initialize logs_per_test with all test names + for line in all_lines: + json_line = json.loads(line) + if "Package" not in json_line or "Test" not in json_line or "Output" not in json_line: + continue + splitted_test = json_line["Test"].split("/") + if len(splitted_test) < test_depth: + continue + if json_line["Package"] not in logs_per_test: + logs_per_test[json_line["Package"]] = {} + + test_name = splitted_test[: min(test_depth, len(splitted_test))] + logs_per_test[json_line["Package"]]["/".join(test_name)] = [] + + for line in all_lines: + json_line = json.loads(line) + if "Package" not in json_line or "Test" not in json_line or "Output" not in json_line: + continue + + if "===" in json_line["Output"]: # Ignore these lines that are produced when running tests concurrently + continue + + splitted_test = json_line["Test"].split("/") + + if len(splitted_test) < test_depth: # Append logs to all children tests + for test_name in logs_per_test[json_line["Package"]]: + if is_parent(splitted_test, test_name.split("/")): + logs_per_test[json_line["Package"]][test_name].append(json_line["Output"]) + continue + + logs_per_test[json_line["Package"]]["/".join(splitted_test[:test_depth])].append(json_line["Output"]) + return logs_per_test + + +def write_result_to_log_files(logs_per_test, log_folder): + for package, tests in logs_per_test.items(): + for test, logs in tests.items(): + sanitized_package_name = re.sub(r"[^\w_. -]", "_", package) + sanitized_test_name = re.sub(r"[^\w_. -]", "_", test) + with open(f"{log_folder}/{sanitized_package_name}.{sanitized_test_name}.log", "w") as f: + f.write("".join(logs)) + + +class TooManyLogsError(Exception): + pass + + +def pretty_print_logs(logs_per_test, max_size=250000): + # Compute size in bytes of what we are about to print. If it exceeds max_size, we skip printing because it will make the Gitlab logs almost completely collapsed. + # By default Gitlab has a limit of 500KB per job log, so we want to avoid printing too much. + size = 0 + for _, tests in logs_per_test.items(): + for _, logs in tests.items(): + size += len("".join(logs).encode()) + if size > max_size and running_in_ci(): + raise TooManyLogsError + for package, tests in logs_per_test.items(): + for test, logs in tests.items(): + with gitlab_section("Complete logs for " + package + "." + test, collapsed=True): + print("Complete logs for " + package + "."
+ test) + print("".join(logs)) + + @task def deps(ctx, verbose=False): """ diff --git a/tasks/setup.py b/tasks/setup.py index 5e1eeee98d1f1..cf75c396f5779 100644 --- a/tasks/setup.py +++ b/tasks/setup.py @@ -19,7 +19,7 @@ from tasks.libs.common.color import Color, color_message from tasks.libs.common.git import get_default_branch from tasks.libs.common.status import Status -from tasks.libs.common.utils import running_in_pyapp +from tasks.libs.common.utils import is_linux, is_windows, running_in_pyapp if TYPE_CHECKING: from collections.abc import Generator @@ -145,10 +145,15 @@ def check_python_version(_ctx) -> SetupResult: status = Status.OK if tuple(sys.version_info)[:2] != tuple(int(d) for d in expected_version.split(".")): status = Status.FAIL - message = ( - f"Python version is {sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}. " - "Please update your environment: https://datadoghq.dev/datadog-agent/setup/#python-dependencies" - ) + install_message = f"Please install Python {expected_version} with 'brew install python@{expected_version}'" + if is_windows(): + install_message = f"Please install Python {expected_version} from https://www.python.org/downloads/windows/" + elif is_linux(): + install_message = ( + f"Please install Python {expected_version} with 'sudo apt-get install python{expected_version}-dev'" + ) + + message = f"Python version out of date, current is {sys.version_info[0]}.{sys.version_info[1]} while expected is {expected_version}.\n{install_message}" return SetupResult("Check Python version", status, message) diff --git a/tasks/tools/gotest-scrubbed.sh b/tasks/tools/gotest-scrubbed.sh new file mode 100755 index 0000000000000..2000005289fae --- /dev/null +++ b/tasks/tools/gotest-scrubbed.sh @@ -0,0 +1,6 @@ +#!/bin/bash +### This script is used to run go test and scrub the output, the command can be used as follow: +### ./gotest-scrubbed.sh -- +go test -json "$1" "${@:3}" | +sed -E 's/\b[a-fA-F0-9]{27}([a-fA-F0-9]{5})\b/**************************\1/g' | # Scrub API keys +sed -E 's/\b[a-fA-F0-9]{35}([a-fA-F0-9]{5})\b/************************************\1/g' # Scrub APP keys diff --git a/tasks/unit_tests/components_tests.py b/tasks/unit_tests/components_tests.py index e5d02cab45530..b6b29acc68164 100644 --- a/tasks/unit_tests/components_tests.py +++ b/tasks/unit_tests/components_tests.py @@ -214,6 +214,7 @@ def test_validate_component_implementation(self): implfolder = os.path.join(comps[3].path, 'impl') newfolder = os.path.join(comps[3].path, 'impl-alt') + replace_line(filename, 'package newstyleimpl', 'package altimpl') shutil.move(implfolder, newfolder) comps, _ = components.get_components_and_bundles() diff --git a/tasks/unit_tests/testdata/components_src/comp/multiple/impl-one/multiple.go b/tasks/unit_tests/testdata/components_src/comp/multiple/impl-one/multiple.go index 19e6f23129500..ac4b78b3aeb69 100644 --- a/tasks/unit_tests/testdata/components_src/comp/multiple/impl-one/multiple.go +++ b/tasks/unit_tests/testdata/components_src/comp/multiple/impl-one/multiple.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package multipleimpl +package oneimpl import ( multiple "github.com/DataDog/datadog-agent/comp/multiple/def" diff --git a/tasks/unit_tests/testdata/components_src/comp/multiple/impl-two/multiple.go b/tasks/unit_tests/testdata/components_src/comp/multiple/impl-two/multiple.go index 6ca0f53345424..5be15c220b450 100644 --- a/tasks/unit_tests/testdata/components_src/comp/multiple/impl-two/multiple.go +++ b/tasks/unit_tests/testdata/components_src/comp/multiple/impl-two/multiple.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package multipleimpl +package twoimpl import ( multiple "github.com/DataDog/datadog-agent/comp/multiple/def" diff --git a/test/new-e2e/pkg/runner/configmap.go b/test/new-e2e/pkg/runner/configmap.go index bb4fd0a9fb0f7..9541320664572 100644 --- a/test/new-e2e/pkg/runner/configmap.go +++ b/test/new-e2e/pkg/runner/configmap.go @@ -127,14 +127,19 @@ func BuildStackParameters(profile Profile, scenarioConfig ConfigMap) (ConfigMap, // Parameters from profile cm.Set(InfraEnvironmentVariables, profile.EnvironmentNames(), false) params := map[parameters.StoreKey][]string{ - parameters.KeyPairName: {AWSKeyPairName}, - parameters.PublicKeyPath: {AWSPublicKeyPath, AzurePublicKeyPath, GCPPublicKeyPath, LocalPublicKeyPath}, - parameters.PrivateKeyPath: {AWSPrivateKeyPath, AzurePrivateKeyPath, GCPPrivateKeyPath}, - parameters.ExtraResourcesTags: {InfraExtraResourcesTags}, - parameters.PipelineID: {AgentPipelineID}, - parameters.MajorVersion: {AgentMajorVersion}, - parameters.CommitSHA: {AgentCommitSHA}, - parameters.InitOnly: {InfraInitOnly}, + parameters.KeyPairName: {AWSKeyPairName}, + parameters.AWSPublicKeyPath: {AWSPublicKeyPath}, + parameters.AzurePublicKeyPath: {AzurePublicKeyPath}, + parameters.GCPPublicKeyPath: {GCPPublicKeyPath}, + parameters.AWSPrivateKeyPath: {AWSPrivateKeyPath}, + parameters.AzurePrivateKeyPath: {AzurePrivateKeyPath}, + parameters.GCPPrivateKeyPath: {GCPPrivateKeyPath}, + parameters.LocalPublicKeyPath: {LocalPublicKeyPath}, + parameters.ExtraResourcesTags: {InfraExtraResourcesTags}, + parameters.PipelineID: {AgentPipelineID}, + parameters.MajorVersion: {AgentMajorVersion}, + parameters.CommitSHA: {AgentCommitSHA}, + parameters.InitOnly: {InfraInitOnly}, } for storeKey, configMapKeys := range params { @@ -149,9 +154,11 @@ func BuildStackParameters(profile Profile, scenarioConfig ConfigMap) (ConfigMap, // Secret parameters from profile store secretParams := map[parameters.StoreKey][]string{ - parameters.APIKey: {AgentAPIKey}, - parameters.APPKey: {AgentAPPKey}, - parameters.PrivateKeyPassword: {AWSPrivateKeyPassword, AzurePrivateKeyPassword, GCPPrivateKeyPassword}, + parameters.APIKey: {AgentAPIKey}, + parameters.APPKey: {AgentAPPKey}, + parameters.AWSPrivateKeyPassword: {AWSPrivateKeyPassword}, + parameters.AzurePrivateKeyPassword: {AzurePrivateKeyPassword}, + parameters.GCPPrivateKeyPassword: {GCPPrivateKeyPassword}, } for storeKey, configMapKeys := range secretParams { diff --git a/test/new-e2e/pkg/runner/configmap_test.go b/test/new-e2e/pkg/runner/configmap_test.go index 2e9bf966a53a2..23b02f0a0156c 100644 --- a/test/new-e2e/pkg/runner/configmap_test.go +++ b/test/new-e2e/pkg/runner/configmap_test.go @@ -37,16 +37,16 @@ func Test_BuildStackParameters(t *testing.T) { "ddinfra:env": auto.ConfigValue{Value: "", Secret: false}, "ddinfra:extraResourcesTags": auto.ConfigValue{Value: "extra_resources_tags", Secret: false}, "ddinfra:initOnly": auto.ConfigValue{Value: 
"init_only", Secret: false}, - "ddinfra:aws/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false}, - "ddinfra:aws/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false}, - "ddinfra:aws/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true}, - "ddinfra:az/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false}, - "ddinfra:az/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false}, - "ddinfra:az/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true}, - "ddinfra:gcp/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false}, - "ddinfra:gcp/defaultPrivateKeyPath": auto.ConfigValue{Value: "private_key_path", Secret: false}, - "ddinfra:gcp/defaultPrivateKeyPassword": auto.ConfigValue{Value: "private_key_password", Secret: true}, - "ddinfra:local/defaultPublicKeyPath": auto.ConfigValue{Value: "public_key_path", Secret: false}, + "ddinfra:aws/defaultPublicKeyPath": auto.ConfigValue{Value: "aws_public_key_path", Secret: false}, + "ddinfra:aws/defaultPrivateKeyPath": auto.ConfigValue{Value: "aws_private_key_path", Secret: false}, + "ddinfra:aws/defaultPrivateKeyPassword": auto.ConfigValue{Value: "aws_private_key_password", Secret: true}, + "ddinfra:az/defaultPublicKeyPath": auto.ConfigValue{Value: "azure_public_key_path", Secret: false}, + "ddinfra:az/defaultPrivateKeyPath": auto.ConfigValue{Value: "azure_private_key_path", Secret: false}, + "ddinfra:az/defaultPrivateKeyPassword": auto.ConfigValue{Value: "azure_private_key_password", Secret: true}, + "ddinfra:gcp/defaultPublicKeyPath": auto.ConfigValue{Value: "gcp_public_key_path", Secret: false}, + "ddinfra:gcp/defaultPrivateKeyPath": auto.ConfigValue{Value: "gcp_private_key_path", Secret: false}, + "ddinfra:gcp/defaultPrivateKeyPassword": auto.ConfigValue{Value: "gcp_private_key_password", Secret: true}, + "ddinfra:local/defaultPublicKeyPath": auto.ConfigValue{Value: "local_public_key_path", Secret: false}, "ddagent:pipeline_id": auto.ConfigValue{Value: "pipeline_id", Secret: false}, "ddagent:commit_sha": auto.ConfigValue{Value: "commit_sha", Secret: false}, "ddagent:majorVersion": auto.ConfigValue{Value: "major_version", Secret: false}, diff --git a/test/new-e2e/pkg/runner/parameters/const.go b/test/new-e2e/pkg/runner/parameters/const.go index c23c1502946b6..227abac7f651f 100644 --- a/test/new-e2e/pkg/runner/parameters/const.go +++ b/test/new-e2e/pkg/runner/parameters/const.go @@ -5,6 +5,8 @@ package parameters +import "github.com/DataDog/test-infra-definitions/components" + // StoreKey alias to string type StoreKey string @@ -19,14 +21,28 @@ const ( ExtraResourcesTags StoreKey = "extra_resources_tags" // KeyPairName aws keypairname, used to access EC2 instances KeyPairName StoreKey = "key_pair_name" - // PrivateKeyPassword private ssh key password - PrivateKeyPassword StoreKey = "private_key_password" - // PrivateKeyPath private ssh key path - PrivateKeyPath StoreKey = "private_key_path" + // AWSPrivateKeyPassword private ssh key password + AWSPrivateKeyPassword StoreKey = StoreKey(components.CloudProviderAWS + PrivateKeyPasswordSuffix) + // AWSPrivateKeyPath private ssh key path + AWSPrivateKeyPath StoreKey = StoreKey(components.CloudProviderAWS + PrivateKeyPathSuffix) // Profile aws profile name Profile StoreKey = "profile" - // PublicKeyPath public ssh key path - PublicKeyPath StoreKey = "public_key_path" + // AWSPublicKeyPath public ssh key path 
+ AWSPublicKeyPath StoreKey = StoreKey(components.CloudProviderAWS + PublicKeyPathSuffix) + //AzurePrivateKeyPassword private ssh key password + AzurePrivateKeyPassword StoreKey = StoreKey(components.CloudProviderAzure + PrivateKeyPasswordSuffix) + //AzurePrivateKeyPath private ssh key path + AzurePrivateKeyPath StoreKey = StoreKey(components.CloudProviderAzure + PrivateKeyPathSuffix) + //AzurePublicKeyPath public ssh key path + AzurePublicKeyPath StoreKey = StoreKey(components.CloudProviderAzure + PublicKeyPathSuffix) + //GCPPrivateKeyPassword private ssh key password + GCPPrivateKeyPassword StoreKey = StoreKey(components.CloudProviderGCP + PrivateKeyPasswordSuffix) + //GCPPrivateKeyPath private ssh key path + GCPPrivateKeyPath StoreKey = StoreKey(components.CloudProviderGCP + PrivateKeyPathSuffix) + //GCPPublicKeyPath public ssh key path + GCPPublicKeyPath StoreKey = StoreKey(components.CloudProviderGCP + PublicKeyPathSuffix) + // LocalPublicKeyPath public ssh key path + LocalPublicKeyPath StoreKey = "local_public_key_path" // PulumiPassword config file parameter name PulumiPassword StoreKey = "pulumi_password" // SkipDeleteOnFailure keep the stack on test failure @@ -56,3 +72,12 @@ const ( // MajorVersion config flag parameter name MajorVersion StoreKey = "major_version" ) + +const ( + // PrivateKeyPathSuffix private ssh key path suffix + PrivateKeyPathSuffix = "_private_key_path" + // PublicKeyPathSuffix public ssh key path suffix + PublicKeyPathSuffix = "_public_key_path" + // PrivateKeyPasswordSuffix private ssh key password suffix + PrivateKeyPasswordSuffix = "_private_key_password" +) diff --git a/test/new-e2e/pkg/runner/parameters/store_config_file.go b/test/new-e2e/pkg/runner/parameters/store_config_file.go index e9f20051e3708..8fd564adfb636 100644 --- a/test/new-e2e/pkg/runner/parameters/store_config_file.go +++ b/test/new-e2e/pkg/runner/parameters/store_config_file.go @@ -38,6 +38,8 @@ type Config struct { type ConfigParams struct { AWS AWS `yaml:"aws"` Azure Azure `yaml:"azure"` + GCP GCP `yaml:"gcp"` + Local Local `yaml:"local"` Agent Agent `yaml:"agent"` OutputDir string `yaml:"outputDir"` Pulumi Pulumi `yaml:"pulumi"` @@ -62,6 +64,19 @@ type Azure struct { PrivateKeyPassword string `yaml:"privateKeyPassword"` } +// GCP instance contains GCP related parameters +type GCP struct { + Account string `yaml:"account"` + PublicKeyPath string `yaml:"publicKeyPath"` + PrivateKeyPath string `yaml:"privateKeyPath"` + PrivateKeyPassword string `yaml:"privateKeyPassword"` +} + +// Local instance contains local related parameters +type Local struct { + PublicKeyPath string `yaml:"publicKeyPath"` +} + // Agent instance contains agent related parameters type Agent struct { APIKey string `yaml:"apiKey"` @@ -135,12 +150,26 @@ func (s configFileValueStore) get(key StoreKey) (string, error) { value = s.config.ConfigParams.Agent.APPKey case KeyPairName: value = s.config.ConfigParams.AWS.KeyPairName - case PublicKeyPath: + case AWSPublicKeyPath: value = s.config.ConfigParams.AWS.PublicKeyPath - case PrivateKeyPath: + case AWSPrivateKeyPath: value = s.config.ConfigParams.AWS.PrivateKeyPath - case PrivateKeyPassword: + case AWSPrivateKeyPassword: value = s.config.ConfigParams.AWS.PrivateKeyPassword + case AzurePrivateKeyPassword: + value = s.config.ConfigParams.Azure.PrivateKeyPassword + case AzurePrivateKeyPath: + value = s.config.ConfigParams.Azure.PrivateKeyPath + case AzurePublicKeyPath: + value = s.config.ConfigParams.Azure.PublicKeyPath + case GCPPrivateKeyPassword: + value = 
s.config.ConfigParams.GCP.PrivateKeyPassword + case GCPPrivateKeyPath: + value = s.config.ConfigParams.GCP.PrivateKeyPath + case GCPPublicKeyPath: + value = s.config.ConfigParams.GCP.PublicKeyPath + case LocalPublicKeyPath: + value = s.config.ConfigParams.Local.PublicKeyPath case StackParameters: value = s.stackParamsJSON case ExtraResourcesTags: diff --git a/test/new-e2e/pkg/runner/parameters/store_config_file_test.go b/test/new-e2e/pkg/runner/parameters/store_config_file_test.go index 68826e44f3b67..0c99a5a6d7314 100644 --- a/test/new-e2e/pkg/runner/parameters/store_config_file_test.go +++ b/test/new-e2e/pkg/runner/parameters/store_config_file_test.go @@ -52,7 +52,7 @@ func Test_NewConfigFileStore(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "totoro", value) - value, err = store.Get(PublicKeyPath) + value, err = store.Get(AWSPublicKeyPath) assert.NoError(t, err) assert.Equal(t, "/Users/totoro/.ssh/id_rsa.pub", value) @@ -81,7 +81,7 @@ func Test_NewConfigFileStoreNoAWSAccount(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "totoro", value) - value, err = store.Get(PublicKeyPath) + value, err = store.Get(AWSPublicKeyPath) assert.NoError(t, err) assert.Equal(t, "/Users/totoro/.ssh/id_rsa.pub", value) diff --git a/test/new-e2e/pkg/runner/parameters/store_env.go b/test/new-e2e/pkg/runner/parameters/store_env.go index 6e43128a2af69..1aba3ce15de34 100644 --- a/test/new-e2e/pkg/runner/parameters/store_env.go +++ b/test/new-e2e/pkg/runner/parameters/store_env.go @@ -19,10 +19,16 @@ var envVariablesByStoreKey = map[StoreKey]string{ Environments: "E2E_ENVIRONMENTS", ExtraResourcesTags: "E2E_EXTRA_RESOURCES_TAGS", KeyPairName: "E2E_KEY_PAIR_NAME", - PrivateKeyPassword: "E2E_PRIVATE_KEY_PASSWORD", - PrivateKeyPath: "E2E_PRIVATE_KEY_PATH", + AWSPrivateKeyPassword: "E2E_AWS_PRIVATE_KEY_PASSWORD", + AWSPrivateKeyPath: "E2E_AWS_PRIVATE_KEY_PATH", Profile: "E2E_PROFILE", - PublicKeyPath: "E2E_PUBLIC_KEY_PATH", + AWSPublicKeyPath: "E2E_AWS_PUBLIC_KEY_PATH", + AzurePrivateKeyPath: "E2E_AZURE_PRIVATE_KEY_PATH", + AzurePublicKeyPath: "E2E_AZURE_PUBLIC_KEY_PATH", + AzurePrivateKeyPassword: "E2E_AZURE_PRIVATE_KEY_PASSWORD", + GCPPrivateKeyPath: "E2E_GCP_PRIVATE_KEY_PATH", + GCPPublicKeyPath: "E2E_GCP_PUBLIC_KEY_PATH", + GCPPrivateKeyPassword: "E2E_GCP_PRIVATE_KEY_PASSWORD", PulumiPassword: "E2E_PULUMI_PASSWORD", SkipDeleteOnFailure: "E2E_SKIP_DELETE_ON_FAILURE", StackParameters: "E2E_STACK_PARAMS", diff --git a/test/new-e2e/pkg/utils/e2e/client/docker.go b/test/new-e2e/pkg/utils/e2e/client/docker.go index b31aba4ce7e85..1235bc56c0c2a 100644 --- a/test/new-e2e/pkg/utils/e2e/client/docker.go +++ b/test/new-e2e/pkg/utils/e2e/client/docker.go @@ -40,7 +40,7 @@ func NewDocker(t *testing.T, dockerOutput docker.ManagerOutput) (*Docker, error) sshOpts := []string{"-o", "StrictHostKeyChecking no"} - privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "") + privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.StoreKey(dockerOutput.Host.CloudProvider+parameters.PrivateKeyPathSuffix), "") if err != nil { return nil, err } diff --git a/test/new-e2e/pkg/utils/e2e/client/host.go b/test/new-e2e/pkg/utils/e2e/client/host.go index e446f56ff32db..ad0f1e0eb8557 100644 --- a/test/new-e2e/pkg/utils/e2e/client/host.go +++ b/test/new-e2e/pkg/utils/e2e/client/host.go @@ -65,12 +65,12 @@ type Host struct { // reconnect retry logic func NewHost(context e2e.Context, hostOutput remote.HostOutput) (*Host, error) { var privateSSHKey []byte - 
privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.PrivateKeyPath, "") + + privateKeyPath, err := runner.GetProfile().ParamStore().GetWithDefault(parameters.StoreKey(hostOutput.CloudProvider+parameters.PrivateKeyPathSuffix), "") if err != nil { return nil, err } - - privateKeyPassword, err := runner.GetProfile().SecretStore().GetWithDefault(parameters.PrivateKeyPassword, "") + privateKeyPassword, err := runner.GetProfile().SecretStore().GetWithDefault(parameters.StoreKey(hostOutput.CloudProvider+parameters.PrivateKeyPasswordSuffix), "") if err != nil { return nil, err } diff --git a/test/new-e2e/tests/containers/dump_cluster_state.go b/test/new-e2e/tests/containers/dump_cluster_state.go index ddf546d889d09..a1d0eeb97928c 100644 --- a/test/new-e2e/tests/containers/dump_cluster_state.go +++ b/test/new-e2e/tests/containers/dump_cluster_state.go @@ -17,7 +17,6 @@ import ( "strings" "sync" - "github.com/DataDog/datadog-agent/pkg/util/pointer" awsconfig "github.com/aws/aws-sdk-go-v2/config" awsec2 "github.com/aws/aws-sdk-go-v2/service/ec2" awsec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" @@ -34,6 +33,8 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" kubectlget "k8s.io/kubectl/pkg/cmd/get" kubectlutil "k8s.io/kubectl/pkg/cmd/util" + + "github.com/DataDog/datadog-agent/pkg/util/pointer" ) func dumpEKSClusterState(ctx context.Context, name string) (ret string) { @@ -287,7 +288,7 @@ func dumpK8sClusterState(ctx context.Context, kubeconfig *clientcmdapi.Config, o getCmd.SetErr(out) getCmd.SetContext(ctx) getCmd.SetArgs([]string{ - "nodes,all", + "nodes,mutatingwebhookconfiguration,validatingwebhookconfiguration,all", "--all-namespaces", "-o", "wide", diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index fbb1195a976c3..fb235f979c951 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -19,7 +19,6 @@ import ( "gopkg.in/zorkian/go-datadog-api.v2" "github.com/DataDog/datadog-agent/pkg/util/pointer" - "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/fakeintake/aggregator" fakeintake "github.com/DataDog/datadog-agent/test/fakeintake/client" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" @@ -993,8 +992,6 @@ func (suite *k8sSuite) TestAdmissionControllerWithLibraryAnnotation() { } func (suite *k8sSuite) TestAdmissionControllerWithAutoDetectedLanguage() { - // CONTINT-4009 - flake.Mark(suite.T()) suite.testAdmissionControllerPod("workload-mutated-lib-injection", "mutated-with-auto-detected-language", "python", true) } diff --git a/test/new-e2e/tests/containers/kindvm_test.go b/test/new-e2e/tests/containers/kindvm_test.go index 08e327fced435..5282e6fd65e34 100644 --- a/test/new-e2e/tests/containers/kindvm_test.go +++ b/test/new-e2e/tests/containers/kindvm_test.go @@ -10,8 +10,6 @@ import ( "encoding/json" "testing" - "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" - "github.com/DataDog/test-infra-definitions/scenarios/aws/kindvm" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" @@ -28,7 +26,6 @@ type kindSuite struct { } func TestKindSuite(t *testing.T) { - flake.Mark(t) suite.Run(t, &kindSuite{}) } diff --git a/test/otel/go.mod b/test/otel/go.mod index 2a9b3d05d5b04..6ceb9b9ca2ebf 100644 --- a/test/otel/go.mod +++ b/test/otel/go.mod @@ -164,15 +164,15 @@ require ( github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.31.0 // indirect github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
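
The tasks/components.py change above makes the expected Go package name depend on the folder: sources under an impl-<variant> directory must use package <variant>impl instead of <component>impl, which is why the testdata files now declare oneimpl and twoimpl. A minimal sketch of that derivation, using a hypothetical helper name rather than the task's actual structure:

from pathlib import Path


def expected_package_name(src_file: Path, component_name: str) -> str:
    # Default expectation: "<component>impl".
    expectname = component_name + "impl"
    # Files under an "impl-<variant>" folder must use "<variant>impl" instead.
    for part in src_file.parts:
        if "impl-" in part:
            expectname = part.split("-")[1] + "impl"
    return expectname


# expected_package_name(Path("comp/multiple/impl-one/multiple.go"), "multiple") -> "oneimpl"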
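
For the assign_owner change in tasks/issue.py, the Slack channel is now found by substring-matching the owning team against the keys of GITHUB_SLACK_MAP, with #agent-ask-anything as the fallback when nothing matches. A reduced sketch of that lookup; the two map entries below are copied from github_slack_map.yaml purely as examples:

GITHUB_SLACK_MAP = {
    # Example entries only; the real map lives in tasks/libs/pipeline/github_slack_map.yaml.
    "@datadog/agent-devx-infra": "#agent-devx-ops",
    "@datadog/apm-ecosystems-performance": "#apm-benchmarking-platform",
}


def channel_for_owner(owner: str) -> str:
    # An owner like "agent-devx-infra" matches the "@datadog/agent-devx-infra" key by substring.
    return next(
        (chan for team, chan in GITHUB_SLACK_MAP.items() if owner.lower() in team),
        "#agent-ask-anything",
    )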
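
The new tasks/tools/gotest-scrubbed.sh pipes go test -json output through sed so that strings shaped like 32-character hex API keys or 40-character hex APP keys are masked while the last five characters are kept. The same masking logic, sketched in Python rather than sed for readability; only the two regular expressions come from the script itself:

import re

API_KEY_RE = re.compile(r"\b[a-fA-F0-9]{27}([a-fA-F0-9]{5})\b")
APP_KEY_RE = re.compile(r"\b[a-fA-F0-9]{35}([a-fA-F0-9]{5})\b")


def scrub_line(line: str) -> str:
    # Keep the trailing five characters so scrubbed keys can still be told apart.
    line = API_KEY_RE.sub(lambda m: "*" * 27 + m.group(1), line)
    line = APP_KEY_RE.sub(lambda m: "*" * 35 + m.group(1), line)
    return line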
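
The log post-processing added to tasks/new_e2e_tests.py groups go test -json events per package and per test name truncated to test_depth path segments, so at depth 2 suites such as TestPackages/run_ubuntu and TestPackages/run_centos each get their own log file. A simplified sketch of the grouping idea, with a hypothetical helper name and without the parent-log propagation that the real post_process_output performs:

import json
from collections import defaultdict


def group_logs_by_depth(json_lines, test_depth=1):
    # Each input line is a go test -json event; keep only events that carry
    # a package, a test name, and an output payload.
    logs = defaultdict(lambda: defaultdict(list))
    for raw in json_lines:
        event = json.loads(raw)
        if not all(k in event for k in ("Package", "Test", "Output")):
            continue
        parts = event["Test"].split("/")
        if len(parts) < test_depth:
            continue  # parent-test output is folded into child buckets in the real task
        key = "/".join(parts[:test_depth])
        logs[event["Package"]][key].append(event["Output"])
    return logs


# With test_depth=2, TestPackages/run_ubuntu and TestPackages/run_centos end up
# in separate buckets instead of one combined TestPackages bucket.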
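
The runner parameter changes in test/new-e2e follow one naming convention: a cloud-provider prefix plus a fixed suffix (_public_key_path, _private_key_path, _private_key_password), and the env store maps each resulting key to its own E2E_* variable. A rough illustration of that convention; the provider string "aws" is an assumption about what components.CloudProviderAWS resolves to, not something stated in the patch:

# Suffixes mirror PrivateKeyPathSuffix and friends in parameters/const.go.
PUBLIC_KEY_PATH_SUFFIX = "_public_key_path"
PRIVATE_KEY_PATH_SUFFIX = "_private_key_path"
PRIVATE_KEY_PASSWORD_SUFFIX = "_private_key_password"


def private_key_path_key(cloud_provider: str) -> str:
    # cloud_provider is whatever the Go components.CloudProvider* constant holds;
    # "aws" is assumed here for the sake of the example.
    return cloud_provider + PRIVATE_KEY_PATH_SUFFIX


# private_key_path_key("aws") -> "aws_private_key_path", which the env store
# resolves through E2E_AWS_PRIVATE_KEY_PATH, and likewise for Azure and GCP.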