diff --git a/.circleci/config.yml b/.circleci/config.yml index 2036eac612597..08ed5555fe081 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,7 +15,7 @@ experimental: templates: job_template: &job_template docker: - - image: gcr.io/datadoghq/agent-circleci-runner:v41089396-d65d75ec + - image: gcr.io/datadoghq/agent-circleci-runner:v45186095-84d9d5f2 environment: USE_SYSTEM_LIBS: "1" working_directory: /go/src/github.com/DataDog/datadog-agent diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fbdd00e85974f..4c47f9109d2cf 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -47,12 +47,12 @@ /.circleci/ @DataDog/agent-devx-infra /.github/CODEOWNERS # do not notify anyone -/.github/*_TEMPLATE.md @DataDog/agent-all +/.github/*_TEMPLATE.md @DataDog/agent-devx-loops /.github/dependabot.yaml @DataDog/agent-devx-infra -/.github/workflows/serverless-benchmarks.yml @DataDog/serverless -/.github/workflows/serverless-binary-size.yml @DataDog/serverless -/.github/workflows/serverless-integration.yml @DataDog/serverless -/.github/workflows/serverless-vuln-scan.yml @DataDog/serverless +/.github/workflows/serverless-benchmarks.yml @DataDog/serverless @Datadog/serverless-aws +/.github/workflows/serverless-binary-size.yml @DataDog/serverless @Datadog/serverless-aws +/.github/workflows/serverless-integration.yml @DataDog/serverless @Datadog/serverless-aws +/.github/workflows/serverless-vuln-scan.yml @DataDog/serverless @Datadog/serverless-aws /.github/workflows/windows-*.yml @DataDog/windows-agent /.github/workflows/cws-btfhub-sync.yml @DataDog/agent-security /.github/workflows/gohai.yml @DataDog/agent-shared-components @@ -72,7 +72,7 @@ /.gitlab/binary_build/cluster_agent.yml @DataDog/container-integrations @DataDog/agent-delivery /.gitlab/binary_build/fakeintake.yml @DataDog/agent-devx-loops /.gitlab/binary_build/otel_agent.yml @DataDog/opentelemetry @DataDog/agent-delivery -/.gitlab/binary_build/serverless.yml @DataDog/serverless 
@DataDog/agent-delivery +/.gitlab/binary_build/serverless.yml @DataDog/serverless @Datadog/serverless-aws @DataDog/agent-delivery /.gitlab/binary_build/system_probe.yml @DataDog/ebpf-platform @DataDog/agent-delivery /.gitlab/binary_build/windows.yml @DataDog/agent-delivery @DataDog/windows-agent @@ -98,13 +98,12 @@ /.gitlab/common/test_infra_version.yml @DataDog/agent-devx-loops @DataDog/agent-devx-infra /.gitlab/e2e/e2e.yml @DataDog/container-integrations @DataDog/agent-devx-loops -/.gitlab/e2e_k8s/e2e_k8s.yml @DataDog/container-integrations @DataDog/agent-devx-loops /.gitlab/e2e/install_packages @DataDog/agent-delivery /.gitlab/container_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /.gitlab/binary_build/fakeintake.yml @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /.gitlab/functional_test/security_agent.yml @DataDog/agent-security @DataDog/agent-devx-infra -/.gitlab/functional_test/serverless.yml @DataDog/serverless @DataDog/agent-devx-infra +/.gitlab/functional_test/serverless.yml @DataDog/serverless @Datadog/serverless-aws @DataDog/agent-devx-infra /.gitlab/functional_test_cleanup.yml @DataDog/agent-security @DataDog/windows-kernel-integrations @DataDog/agent-devx-infra /.gitlab/functional_test/system_probe_windows.yml @DataDog/agent-devx-infra @DataDog/windows-kernel-integrations /.gitlab/functional_test/common.yml @DataDog/agent-devx-infra @DataDog/windows-kernel-integrations @@ -188,7 +187,7 @@ /cmd/dogstatsd/ @DataDog/agent-metrics-logs /cmd/otel-agent/ @DataDog/opentelemetry /cmd/process-agent/ @DataDog/processes -/cmd/serverless/ @DataDog/serverless +/cmd/serverless/ @DataDog/serverless @Datadog/serverless-aws /cmd/serverless/dependencies*.txt @DataDog/serverless @DataDog/agent-shared-components /cmd/serverless-init/ @DataDog/serverless /cmd/system-probe/ @DataDog/ebpf-platform @@ -311,7 +310,7 @@ /pkg/metrics/metricsource.go @DataDog/agent-metrics-logs @DataDog/agent-integrations /pkg/serializer/ 
@DataDog/agent-processing-and-routing /pkg/serializer/internal/metrics/origin_mapping.go @DataDog/agent-processing-and-routing @DataDog/agent-integrations -/pkg/serverless/ @DataDog/serverless +/pkg/serverless/ @DataDog/serverless @Datadog/serverless-aws /pkg/serverless/appsec/ @DataDog/asm-go /pkg/status/ @DataDog/agent-shared-components /pkg/status/templates/trace-agent.tmpl @DataDog/agent-apm @@ -351,9 +350,9 @@ /pkg/collector/corechecks/ebpf/ebpf* @DataDog/ebpf-platform /pkg/collector/corechecks/ebpf/probe/ebpfcheck/ @DataDog/ebpf-platform /pkg/collector/corechecks/ebpf/c/runtime/ebpf* @DataDog/ebpf-platform -/pkg/collector/corechecks/embed/ @Datadog/agent-devx-infra -/pkg/collector/corechecks/embed/apm/ @Datadog/agent-devx-infra @DataDog/agent-apm -/pkg/collector/corechecks/embed/process/ @Datadog/agent-devx-infra @DataDog/processes +/pkg/collector/corechecks/embed/ @Datadog/agent-delivery +/pkg/collector/corechecks/embed/apm/ @DataDog/agent-apm +/pkg/collector/corechecks/embed/process/ @DataDog/processes /pkg/collector/corechecks/network-devices/ @DataDog/network-device-monitoring /pkg/collector/corechecks/orchestrator/ @DataDog/container-app /pkg/collector/corechecks/kubernetes/ @DataDog/container-integrations @@ -399,7 +398,7 @@ /pkg/flare/*_windows_test.go @Datadog/windows-agent /pkg/fleet/ @DataDog/fleet @DataDog/windows-agent /pkg/otlp/ @DataDog/opentelemetry -/pkg/otlp/*_serverless*.go @DataDog/serverless +/pkg/otlp/*_serverless*.go @DataDog/serverless @Datadog/serverless-aws /pkg/otlp/*_not_serverless*.go @DataDog/opentelemetry /pkg/pidfile/ @DataDog/agent-shared-components /pkg/persistentcache/ @DataDog/agent-metrics-logs @@ -565,13 +564,9 @@ /test/ @DataDog/agent-devx-loops /test/benchmarks/ @DataDog/agent-metrics-logs /test/benchmarks/kubernetes_state/ @DataDog/container-integrations -/test/e2e/ @DataDog/container-integrations @DataDog/agent-security -/test/e2e/cws-tests/ @DataDog/agent-security -/test/e2e/argo-workflows/otlp-workflow.yaml 
@DataDog/opentelemetry -/test/e2e/containers/otlp_sender/ @DataDog/opentelemetry /test/integration/ @DataDog/container-integrations -/test/integration/serverless @DataDog/serverless -/test/integration/serverless_perf @DataDog/serverless +/test/integration/serverless @DataDog/serverless @Datadog/serverless-aws +/test/integration/serverless_perf @DataDog/serverless @Datadog/serverless-aws /test/kitchen/ @DataDog/agent-devx-loops /test/kitchen/test-definitions/ @DataDog/agent-delivery /test/kitchen/test/integration/ @DataDog/agent-delivery diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 0ae6b23fa7e6f..cd1de234a6014 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,46 +1,25 @@ ### What does this PR do? - - ### Motivation - +### Describe how to test/QA your changes -### Additional Notes +### Possible Drawbacks / Trade-offs +### Additional Notes - -### Possible Drawbacks / Trade-offs - - - -### Describe how to test/QA your changes - - +--> \ No newline at end of file diff --git a/.github/workflows/backport-pr.yml b/.github/workflows/backport-pr.yml index 3ee7eeeb276b4..950d3ff41ca6a 100644 --- a/.github/workflows/backport-pr.yml +++ b/.github/workflows/backport-pr.yml @@ -24,7 +24,7 @@ jobs: contents: write pull-requests: write steps: - - uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 + - uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0 id: app-token with: app-id: ${{ vars.DD_GITHUB_TOKEN_GENERATOR_APP_ID }} diff --git a/.github/workflows/buildimages-update.yml b/.github/workflows/buildimages-update.yml index 523018890c0f6..454fc14e335a1 100644 --- a/.github/workflows/buildimages-update.yml +++ b/.github/workflows/buildimages-update.yml @@ -54,7 +54,7 @@ jobs: ref: ${{ inputs.branch }} - name: Setup Python and pip - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: 
actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: # use Python < 3.12 so that distutil is still available by default python-version: 3.11 diff --git a/.github/workflows/chase_release_managers.yml b/.github/workflows/chase_release_managers.yml index bcf922f93d575..c1a4cff2baad0 100644 --- a/.github/workflows/chase_release_managers.yml +++ b/.github/workflows/chase_release_managers.yml @@ -11,7 +11,7 @@ on: permissions: {} jobs: - create_release_schedule: + chase_release_managers: runs-on: ubuntu-latest steps: - name: Checkout repository @@ -19,7 +19,7 @@ jobs: with: ref: ${{ github.head_ref }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" @@ -31,5 +31,6 @@ jobs: env: ATLASSIAN_USERNAME : ${{ secrets.ATLASSIAN_USERNAME }} ATLASSIAN_PASSWORD : ${{ secrets.ATLASSIAN_PASSWORD }} + SLACK_API_TOKEN : ${{ secrets.SLACK_DATADOG_AGENT_BOT_TOKEN }} run: | inv -e release.chase-release-managers --version ${{ github.event.inputs.version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 748cd3e5aaeba..7cb2811ad74c4 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -28,7 +28,7 @@ jobs: fetch-depth: 0 - name: Setup Python3 - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: "3.11.8" cache: "pip" diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml index bfed4df0f66b4..bd40afbf6e315 100644 --- a/.github/workflows/create_rc_pr.yml +++ b/.github/workflows/create_rc_pr.yml @@ -24,7 +24,7 @@ jobs: sparse-checkout: 'tasks' - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: 
actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" @@ -64,7 +64,7 @@ jobs: fetch-depth: 0 - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" @@ -81,8 +81,8 @@ jobs: env: ATLASSIAN_USERNAME: ${{ secrets.ATLASSIAN_USERNAME }} ATLASSIAN_PASSWORD: ${{ secrets.ATLASSIAN_PASSWORD }} + SLACK_API_TOKEN : ${{ secrets.SLACK_DATADOG_AGENT_BOT_TOKEN }} run: | - export SLACK_API_TOKEN="${{ secrets.SLACK_API_TOKEN }}" echo "CHANGES=$(inv -e release.check-for-changes -r ${{ matrix.value }} ${{ needs.find_release_branches.outputs.warning }})" >> $GITHUB_OUTPUT - name: Create RC PR diff --git a/.github/workflows/create_release_schedule.yml b/.github/workflows/create_release_schedule.yml index 4fc749d9b280c..4f674987ecf05 100644 --- a/.github/workflows/create_release_schedule.yml +++ b/.github/workflows/create_release_schedule.yml @@ -24,7 +24,7 @@ jobs: with: ref: ${{ github.head_ref }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml index 2e3152fb10763..e5fe407c6307b 100644 --- a/.github/workflows/cws-btfhub-sync.yml +++ b/.github/workflows/cws-btfhub-sync.yml @@ -64,7 +64,7 @@ jobs: sparse-checkout: ${{ matrix.cone }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' @@ -103,7 +103,7 @@ jobs: ref: ${{ inputs.base_branch || 'main' }} - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 
v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' diff --git a/.github/workflows/docs-dev.yml b/.github/workflows/docs-dev.yml index 4ce377865f81b..04c29577b2bbf 100644 --- a/.github/workflows/docs-dev.yml +++ b/.github/workflows/docs-dev.yml @@ -31,7 +31,7 @@ jobs: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.12' diff --git a/.github/workflows/external-contributor.yml b/.github/workflows/external-contributor.yml index d4092f41492ae..d03850b0b15f6 100644 --- a/.github/workflows/external-contributor.yml +++ b/.github/workflows/external-contributor.yml @@ -22,7 +22,7 @@ jobs: ref: main fetch-depth: 0 - name: Setup python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' diff --git a/.github/workflows/go_mod_tidy.yml b/.github/workflows/go_mod_tidy.yml index d90caf056fb82..a01b9d29ad803 100644 --- a/.github/workflows/go_mod_tidy.yml +++ b/.github/workflows/go_mod_tidy.yml @@ -30,7 +30,7 @@ jobs: with: go-version-file: ".go-version" - name: Install python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: "pip" diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml index 1f0601757941f..29f35d223e758 100644 --- a/.github/workflows/label-analysis.yml +++ b/.github/workflows/label-analysis.yml @@ -25,7 +25,7 @@ jobs: - name: Checkout repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Setup python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # 
v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' @@ -43,7 +43,7 @@ jobs: with: fetch-depth: 0 - name: Setup python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: 3.11 cache: 'pip' diff --git a/.github/workflows/serverless-binary-size.yml b/.github/workflows/serverless-binary-size.yml index c8880d3306d01..9240bb666b3f9 100644 --- a/.github/workflows/serverless-binary-size.yml +++ b/.github/workflows/serverless-binary-size.yml @@ -11,6 +11,8 @@ permissions: {} jobs: comment: runs-on: ubuntu-latest + permissions: + pull-requests: write # Add comment to PR steps: - name: Checkout datadog-agent repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a813d836841a8..ccc2b95720930 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,7 +22,6 @@ include: - .gitlab/deps_fetch/deps_fetch.yml - .gitlab/dev_container_deploy/include.yml - .gitlab/e2e/e2e.yml - - .gitlab/e2e_k8s/e2e_k8s.yml - .gitlab/e2e_install_packages/include.yml - .gitlab/e2e_pre_test/e2e_pre_test.yml - .gitlab/functional_test/include.yml @@ -35,6 +34,7 @@ include: - .gitlab/kitchen_cleanup/include.yml - .gitlab/kitchen_deploy/kitchen_deploy.yml - .gitlab/kitchen_testing/include.yml + - .gitlab/lint/include.yml - .gitlab/maintenance_jobs/include.yml - .gitlab/notify/notify.yml - .gitlab/package_build/include.yml @@ -65,6 +65,7 @@ stages: - maintenance_jobs - deps_build - deps_fetch + - lint - source_test - source_test_stats - software_composition_analysis @@ -169,15 +170,15 @@ variables: # To use images from datadog-agent-buildimages dev branches, set the corresponding # SUFFIX variable to _test_only DATADOG_AGENT_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_BUILDIMAGES: 
v45186095-84d9d5f2 DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_WINBUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_WINBUILDIMAGES: v45186095-84d9d5f2 DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_ARMBUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_ARMBUILDIMAGES: v45186095-84d9d5f2 DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v45186095-84d9d5f2 DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v41089396-d65d75ec + DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v45186095-84d9d5f2 DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded DEB_GPG_KEY_ID: c0962c7d @@ -794,6 +795,7 @@ workflow: paths: - test/new-e2e/pkg/**/* - test/new-e2e/go.mod + - flakes.yaml compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 .on_e2e_or_windows_installer_changes: @@ -1032,6 +1034,15 @@ workflow: - when: manual allow_failure: true +.on_cspm_or_e2e_changes: + - !reference [.on_e2e_main_release_or_rc] + - changes: + paths: + - pkg/security/**/* + - test/new-e2e/tests/cspm/**/* #TODO: Add other paths that should trigger the execution of CSPM e2e tests + compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + when: on_success + .on_windows_systemprobe_or_e2e_changes: - !reference [.on_e2e_main_release_or_rc] - changes: diff --git a/.gitlab/.pre/cancel-prev-pipelines.yml b/.gitlab/.pre/cancel-prev-pipelines.yml index 488820ac33544..48b5170248759 100644 --- a/.gitlab/.pre/cancel-prev-pipelines.yml +++ b/.gitlab/.pre/cancel-prev-pipelines.yml @@ -14,5 +14,5 @@ cancel-prev-pipelines: when: never - when: on_success script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - inv 
pipeline.auto-cancel-previous-pipelines diff --git a/.gitlab/.pre/create_release_qa_cards.yml b/.gitlab/.pre/create_release_qa_cards.yml new file mode 100644 index 0000000000000..d6343b073fd15 --- /dev/null +++ b/.gitlab/.pre/create_release_qa_cards.yml @@ -0,0 +1,14 @@ +--- +create_release_qa_cards: + stage: .pre + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + rules: + - !reference [.on_deploy_rc] + script: + - !reference [.setup_agent_github_app] + - ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $JIRA_READ_API_TOKEN) || exit $?; export ATLASSIAN_PASSWORD + - export ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com + - pip install ddqa + - inv release.create-qa-cards -t ${CI_COMMIT_REF_NAME} + allow_failure: true diff --git a/.gitlab/.pre/test_gitlab_configuration.yml b/.gitlab/.pre/test_gitlab_configuration.yml index 1c17aa088a4a6..1de3d176087d5 100644 --- a/.gitlab/.pre/test_gitlab_configuration.yml +++ b/.gitlab/.pre/test_gitlab_configuration.yml @@ -5,7 +5,7 @@ test_gitlab_configuration: rules: - !reference [.on_gitlab_changes] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN - inv -e linter.gitlab-ci - inv -e linter.job-change-path - inv -e linter.gitlab-change-paths @@ -18,8 +18,7 @@ test_gitlab_compare_to: rules: - !reference [.on_gitlab_changes] script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN - !reference [.setup_agent_github_app] - pip install -r tasks/requirements.txt - inv pipeline.compare-to-itself diff --git a/.gitlab/JOBOWNERS b/.gitlab/JOBOWNERS 
index 913e732bc6620..ded05612f505a 100644 --- a/.gitlab/JOBOWNERS +++ b/.gitlab/JOBOWNERS @@ -142,15 +142,18 @@ new-e2e-process* @DataDog/processes new-e2e-agent-platform* @DataDog/agent-delivery new-e2e-aml* @DataDog/agent-metrics-logs new-e2e-apm* @DataDog/agent-apm +new-e2e-discovery* @Datadog/universal-service-monitoring new-e2e-ndm* @DataDog/network-device-monitoring new-e2e-npm* @DataDog/Networks new-e2e-cws* @DataDog/agent-security -new-e2e-windows-agent* @DataDog/windows-agent new-e2e-orchestrator* @DataDog/container-app e2e_pre_test* @DataDog/agent-devx-loops new-e2e-remote-config* @DataDog/remote-config new-e2e-installer* @DataDog/fleet -new-e2e-windows-service-test @DataDog/windows-agent +new-e2e-installer-windows @DataDog/windows-agent +new-e2e-windows* @DataDog/windows-agent +new-e2e-windows-systemprobe @DataDog/windows-kernel-integrations +new-e2e-windows-security-agent @DataDog/windows-kernel-integrations new-e2e_windows_powershell_module_test @DataDog/windows-kernel-integrations # Kernel matrix testing diff --git a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml index 873a471a3b85a..0a3b9802eb47b 100644 --- a/.gitlab/binary_build/cluster_agent_cloudfoundry.yml +++ b/.gitlab/binary_build/cluster_agent_cloudfoundry.yml @@ -21,5 +21,5 @@ cluster_agent_cloudfoundry-build_amd64: - inv -e cluster-agent-cloudfoundry.build - cd $CI_PROJECT_DIR/$CLUSTER_AGENT_CLOUDFOUNDRY_BINARIES_DIR - mkdir -p $OMNIBUS_PACKAGE_DIR - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) + - PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) || exit $? 
- tar cf $OMNIBUS_PACKAGE_DIR/datadog-cluster-agent-cloudfoundry-$PACKAGE_VERSION-$ARCH.tar.xz datadog-cluster-agent-cloudfoundry diff --git a/.gitlab/choco_deploy/choco_deploy.yml b/.gitlab/choco_deploy/choco_deploy.yml index 715387a08f961..eb3a3e27f353a 100644 --- a/.gitlab/choco_deploy/choco_deploy.yml +++ b/.gitlab/choco_deploy/choco_deploy.yml @@ -11,7 +11,7 @@ publish_choco_7_x64: ARCH: "x64" before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:CHOCOLATEY_API_KEY" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:CHOCOLATEY_API_KEY" -tempFile "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $chocolateyApiKey=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/common/container_publish_job_templates.yml b/.gitlab/common/container_publish_job_templates.yml index e87bc2d37860e..19786f15fa81d 100644 --- a/.gitlab/common/container_publish_job_templates.yml +++ b/.gitlab/common/container_publish_job_templates.yml @@ -13,7 +13,7 @@ IMG_VARIABLES: "" IMG_SIGNING: "" script: # We can't use the 'trigger' keyword on manual jobs, otherwise they can't be run if the pipeline fails and is retried - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - | if [[ "$BUCKET_BRANCH" == "nightly" && ( "$IMG_SOURCES" =~ "$SRC_AGENT" || "$IMG_SOURCES" =~ "$SRC_DCA" || "$IMG_SOURCES" =~ "$SRC_CWS_INSTRUMENTATION" || "$IMG_VARIABLES" =~ "$SRC_AGENT" || "$IMG_VARIABLES" =~ "$SRC_DCA" || "$IMG_VARIABLES" =~ "$SRC_CWS_INSTRUMENTATION" ) ]]; then export ECR_RELEASE_SUFFIX="-nightly" diff --git a/.gitlab/common/shared.yml b/.gitlab/common/shared.yml index bb1d7e2198518..8a16870d9fac2 100644 --- a/.gitlab/common/shared.yml +++ b/.gitlab/common/shared.yml @@ -21,33 +21,37 @@ 
.setup_deb_signing_key: &setup_deb_signing_key - set +x - - DEB_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY) - - printf -- "${DEB_GPG_KEY}" | gpg --import --batch - - export DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - DEB_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) || exit $?; export DEB_SIGNING_PASSPHRASE .setup_macos_github_app: # GitHub App rate-limits are per-app. # This balances the requests made to GitHub between the two apps we have set up. - | if [[ "$(( RANDOM % 2 ))" == "1" ]]; then - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID) + GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY) || exit $?; export GITHUB_KEY_B64 + GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID) || exit $?; export GITHUB_APP_ID + GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID) || exit $?; export GITHUB_INSTALLATION_ID echo "Using GitHub App instance 1" else - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID_2) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2) + GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2) || exit $?; export GITHUB_KEY_B64 + GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh 
$MACOS_GITHUB_APP_ID_2) || exit $?; export GITHUB_APP_ID + GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2) || exit $?; export GITHUB_INSTALLATION_ID echo "Using GitHub App instance 2" fi .setup_agent_github_app: - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_KEY) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_APP_ID) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_INSTALLATION_ID) + - GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_KEY) || exit $?; export GITHUB_KEY_B64 + - GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_APP_ID) || exit $?; export GITHUB_APP_ID + - GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_GITHUB_INSTALLATION_ID) || exit $?; export GITHUB_INSTALLATION_ID - echo "Using agent GitHub App" # Install `dd-pkg` and lint packages produced by Omnibus, supports only deb and rpm packages .lint_linux_packages: - curl -sSL "https://dd-package-tools.s3.amazonaws.com/dd-pkg/${DD_PKG_VERSION}/dd-pkg_Linux_${DD_PKG_ARCH}.tar.gz" | tar -xz -C /usr/local/bin dd-pkg - find $OMNIBUS_PACKAGE_DIR -iregex '.*\.\(deb\|rpm\)' | xargs dd-pkg lint + - | + if [ -n "$PACKAGE_REQUIRED_FILES_LIST" ]; then + find $OMNIBUS_PACKAGE_DIR \( -name '*.deb' -or -name '*.rpm' \) -a -not -name '*-dbg[_-]*' | xargs dd-pkg check-files --required-files ${PACKAGE_REQUIRED_FILES_LIST} + fi diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index d095cd25f8513..d68b5b034ea01 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -4,4 +4,4 @@ variables: # and check the job creating the image to make sure you have the right SHA prefix TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: "" # Make sure to update test-infra-definitions version in go.mod as well - 
TEST_INFRA_DEFINITIONS_BUILDIMAGES: ce6a4aad9299 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 283b257025df diff --git a/.gitlab/container_build/docker_linux.yml b/.gitlab/container_build/docker_linux.yml index 3d93f364b430e..b6c028e032a01 100644 --- a/.gitlab/container_build/docker_linux.yml +++ b/.gitlab/container_build/docker_linux.yml @@ -13,8 +13,9 @@ fi - TARGET_TAG=${IMAGE}${ECR_RELEASE_SUFFIX}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}$TAG_SUFFIX-$ARCH # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi # Build image, use target none label to avoid replication - docker buildx build --no-cache --push --pull --platform linux/$ARCH --build-arg CIBUILD=true --build-arg GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL} $BUILD_ARG --build-arg DD_GIT_REPOSITORY_URL=https://github.com/DataDog/datadog-agent --build-arg DD_GIT_COMMIT_SHA=${CI_COMMIT_SHA} --file $BUILD_CONTEXT/Dockerfile --tag ${TARGET_TAG} --label "org.opencontainers.image.created=$(date --rfc-3339=seconds)" --label "org.opencontainers.image.authors=Datadog " --label "org.opencontainers.image.source=https://github.com/DataDog/datadog-agent" --label "org.opencontainers.image.version=$(inv agent.version)" --label "org.opencontainers.image.revision=${CI_COMMIT_SHA}" --label "org.opencontainers.image.vendor=Datadog, Inc." 
--label "target=none" $BUILD_CONTEXT # Squash image diff --git a/.gitlab/container_build/fakeintake.yml b/.gitlab/container_build/fakeintake.yml index 92a2f94da565c..a348a3c729cb4 100644 --- a/.gitlab/container_build/fakeintake.yml +++ b/.gitlab/container_build/fakeintake.yml @@ -15,7 +15,8 @@ docker_build_fakeintake: BUILD_CONTEXT: . script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) || exit $? + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | docker login --username "$DOCKER_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi - docker buildx build --push --pull --platform ${PLATFORMS} --file ${DOCKERFILE} --tag ${TARGET} $BUILD_CONTEXT retry: 2 diff --git a/.gitlab/deploy_containers/deploy_containers_a7.yml b/.gitlab/deploy_containers/deploy_containers_a7.yml index 5d3fdac92be9a..05eb4b531708c 100644 --- a/.gitlab/deploy_containers/deploy_containers_a7.yml +++ b/.gitlab/deploy_containers/deploy_containers_a7.yml @@ -25,7 +25,7 @@ include: stage: deploy_containers dependencies: [] before_script: - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)"; fi + - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" || exit $?; fi - export IMG_BASE_SRC="${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_LINUX_SOURCES="${IMG_BASE_SRC}-7${JMX}-amd64,${IMG_BASE_SRC}-7${JMX}-arm64" - export 
IMG_WINDOWS_SOURCES="${IMG_BASE_SRC}-7${JMX}-win1809${FLAVOR}-amd64,${IMG_BASE_SRC}-7${JMX}-winltsc2022${FLAVOR}-amd64" @@ -45,6 +45,16 @@ include: - "-servercore" - "-linux" +.deploy_containers-a7-base-ot: + extends: .docker_publish_job_definition + stage: deploy_containers + rules: + - when: manual + allow_failure: true + variables: + AGENT_REPOSITORY: agent + IMG_REGISTRIES: public + dependencies: [] deploy_containers-a7: extends: .deploy_containers-a7_external @@ -65,7 +75,7 @@ deploy_containers-dogstatsd: !reference [.manual_on_deploy_auto_on_rc] dependencies: [] before_script: - - export VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" + - VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" || exit $? - export IMG_SOURCES="${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64" - export IMG_DESTINATIONS="${DSD_REPOSITORY}:${VERSION}" @@ -86,22 +96,16 @@ deploy_containers-a7_internal-rc: deploy_containers-ot: - extends: .docker_publish_job_definition - stage: deploy_containers - rules: - - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - VERSION: 7 - dependencies: [] + extends: .deploy_containers-a7-base-ot + before_script: + - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe --pipeline-id $PARENT_PIPELINE_ID)" || exit $?; fi + - export IMG_SOURCES="${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta${JMX}-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta${JMX}-arm64" + - export IMG_DESTINATIONS="${AGENT_REPOSITORY}:${VERSION}-ot-beta${JMX}" parallel: matrix: - - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-arm64 - IMG_DESTINATIONS: 
${AGENT_REPOSITORY}:${VERSION}-ot-beta - - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-arm64 - IMG_DESTINATIONS: ${AGENT_REPOSITORY}:${VERSION}-ot-beta-jmx + - JMX: + - "" + - "-jmx" # @@ -129,7 +133,6 @@ deploy_containers_latest-a7: IMG_SOURCES: "%BASE%-win1809-servercore-amd64,%BASE%-winltsc2022-servercore-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore-jmx,${AGENT_REPOSITORY}:latest-servercore-jmx - deploy_containers_latest-a7_internal: extends: .docker_publish_job_definition stage: deploy_containers @@ -142,7 +145,6 @@ deploy_containers_latest-a7_internal: IMG_SOURCES: "%BASE%-amd64,%BASE%-arm64,%BASE%-win1809-amd64,%BASE%-winltsc2022-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-jmx - deploy_containers_latest-dogstatsd: extends: .docker_publish_job_definition stage: deploy_containers @@ -152,3 +154,15 @@ deploy_containers_latest-dogstatsd: variables: IMG_SOURCES: ${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-amd64,${SRC_DSD}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-arm64 IMG_DESTINATIONS: ${DSD_REPOSITORY}:7,${DSD_REPOSITORY}:latest + +deploy_containers_latest-ot: + extends: .deploy_containers-a7-base-ot + variables: + VERSION: 7 + parallel: + matrix: + - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-arm64 + IMG_DESTINATIONS: ${AGENT_REPOSITORY}:${VERSION}-ot-beta + - IMG_SOURCES: ${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-amd64,${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-ot-beta-jmx-arm64 + IMG_DESTINATIONS: ${AGENT_REPOSITORY}:${VERSION}-ot-beta-jmx + diff --git a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml index 2d7301d70bca4..330c60bb239ea 100644 --- 
a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml +++ b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml @@ -11,7 +11,7 @@ include: stage: deploy_cws_instrumentation dependencies: [] before_script: - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi + - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe)" || exit $?; fi - if [[ "$CWS_INSTRUMENTATION_REPOSITORY" == "" ]]; then export CWS_INSTRUMENTATION_REPOSITORY="cws-instrumentation"; fi - export IMG_BASE_SRC="${SRC_CWS_INSTRUMENTATION}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_SOURCES="${IMG_BASE_SRC}-amd64,${IMG_BASE_SRC}-arm64" diff --git a/.gitlab/deploy_dca/deploy_dca.yml b/.gitlab/deploy_dca/deploy_dca.yml index 5065744f8e315..63ef1ed56d74c 100644 --- a/.gitlab/deploy_dca/deploy_dca.yml +++ b/.gitlab/deploy_dca/deploy_dca.yml @@ -15,7 +15,7 @@ include: - job: "docker_build_cluster_agent_arm64" artifacts: false before_script: - - if [[ "$VERSION" == "" ]]; then export VERSION="$(inv agent.version --major-version 7 --url-safe)"; fi + - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --major-version 7 --url-safe)" || exit $?; fi - if [[ "$CLUSTER_AGENT_REPOSITORY" == "" ]]; then export CLUSTER_AGENT_REPOSITORY="cluster-agent"; fi - export IMG_BASE_SRC="${SRC_DCA}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" - export IMG_SOURCES="${IMG_BASE_SRC}-amd64,${IMG_BASE_SRC}-arm64" diff --git a/.gitlab/deploy_packages/nix.yml b/.gitlab/deploy_packages/nix.yml index 4baead2992b04..8a7dd50335f73 100644 --- a/.gitlab/deploy_packages/nix.yml +++ b/.gitlab/deploy_packages/nix.yml @@ -172,7 +172,7 @@ deploy_staging_dsd: needs: ["build_dogstatsd-binary_x64"] script: - $S3_CP_CMD $S3_ARTIFACTS_URI/dogstatsd/dogstatsd ./dogstatsd - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) + - PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) || 
exit $? - $S3_CP_CMD ./dogstatsd $S3_DSD6_URI/linux/dogstatsd-$PACKAGE_VERSION --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 # deploy iot-agent x64 binary to staging bucket @@ -185,7 +185,7 @@ deploy_staging_iot_agent: needs: ["build_iot_agent-binary_x64"] script: - $S3_CP_CMD $S3_ARTIFACTS_URI/iot/agent ./agent - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) + - PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7) || exit $? - $S3_CP_CMD ./agent $S3_DSD6_URI/linux/iot/agent-$PACKAGE_VERSION --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 # Datadog Installer diff --git a/.gitlab/deploy_packages/oci.yml b/.gitlab/deploy_packages/oci.yml index da2dd66b2e11e..b3e39afe86fb1 100644 --- a/.gitlab/deploy_packages/oci.yml +++ b/.gitlab/deploy_packages/oci.yml @@ -15,7 +15,8 @@ include: - ls $OMNIBUS_PACKAGE_DIR script: - set +x - - export VERSION=$(inv agent.version --url-safe)-1 + - !reference [.retrieve_linux_go_tools_deps] + - VERSION="$(inv agent.version --url-safe)-1" || exit $? 
- git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/" - go env -w GOPRIVATE="github.com/DataDog/*" - export PATH=$PATH:$(go env GOPATH)/bin @@ -28,12 +29,12 @@ include: deploy_agent_oci: extends: ".deploy_packages_oci" - needs: [ "agent_oci" ] + needs: [ "agent_oci", "go_tools_deps"] variables: OCI_PRODUCT: "datadog-agent" deploy_installer_oci: extends: ".deploy_packages_oci" - needs: [ "installer_oci" ] + needs: [ "installer_oci", "go_tools_deps" ] variables: OCI_PRODUCT: "datadog-installer" diff --git a/.gitlab/deploy_packages/windows.yml b/.gitlab/deploy_packages/windows.yml index c119e50f48048..36e2ffe349060 100644 --- a/.gitlab/deploy_packages/windows.yml +++ b/.gitlab/deploy_packages/windows.yml @@ -84,4 +84,5 @@ deploy_installer_packages_windows-x64: --include "datadog-installer-*-1-x86_64.msi" --include "datadog-installer-*-1-x86_64.debug.zip" --include "datadog-installer-*-1-x86_64.zip" + --include "datadog-installer-*-1-x86_64.exe" $OMNIBUS_PACKAGE_DIR $S3_RELEASE_INSTALLER_ARTIFACTS_URI/msi/x86_64/ diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml index a35239c948381..f28f946b1fb0c 100644 --- a/.gitlab/deploy_packages/winget.yml +++ b/.gitlab/deploy_packages/winget.yml @@ -11,7 +11,7 @@ publish_winget_7_x64: ARCH: "x64" before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:WINGET_PAT" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:WINGET_PAT" -tempFile "$tmpfile") - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $wingetPat=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/deps_fetch/deps_fetch.yml b/.gitlab/deps_fetch/deps_fetch.yml index 4fde8af699088..6b1ee4dd06fa2 100644 --- a/.gitlab/deps_fetch/deps_fetch.yml +++ b/.gitlab/deps_fetch/deps_fetch.yml @@ -4,15 +4,15 @@ # to reuse them in 
further jobs that need them. .retrieve_linux_go_deps: - - mkdir -p $GOPATH/pkg/mod && tar xJf modcache.tar.xz -C $GOPATH/pkg/mod + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache.tar.xz -C $GOPATH/pkg/mod/cache - rm -f modcache.tar.xz .retrieve_linux_go_tools_deps: - - mkdir -p $GOPATH/pkg/mod && tar xJf modcache_tools.tar.xz -C $GOPATH/pkg/mod + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_tools.tar.xz -C $GOPATH/pkg/mod/cache - rm -f modcache_tools.tar.xz .retrieve_linux_go_e2e_deps: - - mkdir -p $GOPATH/pkg/mod && tar xJf modcache_e2e.tar.xz -C $GOPATH/pkg/mod + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_e2e.tar.xz -C $GOPATH/pkg/mod/cache - rm -f modcache_e2e.tar.xz .cache: @@ -41,17 +41,23 @@ go_deps: # but still provide the artifact that's expected for the other jobs to run - if [ -f modcache.tar.xz ]; then exit 0; fi - inv -e deps --verbose - - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache.tar.xz . + - inv -e install-tools + - cd $GOPATH/pkg/mod/cache/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache.tar.xz . artifacts: expire_in: 1 day paths: - $CI_PROJECT_DIR/modcache.tar.xz cache: + # The `cache:key:files` only accepts up to two paths ([docs](https://docs.gitlab.com/ee/ci/yaml/#cachekeyfiles)). + # Ideally, we should also include the https://github.com/DataDog/datadog-agent/blob/main/.custom-gcl.yml file to + # avoid issues if a plugin is added in one PR and enabled in another. However, we decided to accept this limitation + # because the probability for this to happen is very low and go mod files are modified frequently so the risk of + # failing a job because of a network issue when building the custom binary is very low, but still exists.
- key: files: - - go.mod - - ./**/go.mod - prefix: "go_deps" + - \**/go.mod + - .gitlab/deps_fetch/deps_fetch.yml + prefix: "go_deps_modcache" paths: - modcache.tar.xz @@ -59,8 +65,13 @@ go_tools_deps: extends: .cache script: - if [ -f modcache_tools.tar.xz ]; then exit 0; fi - - inv -e download-tools - - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_tools.tar.xz . + - inv -e install-tools + # Partial mitigation for #incident-30779. It will cache datadog-packages, but if the upstream version is updated without the cache key changing, the cache will not work until the cache key changes. + # Long-term solution will be to provide the datadog-packages as a binary hosted internally + - git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/" + - go env -w GOPRIVATE="github.com/DataDog/*" + - go install github.com/DataDog/datadog-packages/cmd/datadog-package@latest + - cd $GOPATH/pkg/mod/cache/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_tools.tar.xz . artifacts: expire_in: 1 day paths: @@ -69,7 +80,8 @@ go_tools_deps: - key: files: - ./**/go.mod - prefix: "go_tools_deps" + - .gitlab/deps_fetch/deps_fetch.yml + prefix: "go_tools_deps_modcache" paths: - modcache_tools.tar.xz @@ -77,9 +89,8 @@ go_e2e_deps: extends: .cache script: - if [ -f modcache_e2e.tar.xz ]; then exit 0; fi - - source /root/.bashrc - inv -e new-e2e-tests.deps - - cd $GOPATH/pkg/mod/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_e2e.tar.xz . + - cd $GOPATH/pkg/mod/cache/ && tar c -I "pxz -T${KUBERNETES_CPU_REQUEST}" -f $CI_PROJECT_DIR/modcache_e2e.tar.xz . 
artifacts: expire_in: 1 day paths: @@ -88,6 +99,7 @@ go_e2e_deps: - key: files: - ./test/new-e2e/go.mod - prefix: "go_e2e_deps" + - .gitlab/deps_fetch/deps_fetch.yml + prefix: "go_e2e_deps_modcache" paths: - modcache_e2e.tar.xz diff --git a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index 8cb38a1e6f869..6af3b4dcb3d50 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -11,22 +11,22 @@ - !reference [.retrieve_linux_go_e2e_deps] # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? - export AWS_PROFILE=agent-qa-ci # Now all `aws` commands target the agent-qa profile - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH - - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_PUBLIC_KEY_RSA > $E2E_PUBLIC_KEY_PATH || exit $? + - touch $E2E_PRIVATE_KEY_PATH && chmod 600 $E2E_PRIVATE_KEY_PATH && $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY_RSA > $E2E_PRIVATE_KEY_PATH || exit $? # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" # Setup Azure credentials. 
https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-pulumi-config # The app is called `agent-e2e-tests` - - export ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_ID) - - export ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_SECRET) - - export ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_TENANT_ID) - - export ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) + - ARM_CLIENT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_ID) || exit $?; export ARM_CLIENT_ID + - ARM_CLIENT_SECRET=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_CLIENT_SECRET) || exit $?; export ARM_CLIENT_SECRET + - ARM_TENANT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_TENANT_ID) || exit $?; export ARM_TENANT_ID + - ARM_SUBSCRIPTION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_AZURE_SUBSCRIPTION_ID) || exit $?; export ARM_SUBSCRIPTION_ID # Setup GCP credentials. 
https://www.pulumi.com/registry/packages/gcp/installation-configuration/ # The service account is called `agent-e2e-tests` - - export GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_GCP_CREDENTIALS) + - GOOGLE_CREDENTIALS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_GCP_CREDENTIALS) || exit $?; export GOOGLE_CREDENTIALS # Generate external links to CI VISIBILITY, used by artifacts:reports:annotations - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH variables: @@ -321,6 +321,20 @@ new-e2e-installer: - deploy_rpm_testing-a7_x64 - deploy_suse_rpm_testing_arm64-a7 - deploy_suse_rpm_testing_x64-a7 + - qa_installer_oci + - qa_agent_oci + variables: + TARGETS: ./tests/installer + TEAM: fleet + FLEET_INSTALL_METHOD: "install_script" + +new-e2e-installer-windows: + extends: .new_e2e_template + rules: + - !reference [.on_installer_or_e2e_changes] + - !reference [.manual] + needs: + - !reference [.needs_new_e2e_template] - deploy_windows_testing-a7 - qa_installer_oci - qa_agent_oci @@ -329,15 +343,14 @@ new-e2e-installer: # Must run before new_e2e_template changes the aws profile # Note: this is similar to the WINDOWS_AGENT_VERSION in new-e2e_windows_msi but this job is running cross platforms # Note 2: new_e2e_template does not define AGENT_MAJOR_VERSION, so define it as 7 below. 
- - export CURRENT_AGENT_VERSION=$(invoke agent.version --major-version 7) + - CURRENT_AGENT_VERSION=$(invoke agent.version --major-version 7) || exit $?; export CURRENT_AGENT_VERSION - export STABLE_AGENT_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/agent-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - export STABLE_INSTALLER_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/installer-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - !reference [.new_e2e_template, before_script] variables: TARGETS: ./tests/installer TEAM: fleet - FLEET_INSTALL_METHOD: "install_script" - allow_failure: true # incident-30484 + FLEET_INSTALL_METHOD: "windows" new-e2e-installer-ansible: extends: .new_e2e_template @@ -346,21 +359,18 @@ new-e2e-installer-ansible: - !reference [.manual] needs: - !reference [.needs_new_e2e_template] - - new-e2e-installer - before_script: - # CURRENT_AGENT_VERSION is used to verify the installed agent version - # Must run before new_e2e_template changes the aws profile - # Note: this is similar to the WINDOWS_AGENT_VERSION in new-e2e_windows_msi but this job is running cross platforms - # Note 2: new_e2e_template does not define AGENT_MAJOR_VERSION, so define it as 7 below. 
- - export CURRENT_AGENT_VERSION=$(invoke agent.version --major-version 7) - - export STABLE_AGENT_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/agent-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - - export STABLE_INSTALLER_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/installer-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - - !reference [.new_e2e_template, before_script] + - deploy_deb_testing-a7_arm64 + - deploy_deb_testing-a7_x64 + - deploy_rpm_testing-a7_arm64 + - deploy_rpm_testing-a7_x64 + - deploy_suse_rpm_testing_arm64-a7 + - deploy_suse_rpm_testing_x64-a7 + - qa_installer_oci + - qa_agent_oci variables: TARGETS: ./tests/installer TEAM: fleet FLEET_INSTALL_METHOD: "ansible" - allow_failure: true # incident-30484 new-e2e-ndm-netflow: extends: .new_e2e_template @@ -453,10 +463,26 @@ new-e2e-package-signing-suse-a7-x86_64: - .new-e2e_package_signing rules: !reference [.on_default_new_e2e_tests] +new-e2e-cspm: + extends: .new_e2e_template + rules: + - !reference [.on_cspm_or_e2e_changes] + - !reference [.manual] + needs: + - !reference [.needs_new_e2e_template] + - qa_agent + - qa_dca + variables: + TARGETS: ./tests/cspm + TEAM: cspm + generate-flakes-finder-pipeline: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES stage: e2e - rules: !reference [.on_deploy_nightly_repo_branch] + rules: + - !reference [.except_disable_e2e_tests] + - !reference [.on_deploy_nightly_repo_branch] + - !reference [.manual] needs: - deploy_deb_testing-a7_arm64 - deploy_deb_testing-a7_x64 @@ -471,9 +497,10 @@ generate-flakes-finder-pipeline: - qa_dca - qa_dogstatsd - qa_agent + - qa_agent_ot tags: ["arch:amd64"] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) + - 
GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN - inv -e testwasher.generate-flake-finder-pipeline artifacts: paths: @@ -482,7 +509,10 @@ generate-flakes-finder-pipeline: trigger-flakes-finder: stage: e2e needs: [generate-flakes-finder-pipeline] - rules: !reference [.on_deploy_nightly_repo_branch] + rules: + - !reference [.except_disable_e2e_tests] + - !reference [.on_deploy_nightly_repo_branch] + - !reference [.manual] variables: PARENT_PIPELINE_ID: $CI_PIPELINE_ID PARENT_COMMIT_SHA: $CI_COMMIT_SHORT_SHA diff --git a/.gitlab/e2e_install_packages/common.yml b/.gitlab/e2e_install_packages/common.yml index d1457ff8f7e20..4684fafc3cd9d 100644 --- a/.gitlab/e2e_install_packages/common.yml +++ b/.gitlab/e2e_install_packages/common.yml @@ -2,6 +2,7 @@ rules: !reference [.on_kitchen_tests] #TODO: Change when migration is complete to another name without 'kitchen' variables: AGENT_MAJOR_VERSION: 6 + SHOULD_RUN_IN_FLAKES_FINDER: "false" .new-e2e_agent_a7: rules: !reference [.on_kitchen_tests] #TODO: Change when migration is complete to another name without 'kitchen' @@ -33,7 +34,7 @@ - START_MAJOR_VERSION: [5, 6] END_MAJOR_VERSION: [6] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) + - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) || exit $?; export DATADOG_AGENT_API_KEY - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_script_upgrade7: @@ -47,7 +48,7 @@ - START_MAJOR_VERSION: [5, 6, 7] END_MAJOR_VERSION: [7] script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY ) + - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY ) || exit $?; export DATADOG_AGENT_API_KEY - inv -e 
new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} --src-agent-version $START_MAJOR_VERSION --dest-agent-version $END_MAJOR_VERSION .new-e2e_rpm: @@ -57,5 +58,5 @@ TEAM: agent-delivery EXTRA_PARAMS: --osversion $E2E_OSVERS --platform $E2E_PLATFORM --arch $E2E_ARCH script: - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) + - DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $INSTALL_SCRIPT_API_KEY) || exit $?; export DATADOG_AGENT_API_KEY - inv -e new-e2e-tests.run --targets $TARGETS --junit-tar "junit-${CI_JOB_ID}.tgz" ${EXTRA_PARAMS} diff --git a/.gitlab/e2e_install_packages/windows.yml b/.gitlab/e2e_install_packages/windows.yml index 70603614abca2..43b8a49cfe105 100644 --- a/.gitlab/e2e_install_packages/windows.yml +++ b/.gitlab/e2e_install_packages/windows.yml @@ -10,7 +10,7 @@ before_script: # WINDOWS_AGENT_VERSION is used to verify the installed agent version # Must run before new_e2e_template changes the aws profile - - export WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) + - WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) || exit $?; export WINDOWS_AGENT_VERSION - !reference [.new_e2e_template, before_script] script: # LAST_STABLE_VERSION is used for upgrade test @@ -24,7 +24,7 @@ before_script: # WINDOWS_AGENT_VERSION is used to verify the installed agent version # Must run before new_e2e_template changes the aws profile - - export WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) + - WINDOWS_AGENT_VERSION=$(invoke agent.version --major-version $AGENT_MAJOR_VERSION) || exit $?; export WINDOWS_AGENT_VERSION - !reference [.new_e2e_template, before_script] script: # LAST_STABLE_VERSION is used for upgrade test diff --git a/.gitlab/e2e_k8s/e2e_k8s.yml b/.gitlab/e2e_k8s/e2e_k8s.yml deleted file mode 100644 index 606c4e6b1bc9f..0000000000000 --- 
a/.gitlab/e2e_k8s/e2e_k8s.yml +++ /dev/null @@ -1,70 +0,0 @@ - ---- -# e2e stage -# Jobs with the k8s_e2e template - -.k8s_e2e_template: - stage: e2e_k8s - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - dependencies: [] - variables: - LANG: C.UTF-8 - before_script: - - export DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) - - export DOCKER_REGISTRY_PWD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_DDDEV) - -.k8s-e2e-cws-cspm-init: - - set +x - - export DATADOG_AGENT_SITE=datadoghq.com - - export DATADOG_AGENT_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_API_KEY) - - export DATADOG_AGENT_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_APP_KEY) - - export DATADOG_AGENT_RC_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $E2E_TESTS_RC_KEY) - -.k8s_e2e_template_needs_dev: - extends: .k8s_e2e_template - needs: - - dev_branch_multiarch-a7 - - dca_dev_branch - -.k8s_e2e_template_dev: - extends: .k8s_e2e_template_needs_dev - script: - - inv -e e2e-tests --agent-image=datadog/agent-dev:${CI_COMMIT_REF_SLUG}-py3 --dca-image=datadog/cluster-agent-dev:${CI_COMMIT_REF_SLUG} --argo-workflow=$ARGO_WORKFLOW - -.k8s_e2e_template_dev_with_cws_cspm_init: - extends: .k8s_e2e_template_needs_dev - script: - - !reference [.k8s-e2e-cws-cspm-init] - - inv -e e2e-tests --agent-image=datadog/agent-dev:${CI_COMMIT_REF_SLUG}-py3 --dca-image=datadog/cluster-agent-dev:${CI_COMMIT_REF_SLUG} --argo-workflow=$ARGO_WORKFLOW - -.k8s_e2e_template_needs_main: - extends: .k8s_e2e_template - needs: - - dev_master-a7 - - dca_dev_master - -.k8s_e2e_template_main_with_cws_cspm_init: - extends: .k8s_e2e_template_needs_main - script: - - !reference [.k8s-e2e-cws-cspm-init] - - inv -e e2e-tests 
--agent-image=datadog/agent-dev:master-py3 --dca-image=datadog/cluster-agent-dev:master --argo-workflow=$ARGO_WORKFLOW - -.k8s_e2e_template_main: - extends: .k8s_e2e_template_needs_main - script: - - inv -e e2e-tests --agent-image=datadog/agent-dev:master-py3 --dca-image=datadog/cluster-agent-dev:master --argo-workflow=$ARGO_WORKFLOW - -k8s-e2e-cspm-dev: - extends: .k8s_e2e_template_dev_with_cws_cspm_init - rules: !reference [.on_dev_branch_manual] - variables: - ARGO_WORKFLOW: cspm - -k8s-e2e-cspm-main: - extends: .k8s_e2e_template_main_with_cws_cspm_init - rules: !reference [.on_main] - retry: 1 - variables: - ARGO_WORKFLOW: cspm diff --git a/.gitlab/functional_test/oracle.yml b/.gitlab/functional_test/oracle.yml index e170254b42bb5..f1c89e36bf3cc 100644 --- a/.gitlab/functional_test/oracle.yml +++ b/.gitlab/functional_test/oracle.yml @@ -17,7 +17,6 @@ oracle: matrix: - DBMS_VERSION: "21.3.0-xe" before_script: - - source /root/.bashrc - !reference [.retrieve_linux_go_deps] script: - inv oracle.test \ No newline at end of file diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index 51ef2cc71dcb8..5980b15df1ef5 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -14,7 +14,9 @@ single-machine-performance-regression_detector: - submission_metadata # for provenance, debugging - ${CI_COMMIT_SHA}-baseline_sha # for provenance, debugging - outputs/report.md # for debugging, also on S3 - - outputs/report.html # for debugging, also on S3 + - outputs/regression_signal.json # for debugging, also on S3 + - outputs/bounds_check_signal.json # for debugging, also on S3 + - outputs/junit.xml # for debugging, also on S3 when: always variables: SMP_VERSION: 0.16.0 @@ -33,7 +35,6 @@ single-machine-performance-regression_detector: # Ensure output files exist for artifact downloads step - mkdir outputs # Also needed for smp job sync step - touch 
outputs/report.md # Will be emitted by smp job sync - - touch outputs/report.html # Will be emitted by smp job sync # Compute merge base of current commit and `main` - git fetch origin - SMP_BASE_BRANCH=$(inv release.get-release-json-value base_branch) @@ -42,12 +43,14 @@ single-machine-performance-regression_detector: - echo "Merge base is ${SMP_MERGE_BASE}" # Setup AWS credentials for single-machine-performance AWS account - AWS_NAMED_PROFILE="single-machine-performance" - - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT_ID) + - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT_ID) || exit $? - SMP_ECR_URL=${SMP_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com - - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_AGENT_TEAM_ID) - - SMP_API=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_API) - - aws configure set aws_access_key_id $($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY_ID) --profile ${AWS_NAMED_PROFILE} - - aws configure set aws_secret_access_key $($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY) --profile ${AWS_NAMED_PROFILE} + - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_AGENT_TEAM_ID) || exit $? + - SMP_API=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_API) || exit $? + - SMP_BOT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY_ID) || exit $? + - SMP_BOT_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY) || exit $? 
+ - aws configure set aws_access_key_id "$SMP_BOT_ID" --profile ${AWS_NAMED_PROFILE} + - aws configure set aws_secret_access_key "$SMP_BOT_KEY" --profile ${AWS_NAMED_PROFILE} - aws configure set region us-west-2 --profile ${AWS_NAMED_PROFILE} # Download smp binary and prepare it for use - aws --profile single-machine-performance s3 cp s3://smp-cli-releases/v${SMP_VERSION}/x86_64-unknown-linux-gnu/smp smp @@ -93,6 +96,12 @@ single-machine-performance-regression_detector: - !reference [.install_pr_commenter] # Post HTML report to GitHub - cat outputs/report.md | /usr/local/bin/pr-commenter --for-pr="$CI_COMMIT_REF_NAME" --header="Regression Detector" + # Upload JUnit XML outside of Agent CI's tooling because the `junit_upload` + # invoke task has additional logic that does not seem to apply well to SMP's + # JUnit XML. Agent CI seems to use `datadog-agent` as the service name when + # uploading JUnit XML, so the upload command below respects that convention. + - DATADOG_API_KEY="$("$CI_PROJECT_DIR"/tools/ci/fetch_secret.sh "$API_KEY_ORG2")" || exit $?; export DATADOG_API_KEY + - datadog-ci junit upload --service datadog-agent outputs/junit.xml # Finally, exit 1 if the job signals a regression else 0. 
- RUST_LOG="${RUST_LOG}" ./smp --team-id ${SMP_AGENT_TEAM_ID} --api-base ${SMP_API} --aws-named-profile ${AWS_NAMED_PROFILE} job result diff --git a/.gitlab/install_script_testing/install_script_testing.yml b/.gitlab/install_script_testing/install_script_testing.yml index 1c24c0ebe9401..403625fb08085 100644 --- a/.gitlab/install_script_testing/install_script_testing.yml +++ b/.gitlab/install_script_testing/install_script_testing.yml @@ -5,7 +5,7 @@ test_install_script: tags: ["arch:amd64"] script: - set +x - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - export TESTING_APT_URL=$DEB_TESTING_S3_BUCKET - export TESTING_YUM_URL=$RPM_TESTING_S3_BUCKET - export TEST_PIPELINE_ID=$CI_PIPELINE_ID diff --git a/.gitlab/integration_test/otel.yml b/.gitlab/integration_test/otel.yml index 511a0cf47cffa..d9d3c71e73510 100644 --- a/.gitlab/integration_test/otel.yml +++ b/.gitlab/integration_test/otel.yml @@ -6,8 +6,9 @@ integration_tests_otel: stage: integration_test image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] - needs: [] + needs: ["go_deps"] script: + - !reference [.retrieve_linux_go_deps] - inv check-otel-build - inv check-otel-module-versions rules: diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml index 127454f00688f..2ddf04fddb997 100644 --- a/.gitlab/integration_test/windows.yml +++ b/.gitlab/integration_test/windows.yml @@ -8,7 +8,7 @@ tags: ["runner:windows-docker", "windowsversion:1809"] before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" "$Env:VCPKG_BLOB_SAS_URL" "$tmpfile") + - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:VCPKG_BLOB_SAS_URL" -tempFile "$tmpfile") - 
If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $vcpkgBlobSaSUrl=$(cat "$tmpfile") - Remove-Item "$tmpfile" diff --git a/.gitlab/internal_image_deploy/internal_image_deploy.yml b/.gitlab/internal_image_deploy/internal_image_deploy.yml index a39917217f5ae..687c4671defe8 100644 --- a/.gitlab/internal_image_deploy/internal_image_deploy.yml +++ b/.gitlab/internal_image_deploy/internal_image_deploy.yml @@ -22,7 +22,7 @@ docker_trigger_internal: TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -67,8 +67,7 @@ docker_trigger_internal-ot: TMPL_SRC_REPO: ci/datadog-agent/agent RELEASE_STAGING: "true" script: - - source /root/.bashrc - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -114,7 +113,7 @@ docker_trigger_cluster_agent_internal: RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then @@ -160,7 +159,7 @@ docker_trigger_cws_instrumentation_internal: 
RELEASE_STAGING: "true" RELEASE_PROD: "true" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - if [ "$BUCKET_BRANCH" = "beta" ] || [ "$BUCKET_BRANCH" = "stable" ]; then TMPL_SRC_REPO="${TMPL_SRC_REPO}-release"; fi - | if [ "$BUCKET_BRANCH" = "nightly" ]; then diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml index 3c9f414fae51c..605ac0def4114 100644 --- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @@ -36,7 +36,7 @@ internal_kubernetes_deploy_experimental: EXPLICIT_WORKFLOWS: "//workflows:beta_builds.agents_nightly.staging-deploy.publish,//workflows:beta_builds.agents_nightly.staging-validate.publish,//workflows:beta_builds.agents_nightly.prod-wait-business-hours.publish,//workflows:beta_builds.agents_nightly.prod-deploy.publish,//workflows:beta_builds.agents_nightly.prod-validate.publish,//workflows:beta_builds.agents_nightly.publish-image-confirmation.publish" BUNDLE_VERSION_OVERRIDE: "v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}" script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS @@ -68,4 +68,4 @@ notify-slack: script: - export SDM_JWT=$(vault read -field=token identity/oidc/token/sdm) - python3 -m pip install -r tasks/requirements.txt - - inv pipeline.changelog ${CI_COMMIT_SHORT_SHA} + - inv pipeline.changelog ${CI_COMMIT_SHORT_SHA} || exit $? 
diff --git a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml index 179e1b64cbcd5..067ca517fdba9 100644 --- a/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml @@ -22,7 +22,7 @@ rc_kubernetes_deploy: EXPLICIT_WORKFLOWS: "//workflows:deploy_rc.agents_rc" AGENT_IMAGE_TAG: $CI_COMMIT_REF_NAME script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - "inv pipeline.trigger-child-pipeline --project-name DataDog/k8s-datadog-agent-ops --git-ref main --variable OPTION_AUTOMATIC_ROLLOUT --variable EXPLICIT_WORKFLOWS diff --git a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml index 1dce667e038ab..b96292d94d7ea 100644 --- a/.gitlab/kernel_matrix_testing/common.yml +++ b/.gitlab/kernel_matrix_testing/common.yml @@ -29,7 +29,7 @@ .write_ssh_key_file: - touch $AWS_EC2_SSH_KEY_FILE && chmod 600 $AWS_EC2_SSH_KEY_FILE - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SSH_KEY > $AWS_EC2_SSH_KEY_FILE || exit $? # Without the newline ssh silently fails and moves on to try other auth methods - echo "" >> $AWS_EC2_SSH_KEY_FILE - chmod 600 $AWS_EC2_SSH_KEY_FILE @@ -47,7 +47,7 @@ .kmt_new_profile: - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? 
- export AWS_PROFILE=agent-qa-ci .define_if_collect_complexity: @@ -60,7 +60,7 @@ - echo "COLLECT_COMPLEXITY=${COLLECT_COMPLEXITY}" .collect_outcomes_kmt: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - export MICRO_VM_IP=$(jq --exit-status --arg TAG $TAG --arg ARCH $ARCH --arg TEST_SET $TEST_SET -r '.[$ARCH].microvms | map(select(."vmset-tags"| index($TEST_SET))) | map(select(.tag==$TAG)) | .[].ip' $CI_PROJECT_DIR/stack.output) # Collect setup-ddvm systemd service logs - mkdir -p $CI_PROJECT_DIR/logs @@ -114,7 +114,7 @@ scp $DD_AGENT_TESTING_DIR/kmt-dockers-$ARCH.tar.gz metal_instance:/opt/kernel-version-testing fi after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key @@ -142,11 +142,13 @@ KUBERNETES_MEMORY_REQUEST: "12Gi" KUBERNETES_MEMORY_LIMIT: "16Gi" VMCONFIG_FILE: "${CI_PROJECT_DIR}/vmconfig-${CI_PIPELINE_ID}-${ARCH}.json" + EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.retrieve_linux_go_deps] - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] + - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH || true script: - echo "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" > $STACK_DIR - pulumi login $(cat $STACK_DIR | tr -d '\n') @@ -157,7 +159,7 @@ - jq "." 
$CI_PROJECT_DIR/stack.output - pulumi logout after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - export AWS_PROFILE=agent-qa-ci - !reference [.shared_filters_and_queries] - mkdir -p $CI_PROJECT_DIR/libvirt/log/$ARCH $CI_PROJECT_DIR/libvirt/xml $CI_PROJECT_DIR/libvirt/qemu $CI_PROJECT_DIR/libvirt/dnsmasq @@ -169,6 +171,19 @@ - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP:/tmp/ddvm-xml-*" $CI_PROJECT_DIR/libvirt/xml - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP:/tmp/qemu-ddvm-*.log" $CI_PROJECT_DIR/libvirt/qemu - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP:/tmp/dnsmasq/*" $CI_PROJECT_DIR/libvirt/dnsmasq + # build vm-metrics collector + - | + GO_ARCH=$ARCH + if [ "${ARCH}" == "x86_64" ]; then + GO_ARCH=amd64 + fi + - cd test/new-e2e && GOOS=linux GOARCH="${GO_ARCH}" go build system-probe/vm-metrics/vm-metrics.go + # The vm-metrics collector is uploaded and executed in the same job because we need to execute it after the datadog-agent + # is launched in the metal instance, and before the tests are executed. This place naturally satisfies these constraints. 
+ # upload vm-metrics collector to metal instance + - scp -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE $CI_PROJECT_DIR/test/new-e2e/vm-metrics "ubuntu@$INSTANCE_IP:/home/ubuntu/vm-metrics" + # run vm-metrics collector + - ssh -o StrictHostKeyChecking=no -i $AWS_EC2_SSH_KEY_FILE "ubuntu@$INSTANCE_IP" "/home/ubuntu/vm-metrics -statsd-host=127.0.0.1 -statsd-port=8125 -libvirt-uri=/var/run/libvirt/libvirt-sock-ro --tag \"arch:${ARCH}\" --tag \"test-component:${TEST_COMPONENT}\" --tag \"ci-pipeline-id:${CI_PIPELINE_ID}\" --daemon -log-file /home/ubuntu/daemon.log" - !reference [.tag_kmt_ci_job] artifacts: when: always @@ -176,13 +191,16 @@ - $CI_PROJECT_DIR/stack.output - $CI_PROJECT_DIR/libvirt - $VMCONFIG_FILE + reports: + annotations: + - $EXTERNAL_LINKS_PATH .kmt_cleanup: stage: kernel_matrix_testing_cleanup image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES tags: ["arch:amd64"] before_script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - !reference [.kmt_new_profile] script: - !reference [.shared_filters_and_queries] @@ -199,7 +217,7 @@ aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" fi after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] # Manual cleanup jobs, these will be used to cleanup the instances after the tests @@ -214,6 +232,7 @@ .kmt_run_tests: retry: max: 2 + exit_codes: 42 when: - job_execution_timeout - runner_system_failure @@ -228,7 +247,7 @@ RETRY: 2 EXTERNAL_LINKS_PATH: external_links_$CI_JOB_ID.json before_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh 
$API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.kmt_new_profile] - !reference [.write_ssh_key_file] - echo "CI_JOB_URL=${CI_JOB_URL}" >> $DD_AGENT_TESTING_DIR/job_env.txt @@ -256,6 +275,7 @@ - NESTED_VM_CMD="/home/ubuntu/connector -host ${MICRO_VM_IP} -user root -ssh-file /home/kernel-version-testing/ddvm_rsa -vm-cmd 'CI=true /root/fetch_dependencies.sh ${ARCH} && COLLECT_COMPLEXITY=${COLLECT_COMPLEXITY} /opt/micro-vm-init.sh -test-tools /opt/testing-tools -retry ${RETRY} -test-root /opt/${TEST_COMPONENT}-tests -packages-run-config /opt/${TEST_SET}.json'" - $CI_PROJECT_DIR/connector-$ARCH -host $INSTANCE_IP -user ubuntu -ssh-file $AWS_EC2_SSH_KEY_FILE -vm-cmd "${NESTED_VM_CMD}" -send-env-vars DD_API_KEY # Allow DD_API_KEY to be passed to the metal instance, so we can use it to send metrics from the connector. - ssh metal_instance "ssh ${MICRO_VM_IP} '/opt/testing-tools/test-json-review -flakes /opt/testing-tools/flakes.yaml -codeowners /opt/testing-tools/CODEOWNERS -test-root /opt/${TEST_COMPONENT}-tests'" + - '[ ! 
-f $CI_PROJECT_DIR/daemon-${ARCH}.log ] && scp metal_instance:/home/ubuntu/daemon.log $CI_PROJECT_DIR/vm-metrics-daemon-${ARCH}.log' artifacts: expire_in: 2 weeks when: always @@ -265,6 +285,7 @@ - $DD_AGENT_TESTING_DIR/verifier-complexity-$ARCH-$TAG-${TEST_COMPONENT}.tar.gz - $CI_PROJECT_DIR/logs - $CI_PROJECT_DIR/pcaps + - $CI_PROJECT_DIR/vm-metrics-daemon-${ARCH}.log reports: annotations: - $EXTERNAL_LINKS_PATH @@ -311,13 +332,12 @@ notify_ebpf_complexity_changes: TEST_SET: no_usm allow_failure: true before_script: - - source /root/.bashrc - python3 -m pip install tabulate # Required for printing the tables - python3 -m pip install -r tasks/libs/requirements-github.txt - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) + GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) || exit $?; export GITHUB_KEY_B64 + GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) || exit $?; export GITHUB_APP_ID + GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) || exit $?; export GITHUB_INSTALLATION_ID + GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN script: - inv -e ebpf.generate-complexity-summary-for-pr diff --git a/.gitlab/kernel_matrix_testing/security_agent.yml b/.gitlab/kernel_matrix_testing/security_agent.yml index e70c923fcf24d..c75f78c5449d8 100644 --- a/.gitlab/kernel_matrix_testing/security_agent.yml +++ b/.gitlab/kernel_matrix_testing/security_agent.yml @@ -72,7 +72,7 @@ kmt_setup_env_secagent_x64: # upload 
connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kernel_matrix_testing/system_probe.yml b/.gitlab/kernel_matrix_testing/system_probe.yml index f01de83cc7116..7d8da51e0e0ad 100644 --- a/.gitlab/kernel_matrix_testing/system_probe.yml +++ b/.gitlab/kernel_matrix_testing/system_probe.yml @@ -28,13 +28,14 @@ upload_dependencies_sysprobe_arm64: stage: kernel_matrix_testing_prepare script: # DockerHub login for build to limit rate limit when pulling base images - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | crane auth login --username "$DOCKER_REGISTRY_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - DOCKER_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) || exit $? 
+ - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD | crane auth login --username "$DOCKER_LOGIN" --password-stdin "$DOCKER_REGISTRY_URL" + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi # Pull base images - mkdir $KMT_DOCKERS - inv -e system-probe.save-test-dockers --use-crane --output-dir $KMT_DOCKERS --arch $ARCH after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] artifacts: expire_in: 1 day @@ -81,7 +82,7 @@ pull_test_dockers_arm64: - !reference [.setup_ssh_config] - scp $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/$ARCHIVE_NAME metal_instance:/opt/kernel-version-testing/ after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: DEPENDENCIES: $CI_PROJECT_DIR/kmt-deps/ci/$ARCH/btfs @@ -160,7 +161,7 @@ kmt_setup_env_sysprobe_x64: # upload connector to metal instance - scp $CI_PROJECT_DIR/connector-${ARCH} metal_instance:/home/ubuntu/connector after_script: - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] variables: AWS_EC2_SSH_KEY_FILE: $CI_PROJECT_DIR/ssh_key diff --git a/.gitlab/kitchen_deploy/kitchen_deploy.yml b/.gitlab/kitchen_deploy/kitchen_deploy.yml index 1fd79c1796e7d..cc8657b6bbd39 100644 --- a/.gitlab/kitchen_deploy/kitchen_deploy.yml +++ b/.gitlab/kitchen_deploy/kitchen_deploy.yml @@ -3,15 +3,14 @@ # Contains jobs which deploy Agent package to testing repsoitories that are used in kitchen tests. 
.setup_rpm_signing_key: &setup_rpm_signing_key - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY) - - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) || exit $? .setup_apt_signing_key: &setup_apt_signing_key - - APT_SIGNING_PRIVATE_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY) - - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) - - - printf -- "$APT_SIGNING_PRIVATE_KEY" | gpg --import --batch + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - APT_SIGNING_KEY_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DEB_SIGNING_PASSPHRASE) || exit $? .setup_signing_keys_package: &setup_signing_keys_package # Set up prod apt repo to get the datadog-signing-keys package diff --git a/.gitlab/lint/include.yml b/.gitlab/lint/include.yml new file mode 100644 index 0000000000000..27676d98eab76 --- /dev/null +++ b/.gitlab/lint/include.yml @@ -0,0 +1,6 @@ +# lint stage +# Include jobs that run linters on the Agent code.
+ +include: + - .gitlab/lint/technical_linters.yml + diff --git a/.gitlab/lint/technical_linters.yml b/.gitlab/lint/technical_linters.yml new file mode 100644 index 0000000000000..099cd975bb8a9 --- /dev/null +++ b/.gitlab/lint/technical_linters.yml @@ -0,0 +1,64 @@ + +.lint: + stage: lint + image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + +lint_licenses: + extends: .lint + script: + - !reference [.retrieve_linux_go_deps] + - !reference [.retrieve_linux_go_tools_deps] + - inv -e install-tools + - inv -e lint-licenses + needs: ["go_tools_deps", "go_deps"] + +lint_shell: + extends: .lint + script: + - inv -e install-shellcheck + - shellcheck --version + #Excludes: + #SC2028: echo may not expand escape sequences. Use printf. + #SC2059: Don't use variables in the printf format string. Use printf "..%s.." "$foo". + - shellcheck --severity=info -e SC2059 -e SC2028 --shell=bash ./cmd/**/*.sh ./omnibus/package-scripts/*/* + +lint_filename: + extends: .lint + script: + - inv -e linter.filenames + +lint_copyrights: + extends: .lint + script: + - inv -e linter.copyrights + +lint_codeowners: + extends: .lint + script: + - inv -e github.lint-codeowner + +lint_components: + extends: .lint + script: + - inv -e lint-components lint-fxutil-oneshot-test + + +lint_python: + extends: .lint + needs: [] + script: + - inv -e linter.python + +lint_update_go: + extends: .lint + needs: [] + script: + - inv -e linter.update-go + +validate_modules: + extends: .lint + needs: [] + script: + - inv -e modules.validate + - inv -e modules.validate-used-by-otel diff --git a/.gitlab/maintenance_jobs/docker.yml b/.gitlab/maintenance_jobs/docker.yml index 67a169f4dce8b..d9ef46ad19a73 100644 --- a/.gitlab/maintenance_jobs/docker.yml +++ b/.gitlab/maintenance_jobs/docker.yml @@ -60,8 +60,8 @@ delete_docker_tag: TAG: "" # tag name, for example "6.9.0" ORGANIZATION: "datadog" 
before_script: - - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) - - PASS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD) + - DOCKER_REGISTRY_LOGIN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_LOGIN) || exit $? + - PASS=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $DOCKER_REGISTRY_PWD) || exit $? - python3 -m pip install -r requirements.txt - | export DOCKER_TOKEN=`curl -s -H "Content-Type: application/json" -X POST -d '{"username": "'$DOCKER_REGISTRY_LOGIN'", "password": "'$PASS'"}' https://hub.docker.com/v2/users/login/ | python -c 'import sys, json; print(json.load(sys.stdin)["token"].strip())'` diff --git a/.gitlab/maintenance_jobs/kitchen.yml b/.gitlab/maintenance_jobs/kitchen.yml index 56cd45ef1fa9c..1716ac845ad86 100644 --- a/.gitlab/maintenance_jobs/kitchen.yml +++ b/.gitlab/maintenance_jobs/kitchen.yml @@ -26,10 +26,10 @@ periodic_kitchen_cleanup_azure: # the job to be run one at a time. resource_group: azure_cleanup script: - - export ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID` - - export ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID` - - export ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET` - - export ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID` + - ARM_SUBSCRIPTION_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_SUBSCRIPTION_ID` || exit $?; export ARM_SUBSCRIPTION_ID + - ARM_CLIENT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_ID` || exit $?; export ARM_CLIENT_ID + - ARM_CLIENT_SECRET=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_CLIENT_SECRET` || exit $?; export ARM_CLIENT_SECRET + - ARM_TENANT_ID=`$CI_PROJECT_DIR/tools/ci/fetch_secret.sh $KITCHEN_AZURE_TENANT_ID` || exit $?; export ARM_TENANT_ID # Remove kitchen resources for all existing test suite prefixes - 
RESOURCE_GROUP_PREFIX=kitchen-chef python3 /deploy_scripts/cleanup_azure.py - RESOURCE_GROUP_PREFIX=kitchen-win python3 /deploy_scripts/cleanup_azure.py diff --git a/.gitlab/notify/notify.yml b/.gitlab/notify/notify.yml index 1f6f08ba9c6a3..31e782eda47cc 100644 --- a/.gitlab/notify/notify.yml +++ b/.gitlab/notify/notify.yml @@ -25,8 +25,8 @@ notify: resource_group: notification timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) || exit $?; export GITLAB_TOKEN + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt - | # Do not send notifications if this is a child pipeline of another repo @@ -53,8 +53,8 @@ send_pipeline_stats: when: always dependencies: [] script: - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) || exit $?; export GITLAB_TOKEN + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - invoke -e notify.send-stats notify_github: @@ -107,13 +107,12 @@ notify_gitlab_ci_changes: - git checkout main - git checkout - script: - - source /root/.bashrc - python3 -m pip install -r tasks/libs/requirements-github.txt - | - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) - export 
GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) + GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_APP_KEY | base64) || exit $?; export GITHUB_KEY_B64 + GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INTEGRATION_ID) || exit $?; export GITHUB_APP_ID + GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITHUB_PR_COMMENTER_INSTALLATION_ID) || exit $?; export GITHUB_INSTALLATION_ID + GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_FULL_API_TOKEN) || exit $?; export GITLAB_TOKEN - inv -e notify.gitlab-ci-diff --pr-comment .failure_summary_job: @@ -125,9 +124,9 @@ notify_gitlab_ci_changes: timeout: 15 minutes # Added to prevent a stuck job blocking the resource_group defined above .failure_summary_setup: - - export SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SLACK_AGENT_CI_TOKEN) - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - SLACK_API_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SLACK_AGENT_CI_TOKEN) || exit $?; export SLACK_API_TOKEN + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_READ_API_TOKEN) || exit $?; export GITLAB_TOKEN + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt # Upload failure summary data to S3 at the end of each main pipeline diff --git a/.gitlab/package_build/installer.yml b/.gitlab/package_build/installer.yml index 67b3ad894369d..01f826e8ff839 100644 --- a/.gitlab/package_build/installer.yml +++ b/.gitlab/package_build/installer.yml @@ -5,7 +5,8 @@ .common_build_oci: script: - echo "About to build for 
$RELEASE_VERSION" - - export INSTALL_DIR=/opt/datadog-packages/datadog-agent/$(inv agent.version -u)-1 + - AGENT_VERSION="$(inv agent.version -u)-1" || exit $? + - export INSTALL_DIR=/opt/datadog-packages/datadog-agent/"$AGENT_VERSION" - !reference [.retrieve_linux_go_deps] - !reference [.cache_omnibus_ruby_deps, setup] # remove artifacts from previous pipelines that may come from the cache @@ -143,7 +144,8 @@ installer-amd64-oci: variables: DESTINATION_FILE: "datadog-updater_7-amd64-oci.tar.xz" before_script: - - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1 + - AGENT_VERSION="$(inv agent.version -u)-1" || exit $? + - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/"$AGENT_VERSION" - export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR" installer-arm64-oci: @@ -151,7 +153,8 @@ installer-arm64-oci: variables: DESTINATION_FILE: "datadog-updater_7-arm64-oci.tar.xz" before_script: - - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/$(inv agent.version -u)-1 + - AGENT_VERSION="$(inv agent.version -u)-1" || exit $? + - export INSTALL_DIR=/opt/datadog-packages/datadog-installer/"$AGENT_VERSION" - export INSTALL_DIR_PARAM="--install-directory=$INSTALL_DIR" windows-installer-amd64: diff --git a/.gitlab/package_build/linux.yml b/.gitlab/package_build/linux.yml index 04aafb966b0e7..3b3d08ac8ca1c 100644 --- a/.gitlab/package_build/linux.yml +++ b/.gitlab/package_build/linux.yml @@ -1,25 +1,28 @@ +.agent_build_script: + - echo "About to build for $RELEASE_VERSION" + - !reference [.retrieve_linux_go_deps] + - !reference [.cache_omnibus_ruby_deps, setup] + # remove artifacts from previous pipelines that may come from the cache + - rm -rf $OMNIBUS_PACKAGE_DIR/* + # Artifacts and cache must live within project directory but we run omnibus in a neutral directory. + # Thus, we move the artifacts at the end in a gitlab-friendly dir. 
+ - tar -xf $CI_PROJECT_DIR/sysprobe-build-outputs.tar.xz + - mkdir -p /tmp/system-probe + - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/clang-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/clang-bpf + - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/llc-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/llc-bpf + - cp $CI_PROJECT_DIR/minimized-btfs.tar.xz /tmp/system-probe/minimized-btfs.tar.xz + - chmod 0744 /tmp/system-probe/clang-bpf /tmp/system-probe/llc-bpf + - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --python-runtimes "$PYTHON_RUNTIMES" --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod" --system-probe-bin=/tmp/system-probe --flavor "$FLAVOR" --config-directory "$CONFIG_DIR" --install-directory "$INSTALL_DIR" + - ls -la $OMNIBUS_PACKAGE_DIR + - !reference [.upload_sbom_artifacts] + .agent_build_common: rules: - !reference [.except_mergequeue] - when: on_success stage: package_build script: - - echo "About to build for $RELEASE_VERSION" - - !reference [.retrieve_linux_go_deps] - - !reference [.cache_omnibus_ruby_deps, setup] - # remove artifacts from previous pipelines that may come from the cache - - rm -rf $OMNIBUS_PACKAGE_DIR/* - # Artifacts and cache must live within project directory but we run omnibus in a neutral directory. - # Thus, we move the artifacts at the end in a gitlab-friendly dir. 
- - tar -xf $CI_PROJECT_DIR/sysprobe-build-outputs.tar.xz - - mkdir -p /tmp/system-probe - - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/clang-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/clang-bpf - - $S3_CP_CMD $S3_PERMANENT_ARTIFACTS_URI/llc-$CLANG_LLVM_VER.${PACKAGE_ARCH} /tmp/system-probe/llc-bpf - - cp $CI_PROJECT_DIR/minimized-btfs.tar.xz /tmp/system-probe/minimized-btfs.tar.xz - - chmod 0744 /tmp/system-probe/clang-bpf /tmp/system-probe/llc-bpf - - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --python-runtimes "$PYTHON_RUNTIMES" --base-dir $OMNIBUS_BASE_DIR ${USE_S3_CACHING} --skip-deps --go-mod-cache="$GOPATH/pkg/mod" --system-probe-bin=/tmp/system-probe --flavor "$FLAVOR" - - ls -la $OMNIBUS_PACKAGE_DIR - - !reference [.upload_sbom_artifacts] + - !reference [.agent_build_script] variables: KUBERNETES_CPU_REQUEST: 16 KUBERNETES_MEMORY_REQUEST: "32Gi" @@ -81,7 +84,37 @@ before_script: - export RELEASE_VERSION=$RELEASE_VERSION_7 -# build Agent 6 binaries for x86_64 +# Temporary custom agent build test to prevent regression +# This test will be removed when custom paths are used to build macos agent +# with in-house macos runner builds.
+datadog-agent-7-x64-custom-path-test: + extends: [.agent_build_x86, .agent_7_build] + rules: + - !reference [.except_mergequeue] + - when: on_success + stage: package_build + script: + - mkdir /custom + - export CONFIG_DIR="/custom" + - export INSTALL_DIR="/custom/datadog-agent" + - !reference [.agent_build_script] + - ls -la $OMNIBUS_PACKAGE_DIR + - ls -la $INSTALL_DIR + - ls -la /custom/etc + - (ls -la /opt/datadog-agent 2>/dev/null && exit 1) || echo "/opt/datadog-agent has correctly not been generated" + - (ls -la /etc/datadog-agent 2>/dev/null && exit 1) || echo "/etc/datadog-agent has correctly not been generated" + variables: + KUBERNETES_CPU_REQUEST: 16 + KUBERNETES_MEMORY_REQUEST: "32Gi" + KUBERNETES_MEMORY_LIMIT: "32Gi" + artifacts: + expire_in: 2 weeks + paths: + - $OMNIBUS_PACKAGE_DIR + cache: + - !reference [.cache_omnibus_ruby_deps, cache] + + # build Agent 6 binaries for x86_64 datadog-agent-6-x64: extends: [.agent_build_common, .agent_build_x86, .agent_6_build] diff --git a/.gitlab/packaging/deb.yml b/.gitlab/packaging/deb.yml index 08a02809ed244..b45aad5855716 100644 --- a/.gitlab/packaging/deb.yml +++ b/.gitlab/packaging/deb.yml @@ -17,6 +17,7 @@ KUBERNETES_CPU_REQUEST: 16 KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/agent-deb.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -123,6 +124,9 @@ installer_deb-amd64: variables: DESTINATION_DEB: "datadog-installer_7_amd64.deb" DD_PROJECT: "installer" + # There are currently no files to check for in the installer so we + # explicitly disable the check + PACKAGE_REQUIRED_FILES_LIST: "" installer_deb-arm64: extends: [.package_deb_common, .package_deb_arm64, .package_deb_agent_7] @@ -133,6 +137,7 @@ installer_deb-arm64: variables: DESTINATION_DEB: "datadog-installer_7_arm64.deb" DD_PROJECT: "installer" + PACKAGE_REQUIRED_FILES_LIST: "" .package_iot_deb_common: extends: [.package_deb_agent_7] @@ -156,6 +161,7 @@ 
installer_deb-arm64: KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" OMNIBUS_PACKAGE_ARTIFACT_DIR: $OMNIBUS_PACKAGE_DIR + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/iot-agent-deb.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -191,6 +197,7 @@ dogstatsd_deb-x64: variables: DD_PROJECT: dogstatsd DESTINATION_DEB: "datadog-dogstatsd_amd64.deb" + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-deb.txt" dogstatsd_deb-arm64: extends: [.package_deb_common, .package_deb_arm64, .package_deb_agent_7] @@ -201,4 +208,5 @@ dogstatsd_deb-arm64: variables: DD_PROJECT: dogstatsd DESTINATION_DEB: "datadog-dogstatsd_arm64.deb" + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-deb.txt" diff --git a/.gitlab/packaging/oci.yml b/.gitlab/packaging/oci.yml index b2dcd8eaf740a..0efb48a3e6e0e 100644 --- a/.gitlab/packaging/oci.yml +++ b/.gitlab/packaging/oci.yml @@ -6,13 +6,14 @@ image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] before_script: - - export PACKAGE_VERSION=$(inv agent.version --url-safe --major-version 7)-1 + - PACKAGE_VERSION="$(inv agent.version --url-safe --major-version 7)-1" || exit $? 
- export INSTALL_DIR=/opt/datadog-packages/${OCI_PRODUCT}/${PACKAGE_VERSION} variables: KUBERNETES_CPU_REQUEST: 16 KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" script: + - !reference [.retrieve_linux_go_tools_deps] - rm -f $OMNIBUS_PACKAGE_DIR/*-dbg-*.tar.xz - ls -l $OMNIBUS_PACKAGE_DIR - python3 -m pip install -r tasks/libs/requirements-github.txt @@ -67,12 +68,12 @@ agent_oci: extends: .package_oci - needs: ["datadog-agent-oci-x64-a7", "datadog-agent-oci-arm64-a7", "windows_msi_and_bosh_zip_x64-a7"] + needs: ["datadog-agent-oci-x64-a7", "datadog-agent-oci-arm64-a7", "windows_msi_and_bosh_zip_x64-a7", "go_tools_deps"] variables: OCI_PRODUCT: "datadog-agent" installer_oci: extends: .package_oci - needs: ["installer-arm64-oci", "installer-amd64-oci", "windows-installer-amd64"] + needs: ["installer-arm64-oci", "installer-amd64-oci", "windows-installer-amd64", "go_tools_deps"] variables: OCI_PRODUCT: "datadog-installer" diff --git a/.gitlab/packaging/rpm.yml b/.gitlab/packaging/rpm.yml index d03aa99f9212f..7ab26f7d86e9b 100644 --- a/.gitlab/packaging/rpm.yml +++ b/.gitlab/packaging/rpm.yml @@ -8,9 +8,9 @@ script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY) - - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) || exit $?; export RPM_SIGNING_PASSPHRASE - inv -e omnibus.build --release-version "$RELEASE_VERSION" --major-version "$AGENT_MAJOR_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps 
--target-project=${DD_PROJECT} ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] @@ -24,6 +24,7 @@ KUBERNETES_MEMORY_REQUEST: "32Gi" KUBERNETES_MEMORY_LIMIT: "32Gi" OMNIBUS_PACKAGE_ARTIFACT_DIR: $OMNIBUS_PACKAGE_DIR + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/agent-rpm.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -111,24 +112,30 @@ installer_rpm-amd64: needs: ["installer-amd64"] variables: DD_PROJECT: installer + # There are currently no files to check for in the installer so we + # explicitly disable the check + PACKAGE_REQUIRED_FILES_LIST: "" installer_rpm-arm64: extends: [.package_rpm_common, .package_rpm_agent_7, .package_rpm_arm64] needs: ["installer-arm64"] variables: DD_PROJECT: installer + PACKAGE_REQUIRED_FILES_LIST: "" installer_suse_rpm-amd64: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_x86] needs: ["installer-amd64"] variables: DD_PROJECT: installer + PACKAGE_REQUIRED_FILES_LIST: "" installer_suse_rpm-arm64: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_arm64] needs: ["installer-arm64"] variables: DD_PROJECT: installer + PACKAGE_REQUIRED_FILES_LIST: "" .package_iot_rpm_common: rules: @@ -137,9 +144,9 @@ installer_suse_rpm-arm64: script: - echo "About to build for $RELEASE_VERSION" - !reference [.cache_omnibus_ruby_deps, setup] - - RPM_GPG_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY) - - printf -- "$RPM_GPG_KEY" | gpg --import --batch - - export RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) + - printf -- "$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_GPG_KEY)" | gpg --import --batch + - EXIT="${PIPESTATUS[0]}"; if [ $EXIT -ne 0 ]; then echo "Unable to locate credentials needs gitlab runner restart"; exit $EXIT; fi + - RPM_SIGNING_PASSPHRASE=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $RPM_SIGNING_PASSPHRASE) || exit $?; export RPM_SIGNING_PASSPHRASE - inv -e omnibus.build 
--release-version "$RELEASE_VERSION" --base-dir $OMNIBUS_BASE_DIR --skip-deps --flavor=iot ${OMNIBUS_EXTRA_ARGS} - ls -la $OMNIBUS_PACKAGE_DIR/ - !reference [.lint_linux_packages] @@ -154,6 +161,7 @@ installer_suse_rpm-arm64: KUBERNETES_MEMORY_LIMIT: "32Gi" OMNIBUS_PACKAGE_ARTIFACT_DIR: $OMNIBUS_PACKAGE_DIR RELEASE_VERSION: $RELEASE_VERSION_7 + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/iot-agent-rpm.txt" cache: - !reference [.cache_omnibus_ruby_deps, cache] @@ -201,9 +209,11 @@ dogstatsd_rpm-x64: needs: ["dogstatsd-x64"] variables: DD_PROJECT: dogstatsd + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-rpm.txt" dogstatsd_suse-x64: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_x86] needs: ["dogstatsd-x64"] variables: DD_PROJECT: dogstatsd + PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-rpm.txt" diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml index 94a48c2fe1004..21c647bbcd805 100644 --- a/.gitlab/pkg_metrics/pkg_metrics.yml +++ b/.gitlab/pkg_metrics/pkg_metrics.yml @@ -57,7 +57,7 @@ send_pkg_size: optional: true script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY # Allow failures: some packages are not always built, and therefore stats cannot be sent for them - set +e diff --git a/.gitlab/post_rc_build/post_rc_tasks.yml b/.gitlab/post_rc_build/post_rc_tasks.yml index 8cfab2abbd124..3c9ca13b377f4 100644 --- a/.gitlab/post_rc_build/post_rc_tasks.yml +++ b/.gitlab/post_rc_build/post_rc_tasks.yml @@ -11,8 +11,8 @@ update_rc_build_links: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - export ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $JIRA_READ_API_TOKEN) - - 
export ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com + - ATLASSIAN_PASSWORD=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $JIRA_READ_API_TOKEN) || exit $?; export ATLASSIAN_PASSWORD + - ATLASSIAN_USERNAME=robot-jira-agentplatform@datadoghq.com; export ATLASSIAN_USERNAME - python3 -m pip install -r tasks/requirements_release_tasks.txt - PATCH=$(echo "$CI_COMMIT_REF_NAME" | cut -d'.' -f3 | cut -c1) - if [[ "$PATCH" == "0" ]]; then PATCH_OPTION=""; else PATCH_OPTION="-p"; fi diff --git a/.gitlab/setup/setup.yml b/.gitlab/setup/setup.yml index 7649437386917..39b6c2a20b0dd 100644 --- a/.gitlab/setup/setup.yml +++ b/.gitlab/setup/setup.yml @@ -4,7 +4,7 @@ setup_agent_version: image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] script: - - inv -e agent.version --cache-version + - inv -e agent.version --cache-version || exit $? - $S3_CP_CMD $CI_PROJECT_DIR/agent-version.cache $S3_ARTIFACTS_URI/agent-version.cache needs: [] @@ -18,15 +18,15 @@ github_rate_limit_info: script: - python3 -m pip install -r tasks/libs/requirements-github.txt datadog_api_client # Send stats for app 1 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY) || exit $?; export GITHUB_KEY_B64 + - GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID) || exit $?; export GITHUB_APP_ID + - GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID) || exit $?; export GITHUB_INSTALLATION_ID + - 
DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 1 # Send stats for app 2 - - export GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2) - - export GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID_2) - - export GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2) - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_KEY_2) || exit $?; export GITHUB_KEY_B64 + - GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_ID_2) || exit $?; export GITHUB_APP_ID + - GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_INSTALLATION_ID_2) || exit $?; export GITHUB_INSTALLATION_ID + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv github.send-rate-limit-info-datadog --pipeline-id $CI_PIPELINE_ID --app-instance 2 allow_failure: true diff --git a/.gitlab/source_test/golang_deps_diff.yml b/.gitlab/source_test/golang_deps_diff.yml index 491a99cd520f2..6d156dd6f4963 100644 --- a/.gitlab/source_test/golang_deps_diff.yml +++ b/.gitlab/source_test/golang_deps_diff.yml @@ -15,7 +15,7 @@ golang_deps_diff: - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv -e diff.go-deps --report-file=deps-report.md --report-metrics --git-ref "${CI_COMMIT_REF_NAME}" artifacts: paths: @@ -64,7 +64,7 @@ golang_deps_send_count_metrics: - !reference [.retrieve_linux_go_deps] script: # Get API key to send metrics - - export 
DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) + - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $API_KEY_ORG2) || exit $?; export DD_API_KEY - inv -e go-deps.send-count-metrics --git-sha "${CI_COMMIT_SHA}" --git-ref "${CI_COMMIT_REF_NAME}" golang_deps_test: diff --git a/.gitlab/source_test/include.yml b/.gitlab/source_test/include.yml index c71029608a40e..60666c8d4f5a1 100644 --- a/.gitlab/source_test/include.yml +++ b/.gitlab/source_test/include.yml @@ -12,5 +12,4 @@ include: - .gitlab/source_test/slack.yml - .gitlab/source_test/golang_deps_diff.yml - .gitlab/source_test/notify.yml - - .gitlab/source_test/technical_linters.yml - .gitlab/source_test/tooling_unit_tests.yml diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 4b49e23974b35..015aea09496a8 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -50,8 +50,7 @@ .upload_coverage: # Upload coverage files to Codecov. Never fail on coverage upload. - - source /root/.bashrc - - export CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN) + - CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN) || exit $?; export CODECOV_TOKEN - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true .linux_lint: @@ -264,10 +263,17 @@ new-e2e-unit-tests: extends: .linux_tests image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/test-infra-definitions/runner$TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX:$TEST_INFRA_DEFINITIONS_BUILDIMAGES tags: ["arch:amd64"] + needs: + - !reference [ .needs_new_e2e_template ] + - go_deps + - go_tools_deps before_script: + - !reference [.retrieve_linux_go_deps] + - !reference [.retrieve_linux_go_tools_deps] + - !reference [.retrieve_linux_go_e2e_deps] # Setup AWS Credentials - mkdir -p ~/.aws - - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config + - $CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_QA_PROFILE >> ~/.aws/config || exit $? 
- export AWS_PROFILE=agent-qa-ci # Use S3 backend - pulumi login "s3://dd-pulumi-state?region=us-east-1&awssdk=v2&profile=$AWS_PROFILE" diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml index fe93fb8860e5b..3be1d5c904eda 100644 --- a/.gitlab/source_test/macos.yml +++ b/.gitlab/source_test/macos.yml @@ -30,24 +30,6 @@ tests_macos: reports: junit: "**/junit-out-*.xml" -lint_macos: - stage: source_test - rules: - - !reference [.except_mergequeue] - - when: on_success - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - needs: ["setup_agent_version"] - variables: - PYTHON_RUNTIMES: "3" - timeout: 6h - script: - - !reference [.setup_macos_github_app] - - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache . - - export VERSION_CACHE_CONTENT=$(cat agent-version.cache | base64 -) - - python3 -m pip install -r tasks/libs/requirements-github.txt - - inv -e github.trigger-macos --workflow-type "lint" --datadog-agent-ref "$CI_COMMIT_SHA" --python-runtimes "$PYTHON_RUNTIMES" --version-cache "$VERSION_CACHE_CONTENT" - .macos_gitlab: variables: PYTHON_RUNTIMES: "3" @@ -82,7 +64,6 @@ lint_macos: .lint_macos_gitlab: stage: source_test - allow_failure: true extends: .macos_gitlab needs: ["setup_agent_version"] script: @@ -98,10 +79,30 @@ lint_macos: variables: TEST_OUTPUT_FILE: test_output.json script: + - inv -e gitlab.generate-ci-visibility-links --output=$EXTERNAL_LINKS_PATH - FAST_TESTS_FLAG="" - if [[ "$FAST_TESTS" == "true" ]]; then FAST_TESTS_FLAG="--only-impacted-packages"; fi - inv -e test --rerun-fails=2 --python-runtimes $PYTHON_RUNTIMES --race --profile --cpus 12 --save-result-json $TEST_OUTPUT_FILE --junit-tar "junit-${CI_JOB_NAME}.tgz" $FAST_TESTS_FLAG - inv -e invoke-unit-tests + artifacts: + expire_in: 2 weeks + when: always + paths: + - $TEST_OUTPUT_FILE + - junit-*.tgz + reports: + junit: "**/junit-out-*.xml" + 
annotations: + - $EXTERNAL_LINKS_PATH + +.upload_junit_source: + - $CI_PROJECT_DIR/tools/ci/junit_upload.sh + +.upload_coverage: + # Upload coverage files to Codecov. Never fail on coverage upload. + - CODECOV_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $CODECOV_TOKEN) || exit $?; export CODECOV_TOKEN + - inv -e coverage.upload-to-codecov $COVERAGE_CACHE_FLAG || true + lint_macos_gitlab_amd64: extends: .lint_macos_gitlab @@ -109,6 +110,7 @@ lint_macos_gitlab_amd64: lint_macos_gitlab_arm64: extends: .lint_macos_gitlab + allow_failure: true rules: - !reference [.on_main] - !reference [.manual] @@ -117,7 +119,13 @@ lint_macos_gitlab_arm64: tests_macos_gitlab_amd64: extends: .tests_macos_gitlab tags: ["macos:monterey-amd64", "specific:true"] + after_script: + - !reference [.upload_junit_source] + - !reference [.upload_coverage] tests_macos_gitlab_arm64: extends: .tests_macos_gitlab tags: ["macos:monterey-arm64", "specific:true"] + after_script: + - !reference [.upload_junit_source] + - !reference [.upload_coverage] diff --git a/.gitlab/source_test/technical_linters.yml b/.gitlab/source_test/technical_linters.yml deleted file mode 100644 index c7759eda331bc..0000000000000 --- a/.gitlab/source_test/technical_linters.yml +++ /dev/null @@ -1,24 +0,0 @@ -lint_python: - stage: source_test - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - needs: [] - script: - - inv -e linter.python - -lint_update_go: - stage: source_test - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - needs: [] - script: - - inv -e linter.update-go - -validate_modules: - stage: source_test - image: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: 
["arch:amd64"] - needs: [] - script: - - inv -e modules.validate - - inv -e modules.validate-used-by-otel diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml index cf2a4b3591a84..4be733f666c8c 100644 --- a/.gitlab/trigger_release/trigger_release.yml +++ b/.gitlab/trigger_release/trigger_release.yml @@ -18,8 +18,8 @@ script: # agent-release-management creates pipeline for both Agent 6 and Agent 7 # when triggered with major version 7 - - export RELEASE_VERSION=$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1 - - export GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) + - RELEASE_VERSION="$(inv agent.version --major-version 7 --url-safe --omnibus-format)-1" || exit $? + - GITLAB_TOKEN=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $GITLAB_SCHEDULER_TOKEN) || exit $?; export GITLAB_TOKEN - 'inv pipeline.trigger-child-pipeline --project-name "DataDog/agent-release-management" --git-ref "main" --variable ACTION --variable AUTO_RELEASE diff --git a/.go-version b/.go-version index 013173af5e9bc..87b26e8b1aa0e 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.6 +1.22.7 diff --git a/.wwhrd.yml b/.wwhrd.yml index 84cd078577d5b..5ffc72c725368 100644 --- a/.wwhrd.yml +++ b/.wwhrd.yml @@ -45,4 +45,4 @@ exceptions: additional: # list here paths to additional licenses - golang/go: "raw.githubusercontent.com/golang/go/go1.22.6/LICENSE" + golang/go: "raw.githubusercontent.com/golang/go/go1.22.7/LICENSE" diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f0ccf802fe2cd..abbc7762d316e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,61 @@ Release Notes ============= +.. _Release Notes_7.57.2: + +7.57.2 +====== + +.. _Release Notes_7.57.2_Prelude: + +Prelude +------- + +Release on: 2024-09-24 + + +.. _Release Notes_7.57.2_Enhancement Notes: + +Enhancement Notes +----------------- + +- Agents are now built with Go ``1.22.7``. + + +.. 
_Release Notes_7.57.2_Bug Fixes: + +Bug Fixes +--------- + +- Fix OOM error with cluster agent auto instrumentation by increasing default memory request from 20Mi to 100Mi. + +- Fixes a panic caused by running the Agent on readonly filesystems. The + Agent returns integration launchers and handles memory gracefully. + + +.. _Release Notes_7.57.1: + +7.57.1 +====== + +.. _Release Notes_7.57.1_Prelude: + +Prelude +------- + +Release on: 2024-09-17 + +- Please refer to the `7.57.1 tag on integrations-core `_ for the list of changes on the Core Checks + +.. _Release Notes_7.57.1_Bug Fixes: + +Bug Fixes +--------- + +- APM: When the UDS listener cannot be created on the trace-agent, the process will log the error, instead of crashing. +- Fixes memory leak caused by container check. + + .. _Release Notes_7.57.0: 7.57.0 @@ -22,7 +77,7 @@ Release on: 2024-09-09 Upgrade Notes ------------- -- Update cURL to 8.7.1. +- Update cURL to 8.9.1. - Update OpenSSL from 3.0.14 to 3.3.1 (on Linux and macOS). @@ -82,12 +137,12 @@ Enhancement Notes These are used in the Single Step APM Instrumentation, improving the onboarding experience and minimizing the agent configuration. -- For the [Inferred Service Dependencies beta](https://docs.datadoghq.com/tracing/guide/inferred-service-opt-in/?tab=java), add two new `peer.hostname` precursor attributes, `out.host` and `dns.hostname`. This will improve coverage of inferred services because some tracer integrations only place the peer hostname in one of those attributes. +- For the [Inferred Service Dependencies beta](https://docs.datadoghq.com/tracing/guide/inferred-service-opt-in/?tab=java), add two new `peer.hostname` precursor attributes, `out.host` and `dns.hostname`. This will improve coverage of inferred services because some tracer integrations only place the peer hostname in one of those attributes. 
- APM stats for internal service overrides are now aggregated by the `_dd.base_service` tag only, enhancing visibility into specific base services. -- Include spans with `span.kind=consumer` for aggregation of - stats on peer tags. +- Include spans with `span.kind=consumer` for aggregation of + stats on peer tags. - IP address quantization on all peer tags is done the backend during ingestion. This change updates the Agent to apply the same IP address quantization. This reduces unnecessary aggregation that is currently done on raw IP addresses. And therefore, improves the aggregation performance of stats on peer tags. @@ -126,7 +181,7 @@ Enhancement Notes ``PROJECTLOCATION`` is misconfigured to an existing directory. - Adds a default upper limit of 10000 to the number of network traffic - paths that are captured at a single time. The user can increase or + paths that are captured at a single time. The user can increase or decrease this limit as needed. - Language detection can run on the core Agent without needing a gRPC server. @@ -168,11 +223,11 @@ Bug Fixes - Fix duplicate logging in Process Agent component's Enabled() method. -- Fixed bug in kubelet check when running in core agent that - was causing `kubernetes.kubelet.container.log_filesystem.used_bytes` - to be reported by the check for excluded/non-existing containers. - The metric was being reported in this case without tags. - This bug does not exist in the python integration version of the +- Fixed bug in kubelet check when running in core agent that + was causing `kubernetes.kubelet.container.log_filesystem.used_bytes` + to be reported by the check for excluded/non-existing containers. + The metric was being reported in this case without tags. + This bug does not exist in the python integration version of the kubelet check. - Fixes a bug on Windows in the driver installation custom actions that could prevent rollback from working properly if an installation failed or was canceled. 
@@ -281,7 +336,7 @@ Upgrade Notes New Features ------------ -- The core Agent now supports multiple configuration files in addition to the main ``datadog.yaml`` file. +- The core Agent now supports multiple configuration files in addition to the main ``datadog.yaml`` file. The -E flag can be used to specify additional configuration files to be loaded sequentially after the main ``datadog.yaml``. - When ``DD_SERVERLESS_STREAM_LOGS`` is enabled, DD_EXTENSION @@ -305,10 +360,10 @@ Enhancement Notes - APM: Add obfuscation support for OpenSearch statements within span metadata. This feature works in the same way as the existing Elasticsearch one, and is enabled by default. It is configured by binding ``apm_config.obfuscation.opensearch.*`` parameters to new obfuscation environment variables. In particular, bind: ``apm_config.obfuscation.opensearch.enabled`` to ``DD_APM_OBFUSCATION_OPENSEARCH_ENABLED``: It accepts a boolean value with default value true. - + ``apm_config.obfuscation.opensearch.keep_values`` to ``DD_APM_OBFUSCATION_OPENSEARCH_KEEP_VALUES`` It accepts a list of strings of the form ``["id1", "id2"]``. - + ``apm_config.obfuscation.opensearch.obfuscate_sql_values`` to ``DD_APM_OBFUSCATION_OPENSEARCH_OBFUSCATE_SQL_VALUES`` It accepts a list of strings of the form ``["key1", "key2"]``. @@ -348,7 +403,7 @@ Enhancement Notes to better support users with multi-byte character sets, for example, Korean, Arabic, etc. This should alleviate crashes caused by long queries using these characters. -- The OTLP ingestion endpoint now supports the same settings and protocol as +- The OTLP ingestion endpoint now supports the same settings and protocol as the OpenTelemetry Collector OTLP receiver v0.103.0. - APM: Probabilistic Sampler now only looks at the lower 64 bits of a trace ID by default to improve compatibility in distributed systems where some apps may truncate the trace ID. To maintain the previous behavior use the feature flag `probabilistic_sampler_full_trace_id`. 
@@ -391,7 +446,7 @@ Deprecation Notes Security Notes -------------- -- Updating OpenSSL to 3.0.14 to address CVE-2024-4741. +- Updating OpenSSL to 3.0.14 to address CVE-2024-4741 (on Linux and macOS). .. _Release Notes_7.56.0_Bug Fixes: @@ -407,7 +462,7 @@ Bug Fixes - Re-enable printing of checks metadata in the ``datadog-agent status`` collector section. -- Fix OTLP status output not being displayed in the GUI. +- Fix OTLP status output not being displayed in the GUI. - Fix issue where init config for ping took priority over instance config. diff --git a/Dockerfiles/agent-ot/Dockerfile.agent-otel b/Dockerfiles/agent-ot/Dockerfile.agent-otel index cb0fb6109e780..cf314ed6f47cc 100644 --- a/Dockerfiles/agent-ot/Dockerfile.agent-otel +++ b/Dockerfiles/agent-ot/Dockerfile.agent-otel @@ -1,9 +1,11 @@ -ARG AGENT_VERSION=nightly-ot-beta-main-jmx +ARG AGENT_VERSION=7.57.0-v1.0-ot-beta-jmx +ARG AGENT_BRANCH=7.57.x-otel-beta-v1 # Use the Ubuntu Slim AMD64 base image FROM ubuntu:24.04 AS builder # Set environment variables ARG AGENT_VERSION +ARG AGENT_BRANCH ENV DEBIAN_FRONTEND=noninteractive # Set the working directory @@ -24,7 +26,7 @@ RUN apt-get update && \ && rm -rf /var/lib/apt/lists/* # TEMP: Use github source code -RUN git clone --depth 1 https://github.com/DataDog/datadog-agent.git datadog-agent-${AGENT_VERSION} +RUN git clone --depth 1 -b "${AGENT_BRANCH}" --single-branch https://github.com/DataDog/datadog-agent.git datadog-agent-${AGENT_VERSION} # Once we have stable releases, we can use the following code to download the source code # TODO: use released agent version once we have an agent release with the otel binary @@ -77,7 +79,7 @@ RUN . venv/bin/activate && invoke collector.generate RUN . 
venv/bin/activate && invoke otel-agent.build # Use the final Datadog agent image -FROM datadog/agent-dev:${AGENT_VERSION} +FROM datadog/agent:${AGENT_VERSION} ARG AGENT_VERSION # Copy the built OTel agent from the builder stage COPY --from=builder /workspace/datadog-agent-${AGENT_VERSION}/bin/otel-agent/otel-agent /opt/datadog-agent/embedded/bin/otel-agent diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index f8555a000f46d..c9a5f5af7a085 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -1055,6 +1055,7 @@ core,github.com/go-redis/redis/v9/internal/util,BSD-2-Clause,Copyright (c) 2013 core,github.com/go-resty/resty/v2,MIT,"Copyright (c) 2015-2023 Jeevanandam M., https://myjeeva.com " core,github.com/go-sql-driver/mysql,MPL-2.0,"Aaron Hopkins | Achille Roussel | Aidan | Alex Snast | Alexey Palazhchenko | Andrew Reid | Animesh Ray | Ariel Mashraki | Arne Hormann | Asta Xie | Barracuda Networks, Inc. | Brian Hendriks | Bulat Gaifullin | Caine Jette | Carlos Nieto | Chris Kirkland | Chris Moos | Counting Ltd. | Craig Wilson | Daemonxiao <735462752 at qq.com> | Daniel Montoya | Daniel Nichter | Daniël van Eeden | Dave Protasowski | DigitalOcean Inc. | DisposaBoy | Dolthub Inc. | Egor Smolyakov | Erwan Martin | Evan Elias | Evan Shaw | Facebook Inc. | Frederick Mayle | GitHub Inc. | Google Inc. | Gustavo Kristic | Gusted | Hajime Nakagami | Hanno Braun | Henri Yandell | Hirotaka Yamamoto | Huyiguang | ICHINOSE Shogo | INADA Naoki | Ilia Cimpoes | InfoSum Ltd. | Jacek Szwec | James Harr | Janek Vedock | Jason Ng | Jean-Yves Pellé | Jeff Hodges | Jeffrey Charles | Jennifer Purevsuren | Jerome Meyer | Jiajia Zhong | Jian Zhen | Joshua Prunier | Julien Lefevre | Julien Schmidt | Justin Li | Justin Nuß | Kamil Dziedzic | Kei Kamikawa | Kevin Malachowski | Keybase Inc. 
| Kieron Woodhouse | Lance Tian | Lennart Rudolph | Leonardo YongUk Kim | Linh Tran Tuan | Lion Yang | Luca Looz | Lucas Liu | Luke Scott | Lunny Xiao | Maciej Zimnoch | Michael Woolnough | Microsoft Corp. | Multiplay Ltd. | Nathanial Murphy | Nicola Peduzzi | Oliver Bone | Olivier Mengué | Paul Bonser | Paulius Lozys | Percona LLC | Peter Schultz | Phil Porada | PingCAP Inc. | Pivotal Inc. | Rebecca Chin | Reed Allman | Richard Wilkes | Robert Russell | Runrioter Wung | Samantha Frank | Santhosh Kumar Tekuri | Shattered Silicon Ltd. | Sho Iizuka | Sho Ikeda | Shuode Li | Simon J Mudd | Soroush Pour | Stan Putrya | Stanley Gunawan | Steven Hartland | Stripe Inc. | Tan Jinhua <312841925 at qq.com> | Tetsuro Aoki | Thomas Wodarek | Tim Ruffles | Tom Jenkinson | Vladimir Kovpak | Vladyslav Zhelezniak | Xiangyu Hu | Xiaobing Jiang | Xiuming Chen | Xuehong Chan | Zendesk Inc. | Zhang Xiang | Zhenye Xie | Zhixin Wen | Ziheng Lyu | copyright doctrines of fair use, fair dealing, or other | dyves labs AG | oscarzhao " core,github.com/go-viper/mapstructure/v2,MIT,Copyright (c) 2013 Mitchell Hashimoto +core,github.com/go-viper/mapstructure/v2/internal/errors,MIT,Copyright (c) 2013 Mitchell Hashimoto core,github.com/go-zookeeper/zk,BSD-3-Clause,"Copyright (c) 2013, Samuel Stauffer " core,github.com/gobwas/glob,MIT,Copyright (c) 2016 Sergey Kamardin core,github.com/gobwas/glob/compiler,MIT,Copyright (c) 2016 Sergey Kamardin @@ -2402,7 +2403,6 @@ core,go.opentelemetry.io/collector/pdata/pmetric,Apache-2.0,Copyright The OpenTe core,go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/ptrace,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/pdata/testdata,Apache-2.0,Copyright The OpenTelemetry Authors 
core,go.opentelemetry.io/collector/processor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/batchprocessor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/batchprocessor/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2450,6 +2450,7 @@ core,go.opentelemetry.io/contrib/config,Apache-2.0,Copyright The OpenTelemetry A core,go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/contrib/propagators/b3,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2519,7 +2520,6 @@ core,go.opentelemetry.io/otel/semconv/v1.17.0,Apache-2.0,Copyright The OpenTelem core,go.opentelemetry.io/otel/semconv/v1.17.0/httpconv,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.20.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.21.0,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/otel/semconv/v1.24.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.25.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/otel/semconv/v1.26.0,Apache-2.0,Copyright The OpenTelemetry Authors 
core,go.opentelemetry.io/otel/trace,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2630,17 +2630,17 @@ core,golang.org/x/net/proxy,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/net/publicsuffix,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/net/trace,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/net/websocket,BSD-3-Clause,Copyright 2009 The Go Authors -core,golang.org/x/oauth2,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/authhandler,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/clientcredentials,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/externalaccount,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/internal/externalaccountauthorizeduser,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/internal/impersonate,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/google/internal/stsexchange,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/internal,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/jws,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved -core,golang.org/x/oauth2/jwt,BSD-3-Clause,Copyright (c) 2009 The Go Authors. 
All rights reserved +core,golang.org/x/oauth2,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/authhandler,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/clientcredentials,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/externalaccount,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/internal/externalaccountauthorizeduser,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/internal/impersonate,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/google/internal/stsexchange,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/internal,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/jws,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/oauth2/jwt,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/sync/errgroup,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/sync/semaphore,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/sync/singleflight,BSD-3-Clause,Copyright 2009 The Go Authors diff --git a/cmd/agent/common/autodiscovery.go b/cmd/agent/common/autodiscovery.go index 6323d13ba3c03..8ff855aeca19f 100644 --- a/cmd/agent/common/autodiscovery.go +++ b/cmd/agent/common/autodiscovery.go @@ -20,8 +20,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" confad "github.com/DataDog/datadog-agent/pkg/config/autodiscovery" + pkgconfigenv "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/structure" 
"github.com/DataDog/datadog-agent/pkg/util/jsonquery" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -45,34 +47,34 @@ func setupAutoDiscovery(confSearchPaths []string, wmeta workloadmeta.Component, ac.AddConfigProvider( providers.NewFileConfigProvider(acTelemetryStore), - config.Datadog().GetBool("autoconf_config_files_poll"), - time.Duration(config.Datadog().GetInt("autoconf_config_files_poll_interval"))*time.Second, + pkgconfigsetup.Datadog().GetBool("autoconf_config_files_poll"), + time.Duration(pkgconfigsetup.Datadog().GetInt("autoconf_config_files_poll_interval"))*time.Second, ) // Autodiscovery cannot easily use config.RegisterOverrideFunc() due to Unmarshalling extraConfigProviders, extraConfigListeners := confad.DiscoverComponentsFromConfig() - var extraEnvProviders []config.ConfigurationProviders - var extraEnvListeners []config.Listeners - if config.IsAutoconfigEnabled() && !config.IsCLCRunner() { + var extraEnvProviders []pkgconfigsetup.ConfigurationProviders + var extraEnvListeners []pkgconfigsetup.Listeners + if pkgconfigenv.IsAutoconfigEnabled(pkgconfigsetup.Datadog()) && !pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()) { extraEnvProviders, extraEnvListeners = confad.DiscoverComponentsFromEnv() } // Register additional configuration providers - var configProviders []config.ConfigurationProviders - var uniqueConfigProviders map[string]config.ConfigurationProviders - err := config.Datadog().UnmarshalKey("config_providers", &configProviders) + var configProviders []pkgconfigsetup.ConfigurationProviders + var uniqueConfigProviders map[string]pkgconfigsetup.ConfigurationProviders + err := structure.UnmarshalKey(pkgconfigsetup.Datadog(), "config_providers", &configProviders) if err == nil { - uniqueConfigProviders = make(map[string]config.ConfigurationProviders, len(configProviders)+len(extraEnvProviders)+len(configProviders)) + uniqueConfigProviders = make(map[string]pkgconfigsetup.ConfigurationProviders, 
len(configProviders)+len(extraEnvProviders)+len(configProviders)) for _, provider := range configProviders { uniqueConfigProviders[provider.Name] = provider } // Add extra config providers - for _, name := range config.Datadog().GetStringSlice("extra_config_providers") { + for _, name := range pkgconfigsetup.Datadog().GetStringSlice("extra_config_providers") { if _, found := uniqueConfigProviders[name]; !found { - uniqueConfigProviders[name] = config.ConfigurationProviders{Name: name, Polling: true} + uniqueConfigProviders[name] = pkgconfigsetup.ConfigurationProviders{Name: name, Polling: true} } else { log.Infof("Duplicate AD provider from extra_config_providers discarded as already present in config_providers: %s", name) } @@ -87,7 +89,7 @@ func setupAutoDiscovery(confSearchPaths []string, wmeta workloadmeta.Component, } if enableContainerProvider { - uniqueConfigProviders[names.KubeContainer] = config.ConfigurationProviders{Name: names.KubeContainer} + uniqueConfigProviders[names.KubeContainer] = pkgconfigsetup.ConfigurationProviders{Name: names.KubeContainer} } for _, provider := range extraConfigProviders { @@ -123,12 +125,12 @@ func setupAutoDiscovery(confSearchPaths []string, wmeta workloadmeta.Component, } } - var listeners []config.Listeners - err = config.Datadog().UnmarshalKey("listeners", &listeners) + var listeners []pkgconfigsetup.Listeners + err = structure.UnmarshalKey(pkgconfigsetup.Datadog(), "listeners", &listeners) if err == nil { // Add extra listeners - for _, name := range config.Datadog().GetStringSlice("extra_listeners") { - listeners = append(listeners, config.Listeners{Name: name}) + for _, name := range pkgconfigsetup.Datadog().GetStringSlice("extra_listeners") { + listeners = append(listeners, pkgconfigsetup.Listeners{Name: name}) } // The "docker" and "ecs" listeners were replaced with the diff --git a/cmd/agent/common/common.go b/cmd/agent/common/common.go index 4ac33147c8185..272fed027007f 100644 --- a/cmd/agent/common/common.go +++ 
b/cmd/agent/common/common.go @@ -15,9 +15,9 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common/path" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -28,8 +28,8 @@ func GetPythonPaths() []string { return []string{ path.GetDistPath(), // common modules are shipped in the dist path directly or under the "checks/" sub-dir path.PyChecksPath, // integrations-core legacy checks - filepath.Join(path.GetDistPath(), "checks.d"), // custom checks in the "checks.d/" sub-dir of the dist path - config.Datadog().GetString("additional_checksd"), // custom checks, least precedent check location + filepath.Join(path.GetDistPath(), "checks.d"), // custom checks in the "checks.d/" sub-dir of the dist path + pkgconfigsetup.Datadog().GetString("additional_checksd"), // custom checks, least precedent check location } } @@ -43,10 +43,10 @@ func GetVersion(w http.ResponseWriter, _ *http.Request) { // NewSettingsClient returns a configured runtime settings client. 
func NewSettingsClient() (settings.Client, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } hc := util.GetClient(false) - return settingshttp.NewClient(hc, fmt.Sprintf("https://%v:%v/agent/config", ipcAddress, config.Datadog().GetInt("cmd_port")), "agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil + return settingshttp.NewClient(hc, fmt.Sprintf("https://%v:%v/agent/config", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")), "agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil } diff --git a/cmd/agent/common/common_windows.go b/cmd/agent/common/common_windows.go index 96b481944f63c..9bcf4cf48b75a 100644 --- a/cmd/agent/common/common_windows.go +++ b/cmd/agent/common/common_windows.go @@ -10,7 +10,7 @@ import ( "path/filepath" "github.com/DataDog/datadog-agent/cmd/agent/common/path" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/winutil" "github.com/DataDog/datadog-agent/pkg/util/winutil/messagestrings" @@ -48,11 +48,11 @@ func CheckAndUpgradeConfig() error { log.Debug("Previous config file not found, not upgrading") return nil } - config.Datadog().AddConfigPath(path.DefaultConfPath) - _, err := config.LoadWithoutSecret() + pkgconfigsetup.Datadog().AddConfigPath(path.DefaultConfPath) + _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) if err == nil { // was able to read config, check for api key - if config.Datadog().GetString("api_key") != "" { + if pkgconfigsetup.Datadog().GetString("api_key") != "" { log.Debug("Datadog.yaml found, and API key present. 
Not upgrading config") return nil } diff --git a/cmd/agent/common/helpers.go b/cmd/agent/common/helpers.go index 023fb29c384ee..b8738de623365 100644 --- a/cmd/agent/common/helpers.go +++ b/cmd/agent/common/helpers.go @@ -7,13 +7,12 @@ package common import ( "github.com/DataDog/datadog-agent/comp/core/settings" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) // SetupInternalProfiling is a common helper to configure runtime settings for internal profiling. -func SetupInternalProfiling(settings settings.Component, cfg config.Reader, configPrefix string) { +func SetupInternalProfiling(settings settings.Component, cfg model.Reader, configPrefix string) { if v := cfg.GetInt(configPrefix + "internal_profiling.block_profile_rate"); v > 0 { if err := settings.SetRuntimeSetting("runtime_block_profile_rate", v, model.SourceAgentRuntime); err != nil { log.Errorf("Error setting block profile rate: %v", err) diff --git a/cmd/agent/common/import.go b/cmd/agent/common/import.go index 019c10789d260..6d64c0b8bad75 100644 --- a/cmd/agent/common/import.go +++ b/cmd/agent/common/import.go @@ -18,8 +18,8 @@ import ( "github.com/fatih/color" yaml "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/legacy" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // TransformationFunc type represents transformation applicable to byte slices @@ -30,7 +30,7 @@ func ImportConfig(oldConfigDir string, newConfigDir string, force bool) error { datadogConfPath := filepath.Join(oldConfigDir, "datadog.conf") datadogYamlPath := filepath.Join(newConfigDir, "datadog.yaml") traceAgentConfPath := filepath.Join(newConfigDir, "trace-agent.conf") - configConverter := config.NewConfigConverter() + configConverter := legacy.NewConfigConverter() const cfgExt = ".yaml" const dirExt = ".d" @@ -52,14 +52,14 @@ func 
ImportConfig(oldConfigDir string, newConfigDir string, force bool) error { } // setup the configuration system - config.Datadog().AddConfigPath(newConfigDir) - _, err = config.LoadWithoutSecret() + pkgconfigsetup.Datadog().AddConfigPath(newConfigDir) + _, err = pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) if err != nil { return fmt.Errorf("unable to load Datadog config file: %s", err) } // we won't overwrite the conf file if it contains a valid api_key - if config.Datadog().GetString("api_key") != "" && !force { + if pkgconfigsetup.Datadog().GetString("api_key") != "" && !force { return fmt.Errorf("%s seems to contain a valid configuration, run the command again with --force or -f to overwrite it", datadogYamlPath) } @@ -136,7 +136,7 @@ func ImportConfig(oldConfigDir string, newConfigDir string, force bool) error { } // marshal the config object to YAML - b, err := yaml.Marshal(config.Datadog().AllSettings()) + b, err := yaml.Marshal(pkgconfigsetup.Datadog().AllSettings()) if err != nil { return fmt.Errorf("unable to marshal config to YAML: %v", err) } diff --git a/cmd/agent/common/misconfig/mounts.go b/cmd/agent/common/misconfig/mounts.go index 4fe51cf3ea17f..e9d4d8153583b 100644 --- a/cmd/agent/common/misconfig/mounts.go +++ b/cmd/agent/common/misconfig/mounts.go @@ -18,8 +18,8 @@ import ( "github.com/pkg/errors" "github.com/syndtr/gocapability/capability" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -68,7 +68,7 @@ func procMount() error { if !haveEgid { groups = append(groups, egid) } - path := config.Datadog().GetString("container_proc_root") + path := pkgconfigsetup.Datadog().GetString("container_proc_root") if env.IsContainerized() && path != "/proc" { path = filepath.Join(path, "1/mounts") } else { diff --git a/cmd/agent/common/test_helpers.go 
b/cmd/agent/common/test_helpers.go index 2daaf02efde50..aca2815135a43 100644 --- a/cmd/agent/common/test_helpers.go +++ b/cmd/agent/common/test_helpers.go @@ -16,13 +16,14 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common/path" "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) // SetupConfigForTest fires up the configuration system and returns warnings if any. -func SetupConfigForTest(confFilePath string) (*config.Warnings, error) { - cfg := config.Datadog() +func SetupConfigForTest(confFilePath string) (*model.Warnings, error) { + cfg := pkgconfigsetup.Datadog() origin := "datadog.yaml" // set the paths where a config file is expected if len(confFilePath) != 0 { @@ -36,7 +37,7 @@ func SetupConfigForTest(confFilePath string) (*config.Warnings, error) { } cfg.AddConfigPath(path.DefaultConfPath) // load the configuration - warnings, err := config.LoadDatadogCustom(cfg, origin, optional.NewNoneOption[secrets.Component](), nil) + warnings, err := pkgconfigsetup.LoadDatadogCustom(cfg, origin, optional.NewNoneOption[secrets.Component](), nil) if err != nil { // special-case permission-denied with a clearer error message if errors.Is(err, fs.ErrPermission) { diff --git a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example index 6f3e29eed0bfd..89299ee1c00f3 100644 --- a/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example +++ b/cmd/agent/dist/conf.d/network_path.d/conf.yaml.example @@ -21,6 +21,7 @@ instances: ## @param port - integer - optional ## Port of the target endpoint to monitor via Network Path. + ## For UDP, we do not recommend setting the port since it can make probes less reliable. ## If port is not set, a random port will be used. 
# # port: diff --git a/cmd/agent/subcommands/diagnose/command.go b/cmd/agent/subcommands/diagnose/command.go index 9710f8298329f..46f9ccea0e730 100644 --- a/cmd/agent/subcommands/diagnose/command.go +++ b/cmd/agent/subcommands/diagnose/command.go @@ -30,7 +30,7 @@ import ( workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -331,7 +331,7 @@ func printPayload(name payloadName, _ log.Component, config config.Component) er } c := util.GetClient(false) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } diff --git a/cmd/agent/subcommands/dogstatsd/command.go b/cmd/agent/subcommands/dogstatsd/command.go index 80a1a955d6a1f..f81a6519624c2 100644 --- a/cmd/agent/subcommands/dogstatsd/command.go +++ b/cmd/agent/subcommands/dogstatsd/command.go @@ -25,7 +25,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -81,7 +81,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { func triggerDump(config cconfig.Component) (string, error) { c := util.GetClient(false) - addr, err := pkgconfig.GetIPCAddress() + addr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } diff --git 
a/cmd/agent/subcommands/dogstatsdcapture/command.go b/cmd/agent/subcommands/dogstatsdcapture/command.go index 25e0ba2f75201..d0f6afeaa20a6 100644 --- a/cmd/agent/subcommands/dogstatsdcapture/command.go +++ b/cmd/agent/subcommands/dogstatsdcapture/command.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -102,7 +102,7 @@ func dogstatsdCapture(_ log.Component, config config.Component, cliParams *cliPa conn, err := grpc.DialContext( //nolint:staticcheck // TODO (ASC) fix grpc.DialContext is deprecated ctx, - fmt.Sprintf(":%v", pkgconfig.Datadog().GetInt("cmd_port")), + fmt.Sprintf(":%v", pkgconfigsetup.Datadog().GetInt("cmd_port")), grpc.WithTransportCredentials(creds), ) if err != nil { diff --git a/cmd/agent/subcommands/dogstatsdreplay/command.go b/cmd/agent/subcommands/dogstatsdreplay/command.go index 3186fe144f8e8..de85ffb6517f7 100644 --- a/cmd/agent/subcommands/dogstatsdreplay/command.go +++ b/cmd/agent/subcommands/dogstatsdreplay/command.go @@ -26,7 +26,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/impl" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -113,7 +113,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar apiconn, err := grpc.DialContext( //nolint:staticcheck // TODO (ASC) fix grpc.DialContext is deprecated ctx, - 
fmt.Sprintf(":%v", pkgconfig.Datadog().GetInt("cmd_port")), + fmt.Sprintf(":%v", pkgconfigsetup.Datadog().GetInt("cmd_port")), grpc.WithTransportCredentials(creds), ) if err != nil { @@ -133,7 +133,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar return err } - s := pkgconfig.Datadog().GetString("dogstatsd_socket") + s := pkgconfigsetup.Datadog().GetString("dogstatsd_socket") if s == "" { return fmt.Errorf("Dogstatsd UNIX socket disabled") } @@ -150,7 +150,7 @@ func dogstatsdReplay(_ log.Component, config config.Component, cliParams *cliPar defer syscall.Close(sk) err = syscall.SetsockoptInt(sk, syscall.SOL_SOCKET, syscall.SO_SNDBUF, - pkgconfig.Datadog().GetInt("dogstatsd_buffer_size")) + pkgconfigsetup.Datadog().GetInt("dogstatsd_buffer_size")) if err != nil { return err } diff --git a/cmd/agent/subcommands/dogstatsdstats/command.go b/cmd/agent/subcommands/dogstatsdstats/command.go index c8c12ce29f9da..aba4c10809db6 100644 --- a/cmd/agent/subcommands/dogstatsdstats/command.go +++ b/cmd/agent/subcommands/dogstatsdstats/command.go @@ -21,7 +21,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/input" @@ -71,11 +71,11 @@ func requestDogstatsdStats(_ log.Component, config config.Component, cliParams * var e error var s string c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - urlstr := fmt.Sprintf("https://%v:%v/agent/dogstatsd-stats", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + 
urlstr := fmt.Sprintf("https://%v:%v/agent/dogstatsd-stats", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Set session token e = util.SetAuthToken(config) diff --git a/cmd/agent/subcommands/flare/command.go b/cmd/agent/subcommands/flare/command.go index d990b2aabe11c..c39613e753945 100644 --- a/cmd/agent/subcommands/flare/command.go +++ b/cmd/agent/subcommands/flare/command.go @@ -49,8 +49,8 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/resources/resourcesimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -168,7 +168,7 @@ func readProfileData(seconds int) (flare.ProfileData, error) { type pprofGetter func(path string) ([]byte, error) tcpGet := func(portConfig string) pprofGetter { - pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfig.Datadog().GetInt(portConfig)) + pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfigsetup.Datadog().GetInt(portConfig)) return func(path string) ([]byte, error) { return util.DoGet(c, pprofURL+path, util.LeaveConnectionOpen) } @@ -224,15 +224,15 @@ func readProfileData(seconds int) (flare.ProfileData, error) { "security-agent": serviceProfileCollector(tcpGet("security_agent.expvar_port"), seconds), } - if pkgconfig.Datadog().GetBool("process_config.enabled") || - pkgconfig.Datadog().GetBool("process_config.container_collection.enabled") || - pkgconfig.Datadog().GetBool("process_config.process_collection.enabled") { + if pkgconfigsetup.Datadog().GetBool("process_config.enabled") || + pkgconfigsetup.Datadog().GetBool("process_config.container_collection.enabled") || 
+ pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled") { agentCollectors["process"] = serviceProfileCollector(tcpGet("process_config.expvar_port"), seconds) } - if pkgconfig.Datadog().GetBool("apm_config.enabled") { - traceCpusec := pkgconfig.Datadog().GetInt("apm_config.receiver_timeout") + if pkgconfigsetup.Datadog().GetBool("apm_config.enabled") { + traceCpusec := pkgconfigsetup.Datadog().GetInt("apm_config.receiver_timeout") if traceCpusec > seconds { // do not exceed requested duration traceCpusec = seconds @@ -244,8 +244,8 @@ func readProfileData(seconds int) (flare.ProfileData, error) { agentCollectors["trace"] = serviceProfileCollector(tcpGet("apm_config.debug.port"), traceCpusec) } - if pkgconfig.SystemProbe().GetBool("system_probe_config.enabled") { - probeUtil, probeUtilErr := net.GetRemoteSystemProbeUtil(pkgconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + if pkgconfigsetup.SystemProbe().GetBool("system_probe_config.enabled") { + probeUtil, probeUtilErr := net.GetRemoteSystemProbeUtil(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if !errors.Is(probeUtilErr, net.ErrNotImplemented) { sysProbeGet := func() pprofGetter { @@ -386,16 +386,16 @@ func makeFlare(flareComp flare.Component, func requestArchive(flareComp flare.Component, pdata flare.ProfileData) (string, error) { fmt.Fprintln(color.Output, color.BlueString("Asking the agent to build the flare archive.")) c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { fmt.Fprintln(color.Output, color.RedString(fmt.Sprintf("Error getting IPC address for the agent: %s", err))) return createArchive(flareComp, pdata, err) } - urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + urlstr := 
fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Set session token - if err = util.SetAuthToken(pkgconfig.Datadog()); err != nil { + if err = util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { fmt.Fprintln(color.Output, color.RedString(fmt.Sprintf("Error: %s", err))) return createArchive(flareComp, pdata, err) } diff --git a/cmd/agent/subcommands/integrations/command.go b/cmd/agent/subcommands/integrations/command.go index 867f85bfa945b..c47de16dca4da 100644 --- a/cmd/agent/subcommands/integrations/command.go +++ b/cmd/agent/subcommands/integrations/command.go @@ -27,7 +27,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/executable" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -556,7 +556,7 @@ func downloadWheel(cliParams *cliParams, integration, version, rootLayoutType st downloaderCmd.Env = environ // Proxy support - proxies := pkgconfig.Datadog().GetProxies() + proxies := pkgconfigsetup.Datadog().GetProxies() if proxies != nil { downloaderCmd.Env = append(downloaderCmd.Env, fmt.Sprintf("HTTP_PROXY=%s", proxies.HTTP), @@ -798,7 +798,7 @@ func getVersionFromReqLine(integration string, lines string) (*semver.Version, b } func moveConfigurationFilesOf(cliParams *cliParams, integration string) error { - confFolder := pkgconfig.Datadog().GetString("confd_path") + confFolder := pkgconfigsetup.Datadog().GetString("confd_path") check := getIntegrationName(integration) confFileDest := filepath.Join(confFolder, fmt.Sprintf("%s.d", check)) if err := os.MkdirAll(confFileDest, os.ModeDir|0755); err != nil { diff --git a/cmd/agent/subcommands/remoteconfig/command.go b/cmd/agent/subcommands/remoteconfig/command.go index 
b01422eddc686..1e5e8b5b45062 100644 --- a/cmd/agent/subcommands/remoteconfig/command.go +++ b/cmd/agent/subcommands/remoteconfig/command.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -57,7 +57,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { } func state(_ *cliParams, config config.Component) error { - if !pkgconfig.IsRemoteConfigEnabled(config) { + if !pkgconfigsetup.IsRemoteConfigEnabled(config) { return errors.New("remote configuration is not enabled") } fmt.Println("Fetching the configuration and director repos state..") @@ -75,12 +75,12 @@ func state(_ *cliParams, config config.Component) error { } ctx = metadata.NewOutgoingContext(ctx, md) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, pkgconfig.GetIPCPort()) + cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } @@ -92,7 +92,7 @@ func state(_ *cliParams, config config.Component) error { } var stateHA *pbgo.GetStateConfigResponse - if pkgconfig.Datadog().GetBool("multi_region_failover.enabled") { + if pkgconfigsetup.Datadog().GetBool("multi_region_failover.enabled") { stateHA, err = cli.GetConfigStateHA(ctx, in) if err != nil { return fmt.Errorf("couldn't get the HA repositories state: %w", err) diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 6ff4f17804df5..76105a2e192ff 100644 --- 
a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -132,9 +132,9 @@ import ( profileStatus "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/status" "github.com/DataDog/datadog-agent/pkg/collector/python" "github.com/DataDog/datadog-agent/pkg/commonchecks" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/data" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/jmxfetch" "github.com/DataDog/datadog-agent/pkg/serializer" clusteragentStatus "github.com/DataDog/datadog-agent/pkg/status/clusteragent" @@ -408,7 +408,7 @@ func getSharedFxOption() fx.Option { lc.Append(fx.Hook{ OnStart: func(_ context.Context) error { // setup the AutoConfig instance - common.LoadComponents(secretResolver, wmeta, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmeta, ac, pkgconfigsetup.Datadog().GetString("confd_path")) return nil }, }) @@ -423,8 +423,7 @@ func getSharedFxOption() fx.Option { params.EnableNoAggregationPipeline = config.GetBool("dogstatsd_no_aggregation_pipeline") return params }), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDefaultParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDefaultParams()), eventplatformimpl.Module(eventplatformimpl.NewDefaultParams()), eventplatformreceiverimpl.Module(), @@ -511,17 +510,17 @@ func startAgent( log.Infof("Starting Datadog Agent v%v", version.AgentVersion) } - if err := util.SetupCoreDump(pkgconfig.Datadog()); err != nil { + if err := util.SetupCoreDump(pkgconfigsetup.Datadog()); err != nil { log.Warnf("Can't setup core dumps: %v, core dumps might not be available after a crash", err) } - if v := pkgconfig.Datadog().GetBool("internal_profiling.capture_all_allocations"); v { + if v := 
pkgconfigsetup.Datadog().GetBool("internal_profiling.capture_all_allocations"); v { runtime.MemProfileRate = 1 log.Infof("MemProfileRate set to 1, capturing every single memory allocation!") } // Setup Internal Profiling - common.SetupInternalProfiling(settings, pkgconfig.Datadog(), "") + common.SetupInternalProfiling(settings, pkgconfigsetup.Datadog(), "") // Setup expvar server telemetryHandler := telemetry.Handler() @@ -537,14 +536,14 @@ func startAgent( log.Infof("Hostname is: %s", hostnameDetected) // start remote configuration management - if pkgconfig.IsRemoteConfigEnabled(pkgconfig.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { // Subscribe to `AGENT_TASK` product rcclient.SubscribeAgentTask() // Subscribe to `APM_TRACING` product rcclient.SubscribeApmTracing() - if pkgconfig.Datadog().GetBool("remote_configuration.agent_integrations.enabled") { + if pkgconfigsetup.Datadog().GetBool("remote_configuration.agent_integrations.enabled") { // Spin up the config provider to schedule integrations through remote-config rcProvider := providers.NewRemoteConfigProvider() rcclient.Subscribe(data.ProductAgentIntegrations, rcProvider.IntegrationScheduleCallback) @@ -555,7 +554,7 @@ func startAgent( // start clc runner server // only start when the cluster agent is enabled and a cluster check runner host is enabled - if pkgconfig.Datadog().GetBool("cluster_agent.enabled") && pkgconfig.Datadog().GetBool("clc_runner_enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") && pkgconfigsetup.Datadog().GetBool("clc_runner_enabled") { if err = clcrunnerapi.StartCLCRunnerServer(map[string]http.Handler{ "/telemetry": telemetryHandler, }, ac); err != nil { @@ -564,7 +563,7 @@ func startAgent( } // Create the Leader election engine without initializing it - if pkgconfig.Datadog().GetBool("leader_election") { + if pkgconfigsetup.Datadog().GetBool("leader_election") { leaderelection.CreateGlobalLeaderEngine(ctx) } diff --git 
a/cmd/agent/subcommands/run/dependent_services_nix.go b/cmd/agent/subcommands/run/dependent_services_nix.go index 862bf36ccede9..ee2abf597945f 100644 --- a/cmd/agent/subcommands/run/dependent_services_nix.go +++ b/cmd/agent/subcommands/run/dependent_services_nix.go @@ -6,12 +6,12 @@ package run -import "github.com/DataDog/datadog-agent/pkg/config" +import "github.com/DataDog/datadog-agent/pkg/config/model" // Servicedef defines a service type Servicedef struct { name string - configKeys map[string]config.Config + configKeys map[string]model.Config } var subservices []Servicedef diff --git a/cmd/agent/subcommands/run/dependent_services_windows.go b/cmd/agent/subcommands/run/dependent_services_windows.go index ee607bbc0a741..c1b95c2fa4f44 100644 --- a/cmd/agent/subcommands/run/dependent_services_windows.go +++ b/cmd/agent/subcommands/run/dependent_services_windows.go @@ -13,7 +13,8 @@ import ( "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -22,7 +23,7 @@ type serviceInitFunc func() (err error) // Servicedef defines a service type Servicedef struct { name string - configKeys map[string]config.Config + configKeys map[string]model.Config serviceName string serviceInit serviceInitFunc @@ -31,40 +32,40 @@ type Servicedef struct { var subservices = []Servicedef{ { name: "apm", - configKeys: map[string]config.Config{ - "apm_config.enabled": config.Datadog(), + configKeys: map[string]model.Config{ + "apm_config.enabled": pkgconfigsetup.Datadog(), }, serviceName: "datadog-trace-agent", serviceInit: apmInit, }, { name: "process", - configKeys: map[string]config.Config{ - "process_config.enabled": config.Datadog(), - "process_config.process_collection.enabled": config.Datadog(), - "process_config.container_collection.enabled": 
config.Datadog(), - "process_config.process_discovery.enabled": config.Datadog(), - "network_config.enabled": config.SystemProbe(), - "system_probe_config.enabled": config.SystemProbe(), + configKeys: map[string]model.Config{ + "process_config.enabled": pkgconfigsetup.Datadog(), + "process_config.process_collection.enabled": pkgconfigsetup.Datadog(), + "process_config.container_collection.enabled": pkgconfigsetup.Datadog(), + "process_config.process_discovery.enabled": pkgconfigsetup.Datadog(), + "network_config.enabled": pkgconfigsetup.SystemProbe(), + "system_probe_config.enabled": pkgconfigsetup.SystemProbe(), }, serviceName: "datadog-process-agent", serviceInit: processInit, }, { name: "sysprobe", - configKeys: map[string]config.Config{ - "network_config.enabled": config.SystemProbe(), - "system_probe_config.enabled": config.SystemProbe(), - "windows_crash_detection.enabled": config.SystemProbe(), - "runtime_security_config.enabled": config.SystemProbe(), + configKeys: map[string]model.Config{ + "network_config.enabled": pkgconfigsetup.SystemProbe(), + "system_probe_config.enabled": pkgconfigsetup.SystemProbe(), + "windows_crash_detection.enabled": pkgconfigsetup.SystemProbe(), + "runtime_security_config.enabled": pkgconfigsetup.SystemProbe(), }, serviceName: "datadog-system-probe", serviceInit: sysprobeInit, }, { name: "cws", - configKeys: map[string]config.Config{ - "runtime_security_config.enabled": config.SystemProbe(), + configKeys: map[string]model.Config{ + "runtime_security_config.enabled": pkgconfigsetup.SystemProbe(), }, serviceName: "datadog-security-agent", serviceInit: securityInit, diff --git a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go index 428dce7116f02..193eb6bcdd152 100644 --- a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go +++ b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_listener.go @@ -14,14 +14,14 @@ 
import ( "net" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // getCLCRunnerListener returns a listening connection for the cluster level check runner server // The server must only listen on the cluster check runner pod ip // The cluster check runner Agent won't start if the server host is not configured func getCLCRunnerListener() (net.Listener, error) { - podIP := config.Datadog().GetString("clc_runner_host") + podIP := pkgconfigsetup.Datadog().GetString("clc_runner_host") // This is not a security feature // util.IsForbidden only helps to avoid unnecessarily permissive server config if util.IsForbidden(podIP) { @@ -32,5 +32,5 @@ func getCLCRunnerListener() (net.Listener, error) { // IPv6 addresses must be formatted [ip]:port podIP = fmt.Sprintf("[%s]", podIP) } - return net.Listen("tcp", fmt.Sprintf("%v:%v", podIP, config.Datadog().GetInt("clc_runner_port"))) + return net.Listen("tcp", fmt.Sprintf("%v:%v", podIP, pkgconfigsetup.Datadog().GetInt("clc_runner_port"))) } diff --git a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go index 542cc88d42e5a..9b763d3a8b1df 100644 --- a/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go +++ b/cmd/agent/subcommands/run/internal/clcrunnerapi/clc_runner_server.go @@ -26,7 +26,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) var clcListener net.Listener @@ -56,12 +57,12 @@ func StartCLCRunnerServer(extraHandlers map[string]http.Handler, ac autodiscover // CLC Runner token // Use the Cluster Agent 
token - err = util.InitDCAAuthToken(config.Datadog()) + err = util.InitDCAAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } - hosts := []string{"127.0.0.1", "localhost", config.Datadog().GetString("clc_runner_host")} + hosts := []string{"127.0.0.1", "localhost", pkgconfigsetup.Datadog().GetString("clc_runner_host")} _, rootCertPEM, rootKey, err := security.GenerateRootCert(hosts, 2048) if err != nil { return fmt.Errorf("unable to start TLS server: %v", err) @@ -84,14 +85,14 @@ func StartCLCRunnerServer(extraHandlers map[string]http.Handler, ac autodiscover } // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(4, seelog.WarnLvl) + logWriter, _ := pkglogsetup.NewLogWriter(4, seelog.WarnLvl) srv := &http.Server{ Handler: r, ErrorLog: stdLog.New(logWriter, "Error from the clc runner http API server: ", 0), // log errors to seelog, TLSConfig: &tlsConfig, - WriteTimeout: config.Datadog().GetDuration("clc_runner_server_write_timeout") * time.Second, - ReadHeaderTimeout: config.Datadog().GetDuration("clc_runner_server_readheader_timeout") * time.Second, + WriteTimeout: pkgconfigsetup.Datadog().GetDuration("clc_runner_server_write_timeout") * time.Second, + ReadHeaderTimeout: pkgconfigsetup.Datadog().GetDuration("clc_runner_server_readheader_timeout") * time.Second, } tlsListener := tls.NewListener(clcListener, &tlsConfig) diff --git a/cmd/agent/subcommands/snmp/command.go b/cmd/agent/subcommands/snmp/command.go index b19402aec82f0..7cbabcee754a4 100644 --- a/cmd/agent/subcommands/snmp/command.go +++ b/cmd/agent/subcommands/snmp/command.go @@ -11,6 +11,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "net" "os" "strconv" @@ -184,6 +185,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { err := fxutil.OneShot(scanDevice, fx.Supply(connParams, globalParams, cmd), fx.Provide(func() 
argsType { return args }), + compressionimpl.Module(), fx.Supply(core.BundleParams{ ConfigParams: config.NewAgentParams(globalParams.ConfFilePath, config.WithExtraConfFiles(globalParams.ExtraConfFilePath), config.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath)), SecretParams: secrets.NewEnabledParams(), @@ -193,9 +195,8 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { forwarder.Bundle(defaultforwarder.NewParams()), eventplatformimpl.Module(eventplatformimpl.NewDefaultParams()), eventplatformreceiverimpl.Module(), - orchestratorimpl.Module(), + orchestratorimpl.Module(orchestratorimpl.NewDefaultParams()), fx.Provide( - orchestratorimpl.NewDefaultParams, demultiplexerimpl.NewDefaultParams, ), ) diff --git a/cmd/agent/subcommands/stop/command.go b/cmd/agent/subcommands/stop/command.go index f10ccbf51eadc..868b9baf5a212 100644 --- a/cmd/agent/subcommands/stop/command.go +++ b/cmd/agent/subcommands/stop/command.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -59,7 +59,7 @@ func stop(config config.Component, _ *cliParams, _ log.Component) error { if e != nil { return e } - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } diff --git a/cmd/agent/subcommands/streamep/command.go b/cmd/agent/subcommands/streamep/command.go index c6dbb1dc57a62..9c98b90c4e8cd 100644 --- a/cmd/agent/subcommands/streamep/command.go +++ b/cmd/agent/subcommands/streamep/command.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" 
"github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -56,7 +56,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { //nolint:revive // TODO(CINT) Fix revive linter func streamEventPlatform(_ log.Component, config config.Component, cliParams *cliParams) error { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -78,7 +78,7 @@ func streamRequest(url string, body []byte, onChunk func([]byte)) error { c := util.GetClient(false) // Set session token - e = util.SetAuthToken(pkgconfig.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return e } diff --git a/cmd/agent/subcommands/streamlogs/command.go b/cmd/agent/subcommands/streamlogs/command.go index 57e66416e0f01..2088563f5e9da 100644 --- a/cmd/agent/subcommands/streamlogs/command.go +++ b/cmd/agent/subcommands/streamlogs/command.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -84,7 +84,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { //nolint:revive // TODO(AML) Fix revive linter func streamLogs(_ log.Component, config config.Component, cliParams *CliParams) error { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -139,7 +139,7 @@ func 
streamRequest(url string, body []byte, duration time.Duration, onChunk func c.Timeout = duration } // Set session token - e = util.SetAuthToken(pkgconfig.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return e } diff --git a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go index b78a1c9223f7f..1c55a0f65dddb 100644 --- a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go +++ b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go @@ -55,7 +55,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" @@ -86,8 +86,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers())), compressionimpl.Module(), demultiplexerimpl.Module(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDisabledParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), eventplatformreceiverimpl.Module(), fx.Supply(demultiplexerimpl.NewDefaultParams()), @@ -144,7 +143,7 @@ func run( mainCtx, mainCtxCancel := context.WithCancel(context.Background()) defer mainCtxCancel() // Calling cancel twice is safe - if !pkgconfig.Datadog().IsSet("api_key") { + if !pkgconfigsetup.Datadog().IsSet("api_key") { pkglog.Critical("no API key configured, exiting") return nil } @@ -174,7 +173,7 @@ func run( return err } - common.LoadComponents(secretResolver, 
wmeta, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmeta, ac, pkgconfigsetup.Datadog().GetString("confd_path")) // Set up check collector ac.AddScheduler("check", pkgcollector.InitCheckScheduler(optional.NewOption(collector), demultiplexer, logReceiver), true) @@ -217,19 +216,19 @@ func run( } func initializeCCCache(ctx context.Context) error { - pollInterval := time.Second * time.Duration(pkgconfig.Datadog().GetInt("cloud_foundry_cc.poll_interval")) + pollInterval := time.Second * time.Duration(pkgconfigsetup.Datadog().GetInt("cloud_foundry_cc.poll_interval")) _, err := cloudfoundry.ConfigureGlobalCCCache( ctx, - pkgconfig.Datadog().GetString("cloud_foundry_cc.url"), - pkgconfig.Datadog().GetString("cloud_foundry_cc.client_id"), - pkgconfig.Datadog().GetString("cloud_foundry_cc.client_secret"), - pkgconfig.Datadog().GetBool("cloud_foundry_cc.skip_ssl_validation"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_cc.url"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_cc.client_id"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_cc.client_secret"), + pkgconfigsetup.Datadog().GetBool("cloud_foundry_cc.skip_ssl_validation"), pollInterval, - pkgconfig.Datadog().GetInt("cloud_foundry_cc.apps_batch_size"), - pkgconfig.Datadog().GetBool("cluster_agent.refresh_on_cache_miss"), - pkgconfig.Datadog().GetBool("cluster_agent.serve_nozzle_data"), - pkgconfig.Datadog().GetBool("cluster_agent.sidecars_tags"), - pkgconfig.Datadog().GetBool("cluster_agent.isolation_segments_tags"), + pkgconfigsetup.Datadog().GetInt("cloud_foundry_cc.apps_batch_size"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.refresh_on_cache_miss"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.serve_nozzle_data"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.sidecars_tags"), + pkgconfigsetup.Datadog().GetBool("cluster_agent.isolation_segments_tags"), nil, ) if err != nil { @@ -239,11 +238,11 @@ func initializeCCCache(ctx 
context.Context) error { } func initializeBBSCache(ctx context.Context) error { - pollInterval := time.Second * time.Duration(pkgconfig.Datadog().GetInt("cloud_foundry_bbs.poll_interval")) + pollInterval := time.Second * time.Duration(pkgconfigsetup.Datadog().GetInt("cloud_foundry_bbs.poll_interval")) // NOTE: we can't use GetPollInterval in ConfigureGlobalBBSCache, as that causes import cycle - includeListString := pkgconfig.Datadog().GetStringSlice("cloud_foundry_bbs.env_include") - excludeListString := pkgconfig.Datadog().GetStringSlice("cloud_foundry_bbs.env_exclude") + includeListString := pkgconfigsetup.Datadog().GetStringSlice("cloud_foundry_bbs.env_include") + excludeListString := pkgconfigsetup.Datadog().GetStringSlice("cloud_foundry_bbs.env_exclude") includeList := make([]*regexp.Regexp, len(includeListString)) excludeList := make([]*regexp.Regexp, len(excludeListString)) @@ -266,10 +265,10 @@ func initializeBBSCache(ctx context.Context) error { bc, err := cloudfoundry.ConfigureGlobalBBSCache( ctx, - pkgconfig.Datadog().GetString("cloud_foundry_bbs.url"), - pkgconfig.Datadog().GetString("cloud_foundry_bbs.ca_file"), - pkgconfig.Datadog().GetString("cloud_foundry_bbs.cert_file"), - pkgconfig.Datadog().GetString("cloud_foundry_bbs.key_file"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.url"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.ca_file"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.cert_file"), + pkgconfigsetup.Datadog().GetString("cloud_foundry_bbs.key_file"), pollInterval, includeList, excludeList, diff --git a/cmd/cluster-agent/admission/server.go b/cmd/cluster-agent/admission/server.go index f98cc0fef91fa..bff5012e1c4c1 100644 --- a/cmd/cluster-agent/admission/server.go +++ b/cmd/cluster-agent/admission/server.go @@ -20,12 +20,14 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" + "github.com/cihub/seelog" + "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/cihub/seelog" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" admiv1 "k8s.io/api/admission/v1" admiv1beta1 "k8s.io/api/admission/v1beta1" @@ -101,19 +103,19 @@ func (s *Server) Register(uri string, webhookName string, f WebhookFunc, dc dyna // Run starts the kubernetes admission webhook server. func (s *Server) Run(mainCtx context.Context, client kubernetes.Interface) error { var tlsMinVersion uint16 = tls.VersionTLS13 - if config.Datadog().GetBool("cluster_agent.allow_legacy_tls") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.allow_legacy_tls") { tlsMinVersion = tls.VersionTLS10 } - logWriter, _ := config.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) + logWriter, _ := pkglogsetup.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) server := &http.Server{ - Addr: fmt.Sprintf(":%d", config.Datadog().GetInt("admission_controller.port")), + Addr: fmt.Sprintf(":%d", pkgconfigsetup.Datadog().GetInt("admission_controller.port")), Handler: s.mux, ErrorLog: stdLog.New(logWriter, "Error from the admission controller http API server: ", 0), TLSConfig: &tls.Config{ GetCertificate: func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { secretNs := common.GetResourcesNamespace() - secretName := config.Datadog().GetString("admission_controller.certificate.secret_name") + secretName := pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name") cert, err := certificate.GetCertificateFromSecret(secretNs, secretName, client) if err != nil { log.Errorf("Couldn't fetch certificate: %v", err) diff --git a/cmd/cluster-agent/api/agent/agent.go b/cmd/cluster-agent/api/agent/agent.go index bc82056e2ee45..86a536d77cb20 100644 --- 
a/cmd/cluster-agent/api/agent/agent.go +++ b/cmd/cluster-agent/api/agent/agent.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/comp/core/tagger" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -144,7 +144,7 @@ func makeFlare(w http.ResponseWriter, r *http.Request, statusComponent status.Co } } - logFile := config.Datadog().GetString("log_file") + logFile := pkgconfigsetup.Datadog().GetString("log_file") if logFile == "" { logFile = path.DefaultDCALogFile } diff --git a/cmd/cluster-agent/api/listener.go b/cmd/cluster-agent/api/listener.go index fb9886e0f5b44..0e713778be79f 100644 --- a/cmd/cluster-agent/api/listener.go +++ b/cmd/cluster-agent/api/listener.go @@ -14,10 +14,10 @@ import ( "fmt" "net" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // getListener returns a listening connection func getListener() (net.Listener, error) { - return net.Listen("tcp", fmt.Sprintf("0.0.0.0:%v", config.Datadog().GetInt("cluster_agent.cmd_port"))) + return net.Listen("tcp", fmt.Sprintf("0.0.0.0:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"))) } diff --git a/cmd/cluster-agent/api/server.go b/cmd/cluster-agent/api/server.go index 779e795e53cf6..0b6ba72b63f08 100644 --- a/cmd/cluster-agent/api/server.go +++ b/cmd/cluster-agent/api/server.go @@ -41,9 +41,10 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) var ( @@ -79,10 +80,10 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge return fmt.Errorf("unable to create the api server: %v", err) } // Internal token - util.CreateAndSetAuthToken(pkgconfig.Datadog()) //nolint:errcheck + util.CreateAndSetAuthToken(pkgconfigsetup.Datadog()) //nolint:errcheck // DCA client token - util.InitDCAAuthToken(pkgconfig.Datadog()) //nolint:errcheck + util.InitDCAAuthToken(pkgconfigsetup.Datadog()) //nolint:errcheck // create cert hosts := []string{"127.0.0.1", "localhost"} @@ -107,12 +108,12 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge MinVersion: tls.VersionTLS13, } - if pkgconfig.Datadog().GetBool("cluster_agent.allow_legacy_tls") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.allow_legacy_tls") { tlsConfig.MinVersion = tls.VersionTLS10 } // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := pkgconfig.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) + logWriter, _ := pkglogsetup.NewTLSHandshakeErrorWriter(4, seelog.WarnLvl) authInterceptor := grpcutil.AuthInterceptor(func(token string) (interface{}, error) { if token != util.GetDCAAuthToken() { @@ -132,7 +133,7 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge taggerServer: taggerserver.NewServer(taggerComp), }) - timeout := pkgconfig.Datadog().GetDuration("cluster_agent.server.idle_timeout_seconds") * time.Second + timeout := pkgconfigsetup.Datadog().GetDuration("cluster_agent.server.idle_timeout_seconds") * time.Second srv := grpcutil.NewMuxedGRPCServer( listener.Addr().String(), tlsConfig, diff --git a/cmd/cluster-agent/api/server_test.go b/cmd/cluster-agent/api/server_test.go index 
e40c38ff141da..b83cc5201164c 100644 --- a/cmd/cluster-agent/api/server_test.go +++ b/cmd/cluster-agent/api/server_test.go @@ -15,14 +15,14 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestValidateTokenMiddleware(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("cluster_agent.auth_token", "abc123") - util.InitDCAAuthToken(config.Datadog()) + util.InitDCAAuthToken(pkgconfigsetup.Datadog()) tests := []struct { path, authToken string diff --git a/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go b/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go index 94e60ff1cb3a0..0dd0420cfe889 100644 --- a/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go +++ b/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go @@ -15,7 +15,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -23,7 +23,7 @@ import ( func installCloudFoundryMetadataEndpoints(r *mux.Router) { r.HandleFunc("/tags/cf/apps/{nodeName}", api.WithTelemetryWrapper("getCFAppsMetadataForNode", getCFAppsMetadataForNode)).Methods("GET") - if config.Datadog().GetBool("cluster_agent.serve_nozzle_data") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.serve_nozzle_data") { r.HandleFunc("/cf/apps/{guid}", api.WithTelemetryWrapper("getCFApplication", getCFApplication)).Methods("GET") r.HandleFunc("/cf/apps", api.WithTelemetryWrapper("getCFApplications", getCFApplications)).Methods("GET") r.HandleFunc("/cf/org_quotas", 
api.WithTelemetryWrapper("getCFOrgQuotas", getCFOrgQuotas)).Methods("GET") diff --git a/cmd/cluster-agent/api/v1/clusterchecks.go b/cmd/cluster-agent/api/v1/clusterchecks.go index 441ae2230ab17..47204a5fb7563 100644 --- a/cmd/cluster-agent/api/v1/clusterchecks.go +++ b/cmd/cluster-agent/api/v1/clusterchecks.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" cctypes "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dcautil "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -193,7 +193,7 @@ func validateClientIP(addr string) (string, error) { return "", fmt.Errorf("cannot parse CLC runner address: %s", addr) } - if addr == "" && config.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") { + if addr == "" && pkgconfigsetup.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") { log.Warn("Cluster check dispatching error: cannot get runner IP from http headers. 
advanced_dispatching_enabled requires agent 6.17 or above.") } diff --git a/cmd/cluster-agent/api/v1/install.go b/cmd/cluster-agent/api/v1/install.go index a3388e3c2008d..ef49a8538eac6 100644 --- a/cmd/cluster-agent/api/v1/install.go +++ b/cmd/cluster-agent/api/v1/install.go @@ -12,7 +12,7 @@ import ( "github.com/gorilla/mux" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/clusteragent" @@ -21,7 +21,7 @@ import ( // InstallMetadataEndpoints registers endpoints for metadata func InstallMetadataEndpoints(r *mux.Router, w workloadmeta.Component) { log.Debug("Registering metadata endpoints") - if config.Datadog().GetBool("cloud_foundry") { + if pkgconfigsetup.Datadog().GetBool("cloud_foundry") { installCloudFoundryMetadataEndpoints(r) } else { installKubernetesMetadataEndpoints(r, w) diff --git a/cmd/cluster-agent/api/v1/kubernetes_metadata.go b/cmd/cluster-agent/api/v1/kubernetes_metadata.go index 53a63322caa24..2a3e046e4f522 100644 --- a/cmd/cluster-agent/api/v1/kubernetes_metadata.go +++ b/cmd/cluster-agent/api/v1/kubernetes_metadata.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" as "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" apicommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/controllers" @@ -109,7 +109,7 @@ func getNodeLabels(w http.ResponseWriter, r *http.Request, wmeta workloadmeta.Co } func getNodeAnnotations(w 
http.ResponseWriter, r *http.Request, wmeta workloadmeta.Component) { - getNodeMetadata(w, r, wmeta, func(km *workloadmeta.KubernetesMetadata) map[string]string { return km.Annotations }, "annotations", config.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases")) + getNodeMetadata(w, r, wmeta, func(km *workloadmeta.KubernetesMetadata) map[string]string { return km.Annotations }, "annotations", pkgconfigsetup.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases")) } // getNamespaceMetadataWithTransformerFunc is used when the node agent hits the DCA for some (or all) metadata of a specific namespace diff --git a/cmd/cluster-agent/custommetrics/server.go b/cmd/cluster-agent/custommetrics/server.go index bc7f2f9091d6c..d16ca95bb87c2 100644 --- a/cmd/cluster-agent/custommetrics/server.go +++ b/cmd/cluster-agent/custommetrics/server.go @@ -20,7 +20,7 @@ import ( datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/externalmetrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" as "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -55,7 +55,7 @@ func RunServer(ctx context.Context, apiCl *as.APIClient, datadogCl optional.Opti cmd.FlagSet = pflag.NewFlagSet(cmd.Name, pflag.ExitOnError) var c []string - for k, v := range config.Datadog().GetStringMapString(metricsServerConf) { + for k, v := range pkgconfigsetup.Datadog().GetStringMapString(metricsServerConf) { c = append(c, fmt.Sprintf("--%s=%s", k, v)) } @@ -95,7 +95,7 @@ func (a *DatadogMetricsAdapter) makeProviderOrDie(ctx context.Context, apiCl *as return nil, err } - if 
config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { if dc, ok := datadogCl.Get(); ok { return externalmetrics.NewDatadogMetricProvider(ctx, apiCl, dc) } @@ -122,9 +122,9 @@ func (a *DatadogMetricsAdapter) Config() (*apiserver.Config, error) { if !a.FlagSet.Lookup("secure-port").Changed { // Ensure backward compatibility. 443 by default, but will error out if incorrectly set. // refer to apiserver code in k8s.io/apiserver/pkg/server/option/serving.go - a.SecureServing.BindPort = config.Datadog().GetInt("external_metrics_provider.port") + a.SecureServing.BindPort = pkgconfigsetup.Datadog().GetInt("external_metrics_provider.port") // Default in External Metrics is TLS 1.2 - if !config.Datadog().GetBool("cluster_agent.allow_legacy_tls") { + if !pkgconfigsetup.Datadog().GetBool("cluster_agent.allow_legacy_tls") { a.SecureServing.MinTLSVersion = tlsVersion13Str } } diff --git a/cmd/cluster-agent/subcommands/check/command.go b/cmd/cluster-agent/subcommands/check/command.go index 77cc9135c9b7f..cceb56a853ab4 100644 --- a/cmd/cluster-agent/subcommands/check/command.go +++ b/cmd/cluster-agent/subcommands/check/command.go @@ -11,7 +11,7 @@ package check import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/command" "github.com/DataDog/datadog-agent/pkg/cli/subcommands/check" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pkgcommon "github.com/DataDog/datadog-agent/pkg/util/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" @@ -22,7 +22,7 @@ import ( func Commands(globalParams *command.GlobalParams) []*cobra.Command { ctx, _ := pkgcommon.GetMainCtxCancel() // Create the Leader election engine without initializing it - if pkgconfig.Datadog().GetBool("leader_election") { + if pkgconfigsetup.Datadog().GetBool("leader_election") { 
leaderelection.CreateGlobalLeaderEngine(ctx) } diff --git a/cmd/cluster-agent/subcommands/config/command.go b/cmd/cluster-agent/subcommands/config/command.go index 1d01668fb5f28..24a6f44487990 100644 --- a/cmd/cluster-agent/subcommands/config/command.go +++ b/cmd/cluster-agent/subcommands/config/command.go @@ -14,9 +14,9 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/command" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/cli/subcommands/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/spf13/cobra" ) @@ -40,7 +40,7 @@ func newSettingsClient() (settings.Client, error) { apiConfigURL := fmt.Sprintf( "https://localhost:%v/config", - pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), + pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), ) return settingshttp.NewClient(c, apiConfigURL, "datadog-cluster-agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil diff --git a/cmd/cluster-agent/subcommands/metamap/command.go b/cmd/cluster-agent/subcommands/metamap/command.go index f238dfeaf8a22..a5aa1281b6486 100644 --- a/cmd/cluster-agent/subcommands/metamap/command.go +++ b/cmd/cluster-agent/subcommands/metamap/command.go @@ -20,7 +20,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/render" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -71,13 +71,13 @@ func getMetadataMap(nodeName string) error { c := util.GetClient(false) // FIX: get certificates right then make this 
true var urlstr string if nodeName == "" { - urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod/%s", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), nodeName) + urlstr = fmt.Sprintf("https://localhost:%v/api/v1/tags/pod/%s", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), nodeName) } // Set session token - e = util.SetAuthToken(pkgconfig.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return e } diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index abc7704e601bc..ff65ec8a9531a 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -73,9 +73,9 @@ import ( clusteragentMetricsStatus "github.com/DataDog/datadog-agent/pkg/clusteragent/metricsstatus" orchestratorStatus "github.com/DataDog/datadog-agent/pkg/clusteragent/orchestrator" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" rcclient "github.com/DataDog/datadog-agent/pkg/config/remote/client" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" hostnameStatus "github.com/DataDog/datadog-agent/pkg/status/clusteragent/hostname" endpointsStatus "github.com/DataDog/datadog-agent/pkg/status/endpoints" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -137,8 +137,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers(), defaultforwarder.WithDisableAPIKeyChecking())), compressionimpl.Module(), demultiplexerimpl.Module(), - 
orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDefaultParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDefaultParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), eventplatformreceiverimpl.Module(), fx.Supply(demultiplexerimpl.NewDefaultParams()), @@ -339,7 +338,7 @@ func start(log log.Component, // Initialize and start remote configuration client var rcClient *rcclient.Client rcserv, isSet := rcService.Get() - if pkgconfig.IsRemoteConfigEnabled(config) && isSet { + if pkgconfigsetup.IsRemoteConfigEnabled(config) && isSet { var products []string if config.GetBool("admission_controller.auto_instrumentation.patcher.enabled") { products = append(products, state.ProductAPMTracing) diff --git a/cmd/cluster-agent/subcommands/start/compliance.go b/cmd/cluster-agent/subcommands/start/compliance.go index 157d6217913fe..780e390331cee 100644 --- a/cmd/cluster-agent/subcommands/start/compliance.go +++ b/cmd/cluster-agent/subcommands/start/compliance.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/compliance" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -44,12 +44,12 @@ func runCompliance(ctx context.Context, senderManager sender.SenderManager, wmet } func newLogContext(logsConfig *config.LogsConfigKeys, endpointPrefix string) (*config.Endpoints, *client.DestinationsContext, error) { - endpoints, err := config.BuildHTTPEndpointsWithConfig(coreconfig.Datadog(), logsConfig, endpointPrefix, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) + endpoints, err := 
config.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), logsConfig, endpointPrefix, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) if err != nil { - endpoints, err = config.BuildHTTPEndpoints(coreconfig.Datadog(), intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) + endpoints, err = config.BuildHTTPEndpoints(pkgconfigsetup.Datadog(), intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) if err == nil { - httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, coreconfig.Datadog()) - endpoints, err = config.BuildEndpoints(coreconfig.Datadog(), httpConnectivity, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) + httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfigsetup.Datadog()) + endpoints, err = config.BuildEndpoints(pkgconfigsetup.Datadog(), httpConnectivity, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin) } } @@ -68,7 +68,7 @@ func newLogContext(logsConfig *config.LogsConfigKeys, endpointPrefix string) (*c } func newLogContextCompliance() (*config.Endpoints, *client.DestinationsContext, error) { - logsConfigComplianceKeys := config.NewLogsConfigKeys("compliance_config.endpoints.", coreconfig.Datadog()) + logsConfigComplianceKeys := config.NewLogsConfigKeys("compliance_config.endpoints.", pkgconfigsetup.Datadog()) return newLogContext(logsConfigComplianceKeys, "cspm-intake.") } @@ -79,8 +79,8 @@ func startCompliance(senderManager sender.SenderManager, wmeta workloadmeta.Comp } stopper.Add(ctx) - configDir := coreconfig.Datadog().GetString("compliance_config.dir") - checkInterval := coreconfig.Datadog().GetDuration("compliance_config.check_interval") + configDir := pkgconfigsetup.Datadog().GetString("compliance_config.dir") + checkInterval := pkgconfigsetup.Datadog().GetDuration("compliance_config.check_interval") hname, err := hostname.Get(context.TODO()) if err != nil { diff --git 
a/cmd/cluster-agent/subcommands/status/command.go b/cmd/cluster-agent/subcommands/status/command.go index 7f4e3084cb6f8..c92a08d568461 100644 --- a/cmd/cluster-agent/subcommands/status/command.go +++ b/cmd/cluster-agent/subcommands/status/command.go @@ -25,7 +25,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -80,7 +80,7 @@ func run(log log.Component, config config.Component, cliParams *cliParams) error url := url.URL{ Scheme: "https", - Host: fmt.Sprintf("localhost:%v", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")), + Host: fmt.Sprintf("localhost:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")), Path: "/status", RawQuery: v.Encode(), } diff --git a/cmd/dogstatsd/subcommands/start/command.go b/cmd/dogstatsd/subcommands/start/command.go index 103c2314d10d2..03c16e03fc144 100644 --- a/cmd/dogstatsd/subcommands/start/command.go +++ b/cmd/dogstatsd/subcommands/start/command.go @@ -54,12 +54,13 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/runner" metadatarunnerimpl "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" 
"github.com/DataDog/datadog-agent/pkg/version" ) @@ -77,7 +78,7 @@ type DogstatsdComponents struct { const ( // loggerName is the name of the dogstatsd logger - loggerName pkgconfig.LoggerName = "DSD" + loggerName pkglogsetup.LoggerName = "DSD" ) // MakeCommand returns the start subcommand for the 'dogstatsd' command. @@ -150,8 +151,7 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil compressionimpl.Module(), demultiplexerimpl.Module(), secretsimpl.Module(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewDisabledParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDisabledParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), eventplatformreceiverimpl.Module(), hostnameimpl.Module(), @@ -246,7 +246,7 @@ func RunDogstatsd(_ context.Context, cliParams *CLIParams, config config.Compone }() // Setup logger - syslogURI := pkgconfig.GetSyslogURI() + syslogURI := pkglogsetup.GetSyslogURI(pkgconfigsetup.Datadog()) logFile := config.GetString("log_file") if logFile == "" { logFile = params.DefaultLogFile @@ -257,7 +257,7 @@ func RunDogstatsd(_ context.Context, cliParams *CLIParams, config config.Compone logFile = "" } - err = pkgconfig.SetupLogger( + err = pkglogsetup.SetupLogger( loggerName, config.GetString("log_level"), logFile, @@ -265,6 +265,7 @@ func RunDogstatsd(_ context.Context, cliParams *CLIParams, config config.Compone config.GetBool("syslog_rfc"), config.GetBool("log_to_console"), config.GetBool("log_format_json"), + pkgconfigsetup.Datadog(), ) if err != nil { log.Criticalf("Unable to setup logger: %s", err) diff --git a/cmd/installer/command/command.go b/cmd/installer/command/command.go index a40f3926ed3ed..9e6d9ecdda890 100644 --- a/cmd/installer/command/command.go +++ b/cmd/installer/command/command.go @@ -13,7 +13,7 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" ) // common constants for all the updater subcommands. @@ -51,7 +51,7 @@ type SubcommandFactory func(globalParams *GlobalParams) []*cobra.Command // MakeCommand makes the top-level Cobra command for this app. func MakeCommand(subcommandFactories []SubcommandFactory) *cobra.Command { globalParams := GlobalParams{ - ConfFilePath: config.DefaultUpdaterLogFile, + ConfFilePath: pkgconfigsetup.DefaultUpdaterLogFile, } // AgentCmd is the root command diff --git a/cmd/installer/subcommands/daemon/run.go b/cmd/installer/subcommands/daemon/run.go index d614a70a277eb..b69b6dad8a5fc 100644 --- a/cmd/installer/subcommands/daemon/run.go +++ b/cmd/installer/subcommands/daemon/run.go @@ -7,6 +7,7 @@ package daemon import ( "context" + "github.com/spf13/cobra" "go.uber.org/fx" @@ -23,8 +24,8 @@ import ( "github.com/DataDog/datadog-agent/comp/updater/localapi/localapiimpl" "github.com/DataDog/datadog-agent/comp/updater/telemetry/telemetryimpl" "github.com/DataDog/datadog-agent/comp/updater/updater/updaterimpl" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/service" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func runCommand(global *command.GlobalParams) *cobra.Command { @@ -47,7 +48,7 @@ func getCommonFxOption(global *command.GlobalParams) fx.Option { ConfigParams: config.NewAgentParams(global.ConfFilePath), SecretParams: secrets.NewEnabledParams(), SysprobeConfigParams: sysprobeconfigimpl.NewParams(), - LogParams: log.ForDaemon("INSTALLER", "installer.log_file", pkgconfig.DefaultUpdaterLogFile), + LogParams: log.ForDaemon("INSTALLER", "installer.log_file", pkgconfigsetup.DefaultUpdaterLogFile), }), core.Bundle(), fx.Supply(&rcservice.Params{ diff --git a/cmd/installer/subcommands/installer/command.go b/cmd/installer/subcommands/installer/command.go index 32ba5484edd54..547e3e357538a 100644 --- a/cmd/installer/subcommands/installer/command.go 
+++ b/cmd/installer/subcommands/installer/command.go @@ -51,6 +51,7 @@ func Commands(_ *command.GlobalParams) []*cobra.Command { return []*cobra.Command{ bootstrapCommand(), installCommand(), + setupCommand(), removeCommand(), installExperimentCommand(), removeExperimentCommand(), @@ -109,7 +110,7 @@ func newInstallerCmd(operation string) (_ *installerCmd, err error) { cmd.Stop(err) } }() - i, err := installer.NewInstaller(cmd.env) + i, err := installer.NewInstaller(cmd.env, "opt/datadog-packages/run/rc/cmd") if err != nil { return nil, err } @@ -119,6 +120,14 @@ func newInstallerCmd(operation string) (_ *installerCmd, err error) { }, nil } +func (i *installerCmd) stop(err error) { + i.cmd.Stop(err) + err = i.Installer.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "failed to close Installer: %v\n", err) + } +} + type bootstraperCmd struct { *cmd } @@ -216,6 +225,24 @@ func bootstrapCommand() *cobra.Command { return cmd } +func setupCommand() *cobra.Command { + var timeout time.Duration + cmd := &cobra.Command{ + Use: "setup", + Hidden: true, + GroupID: "installer", + RunE: func(_ *cobra.Command, _ []string) (err error) { + b := newBootstraperCmd("setup") + defer func() { b.Stop(err) }() + ctx, cancel := context.WithTimeout(b.ctx, timeout) + defer cancel() + return bootstraper.InstallDefaultPackages(ctx, b.env) + }, + } + cmd.Flags().DurationVarP(&timeout, "timeout", "T", 3*time.Minute, "timeout to bootstrap with") + return cmd +} + func installCommand() *cobra.Command { var installArgs []string cmd := &cobra.Command{ @@ -228,7 +255,7 @@ func installCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.url", args[0]) return i.Install(i.ctx, args[0], installArgs) }, @@ -248,7 +275,7 @@ func removeCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.package", args[0]) return i.Remove(i.ctx, args[0]) }, @@ 
-267,7 +294,7 @@ func purgeCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.Purge(i.ctx) return nil }, @@ -286,7 +313,7 @@ func installExperimentCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.url", args[0]) return i.InstallExperiment(i.ctx, args[0]) }, @@ -305,7 +332,7 @@ func removeExperimentCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.package", args[0]) return i.RemoveExperiment(i.ctx, args[0]) }, @@ -324,7 +351,7 @@ func promoteExperimentCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) i.span.SetTag("params.package", args[0]) return i.PromoteExperiment(i.ctx, args[0]) }, @@ -343,7 +370,7 @@ func garbageCollectCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) return i.GarbageCollect(i.ctx) }, } @@ -366,7 +393,7 @@ func isInstalledCommand() *cobra.Command { if err != nil { return err } - defer func() { i.Stop(err) }() + defer i.stop(err) installed, err := i.IsInstalled(i.ctx, args[0]) if err != nil { return err diff --git a/cmd/otel-agent/config/agent_config_test.go b/cmd/otel-agent/config/agent_config_test.go index c985c24859da2..fc6b4dd6b21ff 100644 --- a/cmd/otel-agent/config/agent_config_test.go +++ b/cmd/otel-agent/config/agent_config_test.go @@ -7,6 +7,8 @@ package config import ( "context" + "fmt" + "io/fs" "os" "strings" "testing" @@ -174,15 +176,25 @@ func (suite *ConfigTestSuite) TestEnvBadLogLevel() { assert.Error(t, err) } +func (suite *ConfigTestSuite) TestBadDDConfigFile() { + t := suite.T() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/doesnotexists.yaml" + _, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + + assert.ErrorIs(t, err, fs.ErrNotExist) 
+} + func (suite *ConfigTestSuite) TestBadLogLevel() { t := suite.T() fileName := "testdata/config_default.yaml" ddFileName := "testdata/datadog_bad_log_level.yaml" _, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) - // log_level from service config takes precedence -> more verbose - // ddFlleName configures level warn, Telemetry defaults to info - assert.Error(t, err) + expectedError := fmt.Sprintf( + "invalid log level (%v) set in the Datadog Agent configuration", + pkgconfigsetup.Datadog().GetString("log_level")) + assert.ErrorContains(t, err, expectedError) } func (suite *ConfigTestSuite) TestNoDDExporter() { diff --git a/cmd/otel-agent/config/testdata/datadog_bad_log_level.yaml b/cmd/otel-agent/config/testdata/datadog_bad_log_level.yaml new file mode 100644 index 0000000000000..be99720314f33 --- /dev/null +++ b/cmd/otel-agent/config/testdata/datadog_bad_log_level.yaml @@ -0,0 +1,5 @@ +log_level: yabadabadoo + +otelcollector: + enabled: true + extension_url: "https://localhost:8888" diff --git a/cmd/otel-agent/subcommands/run/command.go b/cmd/otel-agent/subcommands/run/command.go index 2373bc4453363..10f85edd4ca67 100644 --- a/cmd/otel-agent/subcommands/run/command.go +++ b/cmd/otel-agent/subcommands/run/command.go @@ -37,7 +37,6 @@ import ( collectorcontribFx "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/fx" collectordef "github.com/DataDog/datadog-agent/comp/otelcol/collector/def" collectorfx "github.com/DataDog/datadog-agent/comp/otelcol/collector/fx" - configstorefx "github.com/DataDog/datadog-agent/comp/otelcol/configstore/fx" converter "github.com/DataDog/datadog-agent/comp/otelcol/converter/def" converterfx "github.com/DataDog/datadog-agent/comp/otelcol/converter/fx" "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" @@ -107,7 +106,6 @@ func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, fx.Provide(func(cp converter.Component) confmap.Converter { return cp }), 
- configstorefx.Module(), fx.Provide(func() (coreconfig.Component, error) { c, err := agentConfig.NewConfigComponent(context.Background(), params.CoreConfPath, params.ConfPaths) if err != nil { diff --git a/cmd/process-agent/command/command.go b/cmd/process-agent/command/command.go index 649a1c21cf614..4cc6f355b30a0 100644 --- a/cmd/process-agent/command/command.go +++ b/cmd/process-agent/command/command.go @@ -18,16 +18,17 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/filesystem" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) //nolint:revive // TODO(PROC) Fix revive linter -const LoggerName config.LoggerName = "PROCESS" +const LoggerName pkglogsetup.LoggerName = "PROCESS" // DaemonLogParams are the log params should be given to the `core.BundleParams` for when the process agent is running as a daemon -var DaemonLogParams = log.ForDaemon(string(LoggerName), "process_config.log_file", config.DefaultProcessAgentLogFile) +var DaemonLogParams = log.ForDaemon(string(LoggerName), "process_config.log_file", pkgconfigsetup.DefaultProcessAgentLogFile) // OneShotLogParams are the log params that are given to commands var OneShotLogParams = log.ForOneShot(string(LoggerName), "info", true) diff --git a/cmd/process-agent/command/main_common.go b/cmd/process-agent/command/main_common.go index d9d9c208b8d59..cd0fc37d6cb33 100644 --- a/cmd/process-agent/command/main_common.go +++ b/cmd/process-agent/command/main_common.go @@ -52,8 +52,8 @@ import ( remoteconfig "github.com/DataDog/datadog-agent/comp/remote-config" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" 
"github.com/DataDog/datadog-agent/pkg/collector/python" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta/collector" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -315,7 +315,7 @@ func initMisc(deps miscDeps) error { // shouldStayAlive determines whether the process agent should stay alive when no checks are running. // This can happen when the checks are running on the core agent but a process agent container is // still brought up. The process-agent is kept alive to prevent crash loops. -func shouldStayAlive(cfg ddconfig.Reader) bool { +func shouldStayAlive(cfg model.Reader) bool { if env.IsKubernetes() && cfg.GetBool("process_config.run_in_core_agent.enabled") { log.Warn("The process-agent is staying alive to prevent crash loops due to the checks running on the core agent. Thus, the process-agent is idle. 
Update your Helm chart or Datadog Operator to the latest version to prevent this (https://docs.datadoghq.com/containers/kubernetes/installation/).") return true diff --git a/cmd/process-agent/subcommands/config/config.go b/cmd/process-agent/subcommands/config/config.go index e92c853ab5799..1acd0ec15e5e2 100644 --- a/cmd/process-agent/subcommands/config/config.go +++ b/cmd/process-agent/subcommands/config/config.go @@ -18,10 +18,11 @@ import ( "github.com/DataDog/datadog-agent/comp/process" "github.com/DataDog/datadog-agent/pkg/api/util" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/fetcher" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -179,9 +180,9 @@ func getConfigValue(deps dependencies, args []string) error { return nil } -func getClient(cfg ddconfig.Reader) (settings.Client, error) { +func getClient(cfg model.Reader) (settings.Client, error) { httpClient := apiutil.GetClient(false) - ipcAddress, err := ddconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) port := cfg.GetInt("process_config.cmd_port") if port <= 0 { diff --git a/cmd/process-agent/subcommands/status/status.go b/cmd/process-agent/subcommands/status/status.go index 99e0c03741f97..2660ee8fb71b4 100644 --- a/cmd/process-agent/subcommands/status/status.go +++ b/cmd/process-agent/subcommands/status/status.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/collector/python" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/util/status" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -134,7 +134,7 @@ func getAndWriteStatus(log log.Component, statusURL string, w io.Writer) { } func getStatusURL() (string, error) { - addressPort, err := ddconfig.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("config error: %s", err.Error()) } diff --git a/cmd/process-agent/subcommands/status/status_test.go b/cmd/process-agent/subcommands/status/status_test.go index dfee6c3d3eb33..e6aabd23aeb17 100644 --- a/cmd/process-agent/subcommands/status/status_test.go +++ b/cmd/process-agent/subcommands/status/status_test.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/cmd/process-agent/command" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/util/status" "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -66,7 +66,7 @@ func TestNotRunning(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("process_config.cmd_port", 8082) - addressPort, err := config.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) require.NoError(t, err) statusURL := fmt.Sprintf("http://%s/agent/status", addressPort) @@ -81,7 +81,7 @@ func TestNotRunning(t *testing.T) { func TestError(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("cmd_host", "8.8.8.8") // Non-local ip address will cause error in `GetIPCAddress` - _, ipcError := config.GetIPCAddress() + _, ipcError := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) 
var errText, expectedErrText strings.Builder url, err := getStatusURL() diff --git a/cmd/process-agent/subcommands/taggerlist/tagger_list.go b/cmd/process-agent/subcommands/taggerlist/tagger_list.go index 07222e4612555..8d0f55ee18c52 100644 --- a/cmd/process-agent/subcommands/taggerlist/tagger_list.go +++ b/cmd/process-agent/subcommands/taggerlist/tagger_list.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/api" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -62,7 +62,7 @@ func taggerList(deps dependencies) error { } func getTaggerURL() (string, error) { - addressPort, err := ddconfig.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("config error: %s", err.Error()) } diff --git a/cmd/process-agent/subcommands/workloadlist/command.go b/cmd/process-agent/subcommands/workloadlist/command.go index c17d82429bb8e..a123e4c4f9fa2 100644 --- a/cmd/process-agent/subcommands/workloadlist/command.go +++ b/cmd/process-agent/subcommands/workloadlist/command.go @@ -20,7 +20,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/util" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -86,7 +86,7 @@ func workloadList(_ log.Component, config config.Component, cliParams *cliParams } func workloadURL(verbose bool) (string, error) { - addressPort, err := ddconfig.GetProcessAPIAddressPort() + addressPort, err := 
pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("config error: %s", err.Error()) } diff --git a/cmd/security-agent/api/agent/agent.go b/cmd/security-agent/api/agent/agent.go index 46f54069f66af..06ad276eae538 100644 --- a/cmd/security-agent/api/agent/agent.go +++ b/cmd/security-agent/api/agent/agent.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/settings" "github.com/DataDog/datadog-agent/comp/core/status" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -140,7 +140,7 @@ func (a *Agent) getHealth(w http.ResponseWriter, _ *http.Request) { func (a *Agent) makeFlare(w http.ResponseWriter, _ *http.Request) { log.Infof("Making a flare") w.Header().Set("Content-Type", "application/json") - logFile := config.Datadog().GetString("security_agent.log_file") + logFile := pkgconfigsetup.Datadog().GetString("security_agent.log_file") filePath, err := flare.CreateSecurityAgentArchive(false, logFile, a.statusComponent) if err != nil || filePath == "" { diff --git a/cmd/security-agent/api/listener.go b/cmd/security-agent/api/listener.go index 822c86afd7b0c..7f5d5658e54d8 100644 --- a/cmd/security-agent/api/listener.go +++ b/cmd/security-agent/api/listener.go @@ -9,14 +9,14 @@ import ( "fmt" "net" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // newListener creates a listening connection func newListener() (net.Listener, error) { - address, err := config.GetIPCAddress() + address, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - return net.Listen("tcp", fmt.Sprintf("%v:%v", address, 
config.Datadog().GetInt("security_agent.cmd_port"))) + return net.Listen("tcp", fmt.Sprintf("%v:%v", address, pkgconfigsetup.Datadog().GetInt("security_agent.cmd_port"))) } diff --git a/cmd/security-agent/api/server.go b/cmd/security-agent/api/server.go index 60bd02ceb05ea..3b44b83e39436 100644 --- a/cmd/security-agent/api/server.go +++ b/cmd/security-agent/api/server.go @@ -29,7 +29,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // Server implements security agent API server @@ -61,7 +62,7 @@ func (s *Server) Start() error { // Validate token for every request r.Use(validateToken) - err := util.CreateAndSetAuthToken(config.Datadog()) + err := util.CreateAndSetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -89,13 +90,13 @@ func (s *Server) Start() error { } // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(4, seelog.ErrorLvl) + logWriter, _ := pkglogsetup.NewLogWriter(4, seelog.ErrorLvl) srv := &http.Server{ Handler: r, ErrorLog: stdLog.New(logWriter, "Error from the agent http API server: ", 0), // log errors to seelog, TLSConfig: &tlsConfig, - WriteTimeout: config.Datadog().GetDuration("server_timeout") * time.Second, + WriteTimeout: pkgconfigsetup.Datadog().GetDuration("server_timeout") * time.Second, } tlsListener := tls.NewListener(s.listener, &tlsConfig) diff --git a/cmd/security-agent/subcommands/config/config.go b/cmd/security-agent/subcommands/config/config.go index 4ea160937b704..f001e4cf66e3d 100644 --- a/cmd/security-agent/subcommands/config/config.go +++ b/cmd/security-agent/subcommands/config/config.go @@ -20,10 +20,10 @@ import 
( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/fetcher" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -147,13 +147,13 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { return []*cobra.Command{cmd} } func getSettingsClient(_ *cobra.Command, _ []string) (settings.Client, error) { - err := util.SetAuthToken(pkgconfig.Datadog()) + err := util.SetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return nil, err } c := util.GetClient(false) - apiConfigURL := fmt.Sprintf("https://localhost:%v/agent/config", pkgconfig.Datadog().GetInt("security_agent.cmd_port")) + apiConfigURL := fmt.Sprintf("https://localhost:%v/agent/config", pkgconfigsetup.Datadog().GetInt("security_agent.cmd_port")) return settingshttp.NewClient(c, apiConfigURL, "security-agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil } diff --git a/cmd/security-agent/subcommands/runtime/activity_dump.go b/cmd/security-agent/subcommands/runtime/activity_dump.go index 46465a0986dd2..934090326979c 100644 --- a/cmd/security-agent/subcommands/runtime/activity_dump.go +++ b/cmd/security-agent/subcommands/runtime/activity_dump.go @@ -14,6 +14,7 @@ import ( "github.com/spf13/cobra" "go.uber.org/fx" + "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/cmd/security-agent/command" "github.com/DataDog/datadog-agent/comp/core" @@ -23,6 +24,7 @@ import ( secagent "github.com/DataDog/datadog-agent/pkg/security/agent" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/proto/api" + 
"github.com/DataDog/datadog-agent/pkg/security/secl/rules" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" "github.com/DataDog/datadog-agent/pkg/security/security_profile/dump" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -57,6 +59,7 @@ func activityDumpCommands(globalParams *command.GlobalParams) []*cobra.Command { activityDumpCmd.AddCommand(listCommands(globalParams)...) activityDumpCmd.AddCommand(stopCommands(globalParams)...) activityDumpCmd.AddCommand(diffCommands(globalParams)...) + activityDumpCmd.AddCommand(activityDumpToWorkloadPolicyCommands(globalParams)...) return []*cobra.Command{activityDumpCmd} } @@ -615,3 +618,160 @@ func stopActivityDump(_ log.Component, _ config.Component, _ secrets.Component, fmt.Println("done!") return nil } + +type activityDumpToWorkloadPolicyCliParams struct { + *command.GlobalParams + + input string + output string + kill bool + allowlist bool + lineage bool + service string + imageName string + imageTag string + fim bool +} + +func activityDumpToWorkloadPolicyCommands(globalParams *command.GlobalParams) []*cobra.Command { + cliParams := &activityDumpToWorkloadPolicyCliParams{ + GlobalParams: globalParams, + } + + ActivityDumpWorkloadPolicyCmd := &cobra.Command{ + Use: "workload-policy", + Hidden: true, + Short: "convert an activity dump to a workload policy", + RunE: func(_ *cobra.Command, _ []string) error { + return fxutil.OneShot(activityDumpToWorkloadPolicy, + fx.Supply(cliParams), + fx.Supply(core.BundleParams{ + ConfigParams: config.NewSecurityAgentParams(globalParams.ConfigFilePaths), + SecretParams: secrets.NewEnabledParams(), + LogParams: log.ForOneShot(command.LoggerName, "info", true)}), + core.Bundle(), + ) + }, + } + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.input, + "input", + "", + "path to the activity-dump file", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.output, + "output", + "", + "path to the 
generated workload policy file", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.kill, + "kill", + false, + "generate kill action with the workload policy", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.fim, + "fim", + false, + "generate fim rules with the workload policy", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.allowlist, + "allowlist", + false, + "generate allow list rules", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().BoolVar( + &cliParams.lineage, + "lineage", + false, + "generate lineage rules", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.service, + "service", + "", + "apply on specified service", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.imageTag, + "image-tag", + "", + "apply on specified image tag", + ) + + ActivityDumpWorkloadPolicyCmd.Flags().StringVar( + &cliParams.imageName, + "image-name", + "", + "apply on specified image name", + ) + + return []*cobra.Command{ActivityDumpWorkloadPolicyCmd} +} + +func activityDumpToWorkloadPolicy(_ log.Component, _ config.Component, _ secrets.Component, args *activityDumpToWorkloadPolicyCliParams) error { + + opts := dump.SECLRuleOpts{ + EnableKill: args.kill, + AllowList: args.allowlist, + Lineage: args.lineage, + Service: args.service, + ImageName: args.imageName, + ImageTag: args.imageTag, + FIM: args.fim, + } + + ads, err := dump.LoadActivityDumpsFromFiles(args.input) + if err != nil { + return err + } + + generatedRules := dump.GenerateRules(ads, opts) + generatedRules = utils.BuildPatterns(generatedRules) + + policyDef := rules.PolicyDef{ + Rules: generatedRules, + } + + // Verify policy syntax + var policyName string + if len(args.imageName) > 0 { + policyName = fmt.Sprintf("%s_policy", args.imageName) + } else { + policyName = "workload_policy" + } + policy, err := rules.LoadPolicyFromDefinition(policyName, "workload", &policyDef, nil, nil) + + if err != nil { + return fmt.Errorf("error 
in generated ruleset's syntax: '%s'", err) + } + + b, err := yaml.Marshal(policy) + if err != nil { + return err + } + + output := os.Stdout + if args.output != "" && args.output != "-" { + output, err = os.Create(args.output) + if err != nil { + return err + } + defer output.Close() + } + + fmt.Fprint(output, string(b)) + + return nil +} diff --git a/cmd/security-agent/subcommands/runtime/activity_dump_test.go b/cmd/security-agent/subcommands/runtime/activity_dump_test.go index ed1bca1cdf6f1..6257ed713de2d 100644 --- a/cmd/security-agent/subcommands/runtime/activity_dump_test.go +++ b/cmd/security-agent/subcommands/runtime/activity_dump_test.go @@ -53,3 +53,11 @@ func TestDumpActivityDumpCommand(t *testing.T) { generateActivityDump, func() {}) } + +func TestActivityDumpToWorkloadPolicyCommand(t *testing.T) { + fxutil.TestOneShotSubcommand(t, + Commands(&command.GlobalParams{}), + []string{"runtime", "activity-dump", "workload-policy", "--input", "file"}, + activityDumpToWorkloadPolicy, + func() {}) +} diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go index 7725d89c81e44..ec5196e7aab12 100644 --- a/cmd/security-agent/subcommands/runtime/command.go +++ b/cmd/security-agent/subcommands/runtime/command.go @@ -33,7 +33,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" "github.com/DataDog/datadog-agent/pkg/security/common" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -105,7 +105,7 @@ func evalCommands(globalParams *command.GlobalParams) []*cobra.Command { }, } - evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", 
pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") evalCmd.Flags().StringVar(&evalArgs.ruleID, "rule-id", "", "Rule ID to evaluate") _ = evalCmd.MarkFlagRequired("rule-id") evalCmd.Flags().StringVar(&evalArgs.eventFile, "event-file", "", "File of the event data") @@ -138,7 +138,7 @@ func commonCheckPoliciesCommands(globalParams *command.GlobalParams) []*cobra.Co }, } - commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.evaluateAllPolicySources, "loaded-policies", false, "Evaluate loaded policies") if runtime.GOOS == "linux" { commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.windowsModel, "windows-model", false, "Evaluate policies using the Windows model") diff --git a/cmd/security-agent/subcommands/runtime/deprecated_commands.go b/cmd/security-agent/subcommands/runtime/deprecated_commands.go index 888ac84bd4d9c..bd695ef7d0449 100644 --- a/cmd/security-agent/subcommands/runtime/deprecated_commands.go +++ b/cmd/security-agent/subcommands/runtime/deprecated_commands.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -42,7 +42,7 @@ func checkPoliciesCommands(globalParams *command.GlobalParams) []*cobra.Command Deprecated: "please use `security-agent runtime policy check` instead", } - 
checkPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + checkPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") return []*cobra.Command{checkPoliciesCmd} } diff --git a/cmd/security-agent/subcommands/runtime/security_profile.go b/cmd/security-agent/subcommands/runtime/security_profile.go index 27d65dad73854..e02a85c37cca7 100644 --- a/cmd/security-agent/subcommands/runtime/security_profile.go +++ b/cmd/security-agent/subcommands/runtime/security_profile.go @@ -41,14 +41,14 @@ func securityProfileCommands(globalParams *command.GlobalParams) []*cobra.Comman Short: "security profile commands", } - securityProfileCmd.AddCommand(securityProfileShowCommands(globalParams)...) + securityProfileCmd.AddCommand(showSecurityProfileCommands(globalParams)...) securityProfileCmd.AddCommand(listSecurityProfileCommands(globalParams)...) securityProfileCmd.AddCommand(saveSecurityProfileCommands(globalParams)...) 
return []*cobra.Command{securityProfileCmd} } -func securityProfileShowCommands(globalParams *command.GlobalParams) []*cobra.Command { +func showSecurityProfileCommands(globalParams *command.GlobalParams) []*cobra.Command { cliParams := &securityProfileCliParams{ GlobalParams: globalParams, } diff --git a/cmd/security-agent/subcommands/start/command.go b/cmd/security-agent/subcommands/start/command.go index c4cf04e4a0dc0..b8631b19a4d8b 100644 --- a/cmd/security-agent/subcommands/start/command.go +++ b/cmd/security-agent/subcommands/start/command.go @@ -55,10 +55,9 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl" "github.com/DataDog/datadog-agent/pkg/collector/python" pkgCompliance "github.com/DataDog/datadog-agent/pkg/compliance" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" - "github.com/DataDog/datadog-agent/pkg/config/setup" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/agent" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -97,7 +96,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { ConfigParams: config.NewSecurityAgentParams(params.ConfigFilePaths, config.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath)), SysprobeConfigParams: sysprobeconfigimpl.NewParams(sysprobeconfigimpl.WithSysProbeConfFilePath(globalParams.SysProbeConfFilePath), sysprobeconfigimpl.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath)), SecretParams: secrets.NewEnabledParams(), - LogParams: log.ForDaemon(command.LoggerName, "security_agent.log_file", setup.DefaultSecurityAgentLogFile), + LogParams: log.ForDaemon(command.LoggerName, "security_agent.log_file", pkgconfigsetup.DefaultSecurityAgentLogFile), }), core.Bundle(), dogstatsd.ClientBundle, @@ -123,7 +122,7 @@ 
func Commands(globalParams *command.GlobalParams) []*cobra.Command { return startstop.NewSerialStopper() }), fx.Provide(func(config config.Component, statsd statsd.Component) (ddgostatsd.ClientInterface, error) { - return statsd.CreateForHostPort(setup.GetBindHost(config), config.GetInt("dogstatsd_port")) + return statsd.CreateForHostPort(pkgconfigsetup.GetBindHost(config), config.GetInt("dogstatsd_port")) }), fx.Provide(func(stopper startstop.Stopper, log log.Component, config config.Component, statsdClient ddgostatsd.ClientInterface, wmeta workloadmeta.Component) (status.InformationProvider, *agent.RuntimeSecurityAgent, error) { hostnameDetected, err := utils.GetHostnameWithContextAndFallback(context.TODO()) @@ -284,7 +283,7 @@ func RunAgent(log log.Component, config config.Component, telemetry telemetry.Co // Setup expvar server port := config.GetString("security_agent.expvar_port") - pkgconfig.Datadog().Set("expvar_port", port, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("expvar_port", port, model.SourceAgentRuntime) if config.GetBool("telemetry.enabled") { http.Handle("/telemetry", telemetry.Handler()) } diff --git a/cmd/serverless-init/cloudservice/cloudrun.go b/cmd/serverless-init/cloudservice/cloudrun.go index d664fb2f7cc93..120f3725367b8 100644 --- a/cmd/serverless-init/cloudservice/cloudrun.go +++ b/cmd/serverless-init/cloudservice/cloudrun.go @@ -6,21 +6,28 @@ package cloudservice import ( + "fmt" + "github.com/DataDog/datadog-agent/pkg/util/log" "os" "github.com/DataDog/datadog-agent/cmd/serverless-init/cloudservice/helper" ) const ( - revisionNameEnvVar = "K_REVISION" //nolint:revive // TODO(SERV) Fix revive linter - ServiceNameEnvVar = "K_SERVICE" + revisionNameEnvVar = "K_REVISION" + ServiceNameEnvVar = "K_SERVICE" // ServiceNameEnvVar is also used in the trace package + configurationNameEnvVar = "K_CONFIGURATION" + functionTypeEnvVar = "FUNCTION_SIGNATURE_TYPE" + functionTargetEnvVar = "FUNCTION_TARGET" // exists as a cloudrunfunction 
env var for all runtimes except Go ) var metadataHelperFunc = helper.GetMetaData // CloudRun has helper functions for getting Google Cloud Run data -type CloudRun struct{} +type CloudRun struct { + cloudRunFunctionMode bool +} // GetTags returns a map of gcp-related tags. func (c *CloudRun) GetTags() map[string]string { @@ -28,6 +35,7 @@ func (c *CloudRun) GetTags() map[string]string { revisionName := os.Getenv(revisionNameEnvVar) serviceName := os.Getenv(ServiceNameEnvVar) + configName := os.Getenv(configurationNameEnvVar) if revisionName != "" { tags["revision_name"] = revisionName @@ -37,21 +45,48 @@ func (c *CloudRun) GetTags() map[string]string { tags["service_name"] = serviceName } + if configName != "" { + tags["configuration_name"] = configName + } + + if c.cloudRunFunctionMode { + tags = getFunctionTags(tags) + } tags["origin"] = c.GetOrigin() tags["_dd.origin"] = c.GetOrigin() return tags } +func getFunctionTags(tags map[string]string) map[string]string { + functionTarget := os.Getenv(functionTargetEnvVar) + functionSignatureType := os.Getenv(functionTypeEnvVar) + + if functionTarget != "" { + tags["function_target"] = functionTarget + } + + if functionSignatureType != "" { + tags["function_signature_type"] = functionSignatureType + } + return tags +} + // GetOrigin returns the `origin` attribute type for the given // cloud service. func (c *CloudRun) GetOrigin() string { + if c.cloudRunFunctionMode { + return "cloudfunctions" + } return "cloudrun" } // GetPrefix returns the prefix that we're prefixing all // metrics with. 
func (c *CloudRun) GetPrefix() string { + if c.cloudRunFunctionMode { + return "gcp.cloudfunctions" + } return "gcp.run" } @@ -64,3 +99,9 @@ func isCloudRunService() bool { _, exists := os.LookupEnv(ServiceNameEnvVar) return exists } + +func isCloudRunFunction() bool { + _, cloudRunFunctionMode := os.LookupEnv(functionTargetEnvVar) + log.Debug(fmt.Sprintf("cloud function mode SET TO: %t", cloudRunFunctionMode)) + return cloudRunFunctionMode +} diff --git a/cmd/serverless-init/cloudservice/cloudrun_test.go b/cmd/serverless-init/cloudservice/cloudrun_test.go index 4832b30f48478..41babaa632a1c 100644 --- a/cmd/serverless-init/cloudservice/cloudrun_test.go +++ b/cmd/serverless-init/cloudservice/cloudrun_test.go @@ -78,3 +78,45 @@ func TestGetCloudRunTagsWithEnvironmentVariables(t *testing.T) { "_dd.origin": "cloudrun", }, tags) } + +func TestGetCloudRunFunctionTagsWithEnvironmentVariables(t *testing.T) { + service := &CloudRun{cloudRunFunctionMode: true} + + metadataHelperFunc = func(*helper.GCPConfig) *helper.GCPMetadata { + return &helper.GCPMetadata{ + ContainerID: &helper.Info{ + TagName: "container_id", + Value: "test_container", + }, + Region: &helper.Info{ + TagName: "region", + Value: "test_region", + }, + ProjectID: &helper.Info{ + TagName: "project_id", + Value: "test_project", + }, + } + } + + t.Setenv("K_SERVICE", "test_service") + t.Setenv("K_REVISION", "test_revision") + t.Setenv("K_CONFIGURATION", "test_config") + t.Setenv("FUNCTION_SIGNATURE_TYPE", "test_signature") + t.Setenv("FUNCTION_TARGET", "test_target") + + tags := service.GetTags() + + assert.Equal(t, map[string]string{ + "container_id": "test_container", + "region": "test_region", + "origin": "cloudfunctions", + "project_id": "test_project", + "service_name": "test_service", + "revision_name": "test_revision", + "configuration_name": "test_config", + "_dd.origin": "cloudfunctions", + "function_target": "test_target", + "function_signature_type": "test_signature", + }, tags) +} diff --git 
a/cmd/serverless-init/cloudservice/service.go b/cmd/serverless-init/cloudservice/service.go index 0449d28d51533..54c9a3576a35a 100644 --- a/cmd/serverless-init/cloudservice/service.go +++ b/cmd/serverless-init/cloudservice/service.go @@ -51,6 +51,9 @@ func (l *LocalService) Init() error { //nolint:revive // TODO(SERV) Fix revive linter func GetCloudServiceType() CloudService { if isCloudRunService() { + if isCloudRunFunction() { + return &CloudRun{cloudRunFunctionMode: true} + } return &CloudRun{} } diff --git a/cmd/serverless-init/main.go b/cmd/serverless-init/main.go index 82493f7f9b5a6..0db7244b7774b 100644 --- a/cmd/serverless-init/main.go +++ b/cmd/serverless-init/main.go @@ -41,8 +41,8 @@ import ( "github.com/DataDog/datadog-agent/cmd/serverless-init/metric" serverlessInitTag "github.com/DataDog/datadog-agent/cmd/serverless-init/tag" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/serverless/metrics" "github.com/DataDog/datadog-agent/pkg/serverless/otlp" @@ -111,7 +111,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se tracelog.SetLogger(corelogger{}) // load proxy settings - pkgconfig.LoadProxyFromEnv(pkgconfig.Datadog()) + pkgconfigsetup.LoadProxyFromEnv(pkgconfigsetup.Datadog()) cloudService := cloudservice.GetCloudServiceType() @@ -123,7 +123,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se tags := serverlessInitTag.GetBaseTagsMapWithMetadata( serverlessTag.MergeWithOverwrite( - serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(pkgconfig.Datadog(), false)), + serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false)), cloudService.GetTags()), 
modeConf.TagVersionMode) @@ -134,7 +134,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se // The datadog-agent requires Load to be called or it could // panic down the line. - _, err := pkgconfig.LoadWithoutSecret() + _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) if err != nil { log.Debugf("Error loading config: %v\n", err) } @@ -151,7 +151,7 @@ func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se return cloudService, agentLogConfig, traceAgent, metricAgent, logsAgent } func setupTraceAgent(tags map[string]string) trace.ServerlessTraceAgent { - traceAgent := trace.StartServerlessTraceAgent(pkgconfig.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, nil, random.Random.Uint64()) + traceAgent := trace.StartServerlessTraceAgent(pkgconfigsetup.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, nil, random.Random.Uint64()) traceAgent.SetTags(tags) go func() { for range time.Tick(3 * time.Second) { @@ -162,8 +162,8 @@ func setupTraceAgent(tags map[string]string) trace.ServerlessTraceAgent { } func setupMetricAgent(tags map[string]string) *metrics.ServerlessMetricAgent { - pkgconfig.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) - pkgconfig.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) metricAgent := &metrics.ServerlessMetricAgent{ SketchesBucketOffset: time.Second * 0, diff --git a/cmd/serverless-init/tag/tag_test.go b/cmd/serverless-init/tag/tag_test.go index 007604aa88002..7548b5dff151d 100644 --- a/cmd/serverless-init/tag/tag_test.go +++ b/cmd/serverless-init/tag/tag_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" serverlessTag "github.com/DataDog/datadog-agent/pkg/serverless/tags" ) @@ -97,7 +97,7 @@ func TestDdTags(t *testing.T) { overwritingTags := map[string]string{ "originalKey": "overWrittenValue", } - mergedTags := serverlessTag.MergeWithOverwrite(serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(config.Datadog(), false)), overwritingTags) + mergedTags := serverlessTag.MergeWithOverwrite(serverlessTag.ArrayToMap(configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false)), overwritingTags) assert.Equal(t, "overWrittenValue", mergedTags["originalKey"]) assert.Equal(t, "value2", mergedTags["key2"]) assert.Equal(t, "value3", mergedTags["key3"]) diff --git a/cmd/serverless/dependencies_linux_amd64.txt b/cmd/serverless/dependencies_linux_amd64.txt index c31cb5dcebd87..0953b2963a9db 100644 --- a/cmd/serverless/dependencies_linux_amd64.txt +++ b/cmd/serverless/dependencies_linux_amd64.txt @@ -139,12 +139,12 @@ github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types github.com/DataDog/datadog-agent/pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/collector/check/id github.com/DataDog/datadog-agent/pkg/collector/check/stats -github.com/DataDog/datadog-agent/pkg/config github.com/DataDog/datadog-agent/pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model github.com/DataDog/datadog-agent/pkg/config/remote/data github.com/DataDog/datadog-agent/pkg/config/setup github.com/DataDog/datadog-agent/pkg/config/setup/constants +github.com/DataDog/datadog-agent/pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis github.com/DataDog/datadog-agent/pkg/errors @@ -438,6 +438,7 @@ github.com/go-logr/logr github.com/go-logr/logr/funcr github.com/go-logr/stdr github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors 
github.com/godbus/dbus/v5 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/jsonpb @@ -663,6 +664,7 @@ go.opentelemetry.io/contrib/config go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil go.opentelemetry.io/contrib/propagators/b3 @@ -725,7 +727,6 @@ go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.25.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/trace diff --git a/cmd/serverless/dependencies_linux_arm64.txt b/cmd/serverless/dependencies_linux_arm64.txt index afc9f4e4fa881..da674cfca6e69 100644 --- a/cmd/serverless/dependencies_linux_arm64.txt +++ b/cmd/serverless/dependencies_linux_arm64.txt @@ -139,12 +139,12 @@ github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types github.com/DataDog/datadog-agent/pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/collector/check/id github.com/DataDog/datadog-agent/pkg/collector/check/stats -github.com/DataDog/datadog-agent/pkg/config github.com/DataDog/datadog-agent/pkg/config/env github.com/DataDog/datadog-agent/pkg/config/model github.com/DataDog/datadog-agent/pkg/config/remote/data github.com/DataDog/datadog-agent/pkg/config/setup github.com/DataDog/datadog-agent/pkg/config/setup/constants +github.com/DataDog/datadog-agent/pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis 
github.com/DataDog/datadog-agent/pkg/errors @@ -438,6 +438,7 @@ github.com/go-logr/logr github.com/go-logr/logr/funcr github.com/go-logr/stdr github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors github.com/godbus/dbus/v5 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/jsonpb @@ -662,6 +663,7 @@ go.opentelemetry.io/contrib/config go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil go.opentelemetry.io/contrib/propagators/b3 @@ -724,7 +726,6 @@ go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.25.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/trace diff --git a/cmd/serverless/main.go b/cmd/serverless/main.go index 122368a28d675..68865f96aa14e 100644 --- a/cmd/serverless/main.go +++ b/cmd/serverless/main.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" taggernoop "github.com/DataDog/datadog-agent/comp/core/tagger/noopimpl" logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless" @@ -42,6 +42,7 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // AWS Lambda is writing the Lambda function files in /var/task, we want the @@ -49,12 +50,12 @@ import ( var datadogConfigPath = "/var/task/datadog.yaml" const ( - loggerName config.LoggerName = "DD_EXTENSION" - logLevelEnvVar = "DD_LOG_LEVEL" - flushStrategyEnvVar = "DD_SERVERLESS_FLUSH_STRATEGY" - logsLogsTypeSubscribed = "DD_LOGS_CONFIG_LAMBDA_LOGS_TYPE" - extensionRegistrationRoute = "/2020-01-01/extension/register" - extensionRegistrationTimeout = 5 * time.Second + loggerName pkglogsetup.LoggerName = "DD_EXTENSION" + logLevelEnvVar = "DD_LOG_LEVEL" + flushStrategyEnvVar = "DD_SERVERLESS_FLUSH_STRATEGY" + logsLogsTypeSubscribed = "DD_LOGS_CONFIG_LAMBDA_LOGS_TYPE" + extensionRegistrationRoute = "/2020-01-01/extension/register" + extensionRegistrationTimeout = 5 * time.Second // httpServerAddr will be the default addr used to run the HTTP server listening // to calls from the client libraries and to logs from the AWS environment. 
@@ -144,7 +145,7 @@ func runAgent(tagger tagger.Component) { setupProxy(appsecProxyProcessor, ta, serverlessDaemon) - serverlessDaemon.ComputeGlobalTags(configUtils.GetConfiguredTags(config.Datadog(), true)) + serverlessDaemon.ComputeGlobalTags(configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true)) stopCh := startInvocationLoop(serverlessDaemon, serverlessID) @@ -210,7 +211,7 @@ func startMetricAgent(serverlessDaemon *daemon.Daemon, logChannel chan *logConfi } metricAgent.Start(daemon.FlushTimeout, &metrics.MetricConfig{}, &metrics.MetricDogStatsD{}) serverlessDaemon.SetStatsdServer(metricAgent) - serverlessDaemon.SetupLogCollectionHandler(logsAPICollectionRoute, logChannel, config.Datadog().GetBool("serverless.logs_enabled"), config.Datadog().GetBool("enhanced_metrics"), lambdaInitMetricChan) + serverlessDaemon.SetupLogCollectionHandler(logsAPICollectionRoute, logChannel, pkgconfigsetup.Datadog().GetBool("serverless.logs_enabled"), pkgconfigsetup.Datadog().GetBool("enhanced_metrics"), lambdaInitMetricChan) return metricAgent } @@ -256,10 +257,10 @@ func startCommunicationServer(startTime time.Time) *daemon.Daemon { func setupLambdaAgentOverrides() { flavor.SetFlavor(flavor.ServerlessAgent) - config.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("use_v2_api.series", false, model.SourceAgentRuntime) // TODO(duncanista): figure out how this is used and if it's necessary for Serverless - config.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("dogstatsd_socket", "", model.SourceAgentRuntime) // Disable remote configuration for now as it just spams the debug logs // and provides no value. 
@@ -335,7 +336,7 @@ func startOtlpAgent(wg *sync.WaitGroup, metricAgent *metrics.ServerlessMetricAge func startTraceAgent(wg *sync.WaitGroup, lambdaSpanChan chan *pb.Span, coldStartSpanId uint64, serverlessDaemon *daemon.Daemon) { defer wg.Done() - traceAgent := trace.StartServerlessTraceAgent(config.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, lambdaSpanChan, coldStartSpanId) + traceAgent := trace.StartServerlessTraceAgent(pkgconfigsetup.Datadog().GetBool("apm_config.enabled"), &trace.LoadConfig{Path: datadogConfigPath}, lambdaSpanChan, coldStartSpanId) serverlessDaemon.SetTraceAgent(traceAgent) } @@ -372,9 +373,9 @@ func setupApiKey() bool { } func loadConfig() { - config.Datadog().SetConfigFile(datadogConfigPath) + pkgconfigsetup.Datadog().SetConfigFile(datadogConfigPath) // Load datadog.yaml file into the config, so that metricAgent can pick these configurations - if _, err := config.LoadWithoutSecret(); err != nil { + if _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil); err != nil { log.Errorf("Error happened when loading configuration from datadog.yaml for metric agent: %s", err) } } @@ -401,7 +402,7 @@ func setupLogger() { } // init the logger configuring it to not log in a file (the first empty string) - if err := config.SetupLogger( + if err := pkglogsetup.SetupLogger( loggerName, logLevel, "", // logFile -> by setting this to an empty string, we don't write the logs to any file @@ -409,6 +410,7 @@ func setupLogger() { false, // syslog_rfc true, // log_to_console false, // log_format_json + pkgconfigsetup.Datadog(), ); err != nil { log.Errorf("Unable to setup logger: %s", err) } diff --git a/cmd/system-probe/config/adjust.go b/cmd/system-probe/config/adjust.go index f83ee25eb80e2..37c0fb29cfeb9 100644 --- a/cmd/system-probe/config/adjust.go +++ b/cmd/system-probe/config/adjust.go @@ -10,7 +10,6 @@ import ( "fmt" "sync" - "github.com/DataDog/datadog-agent/pkg/config" 
"github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -18,7 +17,7 @@ import ( var adjustMtx sync.Mutex // Adjust makes changes to the raw config based on deprecations and inferences. -func Adjust(cfg config.Config) { +func Adjust(cfg model.Config) { adjustMtx.Lock() defer adjustMtx.Unlock() if cfg.GetBool(spNS("adjusted")) { @@ -59,7 +58,7 @@ func Adjust(cfg config.Config) { // validateString validates the string configuration value at `key` using a custom provided function `valFn`. // If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. -func validateString(cfg config.Config, key string, defaultVal string, valFn func(string) error) { +func validateString(cfg model.Config, key string, defaultVal string, valFn func(string) error) { if cfg.IsSet(key) { if err := valFn(cfg.GetString(key)); err != nil { log.Errorf("error validating `%s`: %s, using default value of `%s`", key, err, defaultVal) @@ -72,7 +71,7 @@ func validateString(cfg config.Config, key string, defaultVal string, valFn func // validateInt validates the int configuration value at `key` using a custom provided function `valFn`. // If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. -func validateInt(cfg config.Config, key string, defaultVal int, valFn func(int) error) { +func validateInt(cfg model.Config, key string, defaultVal int, valFn func(int) error) { if cfg.IsSet(key) { if err := valFn(cfg.GetInt(key)); err != nil { log.Errorf("error validating `%s`: %s, using default value of `%d`", key, err, defaultVal) @@ -85,7 +84,7 @@ func validateInt(cfg config.Config, key string, defaultVal int, valFn func(int) // validateInt64 validates the int64 configuration value at `key` using a custom provided function `valFn`. // If `key` is not set or `valFn` returns an error, the `defaultVal` is used instead. 
-func validateInt64(cfg config.Config, key string, defaultVal int64, valFn func(int64) error) { +func validateInt64(cfg model.Config, key string, defaultVal int64, valFn func(int64) error) { if cfg.IsSet(key) { if err := valFn(cfg.GetInt64(key)); err != nil { log.Errorf("error validating `%s`: %s. using default value of `%d`", key, err, defaultVal) @@ -97,7 +96,7 @@ func validateInt64(cfg config.Config, key string, defaultVal int64, valFn func(i } // applyDefault sets configuration `key` to `defaultVal` only if not previously set. -func applyDefault(cfg config.Config, key string, defaultVal interface{}) { +func applyDefault(cfg model.Config, key string, defaultVal interface{}) { if !cfg.IsSet(key) { cfg.Set(key, defaultVal, model.SourceAgentRuntime) } @@ -105,47 +104,47 @@ func applyDefault(cfg config.Config, key string, defaultVal interface{}) { // deprecateBool logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateBool(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateBool(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetBool(oldkey) }) } // deprecateInt64 logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateInt64(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateInt64(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetInt64(oldkey) }) } // deprecateGeneric logs a deprecation message if `oldkey` is used. 
// It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateGeneric(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateGeneric(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.Get(oldkey) }) } // deprecateInt logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateInt(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateInt(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetInt(oldkey) }) } // deprecateString logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. -func deprecateString(cfg config.Config, oldkey string, newkey string) { - deprecateCustom(cfg, oldkey, newkey, func(cfg config.Config) interface{} { +func deprecateString(cfg model.Config, oldkey string, newkey string) { + deprecateCustom(cfg, oldkey, newkey, func(cfg model.Config) interface{} { return cfg.GetString(oldkey) }) } // deprecateCustom logs a deprecation message if `oldkey` is used. // It sets `newkey` to the value obtained from `getFn`, but only if `oldkey` is set and `newkey` is not set. 
-func deprecateCustom(cfg config.Config, oldkey string, newkey string, getFn func(config.Config) interface{}) { +func deprecateCustom(cfg model.Config, oldkey string, newkey string, getFn func(model.Config) interface{}) { if cfg.IsSet(oldkey) { log.Warn(deprecationMessage(oldkey, newkey)) if !cfg.IsSet(newkey) { @@ -160,7 +159,7 @@ func deprecationMessage(oldkey, newkey string) string { } // limitMaxInt logs a warning and sets `key` to `max` if the value exceeds `max`. -func limitMaxInt(cfg config.Config, key string, max int) { +func limitMaxInt(cfg model.Config, key string, max int) { val := cfg.GetInt(key) if val > max { log.Warnf("configuration key `%s` was set to `%d`, using maximum value `%d` instead", key, val, max) @@ -169,7 +168,7 @@ func limitMaxInt(cfg config.Config, key string, max int) { } // limitMaxInt64 logs a warning and sets `key` to `max` if the value exceeds `max`. -func limitMaxInt64(cfg config.Config, key string, max int64) { +func limitMaxInt64(cfg model.Config, key string, max int64) { val := cfg.GetInt64(key) if val > max { log.Warnf("configuration key `%s` was set to `%d`, using maximum value `%d` instead", key, val, max) diff --git a/cmd/system-probe/config/adjust_npm.go b/cmd/system-probe/config/adjust_npm.go index ab0fc468bf553..e1be10ae08d79 100644 --- a/cmd/system-probe/config/adjust_npm.go +++ b/cmd/system-probe/config/adjust_npm.go @@ -11,7 +11,6 @@ import ( "math" "runtime" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -24,7 +23,7 @@ const ( defaultMaxTrackedConnections = 65536 ) -func adjustNetwork(cfg config.Config) { +func adjustNetwork(cfg model.Config) { ebpflessEnabled := cfg.GetBool(netNS("enable_ebpfless")) limitMaxInt(cfg, spNS("max_conns_per_message"), maxConnsMessageBatchSize) diff --git a/cmd/system-probe/config/adjust_security.go 
b/cmd/system-probe/config/adjust_security.go index 46ac8f70593f8..f5b3d27ddf80c 100644 --- a/cmd/system-probe/config/adjust_security.go +++ b/cmd/system-probe/config/adjust_security.go @@ -9,12 +9,11 @@ import ( "runtime" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" ) -func adjustSecurity(cfg config.Config) { - deprecateCustom(cfg, secNS("activity_dump.cgroup_dump_timeout"), secNS("activity_dump.dump_duration"), func(cfg config.Config) interface{} { +func adjustSecurity(cfg model.Config) { + deprecateCustom(cfg, secNS("activity_dump.cgroup_dump_timeout"), secNS("activity_dump.dump_duration"), func(cfg model.Config) interface{} { // convert old minutes int value to time.Duration return time.Duration(cfg.GetInt(secNS("activity_dump.cgroup_dump_timeout"))) * time.Minute }) @@ -23,7 +22,7 @@ func adjustSecurity(cfg config.Config) { cfg, secNS("runtime_security_config.security_profile.anomaly_detection.auto_suppression.enabled"), secNS("runtime_security_config.security_profile.auto_suppression.enabled"), - func(cfg config.Config) interface{} { + func(cfg model.Config) interface{} { // convert old auto suppression parameter to the new one return cfg.GetBool(secNS("runtime_security_config.security_profile.anomaly_detection.auto_suppression.enabled")) }, diff --git a/cmd/system-probe/config/adjust_usm.go b/cmd/system-probe/config/adjust_usm.go index 05946c7fb72ed..d7e164a9020f1 100644 --- a/cmd/system-probe/config/adjust_usm.go +++ b/cmd/system-probe/config/adjust_usm.go @@ -9,14 +9,14 @@ import ( "fmt" "runtime" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) const ( maxHTTPFrag = 512 // matches hard limit currently imposed in NPM driver ) -func adjustUSM(cfg config.Config) { +func adjustUSM(cfg model.Config) { if cfg.GetBool(smNS("enabled")) { applyDefault(cfg, netNS("enable_http_monitoring"), true) applyDefault(cfg, netNS("enable_https_monitoring"), 
true) diff --git a/cmd/system-probe/config/config.go b/cmd/system-probe/config/config.go index ff21f22169103..e6f0575248e2a 100644 --- a/cmd/system-probe/config/config.go +++ b/cmd/system-probe/config/config.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/comp/core/secrets" - aconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -51,22 +51,22 @@ func New(configPath string, fleetPoliciesDirPath string) (*types.Config, error) } func newSysprobeConfig(configPath string, fleetPoliciesDirPath string) (*types.Config, error) { - aconfig.SystemProbe().SetConfigName("system-probe") + pkgconfigsetup.SystemProbe().SetConfigName("system-probe") // set the paths where a config file is expected if len(configPath) != 0 { // if the configuration file path was supplied on the command line, // add that first, so it's first in line - aconfig.SystemProbe().AddConfigPath(configPath) + pkgconfigsetup.SystemProbe().AddConfigPath(configPath) // If they set a config file directly, let's try to honor that if strings.HasSuffix(configPath, ".yaml") { - aconfig.SystemProbe().SetConfigFile(configPath) + pkgconfigsetup.SystemProbe().SetConfigFile(configPath) } } else { // only add default if a custom configPath was not supplied - aconfig.SystemProbe().AddConfigPath(defaultConfigDir) + pkgconfigsetup.SystemProbe().AddConfigPath(defaultConfigDir) } // load the configuration - err := aconfig.LoadCustom(aconfig.SystemProbe(), aconfig.Datadog().GetEnvVars()) + err := pkgconfigsetup.LoadCustom(pkgconfigsetup.SystemProbe(), pkgconfigsetup.Datadog().GetEnvVars()) if err != nil { if errors.Is(err, fs.ErrPermission) { // special-case permission-denied with a clearer error message @@ -84,10 +84,10 @@ func newSysprobeConfig(configPath string, 
fleetPoliciesDirPath string) (*types.C // Load the remote configuration if fleetPoliciesDirPath == "" { - fleetPoliciesDirPath = aconfig.SystemProbe().GetString("fleet_policies_dir") + fleetPoliciesDirPath = pkgconfigsetup.SystemProbe().GetString("fleet_policies_dir") } if fleetPoliciesDirPath != "" { - err := aconfig.SystemProbe().MergeFleetPolicy(path.Join(fleetPoliciesDirPath, "system-probe.yaml")) + err := pkgconfigsetup.SystemProbe().MergeFleetPolicy(path.Join(fleetPoliciesDirPath, "system-probe.yaml")) if err != nil { return nil, err } @@ -97,7 +97,7 @@ func newSysprobeConfig(configPath string, fleetPoliciesDirPath string) (*types.C } func load() (*types.Config, error) { - cfg := aconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() Adjust(cfg) c := &types.Config{ @@ -114,15 +114,16 @@ func load() (*types.Config, error) { HealthPort: cfg.GetInt(spNS("health_port")), TelemetryEnabled: cfg.GetBool(spNS("telemetry_enabled")), - StatsdHost: aconfig.GetBindHost(), + StatsdHost: pkgconfigsetup.GetBindHost(pkgconfigsetup.Datadog()), StatsdPort: cfg.GetInt("dogstatsd_port"), } npmEnabled := cfg.GetBool(netNS("enabled")) usmEnabled := cfg.GetBool(smNS("enabled")) ccmEnabled := cfg.GetBool(ccmNS("enabled")) + csmEnabled := cfg.GetBool(secNS("enabled")) - if npmEnabled || usmEnabled || ccmEnabled { + if npmEnabled || usmEnabled || ccmEnabled || csmEnabled { c.EnabledModules[NetworkTracerModule] = struct{}{} } if cfg.GetBool(spNS("enable_tcp_queue_length")) { @@ -181,12 +182,12 @@ func load() (*types.Config, error) { // SetupOptionalDatadogConfigWithDir loads the datadog.yaml config file from a given config directory but will not fail on a missing file func SetupOptionalDatadogConfigWithDir(configDir, configFile string) error { - aconfig.Datadog().AddConfigPath(configDir) + pkgconfigsetup.Datadog().AddConfigPath(configDir) if configFile != "" { - aconfig.Datadog().SetConfigFile(configFile) + pkgconfigsetup.Datadog().SetConfigFile(configFile) } // load the 
configuration - _, err := aconfig.LoadDatadogCustom(aconfig.Datadog(), "datadog.yaml", optional.NewNoneOption[secrets.Component](), aconfig.SystemProbe().GetEnvVars()) + _, err := pkgconfigsetup.LoadDatadogCustom(pkgconfigsetup.Datadog(), "datadog.yaml", optional.NewNoneOption[secrets.Component](), pkgconfigsetup.SystemProbe().GetEnvVars()) // If `!failOnMissingFile`, do not issue an error if we cannot find the default config file. var e viper.ConfigFileNotFoundError if err != nil && !errors.As(err, &e) { diff --git a/cmd/system-probe/config/config_linux_test.go b/cmd/system-probe/config/config_linux_test.go index aab12a5c52ed5..0998e2ce69b74 100644 --- a/cmd/system-probe/config/config_linux_test.go +++ b/cmd/system-probe/config/config_linux_test.go @@ -73,17 +73,25 @@ func TestEventStreamEnabledForSupportedKernelsLinux(t *testing.T) { func TestNPMEnabled(t *testing.T) { tests := []struct { - npm, usm, ccm bool - npmEnabled bool + npm, usm, ccm, csm bool + npmEnabled bool }{ - {false, false, false, false}, - {false, false, true, true}, - {false, true, false, true}, - {false, true, true, true}, - {true, false, false, true}, - {true, false, true, true}, - {true, true, false, true}, - {true, true, true, true}, + {false, false, false, false, false}, + {false, false, true, false, true}, + {false, true, false, false, true}, + {false, true, true, false, true}, + {true, false, false, false, true}, + {true, false, true, false, true}, + {true, true, false, false, true}, + {true, true, true, false, true}, + {false, false, false, true, true}, + {false, false, true, true, true}, + {false, true, false, true, true}, + {false, true, true, true, true}, + {true, false, false, true, true}, + {true, false, true, true, true}, + {true, true, false, true, true}, + {true, true, true, true, true}, } mock.NewSystemProbe(t) @@ -92,6 +100,7 @@ func TestNPMEnabled(t *testing.T) { t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(te.npm)) 
t.Setenv("DD_SYSTEM_PROBE_SERVICE_MONITORING_ENABLED", strconv.FormatBool(te.usm)) t.Setenv("DD_CCM_NETWORK_CONFIG_ENABLED", strconv.FormatBool(te.ccm)) + t.Setenv("DD_RUNTIME_SECURITY_CONFIG_ENABLED", strconv.FormatBool(te.csm)) cfg, err := New("", "") require.NoError(t, err) assert.Equal(t, te.npmEnabled, cfg.ModuleIsEnabled(NetworkTracerModule), "unexpected network tracer module enablement: npm: %v, usm: %v, ccm: %v", te.npm, te.usm, te.ccm) diff --git a/cmd/system-probe/modules/eventmonitor.go b/cmd/system-probe/modules/eventmonitor.go index d94555cddd939..d90b39b3ea1f1 100644 --- a/cmd/system-probe/modules/eventmonitor.go +++ b/cmd/system-probe/modules/eventmonitor.go @@ -41,7 +41,7 @@ func createEventMonitorModule(_ *sysconfigtypes.Config, deps module.FactoryDepen secmodule.DisableRuntimeSecurity(secconfig) } - evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, deps.WMeta, deps.Telemetry) + evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, deps.Telemetry) if err != nil { log.Errorf("error initializing event monitoring module: %v", err) return nil, module.ErrNotEnabled diff --git a/cmd/system-probe/modules/eventmonitor_linux.go b/cmd/system-probe/modules/eventmonitor_linux.go index ebea6228c580d..bf63a33276723 100644 --- a/cmd/system-probe/modules/eventmonitor_linux.go +++ b/cmd/system-probe/modules/eventmonitor_linux.go @@ -10,7 +10,7 @@ package modules import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" "github.com/DataDog/datadog-agent/cmd/system-probe/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/eventmonitor" netconfig "github.com/DataDog/datadog-agent/pkg/network/config" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" @@ -24,7 +24,7 @@ var EventMonitor = module.Factory{ ConfigNamespaces: eventMonitorModuleConfigNamespaces, Fn: createEventMonitorModule, 
NeedsEBPF: func() bool { - return !coreconfig.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled") + return !pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled") }, } diff --git a/cmd/system-probe/subcommands/run/command.go b/cmd/system-probe/subcommands/run/command.go index c61f0d1316d99..e5461f8c483db 100644 --- a/cmd/system-probe/subcommands/run/command.go +++ b/cmd/system-probe/subcommands/run/command.go @@ -50,7 +50,6 @@ import ( compstatsd "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/rcclientimpl" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" @@ -384,7 +383,7 @@ func stopSystemProbe() { } // setupInternalProfiling is a common helper to configure runtime settings for internal profiling. 
-func setupInternalProfiling(settings settings.Component, cfg ddconfig.Reader, configPrefix string, log log.Component) { +func setupInternalProfiling(settings settings.Component, cfg model.Reader, configPrefix string, log log.Component) { if v := cfg.GetInt(configPrefix + "internal_profiling.block_profile_rate"); v > 0 { if err := settings.SetRuntimeSetting("runtime_block_profile_rate", v, model.SourceAgentRuntime); err != nil { log.Errorf("Error setting block profile rate: %v", err) diff --git a/cmd/system-probe/subcommands/runtime/command.go b/cmd/system-probe/subcommands/runtime/command.go index 5016d28166e48..2bce948b2764c 100644 --- a/cmd/system-probe/subcommands/runtime/command.go +++ b/cmd/system-probe/subcommands/runtime/command.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" pconfig "github.com/DataDog/datadog-agent/pkg/security/probe/config" @@ -99,7 +99,7 @@ func evalCommands(globalParams *command.GlobalParams) []*cobra.Command { }, } - evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + evalCmd.Flags().StringVar(&evalArgs.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") evalCmd.Flags().StringVar(&evalArgs.ruleID, "rule-id", "", "Rule ID to evaluate") _ = evalCmd.MarkFlagRequired("rule-id") evalCmd.Flags().StringVar(&evalArgs.eventFile, "event-file", "", "File of the event data") @@ -132,7 +132,7 @@ func commonCheckPoliciesCommands(globalParams *command.GlobalParams) []*cobra.Co }, } - commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, 
"policies-dir", pkgconfig.DefaultRuntimePoliciesDir, "Path to policies directory") + commonCheckPoliciesCmd.Flags().StringVar(&cliParams.dir, "policies-dir", pkgconfigsetup.DefaultRuntimePoliciesDir, "Path to policies directory") commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.evaluateAllPolicySources, "loaded-policies", false, "Evaluate loaded policies") if runtime.GOOS == "linux" { commonCheckPoliciesCmd.Flags().BoolVar(&cliParams.windowsModel, "windows-model", false, "Evaluate policies using the Windows model") diff --git a/comp/README.md b/comp/README.md index 357b628c7a3c8..5c6b4887087f5 100644 --- a/comp/README.md +++ b/comp/README.md @@ -375,10 +375,6 @@ Package collector defines the OpenTelemetry Collector component. Package collectorcontrib defines the OTel collector-contrib component -### [comp/otelcol/configstore](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/otelcol/configstore) - -Package configstore defines the otel agent configstore component. - ### [comp/otelcol/converter](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/otelcol/converter) Package converter defines the otel agent converter component. diff --git a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go index 1fe9aa55b1dfa..9b0824b8fad41 100644 --- a/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go +++ b/comp/agent/jmxlogger/jmxloggerimpl/jmxlogger.go @@ -14,9 +14,10 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common/path" "github.com/DataDog/datadog-agent/comp/agent/jmxlogger" "github.com/DataDog/datadog-agent/comp/core/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // Module defines the fx options for this component. 
@@ -41,7 +42,7 @@ func newJMXLogger(deps dependencies) (jmxlogger.Component, error) { return logger{}, nil } if deps.Params.fromCLI { - err := pkgconfig.SetupJMXLogger(deps.Params.logFile, "", false, true, false) + err := pkglogsetup.SetupJMXLogger(deps.Params.logFile, "", false, true, false, pkgconfigsetup.Datadog()) if err != nil { err = fmt.Errorf("Unable to set up JMX logger: %v", err) } @@ -49,7 +50,7 @@ func newJMXLogger(deps dependencies) (jmxlogger.Component, error) { } // Setup logger - syslogURI := pkgconfig.GetSyslogURI() + syslogURI := pkglogsetup.GetSyslogURI(pkgconfigsetup.Datadog()) jmxLogFile := config.GetString("jmx_log_file") if jmxLogFile == "" { jmxLogFile = path.DefaultJmxLogFile @@ -61,12 +62,13 @@ func newJMXLogger(deps dependencies) (jmxlogger.Component, error) { } // Setup JMX logger - jmxLoggerSetupErr := pkgconfig.SetupJMXLogger( + jmxLoggerSetupErr := pkglogsetup.SetupJMXLogger( jmxLogFile, syslogURI, config.GetBool("syslog_rfc"), config.GetBool("log_to_console"), config.GetBool("log_format_json"), + pkgconfigsetup.Datadog(), ) if jmxLoggerSetupErr != nil { diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go b/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go index a4a4f21b576bb..cec4437ab09f8 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -180,8 +180,8 @@ func 
initTestAgentDemultiplexerWithFlushInterval(log log.Component, hostname hos opts.DontStartForwarders = true opts.EnableNoAggregationPipeline = true - sharedForwarderOptions := defaultforwarder.NewOptions(config.Datadog(), log, nil) - sharedForwarder := defaultforwarder.NewDefaultForwarder(config.Datadog(), log, sharedForwarderOptions) + sharedForwarderOptions := defaultforwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil) + sharedForwarder := defaultforwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, sharedForwarderOptions) orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostname)) demux := aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, compressor, "hostname") diff --git a/comp/api/api/apiimpl/internal/config/endpoint.go b/comp/api/api/apiimpl/internal/config/endpoint.go index 40e7751a0b2b2..4b812d1e522b3 100644 --- a/comp/api/api/apiimpl/internal/config/endpoint.go +++ b/comp/api/api/apiimpl/internal/config/endpoint.go @@ -17,7 +17,8 @@ import ( gorilla "github.com/gorilla/mux" api "github.com/DataDog/datadog-agent/comp/api/api/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" util "github.com/DataDog/datadog-agent/pkg/util/common" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -25,7 +26,7 @@ import ( const prefixPathSuffix string = "." 
type configEndpoint struct { - cfg config.Reader + cfg model.Reader authorizedConfigPaths api.AuthorizedSet // runtime metrics about the config endpoint usage @@ -109,18 +110,18 @@ func (c *configEndpoint) getAllConfigValuesHandler(w http.ResponseWriter, r *htt // GetConfigEndpointMuxCore builds and returns the mux for the config endpoint with default values // for the core agent func GetConfigEndpointMuxCore() *gorilla.Router { - return GetConfigEndpointMux(config.Datadog(), api.AuthorizedConfigPathsCore, "core") + return GetConfigEndpointMux(pkgconfigsetup.Datadog(), api.AuthorizedConfigPathsCore, "core") } // GetConfigEndpointMux builds and returns the mux for the config endpoint, with the given config, // authorized paths, and expvar namespace -func GetConfigEndpointMux(cfg config.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) *gorilla.Router { +func GetConfigEndpointMux(cfg model.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) *gorilla.Router { mux, _ := getConfigEndpoint(cfg, authorizedConfigPaths, expvarNamespace) return mux } // getConfigEndpoint builds and returns the mux and the endpoint state. 
-func getConfigEndpoint(cfg config.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) (*gorilla.Router, *configEndpoint) { +func getConfigEndpoint(cfg model.Reader, authorizedConfigPaths api.AuthorizedSet, expvarNamespace string) (*gorilla.Router, *configEndpoint) { configEndpoint := &configEndpoint{ cfg: cfg, authorizedConfigPaths: authorizedConfigPaths, @@ -142,7 +143,7 @@ func getConfigEndpoint(cfg config.Reader, authorizedConfigPaths api.AuthorizedSe return configEndpointMux, configEndpoint } -func encodeInterfaceSliceToStringMap(c config.Reader, key string) ([]map[string]string, error) { +func encodeInterfaceSliceToStringMap(c model.Reader, key string) ([]map[string]string, error) { value := c.Get(key) if value == nil { return nil, nil diff --git a/comp/api/api/apiimpl/listener.go b/comp/api/api/apiimpl/listener.go index be0857a438926..7fdbafcc9a68b 100644 --- a/comp/api/api/apiimpl/listener.go +++ b/comp/api/api/apiimpl/listener.go @@ -10,16 +10,16 @@ import ( "net" "strconv" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // getIPCAddressPort returns a listening connection func getIPCAddressPort() (string, error) { - address, err := config.GetIPCAddress() + address, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } - return fmt.Sprintf("%v:%v", address, config.Datadog().GetInt("cmd_port")), nil + return fmt.Sprintf("%v:%v", address, pkgconfigsetup.Datadog().GetInt("cmd_port")), nil } // getListener returns a listening connection @@ -29,12 +29,12 @@ func getListener(address string) (net.Listener, error) { // returns whether the IPC server is enabled, and if so its host and host:port func getIPCServerAddressPort() (string, string, bool) { - ipcServerPort := config.Datadog().GetInt("agent_ipc.port") + ipcServerPort := pkgconfigsetup.Datadog().GetInt("agent_ipc.port") if ipcServerPort == 0 { return "", "", false } - 
ipcServerHost := config.Datadog().GetString("agent_ipc.host") + ipcServerHost := pkgconfigsetup.Datadog().GetString("agent_ipc.host") ipcServerHostPort := net.JoinHostPort(ipcServerHost, strconv.Itoa(ipcServerPort)) return ipcServerHost, ipcServerHostPort, true diff --git a/comp/api/api/apiimpl/server.go b/comp/api/api/apiimpl/server.go index 1c4d0bffb7d8c..d1a3105ee5d3d 100644 --- a/comp/api/api/apiimpl/server.go +++ b/comp/api/api/apiimpl/server.go @@ -16,13 +16,13 @@ import ( "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/observability" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) func startServer(listener net.Listener, srv *http.Server, name string) { // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(5, seelog.ErrorLvl) + logWriter, _ := pkglogsetup.NewLogWriter(5, seelog.ErrorLvl) srv.ErrorLog = stdLog.New(logWriter, fmt.Sprintf("Error from the Agent HTTP server '%s': ", name), 0) // log errors to seelog diff --git a/comp/api/api/apiimpl/server_cmd.go b/comp/api/api/apiimpl/server_cmd.go index 117b4d18c6be3..56215842c06d0 100644 --- a/comp/api/api/apiimpl/server_cmd.go +++ b/comp/api/api/apiimpl/server_cmd.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/observability" taggerserver "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/server" workloadmetaServer "github.com/DataDog/datadog-agent/comp/core/workloadmeta/server" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" ) @@ -123,7 +123,7 @@ func (server *apiServer) startCMDServer( cmdAddr, tlsConfig, s, - grpcutil.TimeoutHandlerFunc(cmdMuxHandler, 
time.Duration(config.Datadog().GetInt64("server_timeout"))*time.Second), + grpcutil.TimeoutHandlerFunc(cmdMuxHandler, time.Duration(pkgconfigsetup.Datadog().GetInt64("server_timeout"))*time.Second), ) startServer(server.cmdListener, srv, cmdServerName) diff --git a/comp/api/api/apiimpl/server_ipc.go b/comp/api/api/apiimpl/server_ipc.go index bfc50218c99c9..10e17509f75e3 100644 --- a/comp/api/api/apiimpl/server_ipc.go +++ b/comp/api/api/apiimpl/server_ipc.go @@ -12,7 +12,7 @@ import ( configendpoint "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/internal/config" "github.com/DataDog/datadog-agent/comp/api/api/apiimpl/observability" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ipcServerName string = "IPC API Server" @@ -38,7 +38,7 @@ func (server *apiServer) startIPCServer(ipcServerAddr string, tlsConfig *tls.Con ipcServer := &http.Server{ Addr: ipcServerAddr, - Handler: http.TimeoutHandler(ipcMuxHandler, time.Duration(config.Datadog().GetInt64("server_timeout"))*time.Second, "timeout"), + Handler: http.TimeoutHandler(ipcMuxHandler, time.Duration(pkgconfigsetup.Datadog().GetInt64("server_timeout"))*time.Second, "timeout"), TLSConfig: tlsConfig, } diff --git a/comp/autoscaling/datadogclient/impl/client.go b/comp/autoscaling/datadogclient/impl/client.go index 406b793b916ff..ee204f1f921f3 100644 --- a/comp/autoscaling/datadogclient/impl/client.go +++ b/comp/autoscaling/datadogclient/impl/client.go @@ -14,6 +14,7 @@ import ( configComponent "github.com/DataDog/datadog-agent/comp/core/config" logComp "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/pkg/config/structure" "gopkg.in/zorkian/go-datadog-api.v2" ) @@ -120,7 +121,7 @@ func (d *datadogClientWrapper) refreshClient() { func createDatadogClient(cfg configComponent.Component, logger logComp.Component) (datadogclient.Component, error) { if 
cfg.IsSet(metricsRedundantEndpointConfig) { var endpoints []endpoint - if err := cfg.UnmarshalKey(metricsRedundantEndpointConfig, &endpoints); err != nil { + if err := structure.UnmarshalKey(cfg, metricsRedundantEndpointConfig, &endpoints); err != nil { return nil, fmt.Errorf("could not parse %s: %v", metricsRedundantEndpointConfig, err) } diff --git a/comp/collector/collector/collectorimpl/collector.go b/comp/collector/collector/collectorimpl/collector.go index 5b262c93d0223..f90b0c61cec88 100644 --- a/comp/collector/collector/collectorimpl/collector.go +++ b/comp/collector/collector/collectorimpl/collector.go @@ -10,7 +10,6 @@ import ( "context" "encoding/json" "fmt" - "os" "sync" "time" @@ -149,7 +148,7 @@ func (c *collectorImpl) fillFlare(fb flaretypes.FlareBuilder) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - scanRequest := host.NewScanRequest("/", os.DirFS("/")) + scanRequest := host.NewHostScanRequest() scanResult := scanner.PerformScan(ctx, scanRequest, scanner.GetCollector(scanRequest.Collector())) if scanResult.Error != nil { return scanResult.Error diff --git a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go index 700d3247f7e5f..48047e5b06d0f 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go @@ -37,7 +37,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -479,7 +479,7 @@ func (ac *AutoConfig) processNewConfig(config integration.Config) 
integration.Co // AddListeners tries to initialise the listeners listed in the given configs. A first // try is done synchronously. If a listener fails with a ErrWillRetry, the initialization // will be re-triggered later until success or ErrPermaFail. -func (ac *AutoConfig) AddListeners(listenerConfigs []config.Listeners) { +func (ac *AutoConfig) AddListeners(listenerConfigs []pkgconfigsetup.Listeners) { ac.addListenerCandidates(listenerConfigs) remaining := ac.initListenerCandidates() if !remaining { @@ -495,7 +495,7 @@ func (ac *AutoConfig) AddListeners(listenerConfigs []config.Listeners) { } } -func (ac *AutoConfig) addListenerCandidates(listenerConfigs []config.Listeners) { +func (ac *AutoConfig) addListenerCandidates(listenerConfigs []pkgconfigsetup.Listeners) { ac.m.Lock() defer ac.m.Unlock() diff --git a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go index 6b4a269ccea64..61c5abe56402d 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go @@ -35,8 +35,9 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -86,7 +87,7 @@ func (l *MockListener) fakeFactory(listeners.Config, *acTelemetry.Store) (listen return l, nil } -var mockListenenerConfig = config.Listeners{ +var mockListenenerConfig = pkgconfigsetup.Listeners{ Name: "mock", } @@ -173,14 +174,15 @@ type AutoConfigTestSuite 
struct { // SetupSuite saves the original listener registry func (suite *AutoConfigTestSuite) SetupSuite() { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) } @@ -218,7 +220,7 @@ func (suite *AutoConfigTestSuite) TestAddListener() { ml := &MockListener{} listeners.Register("mock", ml.fakeFactory, ac.serviceListenerFactories) - ac.AddListeners([]config.Listeners{mockListenenerConfig}) + ac.AddListeners([]pkgconfigsetup.Listeners{mockListenenerConfig}) ac.m.Lock() require.Len(suite.T(), ac.listeners, 1) @@ -255,7 +257,7 @@ func (suite *AutoConfigTestSuite) TestStop() { ml := &MockListener{} listeners.Register("mock", ml.fakeFactory, ac.serviceListenerFactories) - ac.AddListeners([]config.Listeners{mockListenenerConfig}) + ac.AddListeners([]pkgconfigsetup.Listeners{mockListenenerConfig}) ac.Stop() @@ -300,7 +302,7 @@ func (suite *AutoConfigTestSuite) TestListenerRetry() { } listeners.Register("retry", retryFactory.make, ac.serviceListenerFactories) - configs := []config.Listeners{ + configs := []pkgconfigsetup.Listeners{ {Name: "noerr"}, {Name: "fail"}, {Name: "retry"}, diff --git a/comp/core/autodiscovery/autodiscoveryimpl/secrets.go b/comp/core/autodiscovery/autodiscoveryimpl/secrets.go index 68267d658848f..89b942fab6109 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/secrets.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/secrets.go @@ -10,12 +10,12 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) func decryptConfig(conf integration.Config, secretResolver secrets.Component) (integration.Config, error) { - if config.Datadog().GetBool("secret_backend_skip_checks") { + if 
pkgconfigsetup.Datadog().GetBool("secret_backend_skip_checks") { log.Tracef("'secret_backend_skip_checks' is enabled, not decrypting configuration %q", conf.Name) return conf, nil } diff --git a/comp/core/autodiscovery/common/utils/container_collect_all.go b/comp/core/autodiscovery/common/utils/container_collect_all.go index 9c040979459ca..08105f317841c 100644 --- a/comp/core/autodiscovery/common/utils/container_collect_all.go +++ b/comp/core/autodiscovery/common/utils/container_collect_all.go @@ -7,7 +7,7 @@ package utils import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // AddContainerCollectAllConfigs adds a config template containing an empty @@ -15,7 +15,7 @@ import ( // will be filtered out during config resolution if another config template // also has logs configuration. func AddContainerCollectAllConfigs(configs []integration.Config, adIdentifier string) []integration.Config { - if !config.Datadog().GetBool("logs_config.container_collect_all") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.container_collect_all") { return configs } diff --git a/comp/core/autodiscovery/common/utils/prometheus.go b/comp/core/autodiscovery/common/utils/prometheus.go index ce5fb7091131b..8f8b76fba7d65 100644 --- a/comp/core/autodiscovery/common/utils/prometheus.go +++ b/comp/core/autodiscovery/common/utils/prometheus.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -24,7 +24,7 @@ const ( // buildInstances generates check config instances based on the Prometheus config and the object annotations // The second returned value is true if 
more than one instance is found func buildInstances(pc *types.PrometheusCheck, annotations map[string]string, namespacedName string) ([]integration.Data, bool) { - openmetricsVersion := config.Datadog().GetInt("prometheus_scrape.version") + openmetricsVersion := pkgconfigsetup.Datadog().GetInt("prometheus_scrape.version") instances := []integration.Data{} for k, v := range pc.AD.KubeAnnotations.Incl { diff --git a/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go b/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go index 2844bb561d39b..3d3bb4a349e19 100644 --- a/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go +++ b/comp/core/autodiscovery/common/utils/prometheus_apiserver_test.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -220,7 +220,7 @@ func TestConfigsForService(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) + pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) assert.NoError(t, tt.check.Init(tt.version)) assert.ElementsMatch(t, tt.want, ConfigsForService(tt.check, tt.svc)) }) diff --git a/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go b/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go index 54983bd39e84d..d13b5919b6c69 100644 --- a/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go +++ b/comp/core/autodiscovery/common/utils/prometheus_kubelet_test.go @@ -13,7 +13,7 @@ import ( 
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/stretchr/testify/assert" @@ -515,7 +515,7 @@ func TestConfigsForPod(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) + pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.version", tt.version) tt.check.Init(tt.version) assert.ElementsMatch(t, tt.want, ConfigsForPod(tt.check, tt.pod)) }) diff --git a/comp/core/autodiscovery/component.go b/comp/core/autodiscovery/component.go index ac97e99b05744..28d74363ffedc 100644 --- a/comp/core/autodiscovery/component.go +++ b/comp/core/autodiscovery/component.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/scheduler" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Component is the component type. 
@@ -26,7 +26,7 @@ type Component interface { ForceRanOnceFlag() HasRunOnce() bool GetAllConfigs() []integration.Config - AddListeners(listenerConfigs []config.Listeners) + AddListeners(listenerConfigs []pkgconfigsetup.Listeners) AddScheduler(name string, s scheduler.Scheduler, replayConfigs bool) RemoveScheduler(name string) MapOverLoadedConfigs(f func(map[string]integration.Config)) diff --git a/comp/core/autodiscovery/listeners/common.go b/comp/core/autodiscovery/listeners/common.go index e9f137895889a..07037631658fc 100644 --- a/comp/core/autodiscovery/listeners/common.go +++ b/comp/core/autodiscovery/listeners/common.go @@ -14,7 +14,7 @@ import ( "strconv" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -102,7 +102,7 @@ func (f *containerFilters) IsExcluded(filter containers.FilterType, annotations // getPrometheusIncludeAnnotations returns the Prometheus AD include annotations based on the Prometheus config func getPrometheusIncludeAnnotations() types.PrometheusAnnotations { annotations := types.PrometheusAnnotations{} - tmpConfigString := config.Datadog().GetString("prometheus_scrape.checks") + tmpConfigString := pkgconfigsetup.Datadog().GetString("prometheus_scrape.checks") var checks []*types.PrometheusCheck if len(tmpConfigString) > 0 { @@ -120,7 +120,7 @@ func getPrometheusIncludeAnnotations() types.PrometheusAnnotations { } for _, check := range checks { - if err := check.Init(config.Datadog().GetInt("prometheus_scrape.version")); err != nil { + if err := check.Init(pkgconfigsetup.Datadog().GetInt("prometheus_scrape.version")); err != nil { log.Errorf("Couldn't init check configuration: %v", err) continue } diff --git 
a/comp/core/autodiscovery/listeners/container.go b/comp/core/autodiscovery/listeners/container.go index f7afa5ec80cd5..6864d46e98019 100644 --- a/comp/core/autodiscovery/listeners/container.go +++ b/comp/core/autodiscovery/listeners/container.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" pkgcontainersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" "github.com/DataDog/datadog-agent/pkg/util/docker" @@ -89,7 +89,7 @@ func (l *ContainerListener) createContainerService(entity workloadmeta.Entity) { // stopped. if !container.State.Running && !container.State.FinishedAt.IsZero() { finishedAt := container.State.FinishedAt - excludeAge := time.Duration(config.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour + excludeAge := time.Duration(pkgconfigsetup.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour if time.Since(finishedAt) > excludeAge { log.Debugf("container %q not running for too long, skipping", container.ID) return diff --git a/comp/core/autodiscovery/listeners/kubelet.go b/comp/core/autodiscovery/listeners/kubelet.go index 187401ea1ead0..94094f358eec1 100644 --- a/comp/core/autodiscovery/listeners/kubelet.go +++ b/comp/core/autodiscovery/listeners/kubelet.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" 
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -136,7 +136,7 @@ func (l *KubeletListener) createContainerService( // stopped. if !container.State.Running && !container.State.FinishedAt.IsZero() { finishedAt := container.State.FinishedAt - excludeAge := time.Duration(config.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour + excludeAge := time.Duration(pkgconfigsetup.Datadog().GetInt("container_exclude_stopped_age")) * time.Hour if time.Since(finishedAt) > excludeAge { log.Debugf("container %q not running for too long, skipping", container.ID) return diff --git a/comp/core/autodiscovery/listeners/service.go b/comp/core/autodiscovery/listeners/service.go index f8aada6167d1d..841af2456caa8 100644 --- a/comp/core/autodiscovery/listeners/service.go +++ b/comp/core/autodiscovery/listeners/service.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" taggercommon "github.com/DataDog/datadog-agent/comp/core/tagger/common" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -172,7 +172,7 @@ func (s *service) filterTemplatesOverriddenChecks(configs map[string]integration // added by the config provider (AddContainerCollectAllConfigs) if the service // has any other templates containing logs config. 
func (s *service) filterTemplatesContainerCollectAll(configs map[string]integration.Config) { - if !config.Datadog().GetBool("logs_config.container_collect_all") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.container_collect_all") { return } diff --git a/comp/core/autodiscovery/listeners/snmp_test.go b/comp/core/autodiscovery/listeners/snmp_test.go index 55a12eafb101c..0e63b1a2e1376 100644 --- a/comp/core/autodiscovery/listeners/snmp_test.go +++ b/comp/core/autodiscovery/listeners/snmp_test.go @@ -10,8 +10,8 @@ import ( "strconv" "testing" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/snmp" "github.com/DataDog/datadog-agent/pkg/snmp/snmpintegration" @@ -43,7 +43,7 @@ func TestSNMPListener(t *testing.T) { } } - l, err := NewSNMPListener(&config.Listeners{}, nil) + l, err := NewSNMPListener(&pkgconfigsetup.Listeners{}, nil) assert.Equal(t, nil, err) l.Listen(newSvc, delSvc) @@ -142,7 +142,7 @@ func TestSNMPListenerIgnoredAdresses(t *testing.T) { } } - l, err := NewSNMPListener(&config.Listeners{}, nil) + l, err := NewSNMPListener(&pkgconfigsetup.Listeners{}, nil) assert.Equal(t, nil, err) l.Listen(newSvc, delSvc) diff --git a/comp/core/autodiscovery/listeners/staticconfig.go b/comp/core/autodiscovery/listeners/staticconfig.go index 9ff27685b9a69..0e537c49bf189 100644 --- a/comp/core/autodiscovery/listeners/staticconfig.go +++ b/comp/core/autodiscovery/listeners/staticconfig.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" ) @@ -51,7 +51,7 @@ func (l *StaticConfigListener) createServices() { 
"container_lifecycle", "sbom", } { - if enabled := config.Datadog().GetBool(staticCheck + ".enabled"); enabled { + if enabled := pkgconfigsetup.Datadog().GetBool(staticCheck + ".enabled"); enabled { l.newService <- &StaticConfigService{adIdentifier: "_" + staticCheck} } } diff --git a/comp/core/autodiscovery/providers/cloudfoundry.go b/comp/core/autodiscovery/providers/cloudfoundry.go index 3bfdd72d071da..0c6fb89c96d4a 100644 --- a/comp/core/autodiscovery/providers/cloudfoundry.go +++ b/comp/core/autodiscovery/providers/cloudfoundry.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -34,7 +34,7 @@ type CloudFoundryConfigProvider struct { } // NewCloudFoundryConfigProvider instantiates a new CloudFoundryConfigProvider from given config -func NewCloudFoundryConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewCloudFoundryConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { cfp := CloudFoundryConfigProvider{ lastCollected: time.Now(), } diff --git a/comp/core/autodiscovery/providers/cloudfoundry_nop.go b/comp/core/autodiscovery/providers/cloudfoundry_nop.go index 232dd534bd940..60894f3747d55 100644 --- a/comp/core/autodiscovery/providers/cloudfoundry_nop.go +++ b/comp/core/autodiscovery/providers/cloudfoundry_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // 
NewCloudFoundryConfigProvider instantiates a new CloudFoundryConfigProvider from given config -var NewCloudFoundryConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewCloudFoundryConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/clusterchecks.go b/comp/core/autodiscovery/providers/clusterchecks.go index d3c98c3a959f1..e8b3678abf170 100644 --- a/comp/core/autodiscovery/providers/clusterchecks.go +++ b/comp/core/autodiscovery/providers/clusterchecks.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ddErrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -42,9 +42,9 @@ type ClusterChecksConfigProvider struct { // NewClusterChecksConfigProvider returns a new ConfigProvider collecting // cluster check configurations from the cluster-agent. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
-func NewClusterChecksConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewClusterChecksConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } c := &ClusterChecksConfigProvider{ @@ -52,11 +52,11 @@ func NewClusterChecksConfigProvider(providerConfig *config.ConfigurationProvider degradedDuration: defaultDegradedDeadline, } - c.identifier = config.Datadog().GetString("clc_runner_id") + c.identifier = pkgconfigsetup.Datadog().GetString("clc_runner_id") if c.identifier == "" { c.identifier, _ = hostname.Get(context.TODO()) - if config.Datadog().GetBool("cloud_foundry") { - boshID := config.Datadog().GetString("bosh_id") + if pkgconfigsetup.Datadog().GetBool("cloud_foundry") { + boshID := pkgconfigsetup.Datadog().GetString("bosh_id") if boshID == "" { log.Warn("configuration variable cloud_foundry is set to true, but bosh_id is empty, can't retrieve node name") } else { @@ -178,7 +178,7 @@ func (c *ClusterChecksConfigProvider) Collect(ctx context.Context) ([]integratio // This usually happens when scheduling a lot of checks on a CLC, especially larger checks // with `Configure()` implemented, like KSM Core and Orchestrator checks func (c *ClusterChecksConfigProvider) heartbeatSender(ctx context.Context) { - expirationTimeout := time.Duration(config.Datadog().GetInt("cluster_checks.node_expiration_timeout")) * time.Second + expirationTimeout := time.Duration(pkgconfigsetup.Datadog().GetInt("cluster_checks.node_expiration_timeout")) * time.Second heartTicker := time.NewTicker(time.Second) defer heartTicker.Stop() diff --git a/comp/core/autodiscovery/providers/config_reader.go b/comp/core/autodiscovery/providers/config_reader.go index 8b4621d0f9866..9f7b8cd13f8fd 100644 --- 
a/comp/core/autodiscovery/providers/config_reader.go +++ b/comp/core/autodiscovery/providers/config_reader.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/configresolver" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fargate" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -67,11 +67,11 @@ var doOnce sync.Once // InitConfigFilesReader should be called at agent startup. func InitConfigFilesReader(paths []string) { fileCacheExpiration := 5 * time.Minute - if config.Datadog().GetBool("autoconf_config_files_poll") { + if pkgconfigsetup.Datadog().GetBool("autoconf_config_files_poll") { // Removing some time (1s) to avoid races with polling interval. // If cache expiration is set to be == ticker interval the cache may be used if t1B (cache read time) - t0B (ticker time) < t1A (cache store time) - t0A (ticker time). // Which is likely to be the case because the code path on a cache write is slower. 
- configExpSeconds := config.Datadog().GetInt("autoconf_config_files_poll_interval") - 1 + configExpSeconds := pkgconfigsetup.Datadog().GetInt("autoconf_config_files_poll_interval") - 1 // If we are below < 1, cache is basically disabled, we cannot put 0 as it's considered no expiration by cache.Cache if configExpSeconds < 1 { fileCacheExpiration = time.Nanosecond @@ -243,7 +243,7 @@ func collectEntry(file os.DirEntry, path string, integrationName string, integra absPath := filepath.Join(path, fileName) // skip auto conf files based on the agent configuration - if fileName == "auto_conf.yaml" && containsString(config.Datadog().GetStringSlice("ignore_autoconf"), integrationName) { + if fileName == "auto_conf.yaml" && containsString(pkgconfigsetup.Datadog().GetStringSlice("ignore_autoconf"), integrationName) { log.Infof("Skipping 'auto_conf.yaml' for integration '%s'", integrationName) entry.err = fmt.Errorf("'auto_conf.yaml' for integration '%s' is skipped", integrationName) return entry, integrationErrors @@ -398,7 +398,7 @@ func GetIntegrationConfigFromFile(name, fpath string) (integration.Config, error if fargate.IsFargateInstance() { // In Fargate, since no host tags are applied in the backend, // add the configured DD_TAGS/DD_EXTRA_TAGS to the instance tags. 
- tags := configUtils.GetConfiguredTags(config.Datadog(), false) + tags := configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false) err := dataConf.MergeAdditionalTags(tags) if err != nil { log.Debugf("Could not add agent-level tags to instance of %v: %v", fpath, err) diff --git a/comp/core/autodiscovery/providers/consul.go b/comp/core/autodiscovery/providers/consul.go index 04957065793ff..107694e975c59 100644 --- a/comp/core/autodiscovery/providers/consul.go +++ b/comp/core/autodiscovery/providers/consul.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -53,9 +53,9 @@ type ConsulConfigProvider struct { } // NewConsulConfigProvider creates a client connection to consul and create a new ConsulConfigProvider -func NewConsulConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewConsulConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } consulURL, err := url.Parse(providerConfig.TemplateURL) diff --git a/comp/core/autodiscovery/providers/consul_nop.go b/comp/core/autodiscovery/providers/consul_nop.go index 5a7272f3bc3c1..c38fdff9974cd 100644 --- a/comp/core/autodiscovery/providers/consul_nop.go +++ b/comp/core/autodiscovery/providers/consul_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewConsulConfigProvider creates a client connection to consul and create a new ConsulConfigProvider -var NewConsulConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewConsulConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/container.go b/comp/core/autodiscovery/providers/container.go index 7577fc2f89e03..846d967682b60 100644 --- a/comp/core/autodiscovery/providers/container.go +++ b/comp/core/autodiscovery/providers/container.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -34,7 +34,7 @@ type ContainerConfigProvider struct { // NewContainerConfigProvider returns a new ConfigProvider subscribed to both container // and pods -func NewContainerConfigProvider(_ *config.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { +func NewContainerConfigProvider(_ *pkgconfigsetup.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { return &ContainerConfigProvider{ workloadmetaStore: wmeta, configCache: make(map[string]map[string]integration.Config), diff --git a/comp/core/autodiscovery/providers/endpointschecks.go b/comp/core/autodiscovery/providers/endpointschecks.go index e6a55897c84cb..38a09c6c84006 100644 --- a/comp/core/autodiscovery/providers/endpointschecks.go +++ 
b/comp/core/autodiscovery/providers/endpointschecks.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" @@ -35,7 +35,7 @@ type EndpointsChecksConfigProvider struct { // NewEndpointsChecksConfigProvider returns a new ConfigProvider collecting // endpoints check configurations from the cluster-agent. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -func NewEndpointsChecksConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewEndpointsChecksConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { c := &EndpointsChecksConfigProvider{ degradedDuration: defaultDegradedDeadline, } @@ -110,8 +110,8 @@ func (c *EndpointsChecksConfigProvider) Collect(ctx context.Context) ([]integrat // getNodename retrieves current node name from kubelet (if running on Kubernetes) // or bosh ID of current node (if running on Cloud Foundry). 
func getNodename(ctx context.Context) (string, error) { - if config.Datadog().GetBool("cloud_foundry") { - boshID := config.Datadog().GetString("bosh_id") + if pkgconfigsetup.Datadog().GetBool("cloud_foundry") { + boshID := pkgconfigsetup.Datadog().GetString("bosh_id") if boshID == "" { return "", fmt.Errorf("configuration variable cloud_foundry is set to true, but bosh_id is empty, can't retrieve node name") } diff --git a/comp/core/autodiscovery/providers/endpointschecks_nop.go b/comp/core/autodiscovery/providers/endpointschecks_nop.go index 6dc6386a834f9..3af1df32d23d1 100644 --- a/comp/core/autodiscovery/providers/endpointschecks_nop.go +++ b/comp/core/autodiscovery/providers/endpointschecks_nop.go @@ -9,10 +9,10 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewEndpointsChecksConfigProvider returns a new ConfigProvider collecting // endpoints check configurations from the cluster-agent. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
-var NewEndpointsChecksConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewEndpointsChecksConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/etcd.go b/comp/core/autodiscovery/providers/etcd.go index 58178a0b6065d..bb08f9306f9d4 100644 --- a/comp/core/autodiscovery/providers/etcd.go +++ b/comp/core/autodiscovery/providers/etcd.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -37,9 +37,9 @@ type EtcdConfigProvider struct { } // NewEtcdConfigProvider creates a client connection to etcd and create a new EtcdConfigProvider -func NewEtcdConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewEtcdConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } clientCfg := client.Config{ diff --git a/comp/core/autodiscovery/providers/etcd_nop.go b/comp/core/autodiscovery/providers/etcd_nop.go index b3e1f8675f8be..bee94b3beb8aa 100644 --- a/comp/core/autodiscovery/providers/etcd_nop.go +++ b/comp/core/autodiscovery/providers/etcd_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
) // NewEtcdConfigProvider creates a client connection to etcd and create a new EtcdConfigProvider -var NewEtcdConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewEtcdConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/file_test.go b/comp/core/autodiscovery/providers/file_test.go index eaed5fc0dfa39..021487c1ffc54 100644 --- a/comp/core/autodiscovery/providers/file_test.go +++ b/comp/core/autodiscovery/providers/file_test.go @@ -14,7 +14,7 @@ import ( acTelemetry "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" @@ -22,7 +22,7 @@ import ( func TestCollect(t *testing.T) { ctx := context.Background() - config.Datadog().SetWithoutSource("ignore_autoconf", []string{"ignored"}) + pkgconfigsetup.Datadog().SetWithoutSource("ignore_autoconf", []string{"ignored"}) paths := []string{"tests", "foo/bar"} telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) diff --git a/comp/core/autodiscovery/providers/kube_endpoints.go b/comp/core/autodiscovery/providers/kube_endpoints.go index a5263d4be9fe9..1f8b72aa7b226 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints.go +++ b/comp/core/autodiscovery/providers/kube_endpoints.go @@ -22,7 +22,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - 
"github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -59,7 +60,7 @@ type configInfo struct { // NewKubeEndpointsConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -func NewKubeEndpointsConfigProvider(_ *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { +func NewKubeEndpointsConfigProvider(_ *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { // Using GetAPIClient (no wait) as Client should already be initialized by Cluster Agent main entrypoint before ac, err := apiserver.GetAPIClient() if err != nil { @@ -99,7 +100,7 @@ func NewKubeEndpointsConfigProvider(_ *config.ConfigurationProviders, telemetryS return nil, fmt.Errorf("cannot add event handler to endpoint informer: %s", err) } - if config.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { log.Warnf("The `cluster_checks.support_hybrid_ignore_ad_tags` flag is" + " deprecated and will be removed in a future version. 
Please replace " + "`ad.datadoghq.com/endpoints.ignore_autodiscovery_tags` in your service annotations" + @@ -123,7 +124,7 @@ func (k *kubeEndpointsConfigProvider) Collect(context.Context) ([]integration.Co k.setUpToDate(true) var generatedConfigs []integration.Config - parsedConfigsInfo := k.parseServiceAnnotationsForEndpoints(services, config.Datadog()) + parsedConfigsInfo := k.parseServiceAnnotationsForEndpoints(services, pkgconfigsetup.Datadog()) for _, conf := range parsedConfigsInfo { kep, err := k.endpointsLister.Endpoints(conf.namespace).Get(conf.name) if err != nil { @@ -230,7 +231,7 @@ func (k *kubeEndpointsConfigProvider) setUpToDate(v bool) { k.upToDate = v } -func (k *kubeEndpointsConfigProvider) parseServiceAnnotationsForEndpoints(services []*v1.Service, cfg config.Config) []configInfo { +func (k *kubeEndpointsConfigProvider) parseServiceAnnotationsForEndpoints(services []*v1.Service, cfg model.Config) []configInfo { var configsInfo []configInfo setEndpointIDs := map[string]struct{}{} diff --git a/comp/core/autodiscovery/providers/kube_endpoints_file.go b/comp/core/autodiscovery/providers/kube_endpoints_file.go index 67b19d1d600c8..c9d1ceb568800 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_file.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_file.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -57,7 +57,7 @@ type KubeEndpointsFileConfigProvider struct { } // NewKubeEndpointsFileConfigProvider returns a new KubeEndpointsFileConfigProvider -func 
NewKubeEndpointsFileConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewKubeEndpointsFileConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { templates, _, err := ReadConfigFiles(WithAdvancedADOnly) if err != nil { return nil, err diff --git a/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go b/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go index a2e5b1ef58239..4a5b59aa8c4df 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_file_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeEndpointsFileConfigProvider returns a new KubeEndpointsFileConfigProvider -var NewKubeEndpointsFileConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeEndpointsFileConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_endpoints_nop.go b/comp/core/autodiscovery/providers/kube_endpoints_nop.go index a6004ed3cf91a..b17ed36295ea6 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_nop.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_nop.go @@ -9,9 +9,9 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeEndpointsConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
-var NewKubeEndpointsConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeEndpointsConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_endpoints_test.go b/comp/core/autodiscovery/providers/kube_endpoints_test.go index 3ea35201e26a0..d3891c5a65da0 100644 --- a/comp/core/autodiscovery/providers/kube_endpoints_test.go +++ b/comp/core/autodiscovery/providers/kube_endpoints_test.go @@ -26,7 +26,7 @@ import ( acTelemetry "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -271,7 +271,7 @@ func TestParseKubeServiceAnnotationsForEndpoints(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) if tc.hybrid { cfg.SetWithoutSource("cluster_checks.support_hybrid_ignore_ad_tags", true) } diff --git a/comp/core/autodiscovery/providers/kube_services.go b/comp/core/autodiscovery/providers/kube_services.go index 53f9cf65aec98..6cb325916697a 100644 --- a/comp/core/autodiscovery/providers/kube_services.go +++ b/comp/core/autodiscovery/providers/kube_services.go @@ -21,7 +21,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - 
"github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -41,7 +42,7 @@ type KubeServiceConfigProvider struct { // NewKubeServiceConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -func NewKubeServiceConfigProvider(_ *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { +func NewKubeServiceConfigProvider(_ *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) { // Using GetAPIClient() (no retry) ac, err := apiserver.GetAPIClient() if err != nil { @@ -67,7 +68,7 @@ func NewKubeServiceConfigProvider(_ *config.ConfigurationProviders, telemetrySto return nil, fmt.Errorf("cannot add event handler to services informer: %s", err) } - if config.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.support_hybrid_ignore_ad_tags") { log.Warnf("The `cluster_checks.support_hybrid_ignore_ad_tags` flag is" + " deprecated and will be removed in a future version. Please replace " + "`ad.datadoghq.com/service.ignore_autodiscovery_tags` in your service annotations" + @@ -92,7 +93,7 @@ func (k *KubeServiceConfigProvider) Collect(ctx context.Context) ([]integration. 
} k.upToDate = true - return k.parseServiceAnnotations(services, config.Datadog()) + return k.parseServiceAnnotations(services, pkgconfigsetup.Datadog()) } // IsUpToDate allows to cache configs as long as no changes are detected in the apiserver @@ -162,7 +163,7 @@ func valuesDiffer(first, second map[string]string, prefix string) bool { return matchingInFirst != matchingInSecond } -func (k *KubeServiceConfigProvider) parseServiceAnnotations(services []*v1.Service, ddConf config.Config) ([]integration.Config, error) { +func (k *KubeServiceConfigProvider) parseServiceAnnotations(services []*v1.Service, ddConf model.Config) ([]integration.Config, error) { var configs []integration.Config setServiceIDs := map[string]struct{}{} diff --git a/comp/core/autodiscovery/providers/kube_services_file.go b/comp/core/autodiscovery/providers/kube_services_file.go index 721ed15e5d79a..15bcccde50a28 100644 --- a/comp/core/autodiscovery/providers/kube_services_file.go +++ b/comp/core/autodiscovery/providers/kube_services_file.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" ) @@ -22,7 +22,7 @@ type KubeServiceFileConfigProvider struct { } // NewKubeServiceFileConfigProvider returns a new KubeServiceFileConfigProvider -func NewKubeServiceFileConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewKubeServiceFileConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { return &KubeServiceFileConfigProvider{}, nil } diff --git a/comp/core/autodiscovery/providers/kube_services_file_nop.go 
b/comp/core/autodiscovery/providers/kube_services_file_nop.go index 8724b87313061..a5c5db3a700f2 100644 --- a/comp/core/autodiscovery/providers/kube_services_file_nop.go +++ b/comp/core/autodiscovery/providers/kube_services_file_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeServiceFileConfigProvider returns a new KubeServiceFileConfigProvider -var NewKubeServiceFileConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeServiceFileConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_services_nop.go b/comp/core/autodiscovery/providers/kube_services_nop.go index 4f64323cb985c..f1941c07a7373 100644 --- a/comp/core/autodiscovery/providers/kube_services_nop.go +++ b/comp/core/autodiscovery/providers/kube_services_nop.go @@ -9,9 +9,9 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewKubeServiceConfigProvider returns a new ConfigProvider connected to apiserver. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
-var NewKubeServiceConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewKubeServiceConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/kube_services_test.go b/comp/core/autodiscovery/providers/kube_services_test.go index 57da4676c08f1..1d395d937c163 100644 --- a/comp/core/autodiscovery/providers/kube_services_test.go +++ b/comp/core/autodiscovery/providers/kube_services_test.go @@ -26,7 +26,7 @@ import ( acTelemetry "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -241,7 +241,7 @@ func TestParseKubeServiceAnnotations(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) if tc.hybrid { cfg.SetWithoutSource("cluster_checks.support_hybrid_ignore_ad_tags", true) } diff --git a/comp/core/autodiscovery/providers/prometheus_common.go b/comp/core/autodiscovery/providers/prometheus_common.go index 1d75a2f1c65ac..a75657f578e17 100644 --- a/comp/core/autodiscovery/providers/prometheus_common.go +++ b/comp/core/autodiscovery/providers/prometheus_common.go @@ -7,14 +7,14 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) // getPrometheusConfigs reads and 
initializes the openmetrics checks from the configuration // It defines a default openmetrics instances with default AD if the checks configuration is empty func getPrometheusConfigs() ([]*types.PrometheusCheck, error) { - checks, err := types.PrometheusScrapeChecksTransformer(config.Datadog().GetString("prometheus_scrape.checks")) + checks, err := types.PrometheusScrapeChecksTransformer(pkgconfigsetup.Datadog().GetString("prometheus_scrape.checks")) if err != nil { return []*types.PrometheusCheck{}, err } @@ -26,7 +26,7 @@ func getPrometheusConfigs() ([]*types.PrometheusCheck, error) { validChecks := []*types.PrometheusCheck{} for i, check := range checks { - if err := check.Init(config.Datadog().GetInt("prometheus_scrape.version")); err != nil { + if err := check.Init(pkgconfigsetup.Datadog().GetInt("prometheus_scrape.version")); err != nil { log.Errorf("Ignoring check configuration (# %d): %v", i+1, err) continue } diff --git a/comp/core/autodiscovery/providers/prometheus_common_test.go b/comp/core/autodiscovery/providers/prometheus_common_test.go index 7611ee95faecf..bd610c5753168 100644 --- a/comp/core/autodiscovery/providers/prometheus_common_test.go +++ b/comp/core/autodiscovery/providers/prometheus_common_test.go @@ -11,7 +11,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" ) @@ -198,7 +198,7 @@ func TestGetPrometheusConfigs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { confBytes, _ := json.Marshal(tt.config) - config.Datadog().SetWithoutSource("prometheus_scrape.checks", string(confBytes)) + pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.checks", string(confBytes)) checks, err := getPrometheusConfigs() if (err != nil) != tt.wantErr { t.Errorf("getPrometheusConfigs() error = %v, wantErr %v", err, tt.wantErr) diff 
--git a/comp/core/autodiscovery/providers/prometheus_pods.go b/comp/core/autodiscovery/providers/prometheus_pods.go index 37634a8dbade8..fe8ede52eaacc 100644 --- a/comp/core/autodiscovery/providers/prometheus_pods.go +++ b/comp/core/autodiscovery/providers/prometheus_pods.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) @@ -28,7 +28,7 @@ type PrometheusPodsConfigProvider struct { // NewPrometheusPodsConfigProvider returns a new Prometheus ConfigProvider connected to kubelet. // Connectivity is not checked at this stage to allow for retries, Collect will do it. -func NewPrometheusPodsConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewPrometheusPodsConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { checks, err := getPrometheusConfigs() if err != nil { return nil, err diff --git a/comp/core/autodiscovery/providers/prometheus_pods_nop.go b/comp/core/autodiscovery/providers/prometheus_pods_nop.go index e6ce37bbc9a27..3bf1ad0095f1d 100644 --- a/comp/core/autodiscovery/providers/prometheus_pods_nop.go +++ b/comp/core/autodiscovery/providers/prometheus_pods_nop.go @@ -9,9 +9,9 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewPrometheusPodsConfigProvider returns a new Prometheus ConfigProvider connected to kubelet. // Connectivity is not checked at this stage to allow for retries, Collect will do it. 
-var NewPrometheusPodsConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewPrometheusPodsConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/prometheus_services.go b/comp/core/autodiscovery/providers/prometheus_services.go index 72d5a33b28326..7bc52e5f1f4ec 100644 --- a/comp/core/autodiscovery/providers/prometheus_services.go +++ b/comp/core/autodiscovery/providers/prometheus_services.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -66,7 +66,7 @@ type PrometheusServicesConfigProvider struct { } // NewPrometheusServicesConfigProvider returns a new Prometheus ConfigProvider connected to kube apiserver -func NewPrometheusServicesConfigProvider(*config.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { +func NewPrometheusServicesConfigProvider(*pkgconfigsetup.ConfigurationProviders, *telemetry.Store) (ConfigProvider, error) { // Using GetAPIClient (no wait) as Client should already be initialized by Cluster Agent main entrypoint before ac, err := apiserver.GetAPIClient() if err != nil { @@ -81,7 +81,7 @@ func NewPrometheusServicesConfigProvider(*config.ConfigurationProviders, *teleme var endpointsInformer infov1.EndpointsInformer var endpointsLister listersv1.EndpointsLister - collectEndpoints := config.Datadog().GetBool("prometheus_scrape.service_endpoints") + collectEndpoints := 
pkgconfigsetup.Datadog().GetBool("prometheus_scrape.service_endpoints") if collectEndpoints { endpointsInformer = ac.InformerFactory.Core().V1().Endpoints() if endpointsInformer == nil { diff --git a/comp/core/autodiscovery/providers/prometheus_services_nop.go b/comp/core/autodiscovery/providers/prometheus_services_nop.go index 945daf2c80eee..eddd3ab680cae 100644 --- a/comp/core/autodiscovery/providers/prometheus_services_nop.go +++ b/comp/core/autodiscovery/providers/prometheus_services_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewPrometheusServicesConfigProvider returns a new Prometheus ConfigProvider connected to kube apiserver -var NewPrometheusServicesConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewPrometheusServicesConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/autodiscovery/providers/prometheus_services_test.go b/comp/core/autodiscovery/providers/prometheus_services_test.go index bc8b9a99b9d1d..9fbb2fd47b7f6 100644 --- a/comp/core/autodiscovery/providers/prometheus_services_test.go +++ b/comp/core/autodiscovery/providers/prometheus_services_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/common/types" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -259,7 +259,7 @@ func TestPrometheusServicesCollect(t *testing.T) { }, } - config.Datadog().SetWithoutSource("prometheus_scrape.version", 2) + 
pkgconfigsetup.Datadog().SetWithoutSource("prometheus_scrape.version", 2) for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() diff --git a/comp/core/autodiscovery/providers/providers.go b/comp/core/autodiscovery/providers/providers.go index 9336bb21d74fc..5eb4b3b2cf853 100644 --- a/comp/core/autodiscovery/providers/providers.go +++ b/comp/core/autodiscovery/providers/providers.go @@ -12,17 +12,17 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) // RegisterProvider adds a loader to the providers catalog func RegisterProvider(name string, - factory func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error), + factory func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error), providerCatalog map[string]ConfigProviderFactory) { RegisterProviderWithComponents( name, - func(providerConfig *config.ConfigurationProviders, _ workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { + func(providerConfig *pkgconfigsetup.ConfigurationProviders, _ workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) { return factory(providerConfig, telemetryStore) }, providerCatalog, @@ -61,7 +61,7 @@ func RegisterProviders(providerCatalog map[string]ConfigProviderFactory) { } // ConfigProviderFactory is any function capable to create a ConfigProvider instance -type ConfigProviderFactory func(providerConfig *config.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) +type 
ConfigProviderFactory func(providerConfig *pkgconfigsetup.ConfigurationProviders, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (ConfigProvider, error) // ConfigProvider represents a source of `integration.Config` values // that can either be applied immediately or resolved for a service and diff --git a/comp/core/autodiscovery/providers/remote_config.go b/comp/core/autodiscovery/providers/remote_config.go index 19dd0907e4e23..24b00bbd68b33 100644 --- a/comp/core/autodiscovery/providers/remote_config.go +++ b/comp/core/autodiscovery/providers/remote_config.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -96,7 +96,7 @@ func (rc *RemoteConfigProvider) IntegrationScheduleCallback(updates map[string]s defer rc.mu.Unlock() var err error - allowedIntegration := config.GetRemoteConfigurationAllowedIntegrations(config.Datadog()) + allowedIntegration := pkgconfigsetup.GetRemoteConfigurationAllowedIntegrations(pkgconfigsetup.Datadog()) newCache := make(map[string]integration.Config, 0) // Now schedule everything diff --git a/comp/core/autodiscovery/providers/utils.go b/comp/core/autodiscovery/providers/utils.go index cf637776c825e..1abdc10007028 100644 --- a/comp/core/autodiscovery/providers/utils.go +++ b/comp/core/autodiscovery/providers/utils.go @@ -10,7 +10,7 @@ import ( "strconv" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ( @@ -26,20 +26,20 @@ const ( ) func buildStoreKey(key ...string) string { - parts := []string{config.Datadog().GetString("autoconf_template_dir")} + parts := 
[]string{pkgconfigsetup.Datadog().GetString("autoconf_template_dir")} parts = append(parts, key...) return path.Join(parts...) } // GetPollInterval computes the poll interval from the config -func GetPollInterval(cp config.ConfigurationProviders) time.Duration { +func GetPollInterval(cp pkgconfigsetup.ConfigurationProviders) time.Duration { if cp.PollInterval != "" { customInterval, err := time.ParseDuration(cp.PollInterval) if err == nil { return customInterval } } - return config.Datadog().GetDuration("ad_config_poll_interval") * time.Second + return pkgconfigsetup.Datadog().GetDuration("ad_config_poll_interval") * time.Second } // providerCache supports monitoring a service for changes either to the number diff --git a/comp/core/autodiscovery/providers/utils_test.go b/comp/core/autodiscovery/providers/utils_test.go index 23fc5fc9d112a..8357cc46e8c4d 100644 --- a/comp/core/autodiscovery/providers/utils_test.go +++ b/comp/core/autodiscovery/providers/utils_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestBuildStoreKey(t *testing.T) { @@ -28,13 +28,13 @@ func TestBuildStoreKey(t *testing.T) { } func TestGetPollInterval(t *testing.T) { - cp := config.ConfigurationProviders{} + cp := pkgconfigsetup.ConfigurationProviders{} assert.Equal(t, GetPollInterval(cp), 10*time.Second) - cp = config.ConfigurationProviders{ + cp = pkgconfigsetup.ConfigurationProviders{ PollInterval: "foo", } assert.Equal(t, GetPollInterval(cp), 10*time.Second) - cp = config.ConfigurationProviders{ + cp = pkgconfigsetup.ConfigurationProviders{ PollInterval: "1s", } assert.Equal(t, GetPollInterval(cp), 1*time.Second) diff --git a/comp/core/autodiscovery/providers/zookeeper.go b/comp/core/autodiscovery/providers/zookeeper.go index e9da1ed8c515e..7815bcd9011aa 100644 --- a/comp/core/autodiscovery/providers/zookeeper.go +++ 
b/comp/core/autodiscovery/providers/zookeeper.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -41,9 +41,9 @@ type ZookeeperConfigProvider struct { } // NewZookeeperConfigProvider returns a new Client connected to a Zookeeper backend. -func NewZookeeperConfigProvider(providerConfig *config.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { +func NewZookeeperConfigProvider(providerConfig *pkgconfigsetup.ConfigurationProviders, _ *telemetry.Store) (ConfigProvider, error) { if providerConfig == nil { - providerConfig = &config.ConfigurationProviders{} + providerConfig = &pkgconfigsetup.ConfigurationProviders{} } urls := strings.Split(providerConfig.TemplateURL, ",") diff --git a/comp/core/autodiscovery/providers/zookeeper_nop.go b/comp/core/autodiscovery/providers/zookeeper_nop.go index 91306ec420ad4..30c86050595a4 100644 --- a/comp/core/autodiscovery/providers/zookeeper_nop.go +++ b/comp/core/autodiscovery/providers/zookeeper_nop.go @@ -9,8 +9,8 @@ package providers import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NewZookeeperConfigProvider returns a new Client connected to a Zookeeper backend. 
-var NewZookeeperConfigProvider func(providerConfig *config.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) +var NewZookeeperConfigProvider func(providerConfig *pkgconfigsetup.ConfigurationProviders, telemetryStore *telemetry.Store) (ConfigProvider, error) diff --git a/comp/core/gui/guiimpl/checks.go b/comp/core/gui/guiimpl/checks.go index c72a4e8f39c64..f57acb9cbac1d 100644 --- a/comp/core/gui/guiimpl/checks.go +++ b/comp/core/gui/guiimpl/checks.go @@ -28,19 +28,19 @@ import ( pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" checkstats "github.com/DataDog/datadog-agent/pkg/collector/check/stats" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) var ( configPaths = []string{ - config.Datadog().GetString("confd_path"), // Custom checks - filepath.Join(path.GetDistPath(), "conf.d"), // Default check configs + pkgconfigsetup.Datadog().GetString("confd_path"), // Custom checks + filepath.Join(path.GetDistPath(), "conf.d"), // Default check configs } checkPaths = []string{ - filepath.Join(path.GetDistPath(), "checks.d"), // Custom checks - config.Datadog().GetString("additional_checksd"), // Custom checks + filepath.Join(path.GetDistPath(), "checks.d"), // Custom checks + pkgconfigsetup.Datadog().GetString("additional_checksd"), // Custom checks path.PyChecksPath, // Integrations-core checks } ) @@ -247,7 +247,7 @@ func setCheckConfigFile(w http.ResponseWriter, r *http.Request) { var checkConfFolderPath, defaultCheckConfFolderPath string if checkFolder != "" { - checkConfFolderPath, err = securejoin.SecureJoin(config.Datadog().GetString("confd_path"), checkFolder) + checkConfFolderPath, err = securejoin.SecureJoin(pkgconfigsetup.Datadog().GetString("confd_path"), checkFolder) if err != nil { http.Error(w, "invalid checkFolder path", 
http.StatusBadRequest) log.Errorf("Error: Unable to join provided \"confd_path\" setting path with checkFolder: %s", err.Error()) @@ -260,7 +260,7 @@ func setCheckConfigFile(w http.ResponseWriter, r *http.Request) { return } } else { - checkConfFolderPath = config.Datadog().GetString("confd_path") + checkConfFolderPath = pkgconfigsetup.Datadog().GetString("confd_path") defaultCheckConfFolderPath = filepath.Join(path.GetDistPath(), "conf.d") } @@ -352,7 +352,7 @@ func getWheelsChecks() ([]string, error) { } for _, integration := range integrations { - if _, ok := config.StandardJMXIntegrations[integration]; !ok { + if _, ok := pkgconfigsetup.StandardJMXIntegrations[integration]; !ok { pyChecks = append(pyChecks, integration) } } @@ -391,7 +391,7 @@ func listChecks(w http.ResponseWriter, _ *http.Request) { integrations = append(integrations, goIntegrations...) // Get jmx-checks - for integration := range config.StandardJMXIntegrations { + for integration := range pkgconfigsetup.StandardJMXIntegrations { integrations = append(integrations, integration) } diff --git a/comp/core/hostname/hostnameimpl/service_test.go b/comp/core/hostname/hostnameimpl/service_test.go index 1b2177f30b080..29d6626c292dd 100644 --- a/comp/core/hostname/hostnameimpl/service_test.go +++ b/comp/core/hostname/hostnameimpl/service_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core/hostname" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" @@ -21,9 +21,9 @@ func TestGet(t *testing.T) { t.Cleanup(func() { // erase cache cache.Cache.Delete(cache.BuildAgentKey("hostname")) - config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") }) - config.Datadog().SetWithoutSource("hostname", "test-hostname") + 
pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname") s := fxutil.Test[hostname.Component](t, Module()) name, err := s.Get(context.Background()) require.NoError(t, err) @@ -34,9 +34,9 @@ func TestGetWithProvider(t *testing.T) { t.Cleanup(func() { // erase cache) cache.Cache.Delete(cache.BuildAgentKey("hostname")) - config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") }) - config.Datadog().SetWithoutSource("hostname", "test-hostname2") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname2") s := fxutil.Test[hostname.Component](t, Module()) data, err := s.GetWithProvider(context.Background()) require.NoError(t, err) diff --git a/comp/core/hostname/remotehostnameimpl/hostname.go b/comp/core/hostname/remotehostnameimpl/hostname.go index 7732f9a4fbe84..6bb3d3273a810 100644 --- a/comp/core/hostname/remotehostnameimpl/hostname.go +++ b/comp/core/hostname/remotehostnameimpl/hostname.go @@ -12,7 +12,7 @@ import ( "github.com/avast/retry-go/v4" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -94,12 +94,12 @@ func getHostnameWithContext(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - client, err := grpc.GetDDAgentClient(ctx, ipcAddress, config.GetIPCPort()) + client, err := grpc.GetDDAgentClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } diff --git a/comp/core/sysprobeconfig/component.go b/comp/core/sysprobeconfig/component.go index c5484b2d9d856..5a87cfdf04115 100644 --- a/comp/core/sysprobeconfig/component.go +++ 
b/comp/core/sysprobeconfig/component.go @@ -16,21 +16,22 @@ package sysprobeconfig import ( + "go.uber.org/fx" + sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" - "go.uber.org/fx" ) // team: ebpf-platform // Component is the component type. type Component interface { - config.ReaderWriter + model.ReaderWriter // Warnings returns config warnings collected during setup. - Warnings() *config.Warnings + Warnings() *model.Warnings // SysProbeObject returns the wrapper sysconfig SysProbeObject() *sysconfigtypes.Config diff --git a/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go b/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go index ba5d6bca73bb8..83582e117cda1 100644 --- a/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go +++ b/comp/core/sysprobeconfig/sysprobeconfigimpl/config.go @@ -13,7 +13,8 @@ import ( sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -29,12 +30,12 @@ func Module() fxutil.Module { type cfg struct { // this component is currently implementing a thin wrapper around pkg/config, // and uses globals in that package. - config.Config + model.Config syscfg *sysconfigtypes.Config // warnings are the warnings generated during setup - warnings *config.Warnings + warnings *model.Warnings } // sysprobeconfigDependencies is an interface that mimics the fx-oriented dependencies struct (This is copied from the main agent configuration.) 
@@ -64,14 +65,14 @@ func newConfig(deps dependencies) (sysprobeconfig.Component, error) { return nil, err } - return &cfg{Config: config.SystemProbe(), syscfg: syscfg}, nil + return &cfg{Config: pkgconfigsetup.SystemProbe(), syscfg: syscfg}, nil } -func (c *cfg) Warnings() *config.Warnings { +func (c *cfg) Warnings() *model.Warnings { return c.warnings } -func (c *cfg) Object() config.Reader { +func (c *cfg) Object() model.Reader { return c } diff --git a/comp/core/tagger/params.go b/comp/core/tagger/params.go index 3e4482d6594ec..6b1bead6bf503 100644 --- a/comp/core/tagger/params.go +++ b/comp/core/tagger/params.go @@ -7,7 +7,7 @@ package tagger import ( "github.com/DataDog/datadog-agent/comp/core/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // AgentTypeForTagger represents agent types that tagger is used for @@ -29,7 +29,7 @@ type Params struct { // NewTaggerParamsForCoreAgent is a constructor function for creating core agent tagger params func NewTaggerParamsForCoreAgent(_ config.Component) Params { - if pkgconfig.IsCLCRunner() { + if pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()) { return NewCLCRunnerRemoteTaggerParams() } return NewTaggerParams() diff --git a/comp/core/tagger/taggerimpl/collectors/ecs_common.go b/comp/core/tagger/taggerimpl/collectors/ecs_common.go index 161a92e09fa92..c4170ad398b15 100644 --- a/comp/core/tagger/taggerimpl/collectors/ecs_common.go +++ b/comp/core/tagger/taggerimpl/collectors/ecs_common.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func addResourceTags(t *taglist.TagList, m map[string]string) { @@ -19,7 +19,7 @@ func addResourceTags(t *taglist.TagList, m map[string]string) { continue } - if config.Datadog().GetBool("ecs_resource_tags_replace_colon") { + if 
pkgconfigsetup.Datadog().GetBool("ecs_resource_tags_replace_colon") { k = strings.ReplaceAll(k, ":", "_") } diff --git a/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go b/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go index 1826cd343beaf..488302b108e06 100644 --- a/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go +++ b/comp/core/tagger/taggerimpl/collectors/ecs_common_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" ) @@ -51,10 +51,10 @@ func TestAddResourceTags(t *testing.T) { expectedTags.AddLow("environment", "sandbox") expectedTags.AddLow("project", "ecs-test") expectedTags.AddLow("foo_bar_baz", "val") - config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) + pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) return expectedTags }, - resetFunc: func() { config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, + resetFunc: func() { pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, }, { name: "replace colon enabled, do not replace tag value", @@ -70,10 +70,10 @@ func TestAddResourceTags(t *testing.T) { expectedTags.AddLow("environment", "sandbox") expectedTags.AddLow("project", "ecs-test") expectedTags.AddLow("foo_bar_baz", "val1:val2") - config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) + pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", true) return expectedTags }, - resetFunc: func() { config.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, + resetFunc: func() { pkgconfigsetup.Datadog().SetWithoutSource("ecs_resource_tags_replace_colon", false) }, }, { name: "replace colon disabled", diff --git 
a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go index 9dfa487f002f4..dad04320b2ca5 100644 --- a/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go +++ b/comp/core/tagger/taggerimpl/collectors/workloadmeta_extract.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/tags" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -360,7 +360,7 @@ func (c *WorkloadMetaCollector) handleKubePod(ev workloadmeta.Event) []*types.Ta } kubeServiceDisabled := false - for _, disabledTag := range config.Datadog().GetStringSlice("kubernetes_ad_tags_disabled") { + for _, disabledTag := range pkgconfigsetup.Datadog().GetStringSlice("kubernetes_ad_tags_disabled") { if disabledTag == "kube_service" { kubeServiceDisabled = true break @@ -447,7 +447,7 @@ func (c *WorkloadMetaCollector) handleECSTask(ev workloadmeta.Event) []*types.Ta taskTags.AddOrchestrator(tags.TaskARN, task.ID) if task.ClusterName != "" { - if !config.Datadog().GetBool("disable_cluster_name_tag_key") { + if !pkgconfigsetup.Datadog().GetBool("disable_cluster_name_tag_key") { taskTags.AddLow(tags.ClusterName, task.ClusterName) } taskTags.AddLow(tags.EcsClusterName, task.ClusterName) diff --git a/comp/core/tagger/taggerimpl/tagger.go b/comp/core/tagger/taggerimpl/tagger.go index 87bd87086b736..b26eaed4b804c 100644 --- a/comp/core/tagger/taggerimpl/tagger.go +++ b/comp/core/tagger/taggerimpl/tagger.go @@ -425,30 +425,30 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty // | empty | not empty || container prefix + originFromMsg | // | none | not empty || container prefix + 
originFromMsg | if t.datadogConfig.dogstatsdOptOutEnabled && originInfo.Cardinality == "none" { - originInfo.FromUDS = packets.NoOrigin - originInfo.FromTag = "" - originInfo.FromMsg = "" + originInfo.ContainerIDFromSocket = packets.NoOrigin + originInfo.PodUID = "" + originInfo.ContainerID = "" return } // We use the UDS socket origin if no origin ID was specify in the tags // or 'dogstatsd_entity_id_precedence' is set to False (default false). - if originInfo.FromUDS != packets.NoOrigin && - (originInfo.FromTag == "" || !t.datadogConfig.dogstatsdEntityIDPrecedenceEnabled) { - if err := t.AccumulateTagsFor(originInfo.FromUDS, cardinality, tb); err != nil { + if originInfo.ContainerIDFromSocket != packets.NoOrigin && + (originInfo.PodUID == "" || !t.datadogConfig.dogstatsdEntityIDPrecedenceEnabled) { + if err := t.AccumulateTagsFor(originInfo.ContainerIDFromSocket, cardinality, tb); err != nil { t.log.Errorf("%s", err.Error()) } } // originFromClient can either be originInfo.FromTag or originInfo.FromMsg originFromClient := "" - if originInfo.FromTag != "" && originInfo.FromTag != "none" { + if originInfo.PodUID != "" && originInfo.PodUID != "none" { // Check if the value is not "none" in order to avoid calling the tagger for entity that doesn't exist. // Currently only supported for pods - originFromClient = types.NewEntityID(types.KubernetesPodUID, originInfo.FromTag).String() - } else if originInfo.FromTag == "" && len(originInfo.FromMsg) > 0 { + originFromClient = types.NewEntityID(types.KubernetesPodUID, originInfo.PodUID).String() + } else if originInfo.PodUID == "" && len(originInfo.ContainerID) > 0 { // originInfo.FromMsg is the container ID sent by the newer clients. 
- originFromClient = types.NewEntityID(types.ContainerID, originInfo.FromMsg).String() + originFromClient = types.NewEntityID(types.ContainerID, originInfo.ContainerID).String() } if originFromClient != "" { @@ -459,18 +459,18 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty } default: // Tag using Local Data - if originInfo.FromUDS != packets.NoOrigin { - if err := t.AccumulateTagsFor(originInfo.FromUDS, cardinality, tb); err != nil { + if originInfo.ContainerIDFromSocket != packets.NoOrigin { + if err := t.AccumulateTagsFor(originInfo.ContainerIDFromSocket, cardinality, tb); err != nil { t.log.Errorf("%s", err.Error()) } } - if err := t.AccumulateTagsFor(types.ContainerID.ToUID(originInfo.FromMsg), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromMsg, err) + if err := t.AccumulateTagsFor(types.ContainerID.ToUID(originInfo.ContainerID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.ContainerID, err) } - if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(originInfo.FromTag), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromTag, err) + if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(originInfo.PodUID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.PodUID, err) } // Tag using External Data. 
@@ -503,7 +503,7 @@ func (t *TaggerClient) EnrichTags(tb tagset.TagsAccumulator, originInfo taggerty // Accumulate tags for pod UID if parsedExternalData.podUID != "" { if err := t.AccumulateTagsFor(types.KubernetesPodUID.ToUID(parsedExternalData.podUID), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.FromMsg, err) + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.ContainerID, err) } } diff --git a/comp/core/tagger/taggerimpl/tagger_test.go b/comp/core/tagger/taggerimpl/tagger_test.go index 600fdfd631685..258d23766c8d5 100644 --- a/comp/core/tagger/taggerimpl/tagger_test.go +++ b/comp/core/tagger/taggerimpl/tagger_test.go @@ -58,25 +58,25 @@ func TestEnrichTags(t *testing.T) { }, { name: "with local data (containerID) and low cardinality", - originInfo: taggertypes.OriginInfo{FromMsg: "container", Cardinality: "low"}, + originInfo: taggertypes.OriginInfo{ContainerID: "container", Cardinality: "low"}, expectedTags: []string{"container-low"}, cidProvider: &fakeCIDProvider{}, }, { name: "with local data (containerID) and high cardinality", - originInfo: taggertypes.OriginInfo{FromMsg: "container", Cardinality: "high"}, + originInfo: taggertypes.OriginInfo{ContainerID: "container", Cardinality: "high"}, expectedTags: []string{"container-low", "container-orch", "container-high"}, cidProvider: &fakeCIDProvider{}, }, { name: "with local data (podUID) and low cardinality", - originInfo: taggertypes.OriginInfo{FromTag: "pod", Cardinality: "low"}, + originInfo: taggertypes.OriginInfo{PodUID: "pod", Cardinality: "low"}, expectedTags: []string{"pod-low"}, cidProvider: &fakeCIDProvider{}, }, { name: "with local data (podUID) and high cardinality", - originInfo: taggertypes.OriginInfo{FromTag: "pod", Cardinality: "high"}, + originInfo: taggertypes.OriginInfo{PodUID: "pod", Cardinality: "high"}, expectedTags: []string{"pod-low", "pod-orch", "pod-high"}, cidProvider: &fakeCIDProvider{}, }, @@ -94,7 +94,7 @@ func 
TestEnrichTagsOrchestrator(t *testing.T) { defer fakeTagger.ResetTagger() fakeTagger.SetTags("foo://bar", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) tb := tagset.NewHashingTagsAccumulator() - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://bar", Cardinality: "orchestrator"}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://bar", Cardinality: "orchestrator"}) assert.Equal(t, []string{"lowTag", "orchTag"}, tb.Get()) } @@ -105,9 +105,9 @@ func TestEnrichTagsOptOut(t *testing.T) { cfg.SetWithoutSource("dogstatsd_origin_optout_enabled", true) fakeTagger.SetTags("foo://bar", "fooSource", []string{"lowTag"}, []string{"orchTag"}, nil, nil) tb := tagset.NewHashingTagsAccumulator() - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://originID", FromTag: "pod-uid", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://originID", PodUID: "pod-uid", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) assert.Equal(t, []string{}, tb.Get()) - fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{FromUDS: "foo://originID", FromMsg: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + fakeTagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "foo://originID", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) assert.Equal(t, []string{}, tb.Get()) } diff --git a/comp/core/tagger/taglist/taglist.go b/comp/core/tagger/taglist/taglist.go index f8e36c9318755..9a2bf1196e083 100644 --- a/comp/core/tagger/taglist/taglist.go +++ b/comp/core/tagger/taglist/taglist.go @@ -10,7 +10,7 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // TagList 
allows collector to incremental build a tag list @@ -30,7 +30,7 @@ func NewTagList() *TagList { orchestratorCardTags: make(map[string]bool), highCardTags: make(map[string]bool), standardTags: make(map[string]bool), - splitList: config.Datadog().GetStringMapString("tag_value_split_separator"), + splitList: pkgconfigsetup.Datadog().GetStringMapString("tag_value_split_separator"), } } diff --git a/comp/core/telemetry/telemetryimpl/telemetry_mock.go b/comp/core/telemetry/telemetryimpl/telemetry_mock.go index 5d7a161c299cf..5d33b0fb4538e 100644 --- a/comp/core/telemetry/telemetryimpl/telemetry_mock.go +++ b/comp/core/telemetry/telemetryimpl/telemetry_mock.go @@ -44,9 +44,10 @@ func newMock(deps testDependencies) telemetry.Mock { telemetry := &telemetryImplMock{ telemetryImpl{ - mutex: &mutex, - registry: reg, - meterProvider: provider, + mutex: &mutex, + registry: reg, + meterProvider: provider, + defaultRegistry: prometheus.NewRegistry(), }, } diff --git a/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go b/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go index d19169605b4cc..172cfc3ea8c0b 100644 --- a/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go +++ b/comp/core/workloadmeta/collectors/internal/cloudfoundry/container/cf_container.go @@ -15,8 +15,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/common" @@ -57,7 +57,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } // Detect if we're on a PCF container - if 
!config.Datadog().GetBool("cloud_foundry_buildpack") { + if !pkgconfigsetup.Datadog().GetBool("cloud_foundry_buildpack") { return errors.NewDisabled(componentName, "Agent is not running on a CloudFoundry container") } diff --git a/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go b/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go index 7f8cb0263a984..6c274f1790449 100644 --- a/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go +++ b/comp/core/workloadmeta/collectors/internal/cloudfoundry/vm/cf_vm.go @@ -15,8 +15,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" @@ -72,10 +72,10 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error return err } - c.nodeName = config.Datadog().GetString("bosh_id") + c.nodeName = pkgconfigsetup.Datadog().GetString("bosh_id") // Check for Cluster Agent availability (will be retried at each pull) - c.dcaEnabled = config.Datadog().GetBool("cluster_agent.enabled") + c.dcaEnabled = pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") c.dcaClient = c.getDCAClient() return nil diff --git a/comp/core/workloadmeta/collectors/internal/containerd/containerd.go b/comp/core/workloadmeta/collectors/internal/containerd/containerd.go index 08a592772291d..efc5524d149a2 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/containerd.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/containerd.go @@ -20,8 +20,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - 
"github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" agentErrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/sbom/scanner" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -430,5 +430,5 @@ func (c *collector) cacheExitInfo(id string, exitCode *int64, exitTS time.Time) } func imageMetadataCollectionIsEnabled() bool { - return config.Datadog().GetBool("container_image.enabled") + return pkgconfigsetup.Datadog().GetBool("container_image.enabled") } diff --git a/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go b/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go index 91dbfca719f09..9867d2a6cb093 100644 --- a/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/image_sbom_trivy.go @@ -14,7 +14,7 @@ import ( "github.com/CycloneDX/cyclonedx-go" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/containerd" @@ -23,7 +23,7 @@ import ( ) func sbomCollectionIsEnabled() bool { - return imageMetadataCollectionIsEnabled() && config.Datadog().GetBool("sbom.container_image.enabled") + return imageMetadataCollectionIsEnabled() && pkgconfigsetup.Datadog().GetBool("sbom.container_image.enabled") } func (c *collector) startSBOMCollection(ctx context.Context) error { diff --git a/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go b/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go index f42b8cc219d39..0a56688795975 100644 --- 
a/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go +++ b/comp/core/workloadmeta/collectors/internal/containerd/network_linux.go @@ -12,8 +12,8 @@ import ( "github.com/containerd/containerd" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" cutil "github.com/DataDog/datadog-agent/pkg/util/containerd" "github.com/DataDog/datadog-agent/pkg/util/system" ) @@ -48,7 +48,7 @@ func extractIP(namespace string, container containerd.Container, containerdClien // of them. for _, taskPid := range taskPids { IPs, err := system.ParseProcessIPs( - config.Datadog().GetString("container_proc_root"), + pkgconfigsetup.Datadog().GetString("container_proc_root"), int(taskPid.Pid), func(ip string) bool { return ip != "127.0.0.1" }, ) diff --git a/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go b/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go index fbeb54b4377c7..12f6d016421a3 100644 --- a/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go +++ b/comp/core/workloadmeta/collectors/internal/docker/image_sbom_trivy.go @@ -15,7 +15,7 @@ import ( "github.com/CycloneDX/cyclonedx-go" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/docker" "github.com/DataDog/datadog-agent/pkg/sbom/scanner" @@ -24,11 +24,11 @@ import ( ) func imageMetadataCollectionIsEnabled() bool { - return config.Datadog().GetBool("container_image.enabled") + return pkgconfigsetup.Datadog().GetBool("container_image.enabled") } func sbomCollectionIsEnabled() bool { - return imageMetadataCollectionIsEnabled() && config.Datadog().GetBool("sbom.container_image.enabled") + return 
imageMetadataCollectionIsEnabled() && pkgconfigsetup.Datadog().GetBool("sbom.container_image.enabled") } func (c *collector) startSBOMCollection(ctx context.Context) error { diff --git a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go index 8dd66cacd8a79..f4a54be516779 100644 --- a/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go +++ b/comp/core/workloadmeta/collectors/internal/kubemetadata/kubemetadata.go @@ -19,8 +19,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" @@ -83,7 +83,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error // If DCA is enabled and can't communicate with the DCA, let worloadmeta retry. 
var errDCA error - if config.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { c.dcaEnabled = false c.dcaClient, errDCA = clusteragent.GetClusterAgentClient() if errDCA != nil { @@ -95,7 +95,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } // We return the permanent fail only if fallback is disabled - if retry.IsErrPermaFail(errDCA) && !config.Datadog().GetBool("cluster_agent.tagging_fallback") { + if retry.IsErrPermaFail(errDCA) && !pkgconfigsetup.Datadog().GetBool("cluster_agent.tagging_fallback") { return errDCA } @@ -106,7 +106,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } // Fallback to local metamapper if DCA not enabled, or in permafail state with fallback enabled. - if !config.Datadog().GetBool("cluster_agent.enabled") || errDCA != nil { + if !pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") || errDCA != nil { // Using GetAPIClient as error returned follows the IsErrWillRetry/IsErrPermaFail // Workloadmeta will retry calling this method until permafail c.apiClient, err = apiserver.GetAPIClient() @@ -115,9 +115,9 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } } - c.updateFreq = time.Duration(config.Datadog().GetInt("kubernetes_metadata_tag_update_freq")) * time.Second + c.updateFreq = time.Duration(pkgconfigsetup.Datadog().GetInt("kubernetes_metadata_tag_update_freq")) * time.Second - metadataAsTags := configutils.GetMetadataAsTags(config.Datadog()) + metadataAsTags := configutils.GetMetadataAsTags(pkgconfigsetup.Datadog()) c.collectNamespaceLabels = len(metadataAsTags.GetNamespaceLabelsAsTags()) > 0 c.collectNamespaceAnnotations = len(metadataAsTags.GetNamespaceAnnotationsAsTags()) > 0 diff --git a/comp/core/workloadmeta/collectors/internal/podman/podman.go b/comp/core/workloadmeta/collectors/internal/podman/podman.go index 9c171d5ed168c..1c6784fea6686 100644 --- 
a/comp/core/workloadmeta/collectors/internal/podman/podman.go +++ b/comp/core/workloadmeta/collectors/internal/podman/podman.go @@ -18,8 +18,8 @@ import ( "go.uber.org/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dderrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -68,7 +68,7 @@ func (c *collector) Start(_ context.Context, store workloadmeta.Component) error } var dbPath string - dbPath = config.Datadog().GetString("podman_db_path") + dbPath = pkgconfigsetup.Datadog().GetString("podman_db_path") // We verify the user-provided path exists to prevent the collector entering a failing loop. if dbPath != "" && !dbIsAccessible(dbPath) { diff --git a/comp/core/workloadmeta/collectors/internal/process/process_collector.go b/comp/core/workloadmeta/collectors/internal/process/process_collector.go index ee69b4c91e7fd..3d677913016bd 100644 --- a/comp/core/workloadmeta/collectors/internal/process/process_collector.go +++ b/comp/core/workloadmeta/collectors/internal/process/process_collector.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" processwlm "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" @@ -50,7 +50,7 @@ type collector struct { // NewCollector returns a new local process collector provider and an error. 
// Currently, this is only used on Linux when language detection and run in core agent are enabled. func NewCollector() (workloadmeta.CollectorProvider, error) { - wlmExtractor := processwlm.GetSharedWorkloadMetaExtractor(config.SystemProbe()) + wlmExtractor := processwlm.GetSharedWorkloadMetaExtractor(pkgconfigsetup.SystemProbe()) processData := NewProcessData() processData.Register(wlmExtractor) @@ -81,7 +81,7 @@ func (c *collector) Start(ctx context.Context, store workloadmeta.Component) err // If process collection is disabled, the collector will gather the basic process and container data // necessary for language detection. - if !config.Datadog().GetBool("process_config.process_collection.enabled") { + if !pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled") { collectionTicker := c.collectionClock.Ticker(10 * time.Second) if c.containerProvider == nil { c.containerProvider = proccontainers.GetSharedContainerProvider(store) diff --git a/comp/core/workloadmeta/collectors/internal/remote/generic.go b/comp/core/workloadmeta/collectors/internal/remote/generic.go index 51b122cf13a4f..3a697cc50ae0f 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/generic.go +++ b/comp/core/workloadmeta/collectors/internal/remote/generic.go @@ -25,7 +25,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/telemetry" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -148,7 +148,7 @@ func (c *GenericCollector) startWorkloadmetaStream(maxElapsed time.Duration) err default: } - token, err := security.FetchAuthToken(pkgconfig.Datadog()) + token, err := security.FetchAuthToken(pkgconfigsetup.Datadog()) if err != nil { err = 
fmt.Errorf("unable to fetch authentication token: %w", err) log.Warnf("unable to establish entity stream between agents, will possibly retry: %s", err) @@ -179,7 +179,7 @@ func (c *GenericCollector) startWorkloadmetaStream(maxElapsed time.Duration) err // Run will run the generic collector streaming loop func (c *GenericCollector) Run() { - recvWithoutTimeout := pkgconfig.Datadog().GetBool("workloadmeta.remote.recv_without_timeout") + recvWithoutTimeout := pkgconfigsetup.Datadog().GetBool("workloadmeta.remote.recv_without_timeout") for { select { diff --git a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go index 40ee9c33cd0e1..fc36cfa790f25 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go +++ b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go @@ -20,7 +20,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" @@ -72,7 +73,7 @@ func (s *stream) Recv() (interface{}, error) { type streamHandler struct { port int - config.Reader + model.Reader } // workloadmetaEventFromProcessEventSet converts the given ProcessEventSet into a workloadmeta.Event @@ -119,7 +120,7 @@ func NewCollector() (workloadmeta.CollectorProvider, error) { Collector: &remote.GenericCollector{ CollectorID: collectorID, // 
TODO(components): make sure StreamHandler uses the config component not pkg/config - StreamHandler: &streamHandler{Reader: config.Datadog()}, + StreamHandler: &streamHandler{Reader: pkgconfigsetup.Datadog()}, Catalog: workloadmeta.NodeAgent, Insecure: true, // wlm extractor currently does not support TLS }, diff --git a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go index a9e46426cc829..71e47ded6be31 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go +++ b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector_test.go @@ -30,7 +30,7 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -65,11 +65,11 @@ func (s *mockServer) StreamEntities(_ *pbgo.ProcessStreamEntitiesRequest, out pb func TestCollection(t *testing.T) { // Create Auth Token for the client - if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfig.Datadog())); os.IsNotExist(err) { - security.CreateOrFetchToken(pkgconfig.Datadog()) + if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())); os.IsNotExist(err) { + security.CreateOrFetchToken(pkgconfigsetup.Datadog()) defer func() { // cleanup - os.Remove(security.GetAuthTokenFilepath(pkgconfig.Datadog())) + os.Remove(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())) }() } creationTime := time.Now().Unix() @@ -296,18 
+296,25 @@ func TestCollection(t *testing.T) { require.NoError(t, err) // Number of events expected. Each response can hold multiple events, either Set or Unset - numberOfEvents := len(test.preEvents) + expectedNumberOfEvents := len(test.preEvents) for _, ev := range test.serverResponses { - numberOfEvents += len(ev.SetEvents) + len(ev.UnsetEvents) + expectedNumberOfEvents += len(ev.SetEvents) + len(ev.UnsetEvents) } // Keep listening to workloadmeta until enough events are received. It is possible that the // first bundle does not hold any events. Thus, it is required to look at the number of events // in the bundle. - for i := 0; i < numberOfEvents; { + // Also, when a problem occurs and a re-sync is triggered, we might + // receive duplicate events, so we need to keep a map of received + // events to account for duplicates. + eventsReceived := make(map[workloadmeta.Event]struct{}) + for len(eventsReceived) < expectedNumberOfEvents { bundle := <-ch - close(bundle.Ch) - i += len(bundle.Events) + bundle.Acknowledge() + + for _, ev := range bundle.Events { + eventsReceived[ev] = struct{}{} + } } mockStore.Unsubscribe(ch) diff --git a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go index 79d69f7bb33b2..ec4770a214260 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go +++ b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta.go @@ -18,7 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/proto" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" ) @@ -88,7 +89,7 @@ func (s *stream) Recv() (interface{}, error) { type streamHandler struct { port int filter *workloadmeta.Filter - config.Config + model.Config } // NewCollector returns a CollectorProvider to build a remote workloadmeta collector, and an error if any. @@ -102,7 +103,7 @@ func NewCollector(deps dependencies) (workloadmeta.CollectorProvider, error) { CollectorID: collectorID, StreamHandler: &streamHandler{ filter: deps.Params.Filter, - Config: config.Datadog(), + Config: pkgconfigsetup.Datadog(), }, Catalog: workloadmeta.Remote, }, diff --git a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go index f4e4b47395b23..9045a35ac7c37 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go +++ b/comp/core/workloadmeta/collectors/internal/remote/workloadmeta/workloadmeta_test.go @@ -29,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/proto" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/server" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -191,11 +191,11 @@ func TestHandleWorkloadmetaStreamResponse(t *testing.T) { func TestCollection(t *testing.T) { // Create Auth Token for the client - if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfig.Datadog())); os.IsNotExist(err) { - security.CreateOrFetchToken(pkgconfig.Datadog()) + if _, err := os.Stat(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())); os.IsNotExist(err) { + security.CreateOrFetchToken(pkgconfigsetup.Datadog()) defer func() { // 
cleanup - os.Remove(security.GetAuthTokenFilepath(pkgconfig.Datadog())) + os.Remove(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())) }() } diff --git a/comp/core/workloadmeta/collectors/util/process_util_linux.go b/comp/core/workloadmeta/collectors/util/process_util_linux.go index 840ff7f7088ac..62e4381e00cd7 100644 --- a/comp/core/workloadmeta/collectors/util/process_util_linux.go +++ b/comp/core/workloadmeta/collectors/util/process_util_linux.go @@ -8,7 +8,7 @@ package util import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/flavor" ) @@ -20,8 +20,8 @@ func LocalProcessCollectorIsEnabled() bool { return false } - processChecksInCoreAgent := config.Datadog().GetBool("process_config.run_in_core_agent.enabled") - langDetectionEnabled := config.Datadog().GetBool("language_detection.enabled") + processChecksInCoreAgent := pkgconfigsetup.Datadog().GetBool("process_config.run_in_core_agent.enabled") + langDetectionEnabled := pkgconfigsetup.Datadog().GetBool("language_detection.enabled") return langDetectionEnabled && processChecksInCoreAgent } diff --git a/comp/dogstatsd/listeners/named_pipe_nowindows.go b/comp/dogstatsd/listeners/named_pipe_nowindows.go index 827468549b927..3e8ca506d9808 100644 --- a/comp/dogstatsd/listeners/named_pipe_nowindows.go +++ b/comp/dogstatsd/listeners/named_pipe_nowindows.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // NamedPipeListener implements the StatsdListener interface for named pipe protocol. 
@@ -22,7 +22,7 @@ type NamedPipeListener struct{} // //nolint:revive // TODO(AML) Fix revive linter func NewNamedPipeListener(_ string, _ chan packets.Packets, - _ *packets.PoolManager[packets.Packet], _ config.Reader, _ replay.Component, _ *TelemetryStore, _ *packets.TelemetryStore, _ telemetry.Component) (*NamedPipeListener, error) { + _ *packets.PoolManager[packets.Packet], _ model.Reader, _ replay.Component, _ *TelemetryStore, _ *packets.TelemetryStore, _ telemetry.Component) (*NamedPipeListener, error) { return nil, errors.New("named pipe is only supported on Windows") } diff --git a/comp/dogstatsd/listeners/named_pipe_windows.go b/comp/dogstatsd/listeners/named_pipe_windows.go index e50c08629277c..a7061013b9adc 100644 --- a/comp/dogstatsd/listeners/named_pipe_windows.go +++ b/comp/dogstatsd/listeners/named_pipe_windows.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" winio "github.com/Microsoft/go-winio" @@ -42,7 +42,7 @@ type NamedPipeListener struct { // NewNamedPipeListener returns an named pipe Statsd listener func NewNamedPipeListener(pipeName string, packetOut chan packets.Packets, - sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg config.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetrycomp telemetry.Component) (*NamedPipeListener, error) { + sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg model.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetrycomp telemetry.Component) (*NamedPipeListener, error) { bufferSize := cfg.GetInt("dogstatsd_buffer_size") return 
newNamedPipeListener( diff --git a/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go b/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go index fb335ae81e258..e1ceef441c14c 100644 --- a/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go +++ b/comp/dogstatsd/listeners/ratelimit/mem_based_rate_limiter.go @@ -13,7 +13,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -46,7 +46,7 @@ var ballast []byte //nolint:unused var ballastOnce sync.Once // BuildMemBasedRateLimiter builds a new instance of *MemBasedRateLimiter -func BuildMemBasedRateLimiter(cfg config.Reader, telemetry telemetry.Component) (*MemBasedRateLimiter, error) { +func BuildMemBasedRateLimiter(cfg model.Reader, telemetry telemetry.Component) (*MemBasedRateLimiter, error) { var memoryUsage memoryUsage var err error if memoryUsage, err = newCgroupMemoryUsage(); err == nil { @@ -91,7 +91,7 @@ func BuildMemBasedRateLimiter(cfg config.Reader, telemetry telemetry.Component) ) } -func getConfigFloat(cfg config.Reader, subkey string) float64 { +func getConfigFloat(cfg model.Reader, subkey string) float64 { return cfg.GetFloat64("dogstatsd_mem_based_rate_limiter." 
+ subkey) } diff --git a/comp/dogstatsd/listeners/udp.go b/comp/dogstatsd/listeners/udp.go index 4d528714b0dc0..f7b71c49e87ee 100644 --- a/comp/dogstatsd/listeners/udp.go +++ b/comp/dogstatsd/listeners/udp.go @@ -15,7 +15,8 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -50,7 +51,7 @@ type UDPListener struct { } // NewUDPListener returns an idle UDP Statsd listener -func NewUDPListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg config.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore) (*UDPListener, error) { +func NewUDPListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], cfg model.Reader, capture replay.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore) (*UDPListener, error) { var err error var url string @@ -63,7 +64,7 @@ func NewUDPListener(packetOut chan packets.Packets, sharedPacketPoolManager *pac // Listen to all network interfaces url = fmt.Sprintf(":%s", port) } else { - url = net.JoinHostPort(config.GetBindHostFromConfig(cfg), port) + url = net.JoinHostPort(pkgconfigsetup.GetBindHostFromConfig(cfg), port) } addr, err := net.ResolveUDPAddr("udp", url) diff --git a/comp/dogstatsd/listeners/uds_common.go b/comp/dogstatsd/listeners/uds_common.go index 4465ead476240..b8480c8ff2ef7 100644 --- a/comp/dogstatsd/listeners/uds_common.go +++ b/comp/dogstatsd/listeners/uds_common.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" replay 
"github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" @@ -56,7 +56,7 @@ type UDSListener struct { trafficCapture replay.Component pidMap pidmap.Component OriginDetection bool - config config.Reader + config model.Reader wmeta optional.Option[workloadmeta.Component] @@ -79,7 +79,7 @@ type UDSListener struct { // CloseFunction is a function that closes a connection type CloseFunction func(unixConn *net.UnixConn) error -func setupUnixConn(conn *net.UnixConn, originDetection bool, config config.Reader) (bool, error) { +func setupUnixConn(conn *net.UnixConn, originDetection bool, config model.Reader) (bool, error) { if originDetection { err := enableUDSPassCred(conn) if err != nil { @@ -133,7 +133,7 @@ func NewUDSOobPoolManager() *packets.PoolManager[[]byte] { } // NewUDSListener returns an idle UDS Statsd listener -func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg config.Reader, capture replay.Component, transport string, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSListener, error) { +func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, transport string, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSListener, error) { originDetection := 
cfg.GetBool("dogstatsd_origin_detection") listener := &UDSListener{ diff --git a/comp/dogstatsd/listeners/uds_datagram.go b/comp/dogstatsd/listeners/uds_datagram.go index 5f6a59f1d86ce..654cbad487bed 100644 --- a/comp/dogstatsd/listeners/uds_datagram.go +++ b/comp/dogstatsd/listeners/uds_datagram.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -27,7 +27,7 @@ type UDSDatagramListener struct { } // NewUDSDatagramListener returns an idle UDS datagram Statsd listener -func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPoolManager *packets.PoolManager[[]byte], cfg config.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetryComponent telemetry.Component) (*UDSDatagramListener, error) { +func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetryComponent telemetry.Component) (*UDSDatagramListener, error) { socketPath := cfg.GetString("dogstatsd_socket") transport := "unixgram" diff --git a/comp/dogstatsd/listeners/uds_stream.go b/comp/dogstatsd/listeners/uds_stream.go index 2c04ef3e9a0cf..494f78a93fbc6 100644 --- a/comp/dogstatsd/listeners/uds_stream.go +++ 
b/comp/dogstatsd/listeners/uds_stream.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -29,7 +29,7 @@ type UDSStreamListener struct { } // NewUDSStreamListener returns an idle UDS datagram Statsd listener -func NewUDSStreamListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg config.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSStreamListener, error) { +func NewUDSStreamListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSStreamListener, error) { socketPath := cfg.GetString("dogstatsd_stream_socket") transport := "unix" diff --git a/comp/dogstatsd/mapper/mapper_test.go b/comp/dogstatsd/mapper/mapper_test.go index ff05c83dcc281..c6139d4febf12 100644 --- a/comp/dogstatsd/mapper/mapper_test.go +++ b/comp/dogstatsd/mapper/mapper_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" configComponent "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/pkg/config/structure" ) func TestMappings(t *testing.T) { @@ -519,7 +520,7 @@ func 
getMapper(t *testing.T, configString string) (*MetricMapper, error) { cfg := configComponent.NewMockFromYAML(t, configString) - err := cfg.UnmarshalKey("dogstatsd_mapper_profiles", &profiles) + err := structure.UnmarshalKey(cfg, "dogstatsd_mapper_profiles", &profiles) if err != nil { return nil, err } diff --git a/comp/dogstatsd/packets/packet_manager_windows.go b/comp/dogstatsd/packets/packet_manager_windows.go index 695415f93ef60..32f79b661720f 100644 --- a/comp/dogstatsd/packets/packet_manager_windows.go +++ b/comp/dogstatsd/packets/packet_manager_windows.go @@ -9,7 +9,7 @@ package packets import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // PacketManager gathers everything required to create and assemble packets. @@ -20,7 +20,7 @@ type PacketManager struct { } // NewPacketManagerFromConfig creates a PacketManager from the relevant config settings. -func NewPacketManagerFromConfig(packetOut chan Packets, sharedPacketPoolManager *PoolManager[Packet], cfg config.Reader, telemetryStore *TelemetryStore) *PacketManager { +func NewPacketManagerFromConfig(packetOut chan Packets, sharedPacketPoolManager *PoolManager[Packet], cfg model.Reader, telemetryStore *TelemetryStore) *PacketManager { bufferSize := cfg.GetInt("dogstatsd_buffer_size") packetsBufferSize := cfg.GetInt("dogstatsd_packet_buffer_size") flushTimeout := cfg.GetDuration("dogstatsd_packet_buffer_flush_timeout") diff --git a/comp/dogstatsd/packets/pool.go b/comp/dogstatsd/packets/pool.go index 527c970834367..f0ee0b0a2a359 100644 --- a/comp/dogstatsd/packets/pool.go +++ b/comp/dogstatsd/packets/pool.go @@ -6,7 +6,7 @@ package packets import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) @@ -42,7 +42,7 @@ func NewPool(bufferSize int, packetsTelemetry 
*TelemetryStore) *Pool { return packet }), // telemetry - tlmEnabled: usedByTestTelemetry || utils.IsTelemetryEnabled(config.Datadog()), + tlmEnabled: usedByTestTelemetry || utils.IsTelemetryEnabled(pkgconfigsetup.Datadog()), packetsTelemetry: packetsTelemetry, } } diff --git a/comp/dogstatsd/replay/impl/capture.go b/comp/dogstatsd/replay/impl/capture.go index e299487388ef0..c2800904f15ee 100644 --- a/comp/dogstatsd/replay/impl/capture.go +++ b/comp/dogstatsd/replay/impl/capture.go @@ -19,7 +19,7 @@ import ( compdef "github.com/DataDog/datadog-agent/comp/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) //nolint:revive // TODO(AML) Fix revive linter @@ -31,7 +31,7 @@ type Requires struct { // trafficCapture allows capturing traffic from our listeners and writing it to file type trafficCapture struct { writer *TrafficCaptureWriter - config config.Reader + config model.Reader startUpError error sync.RWMutex diff --git a/comp/dogstatsd/server/batch.go b/comp/dogstatsd/server/batch.go index 06c7fd7a615df..d340be63c4121 100644 --- a/comp/dogstatsd/server/batch.go +++ b/comp/dogstatsd/server/batch.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" @@ -94,15 +94,15 @@ func (s *shardKeyGeneratorPerOrigin) Generate(sample metrics.MetricSample, shard // We fall back on the generic sharding if: // - the sample has a custom cardinality // - we don't have the origin - if 
sample.OriginInfo.Cardinality != "" || (sample.OriginInfo.FromUDS == "" && sample.OriginInfo.FromTag == "" && sample.OriginInfo.FromMsg == "") { + if sample.OriginInfo.Cardinality != "" || (sample.OriginInfo.ContainerIDFromSocket == "" && sample.OriginInfo.PodUID == "" && sample.OriginInfo.ContainerID == "") { return s.shardKeyGeneratorBase.Generate(sample, shards) } // Otherwise, we isolate the samples based on the origin. i, j := uint64(0), uint64(0) - i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.FromTag) - i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.FromMsg) - i, _ = murmur3.SeedStringSum128(i, j, sample.OriginInfo.FromUDS) + i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.PodUID) + i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.ContainerID) + i, _ = murmur3.SeedStringSum128(i, j, sample.OriginInfo.ContainerIDFromSocket) return fastrange(ckey.ContextKey(i), shards) } @@ -162,7 +162,7 @@ func newBatcher(demux aggregator.DemultiplexerWithAggregator, tlmChannel telemet } func getShardGenerator() shardKeyGenerator { - isolated := config.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") == aggregator.AutoAdjustStrategyPerOrigin + isolated := pkgconfigsetup.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") == aggregator.AutoAdjustStrategyPerOrigin base := shardKeyGeneratorBase{ keyGenerator: ckey.NewKeyGenerator(), diff --git a/comp/dogstatsd/server/enrich.go b/comp/dogstatsd/server/enrich.go index a5b2c04516e92..4441521b66bad 100644 --- a/comp/dogstatsd/server/enrich.go +++ b/comp/dogstatsd/server/enrich.go @@ -40,10 +40,10 @@ func extractTagsMetadata(tags []string, originFromUDS string, originFromMsg []by host := conf.defaultHostname metricSource := metrics.MetricSourceDogstatsd origin := taggertypes.OriginInfo{ - FromUDS: originFromUDS, - FromMsg: string(originFromMsg), - ExternalData: externalData, - ProductOrigin: taggertypes.ProductOriginDogStatsD, + ContainerIDFromSocket: originFromUDS, + 
ContainerID: string(originFromMsg), + ExternalData: externalData, + ProductOrigin: taggertypes.ProductOriginDogStatsD, } n := 0 @@ -52,7 +52,7 @@ func extractTagsMetadata(tags []string, originFromUDS string, originFromMsg []by host = tag[len(hostTagPrefix):] continue } else if strings.HasPrefix(tag, entityIDTagPrefix) { - origin.FromTag = tag[len(entityIDTagPrefix):] + origin.PodUID = tag[len(entityIDTagPrefix):] continue } else if strings.HasPrefix(tag, CardinalityTagPrefix) { origin.Cardinality = tag[len(CardinalityTagPrefix):] diff --git a/comp/dogstatsd/server/enrich_test.go b/comp/dogstatsd/server/enrich_test.go index f6eafda67c579..29b79597fac09 100644 --- a/comp/dogstatsd/server/enrich_test.go +++ b/comp/dogstatsd/server/enrich_test.go @@ -98,9 +98,9 @@ func TestConvertParseMultiple(t *testing.T) { assert.Equal(t, metricType, parsed[0].Mtype) assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) assert.Equal(t, "daemon", parsed[1].Name) @@ -108,9 +108,9 @@ func TestConvertParseMultiple(t *testing.T) { assert.Equal(t, metricType, parsed[1].Mtype) assert.Equal(t, 0, len(parsed[1].Tags)) assert.Equal(t, "default-hostname", parsed[1].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[1].SampleRate, epsilon) } } @@ -132,9 +132,9 @@ func 
TestConvertParseSingle(t *testing.T) { assert.Equal(t, metricType, parsed[0].Mtype) assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -158,9 +158,9 @@ func TestConvertParseSingleWithTags(t *testing.T) { assert.Equal(t, "protocol:http", parsed[0].Tags[0]) assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -184,9 +184,9 @@ func TestConvertParseSingleWithHostTags(t *testing.T) { assert.Equal(t, "protocol:http", parsed[0].Tags[0]) assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "custom-host", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -210,9 +210,9 @@ func TestConvertParseSingleWithEmptyHostTags(t *testing.T) { assert.Equal(t, "protocol:http", parsed[0].Tags[0]) assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "", parsed[0].Host) - 
assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -234,9 +234,9 @@ func TestConvertParseSingleWithSampleRate(t *testing.T) { assert.Equal(t, metricType, parsed[0].Mtype) assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) - assert.Equal(t, "", parsed[0].OriginInfo.FromUDS) - assert.Equal(t, "", parsed[0].OriginInfo.FromTag) - assert.Equal(t, "", parsed[0].OriginInfo.FromMsg) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed[0].OriginInfo.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) assert.InEpsilon(t, 0.21, parsed[0].SampleRate, epsilon) } } @@ -255,9 +255,9 @@ func TestConvertParseSet(t *testing.T) { assert.Equal(t, metrics.SetType, parsed.Mtype) assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -275,9 +275,9 @@ func TestConvertParseSetUnicode(t *testing.T) { assert.Equal(t, metrics.SetType, parsed.Mtype) assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", 
parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -295,9 +295,9 @@ func TestConvertParseGaugeWithPoundOnly(t *testing.T) { assert.Equal(t, metrics.GaugeType, parsed.Mtype) assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -316,9 +316,9 @@ func TestConvertParseGaugeWithUnicode(t *testing.T) { require.Equal(t, 1, len(parsed.Tags)) assert.Equal(t, "intitulé:T0µ", parsed.Tags[0]) assert.Equal(t, "default-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -392,9 +392,9 @@ func TestConvertServiceCheckMinimal(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -439,9 +439,9 @@ func TestConvertServiceCheckMetadataTimestamp(t *testing.T) { assert.Equal(t, int64(21), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", 
sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -457,9 +457,9 @@ func TestConvertServiceCheckMetadataHostname(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -475,9 +475,9 @@ func TestConvertServiceCheckMetadataHostnameInTag(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{}, sc.Tags) } @@ -493,9 +493,9 @@ func TestConvertServiceCheckMetadataEmptyHostTag(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{"other:tag"}, sc.Tags) } @@ -511,9 +511,9 @@ func 
TestConvertServiceCheckMetadataTags(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{"tag1", "tag2:test", "tag3"}, sc.Tags) } @@ -529,9 +529,9 @@ func TestConvertServiceCheckMetadataMessage(t *testing.T) { assert.Equal(t, int64(0), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -547,9 +547,9 @@ func TestConvertServiceCheckMetadataMultiple(t *testing.T) { assert.Equal(t, int64(21), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{"tag1:test", "tag2"}, sc.Tags) // multiple time the same tag @@ -560,9 +560,9 @@ func TestConvertServiceCheckMetadataMultiple(t *testing.T) { assert.Equal(t, int64(22), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + 
assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -577,9 +577,9 @@ func TestServiceCheckOriginTag(t *testing.T) { assert.Equal(t, int64(21), sc.Ts) assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) - assert.Equal(t, "", sc.OriginInfo.FromUDS) - assert.Equal(t, "testID", sc.OriginInfo.FromTag) - assert.Equal(t, "", sc.OriginInfo.FromMsg) + assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "testID", sc.OriginInfo.PodUID) + assert.Equal(t, "", sc.OriginInfo.ContainerID) assert.Equal(t, []string{"tag1:test", "tag2"}, sc.Tags) } @@ -600,9 +600,9 @@ func TestConvertEventMinimal(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMultilinesText(t *testing.T) { @@ -622,9 +622,9 @@ func TestConvertEventMultilinesText(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventPipeInTitle(t *testing.T) { @@ -644,9 +644,9 @@ func TestConvertEventPipeInTitle(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - 
assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventError(t *testing.T) { @@ -734,9 +734,9 @@ func TestConvertEventMetadataTimestamp(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataPriority(t *testing.T) { @@ -756,9 +756,9 @@ func TestConvertEventMetadataPriority(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataHostname(t *testing.T) { @@ -778,9 +778,9 @@ func TestConvertEventMetadataHostname(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataHostnameInTag(t *testing.T) { @@ -800,9 +800,9 @@ func TestConvertEventMetadataHostnameInTag(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", 
e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataEmptyHostTag(t *testing.T) { @@ -822,9 +822,9 @@ func TestConvertEventMetadataEmptyHostTag(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataAlertType(t *testing.T) { @@ -844,9 +844,9 @@ func TestConvertEventMetadataAlertType(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataAggregatioKey(t *testing.T) { @@ -866,9 +866,9 @@ func TestConvertEventMetadataAggregatioKey(t *testing.T) { assert.Equal(t, "some aggregation key", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataSourceType(t *testing.T) { @@ -888,9 
+888,9 @@ func TestConvertEventMetadataSourceType(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "this is the source", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataTags(t *testing.T) { @@ -910,9 +910,9 @@ func TestConvertEventMetadataTags(t *testing.T) { assert.Equal(t, "", e.AggregationKey) assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertEventMetadataMultiple(t *testing.T) { @@ -932,9 +932,9 @@ func TestConvertEventMetadataMultiple(t *testing.T) { assert.Equal(t, "aggKey", e.AggregationKey) assert.Equal(t, "source test", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "", e.OriginInfo.PodUID) + assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestEventOriginTag(t *testing.T) { @@ -954,9 +954,9 @@ func TestEventOriginTag(t *testing.T) { assert.Equal(t, "aggKey", e.AggregationKey) assert.Equal(t, "source test", e.SourceTypeName) assert.Equal(t, "", e.EventType) - assert.Equal(t, "", e.OriginInfo.FromUDS) - assert.Equal(t, "testID", e.OriginInfo.FromTag) - assert.Equal(t, "", e.OriginInfo.FromMsg) + assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "testID", e.OriginInfo.PodUID) + 
assert.Equal(t, "", e.OriginInfo.ContainerID) } func TestConvertNamespace(t *testing.T) { @@ -1062,9 +1062,9 @@ func TestConvertEntityOriginDetectionNoTags(t *testing.T) { assert.Equal(t, "sometag1:somevalue1", parsed.Tags[0]) assert.Equal(t, "sometag2:somevalue2", parsed.Tags[1]) assert.Equal(t, "my-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "foo", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "foo", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1081,9 +1081,9 @@ func TestConvertEntityOriginDetectionTags(t *testing.T) { require.Equal(t, 2, len(parsed.Tags)) assert.ElementsMatch(t, []string{"sometag1:somevalue1", "sometag2:somevalue2"}, parsed.Tags) assert.Equal(t, "my-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "foo", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "foo", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1101,9 +1101,9 @@ func TestConvertEntityOriginDetectionTagsError(t *testing.T) { assert.Equal(t, "sometag1:somevalue1", parsed.Tags[0]) assert.Equal(t, "sometag2:somevalue2", parsed.Tags[1]) assert.Equal(t, "my-hostname", parsed.Host) - assert.Equal(t, "", parsed.OriginInfo.FromUDS) - assert.Equal(t, "foo", parsed.OriginInfo.FromTag) - assert.Equal(t, "", parsed.OriginInfo.FromMsg) + assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) + assert.Equal(t, "foo", parsed.OriginInfo.PodUID) + assert.Equal(t, "", parsed.OriginInfo.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1151,7 +1151,7 @@ func TestEnrichTags(t *testing.T) { }, 
wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1167,7 +1167,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: nil, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1183,7 +1183,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "my-id"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "my-id"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1199,7 +1199,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "none"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "none"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1215,7 +1215,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1231,7 +1231,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "high"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "high"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1247,7 +1247,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: 
[]string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "orchestrator"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "orchestrator"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1263,7 +1263,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "low"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "low"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1279,7 +1279,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: "unknown"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "unknown"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1295,7 +1295,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "42", Cardinality: ""}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: ""}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1311,7 +1311,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{FromUDS: "originID", FromTag: "pod-uid", FromMsg: "container-id"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "pod-uid", ContainerID: "container-id"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1327,7 +1327,7 @@ func TestEnrichTags(t *testing.T) { }, wantedTags: []string{"env:prod"}, wantedHost: "foo", - wantedOrigin: 
taggertypes.OriginInfo{FromUDS: "originID", FromMsg: "container-id"}, + wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", ContainerID: "container-id"}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1359,10 +1359,10 @@ func TestEnrichTags(t *testing.T) { wantedTags: []string{"env:prod"}, wantedHost: "foo", wantedOrigin: taggertypes.OriginInfo{ - FromUDS: "originID", - FromTag: "pod-uid", - FromMsg: "container-id", - ExternalData: "it-false,cn-container_name,pu-pod_uid", + ContainerIDFromSocket: "originID", + PodUID: "pod-uid", + ContainerID: "container-id", + ExternalData: "it-false,cn-container_name,pu-pod_uid", }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, diff --git a/comp/dogstatsd/server/float64_list_pool.go b/comp/dogstatsd/server/float64_list_pool.go index c55859e78b772..fc4c870641e36 100644 --- a/comp/dogstatsd/server/float64_list_pool.go +++ b/comp/dogstatsd/server/float64_list_pool.go @@ -9,7 +9,7 @@ import ( "sync" "github.com/DataDog/datadog-agent/comp/core/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ) @@ -37,7 +37,7 @@ func newFloat64ListPool(telemetrycomp telemetry.Component) *float64ListPool { }, }, // telemetry - tlmEnabled: utils.IsTelemetryEnabled(config.Datadog()), + tlmEnabled: utils.IsTelemetryEnabled(pkgconfigsetup.Datadog()), tlmFloat64ListPoolGet: telemetrycomp.NewCounter("dogstatsd", "float64_list_pool_get", nil, "Count of get done in the float64_list pool"), tlmFloat64ListPoolPut: telemetrycomp.NewCounter("dogstatsd", "float64_list_pool_put", diff --git a/comp/dogstatsd/server/parse.go b/comp/dogstatsd/server/parse.go index 6fe618a702267..3c15ad8b05a5b 100644 --- a/comp/dogstatsd/server/parse.go +++ b/comp/dogstatsd/server/parse.go @@ -13,7 +13,7 @@ import ( "unsafe" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - 
"github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -72,7 +72,7 @@ type parser struct { provider provider.Provider } -func newParser(cfg config.Reader, float64List *float64ListPool, workerNum int, wmeta optional.Option[workloadmeta.Component], stringInternerTelemetry *stringInternerTelemetry) *parser { +func newParser(cfg model.Reader, float64List *float64ListPool, workerNum int, wmeta optional.Option[workloadmeta.Component], stringInternerTelemetry *stringInternerTelemetry) *parser { stringInternerCacheSize := cfg.GetInt("dogstatsd_string_interner_size") readTimestamps := cfg.GetBool("dogstatsd_no_aggregation_pipeline") diff --git a/comp/dogstatsd/server/server.go b/comp/dogstatsd/server/server.go index cb57d37f193f7..059fdafdff191 100644 --- a/comp/dogstatsd/server/server.go +++ b/comp/dogstatsd/server/server.go @@ -30,6 +30,7 @@ import ( serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -835,7 +836,7 @@ func getBuckets(cfg model.Reader, logger log.Component, option string) []float64 func getDogstatsdMappingProfiles(cfg model.Reader) ([]mapper.MappingProfileConfig, error) { var mappings []mapper.MappingProfileConfig if cfg.IsSet("dogstatsd_mapper_profiles") { - err := cfg.UnmarshalKey("dogstatsd_mapper_profiles", &mappings) + err := structure.UnmarshalKey(cfg, "dogstatsd_mapper_profiles", &mappings) if err != nil { return []mapper.MappingProfileConfig{}, fmt.Errorf("Could not parse dogstatsd_mapper_profiles: 
%v", err) } diff --git a/comp/dogstatsd/server/server_bench_test.go b/comp/dogstatsd/server/server_bench_test.go index 3efdeb31adefd..368541c5829f4 100644 --- a/comp/dogstatsd/server/server_bench_test.go +++ b/comp/dogstatsd/server/server_bench_test.go @@ -11,8 +11,9 @@ import ( "testing" "time" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" ) @@ -34,7 +35,7 @@ func benchParsePackets(b *testing.B, rawPacket []byte) { deps := fulfillDeps(b) s := deps.Server.(*server) // our logger will log dogstatsd packet by default if nothing is setup - pkgconfig.SetupLogger("", "off", "", "", false, true, false) + pkglogsetup.SetupLogger("", "off", "", "", false, true, false, pkgconfigsetup.Datadog()) histogram := deps.Telemetry.NewHistogram("test-dogstatsd", "channel_latency", @@ -87,7 +88,7 @@ func BenchmarkPbarseMetricMessage(b *testing.B) { deps := fulfillDeps(b) s := deps.Server.(*server) // our logger will log dogstatsd packet by default if nothing is setup - pkgconfig.SetupLogger("", "off", "", "", false, true, false) + pkglogsetup.SetupLogger("", "off", "", "", false, true, false, pkgconfigsetup.Datadog()) demux := deps.Demultiplexer @@ -139,7 +140,7 @@ func benchmarkMapperControl(b *testing.B, yaml string) { s := deps.Server.(*server) // our logger will log dogstatsd packet by default if nothing is setup - pkgconfig.SetupLogger("", "off", "", "", false, true, false) + pkglogsetup.SetupLogger("", "off", "", "", false, true, false, pkgconfigsetup.Datadog()) demux := deps.Demultiplexer diff --git a/comp/dogstatsd/server/server_test.go b/comp/dogstatsd/server/server_test.go index e60036ae2a5fd..53c865af4a654 100644 --- a/comp/dogstatsd/server/server_test.go +++ b/comp/dogstatsd/server/server_test.go @@ -44,9 +44,9 @@ import ( 
serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -743,7 +743,7 @@ func TestNoMappingsConfig(t *testing.T) { cfg["dogstatsd_port"] = listeners.RandomPortName deps := fulfillDepsWithConfigOverride(t, cfg) s := deps.Server.(*server) - cw := deps.Config.(config.Writer) + cw := deps.Config.(model.Writer) cw.SetWithoutSource("dogstatsd_port", listeners.RandomPortName) samples := []metrics.MetricSample{} @@ -1147,19 +1147,19 @@ func testContainerIDParsing(t *testing.T, cfg map[string]interface{}) { metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container"), "", "", false) assert.NoError(err) assert.Len(metrics, 1) - assert.Equal("metric-container", metrics[0].OriginInfo.FromMsg) + assert.Equal("metric-container", metrics[0].OriginInfo.ContainerID) // Event event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "") assert.NoError(err) assert.NotNil(event) - assert.Equal("event-container", event.OriginInfo.FromMsg) + assert.Equal("event-container", event.OriginInfo.ContainerID) // Service check serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "") assert.NoError(err) assert.NotNil(serviceCheck) - assert.Equal("service-check-container", serviceCheck.OriginInfo.FromMsg) + assert.Equal("service-check-container", serviceCheck.OriginInfo.ContainerID) } func TestContainerIDParsing(t 
*testing.T) { @@ -1191,19 +1191,19 @@ func TestOrigin(t *testing.T) { metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container|#dd.internal.card:none"), "", "", false) assert.NoError(err) assert.Len(metrics, 1) - assert.Equal("metric-container", metrics[0].OriginInfo.FromMsg) + assert.Equal("metric-container", metrics[0].OriginInfo.ContainerID) // Event event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container|#dd.internal.card:none"), "") assert.NoError(err) assert.NotNil(event) - assert.Equal("event-container", event.OriginInfo.FromMsg) + assert.Equal("event-container", event.OriginInfo.ContainerID) // Service check serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container|#dd.internal.card:none"), "") assert.NoError(err) assert.NotNil(serviceCheck) - assert.Equal("service-check-container", serviceCheck.OriginInfo.FromMsg) + assert.Equal("service-check-container", serviceCheck.OriginInfo.ContainerID) }) } diff --git a/comp/dogstatsd/server/serverless.go b/comp/dogstatsd/server/serverless.go index 4995ade81b07e..5426c4f2132c1 100644 --- a/comp/dogstatsd/server/serverless.go +++ b/comp/dogstatsd/server/serverless.go @@ -16,7 +16,7 @@ import ( replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/impl-noop" "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -31,7 +31,7 @@ type ServerlessDogstatsd interface { //nolint:revive // TODO(AML) Fix revive linter func NewServerlessServer(demux aggregator.Demultiplexer) (ServerlessDogstatsd, error) { wmeta := optional.NewNoneOption[workloadmeta.Component]() - s := newServerCompat(config.Datadog(), 
logComponentImpl.NewTemporaryLoggerWithoutInit(), replay.NewNoopTrafficCapture(), serverdebugimpl.NewServerlessServerDebug(), true, demux, wmeta, pidmapimpl.NewServerlessPidMap(), telemetry.GetCompatComponent()) + s := newServerCompat(pkgconfigsetup.Datadog(), logComponentImpl.NewTemporaryLoggerWithoutInit(), replay.NewNoopTrafficCapture(), serverdebugimpl.NewServerlessServerDebug(), true, demux, wmeta, pidmapimpl.NewServerlessPidMap(), telemetry.GetCompatComponent()) err := s.start(context.TODO()) if err != nil { diff --git a/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go b/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go index 887a2dd517f73..cfc062f825108 100644 --- a/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go +++ b/comp/dogstatsd/serverDebug/serverdebugimpl/debug.go @@ -26,10 +26,12 @@ import ( logComponentImpl "github.com/DataDog/datadog-agent/comp/core/log/impl" serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) // Module defines the fx options for this component. 
@@ -74,7 +76,7 @@ type serverDebugImpl struct { // NewServerlessServerDebug creates a new instance of serverDebug.Component func NewServerlessServerDebug() serverdebug.Component { - return newServerDebugCompat(logComponentImpl.NewTemporaryLoggerWithoutInit(), config.Datadog()) + return newServerDebugCompat(logComponentImpl.NewTemporaryLoggerWithoutInit(), pkgconfigsetup.Datadog()) } // newServerDebug creates a new instance of a ServerDebug @@ -82,7 +84,7 @@ func newServerDebug(deps dependencies) serverdebug.Component { return newServerDebugCompat(deps.Log, deps.Config) } -func newServerDebugCompat(l log.Component, cfg config.Reader) serverdebug.Component { +func newServerDebugCompat(l log.Component, cfg model.Reader) serverdebug.Component { sd := &serverDebugImpl{ log: l, enabled: atomic.NewBool(false), @@ -277,7 +279,7 @@ func (d *serverDebugImpl) disableMetricsStats() { } // build a local dogstatsd logger and bubbling up any errors -func (d *serverDebugImpl) getDogstatsdDebug(cfg config.Reader) slog.LoggerInterface { +func (d *serverDebugImpl) getDogstatsdDebug(cfg model.Reader) slog.LoggerInterface { var dogstatsdLogger slog.LoggerInterface @@ -289,7 +291,7 @@ func (d *serverDebugImpl) getDogstatsdDebug(cfg config.Reader) slog.LoggerInterf // Set up dogstatsdLogger if cfg.GetBool("dogstatsd_logging_enabled") { - logger, e := config.SetupDogstatsdLogger(logFile) + logger, e := pkglogsetup.SetupDogstatsdLogger(logFile, pkgconfigsetup.Datadog()) if e != nil { // use component logger instead of global logger. d.log.Errorf("Unable to set up Dogstatsd logger: %v. 
|| Please reach out to Datadog support at https://docs.datadoghq.com/help/ ", e) diff --git a/comp/forwarder/defaultforwarder/forwarder_health.go b/comp/forwarder/defaultforwarder/forwarder_health.go index fbd29370ec5cf..d046f72de5e45 100644 --- a/comp/forwarder/defaultforwarder/forwarder_health.go +++ b/comp/forwarder/defaultforwarder/forwarder_health.go @@ -149,15 +149,28 @@ func (fh *forwarderHealth) healthCheckLoop() { } for { - select { - case <-fh.stop: - return - case <-validateTicker.C: - valid := fh.checkValidAPIKey() - if !valid { - fh.log.Errorf("No valid api key found, reporting the forwarder as unhealthy.") + // only read from the health channel if the api key is valid + if valid { + select { + case <-fh.stop: + return + case <-validateTicker.C: + valid = fh.checkValidAPIKey() + if !valid { + fh.log.Errorf("No valid api key found, reporting the forwarder as unhealthy.") + } + case <-fh.health.C: + } + } else { + select { + case <-fh.stop: + return + case <-validateTicker.C: + valid = fh.checkValidAPIKey() + if !valid { + fh.log.Errorf("No valid api key found, reporting the forwarder as unhealthy.") + } } - case <-fh.health.C: } } } diff --git a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go index b4432b81ffc7f..7e0f51f509689 100644 --- a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go +++ b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go @@ -20,7 +20,8 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver/eventplatformreceiverimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" 
"github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -58,7 +59,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 10e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -72,7 +73,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 20e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -89,7 +90,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 20e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -103,7 +104,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ // raise the default batch_max_concurrent_send from 0 to 10 to ensure this pipeline is able to handle 4k events/s defaultBatchMaxConcurrentSend: 10, defaultBatchMaxContentSize: 20e6, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, // 
High input chan size is needed to handle high number of DBM events being flushed by DBM integrations defaultInputChanSize: 500, }, @@ -115,9 +116,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "ndm-intake.", intakeTrackType: "ndm", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeSnmpTraps, @@ -127,9 +128,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "snmp-traps-intake.", intakeTrackType: "ndmtraps", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeNetworkDevicesNetFlow, @@ -139,7 +140,7 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "ndmflow-intake.", intakeTrackType: "ndmflow", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, // Each NetFlow flow is about 500 bytes // 10k BatchMaxSize is about 5Mo of content size @@ -162,9 +163,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "netpath-intake.", intakeTrackType: "netpath", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: 
pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeContainerLifecycle, @@ -174,9 +175,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "contlcycle-intake.", intakeTrackType: "contlcycle", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeContainerImages, @@ -186,9 +187,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "contimage-intake.", intakeTrackType: "contimage", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeContainerSBOM, @@ -198,9 +199,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "sbom-intake.", intakeTrackType: "sbom", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + 
defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, { eventType: eventplatform.EventTypeServiceDiscovery, @@ -210,9 +211,9 @@ var passthroughPipelineDescs = []passthroughPipelineDesc{ hostnameEndpointPrefix: "instrumentation-telemetry-intake.", intakeTrackType: "apmtelemetry", defaultBatchMaxConcurrentSend: 10, - defaultBatchMaxContentSize: pkgconfig.DefaultBatchMaxContentSize, - defaultBatchMaxSize: pkgconfig.DefaultBatchMaxSize, - defaultInputChanSize: pkgconfig.DefaultInputChanSize, + defaultBatchMaxContentSize: pkgconfigsetup.DefaultBatchMaxContentSize, + defaultBatchMaxSize: pkgconfigsetup.DefaultBatchMaxSize, + defaultInputChanSize: pkgconfigsetup.DefaultInputChanSize, }, } @@ -246,8 +247,8 @@ func Diagnose() []diagnosis.Diagnosis { var diagnoses []diagnosis.Diagnosis for _, desc := range passthroughPipelineDescs { - configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfig.Datadog()) - endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfig.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) + configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfigsetup.Datadog()) + endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) if err != nil { diagnoses = append(diagnoses, diagnosis.Diagnosis{ Result: diagnosis.DiagnosisFail, @@ -259,7 +260,7 @@ func Diagnose() []diagnosis.Diagnosis { continue } - url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfig.Datadog()) + url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfigsetup.Datadog()) name := fmt.Sprintf("Connectivity to %s", url) if err == nil { diagnoses = 
append(diagnoses, diagnosis.Diagnosis{ @@ -370,9 +371,9 @@ type passthroughPipelineDesc struct { // newHTTPPassthroughPipeline creates a new HTTP-only event platform pipeline that sends messages directly to intake // without any of the processing that exists in regular logs pipelines. -func newHTTPPassthroughPipeline(coreConfig pkgconfig.Reader, eventPlatformReceiver eventplatformreceiver.Component, desc passthroughPipelineDesc, destinationsContext *client.DestinationsContext, pipelineID int) (p *passthroughPipeline, err error) { - configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfig.Datadog()) - endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfig.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) +func newHTTPPassthroughPipeline(coreConfig model.Reader, eventPlatformReceiver eventplatformreceiver.Component, desc passthroughPipelineDesc, destinationsContext *client.DestinationsContext, pipelineID int) (p *passthroughPipeline, err error) { + configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfigsetup.Datadog()) + endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) if err != nil { return nil, err } @@ -383,24 +384,24 @@ func newHTTPPassthroughPipeline(coreConfig pkgconfig.Reader, eventPlatformReceiv if endpoints.BatchMaxConcurrentSend <= 0 { endpoints.BatchMaxConcurrentSend = desc.defaultBatchMaxConcurrentSend } - if endpoints.BatchMaxContentSize <= pkgconfig.DefaultBatchMaxContentSize { + if endpoints.BatchMaxContentSize <= pkgconfigsetup.DefaultBatchMaxContentSize { endpoints.BatchMaxContentSize = desc.defaultBatchMaxContentSize } - if endpoints.BatchMaxSize <= pkgconfig.DefaultBatchMaxSize { + if endpoints.BatchMaxSize <= pkgconfigsetup.DefaultBatchMaxSize { endpoints.BatchMaxSize 
= desc.defaultBatchMaxSize } - if endpoints.InputChanSize <= pkgconfig.DefaultInputChanSize { + if endpoints.InputChanSize <= pkgconfigsetup.DefaultInputChanSize { endpoints.InputChanSize = desc.defaultInputChanSize } reliable := []client.Destination{} for i, endpoint := range endpoints.GetReliableEndpoints() { telemetryName := fmt.Sprintf("%s_%d_reliable_%d", desc.eventType, pipelineID, i) - reliable = append(reliable, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, true, telemetryName, pkgconfig.Datadog())) + reliable = append(reliable, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, true, telemetryName, pkgconfigsetup.Datadog())) } additionals := []client.Destination{} for i, endpoint := range endpoints.GetUnReliableEndpoints() { telemetryName := fmt.Sprintf("%s_%d_unreliable_%d", desc.eventType, pipelineID, i) - additionals = append(additionals, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, telemetryName, pkgconfig.Datadog())) + additionals = append(additionals, logshttp.NewDestination(endpoint, desc.contentType, destinationsContext, endpoints.BatchMaxConcurrentSend, false, telemetryName, pkgconfigsetup.Datadog())) } destinations := client.NewDestinations(reliable, additionals) inputChan := make(chan *message.Message, endpoints.InputChanSize) @@ -464,7 +465,7 @@ func joinHosts(endpoints []config.Endpoint) string { return strings.Join(additionalHosts, ",") } -func newDefaultEventPlatformForwarder(config pkgconfig.Reader, eventPlatformReceiver eventplatformreceiver.Component) *defaultEventPlatformForwarder { +func newDefaultEventPlatformForwarder(config model.Reader, eventPlatformReceiver eventplatformreceiver.Component) *defaultEventPlatformForwarder { destinationsCtx := client.NewDestinationsContext() destinationsCtx.Start() pipelines := make(map[string]*passthroughPipeline) @@ 
-523,7 +524,7 @@ func NewNoopEventPlatformForwarder(hostname hostnameinterface.Component) eventpl } func newNoopEventPlatformForwarder(hostname hostnameinterface.Component) *defaultEventPlatformForwarder { - f := newDefaultEventPlatformForwarder(pkgconfig.Datadog(), eventplatformreceiverimpl.NewReceiver(hostname).Comp) + f := newDefaultEventPlatformForwarder(pkgconfigsetup.Datadog(), eventplatformreceiverimpl.NewReceiver(hostname).Comp) // remove the senders for _, p := range f.pipelines { p.strategy = nil diff --git a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go index 151f85d2a7061..a656b5e4b04cc 100644 --- a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go +++ b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go @@ -20,9 +20,10 @@ import ( ) // Module defines the fx options for this component. -func Module() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( - fx.Provide(newOrchestratorForwarder)) + fx.Provide(newOrchestratorForwarder), + fx.Supply(params)) } // newOrchestratorForwarder builds the orchestrator forwarder. diff --git a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go index ba079b7bb4c2b..7fedf5deeece4 100644 --- a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go +++ b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go @@ -25,9 +25,10 @@ import ( ) // Module defines the fx options for this component. 
-func Module() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( - fx.Provide(newOrchestratorForwarder)) + fx.Provide(newOrchestratorForwarder), + fx.Supply(params)) } // newOrchestratorForwarder returns an orchestratorForwarder diff --git a/comp/logs/agent/agentimpl/agent.go b/comp/logs/agent/agentimpl/agent.go index b5626e3d9b54b..0849d55b2b7ef 100644 --- a/comp/logs/agent/agentimpl/agent.go +++ b/comp/logs/agent/agentimpl/agent.go @@ -31,7 +31,7 @@ import ( integrationsimpl "github.com/DataDog/datadog-agent/comp/logs/integrations/impl" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" rctypes "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/types" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" @@ -99,7 +99,7 @@ type provides struct { // a description of its operation. 
type logAgent struct { log log.Component - config pkgConfig.Reader + config model.Reader inventoryAgent inventoryagent.Component hostname hostname.Component tagger tagger.Component diff --git a/comp/logs/agent/agentimpl/agent_core_init.go b/comp/logs/agent/agentimpl/agent_core_init.go index 903fbbce2da63..c05729c40d0d6 100644 --- a/comp/logs/agent/agentimpl/agent_core_init.go +++ b/comp/logs/agent/agentimpl/agent_core_init.go @@ -13,7 +13,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/client/http" @@ -79,7 +79,7 @@ func (a *logAgent) SetupPipeline(processingRules []*config.ProcessingRule, wmeta } // buildEndpoints builds endpoints for the logs agent -func buildEndpoints(coreConfig pkgConfig.Reader) (*config.Endpoints, error) { +func buildEndpoints(coreConfig model.Reader) (*config.Endpoints, error) { httpConnectivity := config.HTTPConnectivityFailure if endpoints, err := config.BuildHTTPEndpointsWithVectorOverride(coreConfig, intakeTrackType, config.AgentJSONIntakeProtocol, config.DefaultIntakeOrigin); err == nil { httpConnectivity = http.CheckConnectivity(endpoints.Main, coreConfig) diff --git a/comp/logs/agent/agentimpl/agent_serverless_init.go b/comp/logs/agent/agentimpl/agent_serverless_init.go index aff03ee85d561..31dbf3e41d2dc 100644 --- a/comp/logs/agent/agentimpl/agent_serverless_init.go +++ b/comp/logs/agent/agentimpl/agent_serverless_init.go @@ -13,8 +13,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations 
"github.com/DataDog/datadog-agent/comp/logs/integrations/def" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" @@ -75,7 +75,7 @@ func (a *logAgent) SetupPipeline( } // buildEndpoints builds endpoints for the logs agent -func buildEndpoints(coreConfig pkgConfig.Reader) (*config.Endpoints, error) { +func buildEndpoints(coreConfig model.Reader) (*config.Endpoints, error) { config, err := config.BuildServerlessEndpoints(coreConfig, intakeTrackType, config.DefaultIntakeProtocol) if err != nil { return nil, err diff --git a/comp/logs/agent/agentimpl/agent_test.go b/comp/logs/agent/agentimpl/agent_test.go index d130981ae7920..c5cefb6be5113 100644 --- a/comp/logs/agent/agentimpl/agent_test.go +++ b/comp/logs/agent/agentimpl/agent_test.go @@ -36,8 +36,8 @@ import ( flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent/inventoryagentimpl" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client/http" "github.com/DataDog/datadog-agent/pkg/logs/client/mock" "github.com/DataDog/datadog-agent/pkg/logs/client/tcp" @@ -149,7 +149,7 @@ func createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) (*logAgent, } func (suite *AgentTestSuite) testAgent(endpoints *config.Endpoints) { - coreConfig.SetFeatures(suite.T(), env.Docker, env.Kubernetes) + env.SetFeatures(suite.T(), env.Docker, env.Kubernetes) agent, sources, _ := createAgent(suite, endpoints) @@ -185,7 +185,7 @@ func (suite *AgentTestSuite) TestAgentTcp() { } func (suite *AgentTestSuite) TestAgentHttp() { 
- server := http.NewTestServer(200, coreConfig.Datadog()) + server := http.NewTestServer(200, pkgconfigsetup.Datadog()) defer server.Stop() endpoints := config.NewEndpoints(server.Endpoint, nil, false, true) @@ -196,7 +196,7 @@ func (suite *AgentTestSuite) TestAgentStopsWithWrongBackendTcp() { endpoint := config.NewEndpoint("", "fake:", 0, false) endpoints := config.NewEndpoints(endpoint, []config.Endpoint{}, true, false) - coreConfig.SetFeatures(suite.T(), env.Docker, env.Kubernetes) + env.SetFeatures(suite.T(), env.Docker, env.Kubernetes) agent, sources, _ := createAgent(suite, endpoints) diff --git a/comp/logs/agent/agentimpl/serverless.go b/comp/logs/agent/agentimpl/serverless.go index 22e06ae5a8beb..3e280f8e2921c 100644 --- a/comp/logs/agent/agentimpl/serverless.go +++ b/comp/logs/agent/agentimpl/serverless.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/logs/agent" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/service" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/tailers" @@ -24,7 +24,7 @@ import ( func NewServerlessLogsAgent(tagger tagger.Component) agent.ServerlessLogsAgent { logsAgent := &logAgent{ log: logComponent.NewTemporaryLoggerWithoutInit(), - config: pkgConfig.Datadog(), + config: pkgconfigsetup.Datadog(), started: atomic.NewUint32(0), sources: sources.NewLogSources(), diff --git a/comp/logs/agent/config/config.go b/comp/logs/agent/config/config.go index 5c188e2139168..4a383358329a1 100644 --- a/comp/logs/agent/config/config.go +++ b/comp/logs/agent/config/config.go @@ -15,6 +15,7 @@ import ( "time" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/structure" 
pkgconfigutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -70,7 +71,7 @@ func GlobalProcessingRules(coreConfig pkgconfigmodel.Reader) ([]*ProcessingRule, if s, ok := raw.(string); ok && s != "" { err = json.Unmarshal([]byte(s), &rules) } else { - err = coreConfig.UnmarshalKey("logs_config.processing_rules", &rules) + err = structure.UnmarshalKey(coreConfig, "logs_config.processing_rules", &rules, structure.ConvertEmptyStringToNil) } if err != nil { return nil, err diff --git a/comp/logs/agent/config/config_keys.go b/comp/logs/agent/config/config_keys.go index df5ee1ad4abe6..0f9dbbf2b36de 100644 --- a/comp/logs/agent/config/config_keys.go +++ b/comp/logs/agent/config/config_keys.go @@ -11,6 +11,7 @@ import ( pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -152,7 +153,7 @@ func (l *LogsConfigKeys) getAdditionalEndpoints() []unmarshalEndpoint { if s, ok := raw.(string); ok && s != "" { err = json.Unmarshal([]byte(s), &endpoints) } else { - err = l.getConfig().UnmarshalKey(configKey, &endpoints) + err = structure.UnmarshalKey(l.getConfig(), configKey, &endpoints, structure.EnableSquash) } if err != nil { log.Warnf("Could not parse additional_endpoints for logs: %v", err) diff --git a/comp/logs/agent/config/go.mod b/comp/logs/agent/config/go.mod index ee7c4157195db..2471fd0f3416f 100644 --- a/comp/logs/agent/config/go.mod +++ b/comp/logs/agent/config/go.mod @@ -16,6 +16,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model/ github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure 
github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable @@ -35,12 +36,13 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 github.com/DataDog/viper v1.13.5 github.com/stretchr/testify v1.9.0 go.uber.org/fx v1.22.2 @@ -49,19 +51,19 @@ require ( require ( github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect - 
github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect diff --git a/comp/metadata/host/hostimpl/hosttags/tags.go b/comp/metadata/host/hostimpl/hosttags/tags.go index 9215071ea4b33..606a874bf7fd3 100644 --- a/comp/metadata/host/hostimpl/hosttags/tags.go +++ b/comp/metadata/host/hostimpl/hosttags/tags.go @@ -12,8 +12,8 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util" 
"github.com/DataDog/datadog-agent/pkg/util/cache" @@ -44,7 +44,7 @@ type providerDef struct { getTags func(context.Context) ([]string, error) } -func getProvidersDefinitions(conf config.Reader) map[string]*providerDef { +func getProvidersDefinitions(conf model.Reader) map[string]*providerDef { providers := make(map[string]*providerDef) if conf.GetBool("collect_gce_tags") { @@ -100,7 +100,7 @@ func appendAndSplitTags(target []string, tags []string, splits map[string]string // - First one controlled by `cached` boolean, used for performances (cache all tags) // - Second one per provider, to avoid missing host tags for 30 minutes when a component fails (for instance, Cluster Agent). // This second layer is always on. -func Get(ctx context.Context, cached bool, conf config.Reader) *Tags { +func Get(ctx context.Context, cached bool, conf model.Reader) *Tags { if cached { if x, found := cache.Cache.Get(tagsCacheKey); found { tags := x.(*Tags) diff --git a/comp/metadata/host/hostimpl/hosttags/tags_test.go b/comp/metadata/host/hostimpl/hosttags/tags_test.go index ebd42ed745b1b..64460410ac143 100644 --- a/comp/metadata/host/hostimpl/hosttags/tags_test.go +++ b/comp/metadata/host/hostimpl/hosttags/tags_test.go @@ -15,9 +15,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" - model "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/model" ) func setupTest(t *testing.T) (model.Config, context.Context) { @@ -111,7 +110,7 @@ func TestHostTagsCache(t *testing.T) { var fooErr error nbCall := 0 - getProvidersDefinitionsFunc = func(config.Reader) map[string]*providerDef { + getProvidersDefinitionsFunc = func(model.Reader) map[string]*providerDef { return map[string]*providerDef{ "foo": { retries: 2, diff --git a/comp/metadata/host/hostimpl/utils/common.go b/comp/metadata/host/hostimpl/utils/common.go index 
b4e4c5ef6a992..08e5888a99359 100644 --- a/comp/metadata/host/hostimpl/utils/common.go +++ b/comp/metadata/host/hostimpl/utils/common.go @@ -6,7 +6,7 @@ package utils import ( - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/uuid" "github.com/DataDog/datadog-agent/pkg/version" @@ -21,7 +21,7 @@ type CommonPayload struct { } // GetCommonPayload fills and return the common metadata payload -func GetCommonPayload(hostname string, conf config.Reader) *CommonPayload { +func GetCommonPayload(hostname string, conf model.Reader) *CommonPayload { return &CommonPayload{ // olivier: I _think_ `APIKey` is only a legacy field, and // is not actually used by the backend diff --git a/comp/metadata/host/hostimpl/utils/host.go b/comp/metadata/host/hostimpl/utils/host.go index 5646dfbd44287..72ed11ad307cc 100644 --- a/comp/metadata/host/hostimpl/utils/host.go +++ b/comp/metadata/host/hostimpl/utils/host.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" "github.com/DataDog/datadog-agent/comp/otelcol/otlp" "github.com/DataDog/datadog-agent/pkg/collector/python" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/status" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" @@ -118,14 +118,14 @@ func getNetworkMeta(ctx context.Context) *NetworkMeta { return networkMeta } -func getLogsMeta(conf config.Reader) *LogsMeta { +func getLogsMeta(conf model.Reader) *LogsMeta { return &LogsMeta{ Transport: string(status.GetCurrentTransport()), AutoMultilineEnabled: conf.GetBool("logs_config.auto_multi_line_detection"), } } -func getInstallMethod(conf config.Reader) *InstallMethod { +func getInstallMethod(conf model.Reader) *InstallMethod { 
install, err := installinfoGet(conf) if err != nil { return &InstallMethod{ @@ -145,7 +145,7 @@ func getInstallMethod(conf config.Reader) *InstallMethod { // metadata payload. The NoProxy maps contain any errors or warnings due to the behavior changing when // no_proxy_nonexact_match is enabled. ProxyBehaviorChanged is true in the metadata if there would be any errors or // warnings indicating that there would a behavior change if 'no_proxy_nonexact_match' was enabled. -func getProxyMeta(conf config.Reader) *ProxyMeta { +func getProxyMeta(conf model.Reader) *ProxyMeta { NoProxyNonexactMatchExplicitlySetState := false NoProxyNonexactMatch := false if conf.IsSet("no_proxy_nonexact_match") { @@ -168,7 +168,7 @@ func GetOSVersion() string { // GetPayload builds a metadata payload every time is called. // Some data is collected only once, some is cached, some is collected at every call. -func GetPayload(ctx context.Context, conf config.Reader) *Payload { +func GetPayload(ctx context.Context, conf model.Reader) *Payload { hostnameData, err := hostname.GetWithProvider(ctx) if err != nil { log.Errorf("Error grabbing hostname for status: %v", err) @@ -200,7 +200,7 @@ func GetPayload(ctx context.Context, conf config.Reader) *Payload { // GetFromCache returns the payload from the cache if it exists, otherwise it creates it. // The metadata reporting should always grab it fresh. Any other uses, e.g. 
status, should use this -func GetFromCache(ctx context.Context, conf config.Reader) *Payload { +func GetFromCache(ctx context.Context, conf model.Reader) *Payload { data, found := cache.Cache.Get(hostCacheKey) if !found { return GetPayload(ctx, conf) diff --git a/comp/metadata/host/hostimpl/utils/host_test.go b/comp/metadata/host/hostimpl/utils/host_test.go index 6e8da546812fb..8a7d5294ae0d2 100644 --- a/comp/metadata/host/hostimpl/utils/host_test.go +++ b/comp/metadata/host/hostimpl/utils/host_test.go @@ -15,8 +15,8 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/pkg/collector/python" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/status" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" @@ -31,13 +31,13 @@ func TestOTLPEnabled(t *testing.T) { ctx := context.Background() conf := configmock.New(t) - defer func(orig func(cfg config.Reader) bool) { otlpIsEnabled = orig }(otlpIsEnabled) + defer func(orig func(cfg model.Reader) bool) { otlpIsEnabled = orig }(otlpIsEnabled) - otlpIsEnabled = func(config.Reader) bool { return false } + otlpIsEnabled = func(model.Reader) bool { return false } p := GetPayload(ctx, conf) assert.False(t, p.OtlpMeta.Enabled) - otlpIsEnabled = func(config.Reader) bool { return true } + otlpIsEnabled = func(model.Reader) bool { return true } p = GetPayload(ctx, conf) assert.True(t, p.OtlpMeta.Enabled) } @@ -69,18 +69,18 @@ func TestGetLogsMeta(t *testing.T) { func TestGetInstallMethod(t *testing.T) { conf := configmock.New(t) - defer func(orig func(conf config.Reader) (*installinfo.InstallInfo, error)) { + defer func(orig func(conf model.Reader) (*installinfo.InstallInfo, error)) { installinfoGet = orig }(installinfoGet) - installinfoGet = func(config.Reader) (*installinfo.InstallInfo, error) { 
return nil, fmt.Errorf("an error") } + installinfoGet = func(model.Reader) (*installinfo.InstallInfo, error) { return nil, fmt.Errorf("an error") } installMethod := getInstallMethod(conf) assert.Equal(t, "undefined", installMethod.ToolVersion) assert.Nil(t, installMethod.Tool) assert.Nil(t, installMethod.InstallerVersion) - installinfoGet = func(config.Reader) (*installinfo.InstallInfo, error) { + installinfoGet = func(model.Reader) (*installinfo.InstallInfo, error) { return &installinfo.InstallInfo{ ToolVersion: "chef-15", Tool: "chef", diff --git a/comp/metadata/host/hostimpl/utils/meta.go b/comp/metadata/host/hostimpl/utils/meta.go index a395a82f02824..bbd5f2b479cf9 100644 --- a/comp/metadata/host/hostimpl/utils/meta.go +++ b/comp/metadata/host/hostimpl/utils/meta.go @@ -10,7 +10,7 @@ import ( "os" "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" @@ -38,7 +38,7 @@ type Meta struct { // GetMetaFromCache returns the metadata information about the host from the cache and returns it, if the cache is // empty, then it queries the information directly -func GetMetaFromCache(ctx context.Context, conf config.Reader) *Meta { +func GetMetaFromCache(ctx context.Context, conf model.Reader) *Meta { res, _ := cache.Get[*Meta]( metaCacheKey, func() (*Meta, error) { @@ -49,7 +49,7 @@ func GetMetaFromCache(ctx context.Context, conf config.Reader) *Meta { } // GetMeta returns the metadata information about the host and refreshes the cache -func GetMeta(ctx context.Context, conf config.Reader) *Meta { +func GetMeta(ctx context.Context, conf model.Reader) *Meta { osHostname, _ := os.Hostname() tzname, _ := time.Now().Zone() ec2Hostname, _ := ec2.GetHostname(ctx) diff --git a/comp/metadata/internal/util/inventory_enabled.go b/comp/metadata/internal/util/inventory_enabled.go 
index ee4ebbda3c0df..aa351bd780881 100644 --- a/comp/metadata/internal/util/inventory_enabled.go +++ b/comp/metadata/internal/util/inventory_enabled.go @@ -6,13 +6,13 @@ package util import ( - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) // InventoryEnabled returs true if 'enable_metadata_collection' and 'inventories_enabled' are set to true in the // configuration. -func InventoryEnabled(conf config.Reader) bool { +func InventoryEnabled(conf model.Reader) bool { if !conf.GetBool("enable_metadata_collection") { log.Debug("Metadata collection disabled: inventories payload will not be collected nor sent") return false diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go index 564f38ea35b1f..dd5f29d4ff4d2 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent_test.go @@ -24,7 +24,6 @@ import ( logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" configFetcher "github.com/DataDog/datadog-agent/pkg/config/fetcher" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -274,7 +273,7 @@ func TestConfigRefresh(t *testing.T) { ia := getTestInventoryPayload(t, nil, nil) assert.False(t, ia.RefreshTriggered()) - pkgconfig.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) assert.True(t, ia.RefreshTriggered()) } diff --git 
a/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go b/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go index 4617b590bc214..1ce722fc511a1 100644 --- a/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go +++ b/comp/metadata/inventoryotel/inventoryotelimpl/inventoryotel_test.go @@ -20,8 +20,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" serializermock "github.com/DataDog/datadog-agent/pkg/serializer/mocks" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -101,7 +101,7 @@ func TestConfigRefresh(t *testing.T) { io := getTestInventoryPayload(t, nil) assert.False(t, io.RefreshTriggered()) - pkgconfig.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("inventories_max_interval", 10*60, pkgconfigmodel.SourceAgentRuntime) assert.True(t, io.RefreshTriggered()) } diff --git a/comp/netflow/config/config.go b/comp/netflow/config/config.go index ed5db3eb41bbc..1854ed39dfc1a 100644 --- a/comp/netflow/config/config.go +++ b/comp/netflow/config/config.go @@ -12,6 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/snmp/utils" "github.com/DataDog/datadog-agent/comp/netflow/common" @@ -59,7 +60,7 @@ type Mapping struct { func ReadConfig(conf config.Component, logger log.Component) (*NetflowConfig, error) { var mainConfig NetflowConfig - err := conf.UnmarshalKey("network_devices.netflow", &mainConfig) + err := 
structure.UnmarshalKey(conf, "network_devices.netflow", &mainConfig) if err != nil { return nil, err } diff --git a/comp/netflow/config/config_test.go b/comp/netflow/config/config_test.go index 89f232a601a0b..4fd6a9331dbda 100644 --- a/comp/netflow/config/config_test.go +++ b/comp/netflow/config/config_test.go @@ -14,7 +14,7 @@ import ( logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/netflow/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestReadConfig(t *testing.T) { @@ -216,11 +216,11 @@ network_devices: } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(tt.configYaml)) + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(tt.configYaml)) require.NoError(t, err) - readConfig, err := ReadConfig(config.Datadog(), logger) + readConfig, err := ReadConfig(pkgconfigsetup.Datadog(), logger) if tt.expectedError != "" { assert.ErrorContains(t, err, tt.expectedError) assert.Nil(t, readConfig) diff --git a/comp/netflow/flowaggregator/flowaccumulator_test.go b/comp/netflow/flowaggregator/flowaccumulator_test.go index 8e51b56ac1f82..1707ba14a9ed9 100644 --- a/comp/netflow/flowaggregator/flowaccumulator_test.go +++ b/comp/netflow/flowaggregator/flowaccumulator_test.go @@ -306,3 +306,77 @@ func Test_flowAccumulator_flush(t *testing.T) { _, ok = acc.flows[flow.AggregationHash()] assert.False(t, ok) } + +func Test_flowAccumulator_detectHashCollision(t *testing.T) { + logger := logmock.New(t) + rdnsQuerier := fxutil.Test[rdnsquerier.Component](t, rdnsquerierfxmock.MockModule()) + synFlag := uint32(2) + timeNow = MockTimeNow + flushInterval := 60 * time.Second + flowContextTTL := 60 * time.Second + + // Given + flowA1 := &common.Flow{ + FlowType: common.TypeNetFlow9, + 
ExporterAddr: []byte{127, 0, 0, 1}, + StartTimestamp: 1234568, + EndTimestamp: 1234569, + Bytes: 20, + Packets: 4, + SrcAddr: []byte{10, 10, 10, 10}, + DstAddr: []byte{10, 10, 10, 20}, + IPProtocol: uint32(6), + SrcPort: 1000, + DstPort: 80, + TCPFlags: synFlag, + } + flowA2 := &common.Flow{ + FlowType: common.TypeNetFlow9, + ExporterAddr: []byte{127, 0, 0, 1}, + StartTimestamp: 1234568, + EndTimestamp: 1234569, + Bytes: 20, + Packets: 4, + SrcAddr: []byte{10, 10, 10, 10}, + DstAddr: []byte{10, 10, 10, 20}, + IPProtocol: uint32(6), + SrcPort: 1000, + DstPort: 80, + TCPFlags: synFlag, + } + flowB1 := &common.Flow{ + FlowType: common.TypeNetFlow9, + ExporterAddr: []byte{127, 0, 0, 1}, + StartTimestamp: 1234568, + EndTimestamp: 1234569, + Bytes: 100, + Packets: 10, + SrcAddr: []byte{10, 10, 10, 10}, + DstAddr: []byte{10, 10, 10, 30}, + IPProtocol: uint32(6), + SrcPort: 80, + DstPort: 2001, + } + + // When + acc := newFlowAccumulator(flushInterval, flowContextTTL, common.DefaultAggregatorPortRollupThreshold, false, logger, rdnsQuerier) + + // Then + assert.Equal(t, uint64(0), acc.hashCollisionFlowCount.Load()) + + // test valid hash collision (same flow object) does not increment flow count + aggHash1 := flowA1.AggregationHash() + acc.detectHashCollision(aggHash1, *flowA1, *flowA1) + assert.Equal(t, uint64(0), acc.hashCollisionFlowCount.Load()) + + // test valid hash collision (same data, new flow object) does not increment flow count + // Note: not a realistic use case as hashes will be different, but testing for completeness + aggHash2 := flowA2.AggregationHash() + acc.detectHashCollision(aggHash2, *flowA1, *flowA2) + assert.Equal(t, uint64(0), acc.hashCollisionFlowCount.Load()) + + // test invalid hash collision (different flow context, same hash) increments flow count + aggHash3 := flowB1.AggregationHash() + acc.detectHashCollision(aggHash3, *flowA1, *flowB1) + assert.Equal(t, uint64(1), acc.hashCollisionFlowCount.Load()) +} diff --git 
a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go index 770e90ed5e188..9b1f1ee475f83 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go +++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector_test.go @@ -133,6 +133,7 @@ func Test_NpCollector_runningAndProcessing(t *testing.T) { event1 := []byte(` { "timestamp": 0, + "agent_version": "", "namespace": "my-ns1", "pathtrace_id": "pathtrace-id-111", "origin":"network_traffic", @@ -166,6 +167,7 @@ func Test_NpCollector_runningAndProcessing(t *testing.T) { event2 := []byte(` { "timestamp": 0, + "agent_version": "", "namespace": "my-ns1", "pathtrace_id": "pathtrace-id-222", "origin":"network_traffic", diff --git a/comp/otelcol/collector/impl/collector.go b/comp/otelcol/collector/impl/collector.go index 87fcbae39ce63..3713a354c8287 100644 --- a/comp/otelcol/collector/impl/collector.go +++ b/comp/otelcol/collector/impl/collector.go @@ -21,13 +21,17 @@ import ( "go.opentelemetry.io/collector/confmap/provider/yamlprovider" "go.opentelemetry.io/collector/otelcol" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/tagger" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" compdef "github.com/DataDog/datadog-agent/comp/def" collectorcontrib "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def" collector "github.com/DataDog/datadog-agent/comp/otelcol/collector/def" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" ddextension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl" "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" 
"github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter" @@ -39,9 +43,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/serializer" zapAgent "github.com/DataDog/datadog-agent/pkg/util/log/zap" "github.com/DataDog/datadog-agent/pkg/util/optional" - "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) type collectorImpl struct { @@ -59,7 +60,6 @@ type Requires struct { // Log specifies the logging component. Log log.Component Provider confmap.Converter - ConfigStore configstore.Component Config config.Component CollectorContrib collectorcontrib.Component Serializer serializer.MetricSerializer @@ -110,15 +110,19 @@ func newConfigProviderSettings(reqs Requires, enhanced bool) otelcol.ConfigProvi } } +func generateID(group, resource, namespace, name string) string { + return string(util.GenerateKubeMetadataEntityID(group, resource, namespace, name)) +} + func addFactories(reqs Requires, factories otelcol.Factories) { if v, ok := reqs.LogsAgent.Get(); ok { factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(reqs.TraceAgent, reqs.Serializer, v, reqs.SourceProvider, reqs.StatsdClientWrapper) } else { factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(reqs.TraceAgent, reqs.Serializer, nil, reqs.SourceProvider, reqs.StatsdClientWrapper) } - factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(reqs.Tagger) - factories.Extensions[ddextension.Type] = ddextension.NewFactory(reqs.ConfigStore) + factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(reqs.Tagger, generateID) factories.Connectors[component.MustNewType("datadog")] = datadogconnector.NewFactory() + factories.Extensions[ddextension.Type] = ddextension.NewFactory(&factories, newConfigProviderSettings(reqs, false)) } // NewComponent returns a new instance of the collector component. 
@@ -130,11 +134,6 @@ func NewComponent(reqs Requires) (Provides, error) { addFactories(reqs, factories) converterEnabled := reqs.Config.GetBool("otelcollector.converter.enabled") - err = reqs.ConfigStore.AddConfigs(newConfigProviderSettings(reqs, false), newConfigProviderSettings(reqs, converterEnabled), factories) - if err != nil { - return Provides{}, err - } - // Replace default core to use Agent logger options := []zap.Option{ zap.WrapCore(func(zapcore.Core) zapcore.Core { diff --git a/comp/otelcol/collector/impl/collector_test.go b/comp/otelcol/collector/impl/collector_test.go deleted file mode 100644 index c7a2e2c1f50ff..0000000000000 --- a/comp/otelcol/collector/impl/collector_test.go +++ /dev/null @@ -1,196 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build otlp - -// Package collectorimpl provides the implementation of the collector component for OTel Agent -package collectorimpl - -import ( - "os" - "path/filepath" - "testing" - - compdef "github.com/DataDog/datadog-agent/comp/def" - collectorcontribimpl "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" - converter "github.com/DataDog/datadog-agent/comp/otelcol/converter/impl" - "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/confmap/confmaptest" - "gopkg.in/yaml.v3" -) - -type lifecycle struct{} - -func (*lifecycle) Append(compdef.Hook) {} - -func uriFromFile(filename string) []string { - return []string{filepath.Join("testdata", filename)} -} - -func yamlBytesToMap(bytesConfig []byte) (map[string]any, error) { - var configMap = map[string]interface{}{} - err := yaml.Unmarshal(bytesConfig, configMap) - if err != 
nil { - return nil, err - } - return configMap, nil -} - -func TestGetConfDump(t *testing.T) { - configstore, err := configstore.NewConfigStore() - assert.NoError(t, err) - - provider, err := converter.NewConverter(converter.Requires{}) - assert.NoError(t, err) - - conf := setup.Datadog() - - reqs := Requires{ - CollectorContrib: collectorcontribimpl.NewComponent(), - Config: conf, - URIs: uriFromFile("simple-dd/config.yaml"), - ConfigStore: configstore, - Lc: &lifecycle{}, - Provider: provider, - } - _, err = NewComponent(reqs) - assert.NoError(t, err) - - t.Run("provided-string", func(t *testing.T) { - actualString, _ := configstore.GetProvidedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - - assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("provided-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetProvidedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) - - t.Run("enhanced-string", func(t *testing.T) { - actualString, _ := configstore.GetEnhancedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-enhanced-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - 
- assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("enhance-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetEnhancedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-enhanced-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) -} - -func TestGetConfDumpConverterDisabled(t *testing.T) { - configstore, err := configstore.NewConfigStore() - assert.NoError(t, err) - - provider, err := converter.NewConverter(converter.Requires{}) - assert.NoError(t, err) - - conf := setup.Datadog() - conf.SetWithoutSource("otelcollector.converter.enabled", false) - - reqs := Requires{ - CollectorContrib: collectorcontribimpl.NewComponent(), - Config: conf, - URIs: uriFromFile("simple-dd/config.yaml"), - ConfigStore: configstore, - Lc: &lifecycle{}, - Provider: provider, - } - _, err = NewComponent(reqs) - assert.NoError(t, err) - - t.Run("provided-string", func(t *testing.T) { - actualString, _ := configstore.GetProvidedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - - assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("provided-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetProvidedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - 
assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) - - t.Run("enhanced-string", func(t *testing.T) { - actualString, _ := configstore.GetEnhancedConfAsString() - actualStringMap, err := yamlBytesToMap([]byte(actualString)) - assert.NoError(t, err) - - expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) - assert.NoError(t, err) - expectedMap, err := yamlBytesToMap(expectedBytes) - assert.NoError(t, err) - - assert.Equal(t, expectedMap, actualStringMap) - }) - - t.Run("enhance-confmap", func(t *testing.T) { - actualConfmap, _ := configstore.GetEnhancedConf() - // marshal to yaml and then to map to drop the types for comparison - bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) - assert.NoError(t, err) - actualStringMap, err := yamlBytesToMap(bytesConf) - assert.NoError(t, err) - - expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") - expectedStringMap := expectedMap.ToStringMap() - assert.NoError(t, err) - - assert.Equal(t, expectedStringMap, actualStringMap) - }) -} diff --git a/comp/otelcol/configstore/def/component.go b/comp/otelcol/configstore/def/component.go deleted file mode 100644 index 86943670052e4..0000000000000 --- a/comp/otelcol/configstore/def/component.go +++ /dev/null @@ -1,23 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package configstore defines the otel agent configstore component. 
-package configstore - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/otelcol" -) - -// team: opentelemetry - -// Component provides functions to store and expose the provided and enhanced configs. -type Component interface { - AddConfigs(otelcol.ConfigProviderSettings, otelcol.ConfigProviderSettings, otelcol.Factories) error - GetProvidedConf() (*confmap.Conf, error) - GetEnhancedConf() (*confmap.Conf, error) - GetProvidedConfAsString() (string, error) - GetEnhancedConfAsString() (string, error) -} diff --git a/comp/otelcol/configstore/def/go.mod b/comp/otelcol/configstore/def/go.mod deleted file mode 100644 index b8b63f40b0278..0000000000000 --- a/comp/otelcol/configstore/def/go.mod +++ /dev/null @@ -1,88 +0,0 @@ -module github.com/DataDog/datadog-agent/comp/otelcol/configstore/def - -go 1.22.0 - -require ( - go.opentelemetry.io/collector/confmap v0.104.0 - go.opentelemetry.io/collector/otelcol v0.104.0 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf/maps v0.1.1 // indirect - github.com/knadh/koanf/providers/confmap v0.1.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mitchellh/copystructure v1.2.0 // 
indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect - github.com/prometheus/procfs v0.15.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.104.0 // indirect - go.opentelemetry.io/collector/component v0.104.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect - go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/consumer v0.104.0 // indirect - go.opentelemetry.io/collector/exporter v0.104.0 // indirect - go.opentelemetry.io/collector/extension v0.104.0 // indirect - go.opentelemetry.io/collector/featuregate v1.11.0 // indirect - go.opentelemetry.io/collector/pdata v1.11.0 // indirect - go.opentelemetry.io/collector/processor v0.104.0 // indirect - go.opentelemetry.io/collector/receiver v0.104.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // 
indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/comp/otelcol/configstore/def/go.sum b/comp/otelcol/configstore/def/go.sum deleted file mode 100644 index 4c8662c4b1dfa..0000000000000 --- a/comp/otelcol/configstore/def/go.sum +++ /dev/null @@ -1,328 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 
h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
-github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= -github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= -github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= -github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod 
h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs 
v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= -go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= 
-go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= -go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= -go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= -go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= -go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= -go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension v0.104.0/go.mod 
h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= -go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= -go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= -go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= -go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/semconv v0.104.0 
h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= 
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= 
-go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc 
v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/comp/otelcol/configstore/fx/fx.go b/comp/otelcol/configstore/fx/fx.go deleted file mode 100644 index 2f89f318f0554..0000000000000 --- a/comp/otelcol/configstore/fx/fx.go +++ /dev/null @@ -1,23 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package configstorefx provides fx access for the configstore component -package configstorefx - -import ( - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" - configstoreimpl "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" -) - -// Module defines the fx options for this component. -func Module() fxutil.Module { - return fxutil.Component( - fxutil.ProvideComponentConstructor( - configstoreimpl.NewConfigStore, - ), - fxutil.ProvideOptional[configstore.Component](), - ) -} diff --git a/comp/otelcol/configstore/impl/configstore.go b/comp/otelcol/configstore/impl/configstore.go deleted file mode 100644 index 3c0922ee2b383..0000000000000 --- a/comp/otelcol/configstore/impl/configstore.go +++ /dev/null @@ -1,127 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package configstoreimpl provides the implementation of the otel-agent configstore. 
-package configstoreimpl - -import ( - "context" - "fmt" - "sync" - - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/otelcol" - "gopkg.in/yaml.v2" -) - -type configStoreImpl struct { - provided *otelcol.Config - enhanced *otelcol.Config - mu sync.RWMutex -} - -// NewConfigStore currently only supports a single URI in the uris slice, and this URI needs to be a file path. -func NewConfigStore() (configstore.Component, error) { - return &configStoreImpl{}, nil -} - -func (c *configStoreImpl) AddConfigs(providedCPS otelcol.ConfigProviderSettings, enhancedCPS otelcol.ConfigProviderSettings, factories otelcol.Factories) error { - // Provided - ocpProvided, err := otelcol.NewConfigProvider(providedCPS) - if err != nil { - return fmt.Errorf("failed to create configprovider: %w", err) - } - providedConf, err := ocpProvided.Get(context.Background(), factories) - if err != nil { - return err - } - c.addProvidedConf(providedConf) - - // Enhanced - ocpEnhanced, err := otelcol.NewConfigProvider(enhancedCPS) - if err != nil { - return fmt.Errorf("failed to create configprovider: %w", err) - } - enhancedConf, err := ocpEnhanced.Get(context.Background(), factories) - if err != nil { - return err - } - c.addEnhancedConf(enhancedConf) - - return nil -} - -// addProvidedConf stores the config into configStoreImpl. -func (c *configStoreImpl) addProvidedConf(config *otelcol.Config) { - c.mu.Lock() - defer c.mu.Unlock() - - c.provided = config -} - -// addEnhancedConf stores the config into configStoreImpl. -func (c *configStoreImpl) addEnhancedConf(config *otelcol.Config) { - c.mu.Lock() - defer c.mu.Unlock() - - c.enhanced = config -} - -// GetProvidedConf returns a string representing the enhanced collector configuration. 
-func (c *configStoreImpl) GetProvidedConf() (*confmap.Conf, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - conf := confmap.New() - err := conf.Marshal(c.provided) - if err != nil { - return nil, err - } - return conf, nil -} - -// GetEnhancedConf returns a string representing the enhanced collector configuration. -func (c *configStoreImpl) GetEnhancedConf() (*confmap.Conf, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - conf := confmap.New() - err := conf.Marshal(c.enhanced) - if err != nil { - return nil, err - } - return conf, nil -} - -// GetProvidedConf returns a string representing the enhanced collector configuration. -func (c *configStoreImpl) GetProvidedConfAsString() (string, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - return confToString(c.provided) -} - -// GetEnhancedConf returns a string representing the enhanced collector configuration. -func (c *configStoreImpl) GetEnhancedConfAsString() (string, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - return confToString(c.enhanced) -} - -func confToString(conf *otelcol.Config) (string, error) { - cfg := confmap.New() - err := cfg.Marshal(conf) - if err != nil { - return "", err - } - bytesConf, err := yaml.Marshal(cfg.ToStringMap()) - if err != nil { - return "", err - } - - return string(bytesConf), nil -} diff --git a/comp/otelcol/configstore/impl/go.mod b/comp/otelcol/configstore/impl/go.mod deleted file mode 100644 index 8b263d73c0d03..0000000000000 --- a/comp/otelcol/configstore/impl/go.mod +++ /dev/null @@ -1,94 +0,0 @@ -module github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl - -go 1.22.0 - -require ( - go.opentelemetry.io/collector/confmap v0.104.0 - go.opentelemetry.io/collector/otelcol v0.104.0 -) - -replace github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => ../def - -require ( - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 - gopkg.in/yaml.v2 v2.4.0 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - 
github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.5.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect - github.com/prometheus/procfs v0.15.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.104.0 // indirect - go.opentelemetry.io/collector/component v0.104.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect - 
go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/consumer v0.104.0 // indirect - go.opentelemetry.io/collector/exporter v0.104.0 // indirect - go.opentelemetry.io/collector/extension v0.104.0 // indirect - go.opentelemetry.io/collector/featuregate v1.11.0 // indirect - go.opentelemetry.io/collector/pdata v1.11.0 // indirect - go.opentelemetry.io/collector/processor v0.104.0 // indirect - go.opentelemetry.io/collector/receiver v0.104.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - 
golang.org/x/text v0.17.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/comp/otelcol/configstore/impl/go.sum b/comp/otelcol/configstore/impl/go.sum deleted file mode 100644 index d3cae0a850882..0000000000000 --- a/comp/otelcol/configstore/impl/go.sum +++ /dev/null @@ -1,585 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.4 
h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 
h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= -github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux 
v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= -github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.1.1 
h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= 
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= 
-github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod 
h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= -github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod 
h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= -go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 
h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= -go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= -go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= -go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= -go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= -go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= 
-go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= -go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= -go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= -go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= 
-go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc 
v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= 
-go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d 
h1:Aqf0fiIdUQEj0Gn9mKFFXoQfTTEaNopWpfVyYADxiSg= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Od4k8V1LQSizPRUK4OzZ7TBE/20k+jPczUDAEyvn69Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/comp/otelcol/converter/impl/extensions.go b/comp/otelcol/converter/impl/extensions.go index 1d3911210bf62..7b9e878a7b0e9 100644 --- a/comp/otelcol/converter/impl/extensions.go +++ b/comp/otelcol/converter/impl/extensions.go @@ -7,7 +7,6 @@ package converterimpl import ( - ddextension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl" "go.opentelemetry.io/collector/confmap" ) @@ -30,7 +29,7 @@ var ( healthCheckConfig any // datadog - datadogName = ddextension.Type.String() + datadogName = "ddflare" datadogEnhancedName = datadogName + "/" + ddAutoconfiguredSuffix datadogConfig any diff --git a/comp/otelcol/converter/impl/go.mod b/comp/otelcol/converter/impl/go.mod index cd6b7408cf61a..ddbd3057c3a11 100644 --- a/comp/otelcol/converter/impl/go.mod +++ b/comp/otelcol/converter/impl/go.mod @@ -11,8 +11,6 @@ replace ( github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../comp/core/secrets github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../../comp/def - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => 
../../configstore/def/ - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl => ../../configstore/impl/ github.com/DataDog/datadog-agent/comp/otelcol/converter/def => ../def github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def => ../../ddflareextension/def/ github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl => ../../ddflareextension/impl/ @@ -36,9 +34,8 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/comp/core/config v0.55.2 + github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/confmap v0.104.0 go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0 @@ -53,138 +50,73 @@ require ( require ( github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.0.0-00010101000000-000000000000 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect github.com/DataDog/viper v1.13.5 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 
v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect - github.com/magiconair/properties v1.8.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // 
indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/rs/cors v1.11.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect + github.com/prometheus/client_golang v1.20.2 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/afero v1.1.2 // indirect - github.com/spf13/cast v1.5.1 // indirect + github.com/shoenig/test v1.7.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector v0.104.0 // indirect - go.opentelemetry.io/collector/component v0.104.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.104.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.11.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.104.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect - 
go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect - go.opentelemetry.io/collector/config/internal v0.104.0 // indirect - go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/consumer v0.104.0 // indirect - go.opentelemetry.io/collector/exporter v0.104.0 // indirect - go.opentelemetry.io/collector/extension v0.104.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect go.opentelemetry.io/collector/featuregate v1.11.0 // indirect - go.opentelemetry.io/collector/otelcol v0.104.0 // indirect - go.opentelemetry.io/collector/pdata v1.11.0 // indirect - go.opentelemetry.io/collector/processor v0.104.0 // indirect - go.opentelemetry.io/collector/receiver v0.104.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // 
indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/tools v0.24.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/converter/impl/go.sum b/comp/otelcol/converter/impl/go.sum index 6e2637ed12906..2dc43b9ac6134 100644 --- a/comp/otelcol/converter/impl/go.sum +++ b/comp/otelcol/converter/impl/go.sum @@ -1,13 +1,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod 
h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= -github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -18,9 +15,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -28,7 +24,6 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cihub/seelog 
v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -44,14 +39,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= 
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -60,7 +51,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -71,46 +61,26 @@ github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.0/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -120,37 +90,25 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= 
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= -github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= -github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go 
v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= @@ -166,70 +124,46 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= -github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lufia/plan9stats 
v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 
v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= -github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 h1:6dvpPt8pCcV+TfMnnanFk2NQYf9HN1voSS9iIHdW+L8= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0/go.mod h1:MfSM6mt9qH3vHCaj2rlX6IY/7fN+zCLzNJC25XG9rNU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0 h1:SveJtKEP2pXyCbucjrDzbBGQUUgrU+vBMTyUgy0tplc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0/go.mod h1:HdVNjnRruSyRiqXvPBy/ZVumw7zjegmoJmFRgtBnaQU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0 h1:dcs3PHXBShL5+DWmDrNXnESlehQjRjIaVE84GPyZL5E= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0/go.mod h1:Vh707OU/o72qqlDGS+8WVkMCTIlmiTfy3k6PQeq/tgY= 
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 h1:4ke4j/y7AQnRAyYveB+KGcdjVYEKVrwTxc3BDHagdd0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0/go.mod h1:I2zX9YBggIum9LAHXN1DqqbYOENrHXbXdkXouhwVCHw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 h1:/koTWTWCFF7tBYkDX5UzCaEc/ceTU8jij/Yzuj0So3M= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0/go.mod h1:KWVekIHTPScOrLKVYOiijxfEdGK5OBhD4EFNBh96ESg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0 h1:4dU16tXhXWUfOYHoDtpEJHYze1ltgMFWvD1jWVeARRI= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0/go.mod h1:poM/ch3rxaWlkiGV3ohdEDALhfwx6jaKd1z7xk6iY0o= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0 h1:dOPRk39L5bwQNbxJ7mSUyHan0un/r9DV9X7G+YrktGk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0/go.mod h1:nyUlZ88VgBDoA9SfmUs0RcsVzqts9z0PpLxjFZPjD3w= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.104.0 h1:4ES79GC+1fyDlLmC2ASM7MpKGLx1LIBpL8wE7G3zzSA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.104.0/go.mod h1:h5v/Xn0jreStYi9nyPHjwfYseH8Xe3DznsUNS5R4Oqg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 h1:Pl4rXXpRG/xJuNWUS3I/w1jViHcrssMf47bGX/Ug/KY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0/go.mod h1:tP4dyc5+g/qoXYb8lmNj+y+Nhphn4MkL23/np0Zhx2g= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0 h1:Vwkk+0+cppH+TrmdiVFWcshhdvh2g2IZEj16V8SLjLw= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0/go.mod h1:QmV2JbLC0lzzi0hMUKv5hJ824wdzvYInjVJsphQQ5Uo= github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod 
h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -240,8 +174,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -254,16 +188,11 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod 
h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -272,39 +201,31 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify 
v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= -github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -314,38 +235,11 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.3/go.mod 
h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/configgrpc v0.104.0 h1:E3RtqryQPOm/trJmhlJZj6cCqJNKgv9fOEQvSEpzsFM= -go.opentelemetry.io/collector/config/configgrpc v0.104.0/go.mod h1:tu3ifnJ5pv+4rZcaqNWfvVLjNKb8icSPoClN3THN8PU= -go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= -go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8= -go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= -go.opentelemetry.io/collector/config/configopaque 
v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configretry v1.11.0 h1:UdEDD0ThxPU7+n2EiKJxVTvDCGygXu9hTfT6LOQv9DY= -go.opentelemetry.io/collector/config/configretry v1.11.0/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0 h1:7BhJk71V8xhm8wUpuHG4CVRAPu8JajKj8VmGZ6zS7SA= @@ -360,86 +254,18 @@ go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0 h1:y07I19l go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0/go.mod h1:WV1HOa0z3Ln5ZkwEW7Cm2pCHkfzYY9kBe0dLy8DqeYA= go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0 h1:itBGhyEbX+iz8kz3nc4PYxQx4bL7y87xXNUcGnbKPuY= go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0/go.mod h1:iPVsTBkRFHZ21UEfSGWk8c4maOzTp6BWWpTk+l6PjJI= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod 
h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= -go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= -go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= -go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= -go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0/go.mod h1:6rs4Xugs7tIC3IFbAC+fj56zLiVc7osXC5UTjk/Mkw4= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= -go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= 
-go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= -go.opentelemetry.io/collector/processor/batchprocessor v0.104.0 h1:6xXvHYkPjwM1zdzliDM2H/omTGgIOkY96JTCln7CFZQ= -go.opentelemetry.io/collector/processor/batchprocessor v0.104.0/go.mod h1:f1VfVdiOlqtJDAvQy8YONEee19nJ3haxNeiMPy59w8M= -go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0 h1:xkfiTIGEXMXosYbZe8C8tIEZiw+gEL8QhCxz8slSYcM= -go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0/go.mod h1:9vZPqdvOBDh9fKugWiv8WIINkF+TFpOw7RhvZxctZ9w= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 h1:t9cACuSc7kY09guws7VyB/z9QnG7/zWLC1NQ29WH4+o= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0/go.mod h1:sPIIO4F6uit1i/XQgfe2WryvdO5Hr16bQgZTaXcR8mM= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 
h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 
h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel v1.28.0 
h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -462,8 +288,6 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= @@ -472,10 +296,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -488,19 +308,12 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -511,23 +324,17 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -537,46 +344,18 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d h1:Aqf0fiIdUQEj0Gn9mKFFXoQfTTEaNopWpfVyYADxiSg= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Od4k8V1LQSizPRUK4OzZ7TBE/20k+jPczUDAEyvn69Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -596,7 +375,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/comp/otelcol/ddflareextension/README.md b/comp/otelcol/ddflareextension/README.md index 79a9677e4207e..13f5902877c4e 100644 --- a/comp/otelcol/ddflareextension/README.md +++ 
b/comp/otelcol/ddflareextension/README.md @@ -43,14 +43,22 @@ The flare will collect all environment variables, and these can be found in `ote The flare also adds data collected from extensions. These extensions are added automatically by the [converter component](../converter/README.md). The data collected is from extensions: - health_check: Found in `otel/otel-flare/health_check`. -Will contain a JSON of the latest health check, for example: +Contains a JSON of the latest health check, for example: ``` {"status":"Server available","upSince":"2024-08-14T14:54:00.575804+02:00","uptime":"28.470434291s"} ``` - pprof: Found in `otel/otel-flare/pprof` + +Contains a allocs (`dd-autoconfigured_debug_pprof_allocs`), heap (`dd-autoconfigured_debug_pprof_heap`) and cpu (`dd-autoconfigured_debug_pprof_profile`) profile. Profiles can be opened with the [pprof tool](https://github.com/google/pprof), e.g. +``` +go tool pprof -http=: otel/otel-flare/pprof/dd-autoconfigured_debug_pprof_heap.dat +``` + - zpages: Found in `otel/otel-flare/zpages` +Contains extension (`dd-autoconfigured_debug_extensionz`), feature (`dd-autoconfigured_debug_featurez`), pipeline (`dd-autoconfigured_debug_pipelinez`), service (`dd-autoconfigured_debug_servicez`) and trace (`dd-autoconfigured_debug_tracez`) data. The data is in html format, and can be input in a html viewer. + ### Logs The flare will collect the otel-agent logs which can be found in `logs/otel-agent.log`. 
diff --git a/comp/otelcol/ddflareextension/impl/config.go b/comp/otelcol/ddflareextension/impl/config.go index e5c7aa8a66aba..640ed5c2030af 100644 --- a/comp/otelcol/ddflareextension/impl/config.go +++ b/comp/otelcol/ddflareextension/impl/config.go @@ -10,10 +10,10 @@ import ( "errors" "fmt" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/otelcol" ) type extractDebugEndpoint func(conf *confmap.Conf) (string, error) @@ -32,7 +32,8 @@ var ( type Config struct { HTTPConfig *confighttp.ServerConfig `mapstructure:",squash"` - ConfigStore configstore.Component + factories *otelcol.Factories + configProviderSettings otelcol.ConfigProviderSettings } var _ component.Config = (*Config)(nil) diff --git a/comp/otelcol/ddflareextension/impl/config_test.go b/comp/otelcol/ddflareextension/impl/config_test.go index b3c15eab8a784..6151e67518391 100644 --- a/comp/otelcol/ddflareextension/impl/config_test.go +++ b/comp/otelcol/ddflareextension/impl/config_test.go @@ -9,19 +9,13 @@ package ddflareextensionimpl import ( "testing" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/confmap" ) -func getTestConfig(t *testing.T) *Config { - conv, err := configstore.NewConfigStore() - require.NoError(t, err) - +func getTestConfig() *Config { return &Config{ - ConfigStore: conv, HTTPConfig: &confighttp.ServerConfig{ Endpoint: "localhost:0", }, @@ -29,7 +23,7 @@ func getTestConfig(t *testing.T) *Config { } func TestValidate(t *testing.T) { - cfg := getTestConfig(t) + cfg := getTestConfig() err := cfg.Validate() assert.NoError(t, err) @@ -44,7 +38,7 @@ func TestValidate(t *testing.T) { } func TestUnmarshal(t *testing.T) 
{ - cfg := getTestConfig(t) + cfg := getTestConfig() endpoint := "localhost:1234" diff --git a/comp/otelcol/ddflareextension/impl/configstore.go b/comp/otelcol/ddflareextension/impl/configstore.go new file mode 100644 index 0000000000000..2c96798795f22 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/configstore.go @@ -0,0 +1,93 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package ddflareextensionimpl defines the OpenTelemetry Extension implementation. +package ddflareextensionimpl + +import ( + "sync" + + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/otelcol" + "gopkg.in/yaml.v2" +) + +type configStore struct { + provided *otelcol.Config + enhanced *otelcol.Config + mu sync.RWMutex +} + +// setProvidedConf stores the config into configStoreImpl. +func (c *configStore) setProvidedConf(config *otelcol.Config) { + c.mu.Lock() + defer c.mu.Unlock() + + c.provided = config +} + +// setEnhancedConf stores the config into configStoreImpl. +func (c *configStore) setEnhancedConf(config *otelcol.Config) { + c.mu.Lock() + defer c.mu.Unlock() + + c.enhanced = config +} + +func confToString(conf *otelcol.Config) (string, error) { + cfg := confmap.New() + err := cfg.Marshal(conf) + if err != nil { + return "", err + } + bytesConf, err := yaml.Marshal(cfg.ToStringMap()) + if err != nil { + return "", err + } + + return string(bytesConf), nil +} + +// getProvidedConf returns a string representing the enhanced collector configuration. 
+func (c *configStore) getProvidedConf() (*confmap.Conf, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + conf := confmap.New() + err := conf.Marshal(c.provided) + if err != nil { + return nil, err + } + return conf, nil +} + +// getEnhancedConf returns a string representing the enhanced collector configuration. +func (c *configStore) getEnhancedConf() (*confmap.Conf, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + conf := confmap.New() + err := conf.Marshal(c.enhanced) + if err != nil { + return nil, err + } + return conf, nil +} + +// getProvidedConfAsString returns a string representing the enhanced collector configuration string. +func (c *configStore) getProvidedConfAsString() (string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + return confToString(c.provided) +} + +// getEnhancedConfAsString returns a string representing the enhanced collector configuration string. +func (c *configStore) getEnhancedConfAsString() (string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + return confToString(c.enhanced) +} diff --git a/comp/otelcol/ddflareextension/impl/configstore_test.go b/comp/otelcol/ddflareextension/impl/configstore_test.go new file mode 100644 index 0000000000000..b6d3c2e616681 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/configstore_test.go @@ -0,0 +1,197 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package ddflareextensionimpl defines the OpenTelemetry Extension implementation. 
+package ddflareextensionimpl + +import ( + "context" + "os" + "path/filepath" + "testing" + + converterimpl "github.com/DataDog/datadog-agent/comp/otelcol/converter/impl" + "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter" + "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector" + "go.opentelemetry.io/collector/component/componenttest" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/confmap/converter/expandconverter" + "go.opentelemetry.io/collector/confmap/provider/envprovider" + "go.opentelemetry.io/collector/confmap/provider/fileprovider" + "go.opentelemetry.io/collector/confmap/provider/httpprovider" + "go.opentelemetry.io/collector/confmap/provider/httpsprovider" + "go.opentelemetry.io/collector/confmap/provider/yamlprovider" + "go.opentelemetry.io/collector/otelcol" + "gopkg.in/yaml.v2" +) + +// this is only used for config unmarshalling. 
+func addFactories(factories otelcol.Factories) { + factories.Exporters[datadogexporter.Type] = datadogexporter.NewFactory(nil, nil, nil, nil, nil) + factories.Processors[infraattributesprocessor.Type] = infraattributesprocessor.NewFactory(nil, nil) + factories.Connectors[component.MustNewType("datadog")] = datadogconnector.NewFactory() + factories.Extensions[Type] = NewFactory(nil, otelcol.ConfigProviderSettings{}) +} + +func TestGetConfDump(t *testing.T) { + // get factories + factories, err := components() + assert.NoError(t, err) + addFactories(factories) + + // extension config + config := Config{ + HTTPConfig: &confighttp.ServerConfig{ + Endpoint: "localhost:0", + }, + factories: &factories, + configProviderSettings: newConfigProviderSettings(uriFromFile("simple-dd/config.yaml"), false), + } + extension, err := NewExtension(context.TODO(), &config, componenttest.NewNopTelemetrySettings(), component.BuildInfo{}) + assert.NoError(t, err) + + ext, ok := extension.(*ddExtension) + assert.True(t, ok) + + t.Run("provided-string", func(t *testing.T) { + actualString, _ := ext.configStore.getProvidedConfAsString() + actualStringMap, err := yamlBytesToMap([]byte(actualString)) + assert.NoError(t, err) + + expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-provided-result.yaml")) + assert.NoError(t, err) + expectedMap, err := yamlBytesToMap(expectedBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedMap, actualStringMap) + }) + + t.Run("provided-confmap", func(t *testing.T) { + actualConfmap, _ := ext.configStore.getProvidedConf() + // marshal to yaml and then to map to drop the types for comparison + bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) + assert.NoError(t, err) + actualStringMap, err := yamlBytesToMap(bytesConf) + assert.NoError(t, err) + + expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-provided-result.yaml") + assert.NoError(t, err) + // this step is required for type matching + 
expectedStringMapBytes, err := yaml.Marshal(expectedMap.ToStringMap()) + assert.NoError(t, err) + expectedStringMap, err := yamlBytesToMap(expectedStringMapBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedStringMap, actualStringMap) + }) + + conf := confmapFromResolverSettings(t, newResolverSettings(uriFromFile("simple-dd/config.yaml"), true)) + err = ext.NotifyConfig(context.TODO(), conf) + assert.NoError(t, err) + + t.Run("enhanced-string", func(t *testing.T) { + actualString, _ := ext.configStore.getEnhancedConfAsString() + actualStringMap, err := yamlBytesToMap([]byte(actualString)) + assert.NoError(t, err) + + expectedBytes, err := os.ReadFile(filepath.Join("testdata", "simple-dd", "config-enhanced-result.yaml")) + assert.NoError(t, err) + expectedMap, err := yamlBytesToMap(expectedBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedMap, actualStringMap) + }) + + t.Run("enhance-confmap", func(t *testing.T) { + actualConfmap, _ := ext.configStore.getEnhancedConf() + // marshal to yaml and then to map to drop the types for comparison + bytesConf, err := yaml.Marshal(actualConfmap.ToStringMap()) + assert.NoError(t, err) + actualStringMap, err := yamlBytesToMap(bytesConf) + assert.NoError(t, err) + + expectedMap, err := confmaptest.LoadConf("testdata/simple-dd/config-enhanced-result.yaml") + assert.NoError(t, err) + // this step is required for type matching + expectedStringMapBytes, err := yaml.Marshal(expectedMap.ToStringMap()) + assert.NoError(t, err) + expectedStringMap, err := yamlBytesToMap(expectedStringMapBytes) + assert.NoError(t, err) + + assert.Equal(t, expectedStringMap, actualStringMap) + }) +} + +func confmapFromResolverSettings(t *testing.T, resolverSettings confmap.ResolverSettings) *confmap.Conf { + resolver, err := confmap.NewResolver(resolverSettings) + assert.NoError(t, err) + conf, err := resolver.Resolve(context.TODO()) + assert.NoError(t, err) + return conf +} + +func uriFromFile(filename string) []string { + return 
[]string{filepath.Join("testdata", filename)} +} + +func yamlBytesToMap(bytesConfig []byte) (map[string]any, error) { + var configMap = map[string]interface{}{} + err := yaml.Unmarshal(bytesConfig, configMap) + if err != nil { + return nil, err + } + return configMap, nil +} + +type converterFactory struct { + converter confmap.Converter +} + +func (c *converterFactory) Create(_ confmap.ConverterSettings) confmap.Converter { + return c.converter +} + +func newResolverSettings(uris []string, enhanced bool) confmap.ResolverSettings { + return confmap.ResolverSettings{ + URIs: uris, + ProviderFactories: []confmap.ProviderFactory{ + fileprovider.NewFactory(), + envprovider.NewFactory(), + yamlprovider.NewFactory(), + httpprovider.NewFactory(), + httpsprovider.NewFactory(), + }, + ConverterFactories: newConverterFactorie(enhanced), + } +} + +func newConverterFactorie(enhanced bool) []confmap.ConverterFactory { + converterFactories := []confmap.ConverterFactory{ + expandconverter.NewFactory(), + } + + converter, err := converterimpl.NewConverter(converterimpl.Requires{}) + if err != nil { + return []confmap.ConverterFactory{} + } + + if enhanced { + converterFactories = append(converterFactories, &converterFactory{converter: converter}) + } + + return converterFactories +} + +func newConfigProviderSettings(uris []string, enhanced bool) otelcol.ConfigProviderSettings { + return otelcol.ConfigProviderSettings{ + ResolverSettings: newResolverSettings(uris, enhanced), + } +} diff --git a/comp/otelcol/ddflareextension/impl/extension.go b/comp/otelcol/ddflareextension/impl/extension.go index fca2149b9544d..c1d47e758dc7e 100644 --- a/comp/otelcol/ddflareextension/impl/extension.go +++ b/comp/otelcol/ddflareextension/impl/extension.go @@ -14,7 +14,9 @@ import ( "net/http" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/otelcol" "go.uber.org/zap" extensionDef 
"github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def" @@ -35,36 +37,38 @@ type ddExtension struct { tlsListener net.Listener info component.BuildInfo debug extensionDef.DebugSourceResponse + configStore *configStore } var _ extension.Extension = (*ddExtension)(nil) -// NewExtension creates a new instance of the extension. -func NewExtension(_ context.Context, cfg *Config, telemetry component.TelemetrySettings, info component.BuildInfo) (extensionDef.Component, error) { - ext := &ddExtension{ - cfg: cfg, - telemetry: telemetry, - info: info, - debug: extensionDef.DebugSourceResponse{ - Sources: map[string]extensionDef.OTelFlareSource{}, - }, - } +// NotifyConfig implements the ConfigWatcher interface, which allows this extension +// to be notified of the Collector's effective configuration. See interface: +// https://github.com/open-telemetry/opentelemetry-collector/blob/d0fde2f6b98f13cbbd8657f8188207ac7d230ed5/extension/extension.go#L46. +// This method is called during the startup process by the Collector's Service right after +// calling Start. +func (ext *ddExtension) NotifyConfig(_ context.Context, conf *confmap.Conf) error { + var cfg *configSettings var err error - ext.server, ext.tlsListener, err = buildHTTPServer(cfg.HTTPConfig.Endpoint, ext) - if err != nil { - return nil, err + + if cfg, err = unmarshal(conf, *ext.cfg.factories); err != nil { + return fmt.Errorf("cannot unmarshal the configuration: %w", err) } - return ext, nil -} -// Start is called when the extension is started. 
-func (ext *ddExtension) Start(_ context.Context, host component.Host) error { - ext.telemetry.Logger.Info("Starting DD Extension HTTP server", zap.String("url", ext.cfg.HTTPConfig.Endpoint)) + config := &otelcol.Config{ + Receivers: cfg.Receivers.Configs(), + Processors: cfg.Processors.Configs(), + Exporters: cfg.Exporters.Configs(), + Connectors: cfg.Connectors.Configs(), + Extensions: cfg.Extensions.Configs(), + Service: cfg.Service, + } + + ext.configStore.setEnhancedConf(config) // List configured Extensions - configstore := ext.cfg.ConfigStore - c, err := configstore.GetEnhancedConf() + c, err := ext.configStore.getEnhancedConf() if err != nil { return err } @@ -74,7 +78,7 @@ func (ext *ddExtension) Start(_ context.Context, host component.Host) error { return nil } - extensions := host.GetExtensions() + extensions := config.Extensions for extension := range extensions { extractor, ok := supportedDebugExtensions[extension.Type().String()] if !ok { @@ -120,6 +124,44 @@ func (ext *ddExtension) Start(_ context.Context, host component.Host) error { } } + return nil +} + +// NewExtension creates a new instance of the extension. 
+func NewExtension(_ context.Context, cfg *Config, telemetry component.TelemetrySettings, info component.BuildInfo) (extensionDef.Component, error) { + ocpProvided, err := otelcol.NewConfigProvider(cfg.configProviderSettings) + if err != nil { + return nil, fmt.Errorf("failed to create configprovider: %w", err) + } + + providedConf, err := ocpProvided.Get(context.Background(), *cfg.factories) + if err != nil { + return nil, err + } + + ext := &ddExtension{ + cfg: cfg, + telemetry: telemetry, + info: info, + configStore: &configStore{}, + debug: extensionDef.DebugSourceResponse{ + Sources: map[string]extensionDef.OTelFlareSource{}, + }, + } + + ext.configStore.setProvidedConf(providedConf) + + ext.server, ext.tlsListener, err = buildHTTPServer(cfg.HTTPConfig.Endpoint, ext) + if err != nil { + return nil, err + } + return ext, nil +} + +// Start is called when the extension is started. +func (ext *ddExtension) Start(_ context.Context, _ component.Host) error { + ext.telemetry.Logger.Info("Starting DD Extension HTTP server", zap.String("url", ext.cfg.HTTPConfig.Endpoint)) + go func() { if err := ext.server.Serve(ext.tlsListener); err != nil && err != http.ErrServerClosed { ext.telemetry.ReportStatus(component.NewFatalErrorEvent(err)) @@ -141,13 +183,13 @@ func (ext *ddExtension) Shutdown(ctx context.Context) error { // ServeHTTP the request handler for the extension. 
func (ext *ddExtension) ServeHTTP(w http.ResponseWriter, _ *http.Request) { - customer, err := ext.cfg.ConfigStore.GetProvidedConfAsString() + customer, err := ext.configStore.getProvidedConfAsString() if err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "Unable to get provided config\n") return } - enhanced, err := ext.cfg.ConfigStore.GetEnhancedConfAsString() + enhanced, err := ext.configStore.getEnhancedConfAsString() if err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "Unable to get enhanced config\n") diff --git a/comp/otelcol/ddflareextension/impl/extension_test.go b/comp/otelcol/ddflareextension/impl/extension_test.go index f1bd7dd398407..71d8461a0f409 100644 --- a/comp/otelcol/ddflareextension/impl/extension_test.go +++ b/comp/otelcol/ddflareextension/impl/extension_test.go @@ -11,107 +11,44 @@ import ( "encoding/json" "net/http" "net/http/httptest" - "path/filepath" "testing" ddflareextension "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" - spanmetricsconnector "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" - healthcheckextension "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension" - pprofextension "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension" - transformprocessor 
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/fileprovider" - "go.opentelemetry.io/collector/confmap/provider/yamlprovider" "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" - otlpexporter "go.opentelemetry.io/collector/exporter/otlpexporter" - otlphttpexporter "go.opentelemetry.io/collector/exporter/otlphttpexporter" + "go.opentelemetry.io/collector/exporter/otlpexporter" + "go.opentelemetry.io/collector/exporter/otlphttpexporter" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/zpagesextension" "go.opentelemetry.io/collector/otelcol" "go.opentelemetry.io/collector/processor" - batchprocessor "go.opentelemetry.io/collector/processor/batchprocessor" + "go.opentelemetry.io/collector/processor/batchprocessor" "go.opentelemetry.io/collector/receiver" - nopreceiver "go.opentelemetry.io/collector/receiver/nopreceiver" - otlpreceiver "go.opentelemetry.io/collector/receiver/otlpreceiver" + "go.opentelemetry.io/collector/receiver/nopreceiver" + "go.opentelemetry.io/collector/receiver/otlpreceiver" "go.uber.org/zap" ) -var cpSettings = otelcol.ConfigProviderSettings{ - ResolverSettings: confmap.ResolverSettings{ - URIs: []string{filepath.Join("testdata", "config.yaml")}, - ProviderFactories: []confmap.ProviderFactory{ - fileprovider.NewFactory(), - yamlprovider.NewFactory(), - }, - }, -} - -func components() (otelcol.Factories, error) { - var err error - factories := otelcol.Factories{} - - factories.Extensions, err = extension.MakeFactoryMap( - healthcheckextension.NewFactory(), - pprofextension.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Receivers, err = receiver.MakeFactoryMap( - 
nopreceiver.NewFactory(), - otlpreceiver.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Exporters, err = exporter.MakeFactoryMap( - otlpexporter.NewFactory(), - otlphttpexporter.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Processors, err = processor.MakeFactoryMap( - batchprocessor.NewFactory(), - transformprocessor.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - factories.Connectors, err = connector.MakeFactoryMap( - spanmetricsconnector.NewFactory(), - ) - if err != nil { - return otelcol.Factories{}, err - } - - return factories, nil -} - func getExtensionTestConfig(t *testing.T) *Config { - cf, err := configstore.NewConfigStore() - assert.NoError(t, err) - factories, err := components() assert.NoError(t, err) - - cf.AddConfigs(cpSettings, cpSettings, factories) return &Config{ HTTPConfig: &confighttp.ServerConfig{ Endpoint: "localhost:0", }, - ConfigStore: cf, + configProviderSettings: newConfigProviderSettings(uriFromFile("config.yaml"), false), + factories: &factories, } } @@ -158,6 +95,10 @@ func TestExtensionHTTPHandler(t *testing.T) { ddExt.Start(context.TODO(), host) + conf := confmapFromResolverSettings(t, newResolverSettings(uriFromFile("config.yaml"), false)) + ddExt.NotifyConfig(context.TODO(), conf) + assert.NoError(t, err) + // Call the handler's ServeHTTP method ddExt.ServeHTTP(rr, req) @@ -202,3 +143,51 @@ func newHostWithExtensions(exts map[component.ID]component.Component) component. 
func (h *hostWithExtensions) GetExtensions() map[component.ID]component.Component { return h.exts } + +func components() (otelcol.Factories, error) { + var err error + factories := otelcol.Factories{} + + factories.Extensions, err = extension.MakeFactoryMap( + healthcheckextension.NewFactory(), + pprofextension.NewFactory(), + zpagesextension.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Receivers, err = receiver.MakeFactoryMap( + nopreceiver.NewFactory(), + otlpreceiver.NewFactory(), + prometheusreceiver.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Exporters, err = exporter.MakeFactoryMap( + otlpexporter.NewFactory(), + otlphttpexporter.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Processors, err = processor.MakeFactoryMap( + batchprocessor.NewFactory(), + transformprocessor.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Connectors, err = connector.MakeFactoryMap( + spanmetricsconnector.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + return factories, nil +} diff --git a/comp/otelcol/ddflareextension/impl/factory.go b/comp/otelcol/ddflareextension/impl/factory.go index eb6967ebaed44..64c52aa9c966c 100644 --- a/comp/otelcol/ddflareextension/impl/factory.go +++ b/comp/otelcol/ddflareextension/impl/factory.go @@ -10,11 +10,11 @@ import ( "context" "fmt" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/def" "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl/internal/metadata" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/otelcol" ) const ( @@ -24,20 +24,22 @@ const ( type ddExtensionFactory struct { extension.Factory - configstore configstore.Component + factories *otelcol.Factories + configProviderSettings 
otelcol.ConfigProviderSettings } // NewFactory creates a factory for HealthCheck extension. -func NewFactory(configstore configstore.Component) extension.Factory { +func NewFactory(factories *otelcol.Factories, configProviderSettings otelcol.ConfigProviderSettings) extension.Factory { return &ddExtensionFactory{ - configstore: configstore, + factories: factories, + configProviderSettings: configProviderSettings, } } func (f *ddExtensionFactory) CreateExtension(ctx context.Context, set extension.Settings, cfg component.Config) (extension.Extension, error) { - config := &Config{ - ConfigStore: f.configstore, + factories: f.factories, + configProviderSettings: f.configProviderSettings, } config.HTTPConfig = cfg.(*Config).HTTPConfig return NewExtension(ctx, config, set.TelemetrySettings, set.BuildInfo) @@ -48,7 +50,6 @@ func (f *ddExtensionFactory) CreateDefaultConfig() component.Config { HTTPConfig: &confighttp.ServerConfig{ Endpoint: fmt.Sprintf("localhost:%d", defaultHTTPPort), }, - ConfigStore: f.configstore, } } diff --git a/comp/otelcol/ddflareextension/impl/factory_test.go b/comp/otelcol/ddflareextension/impl/factory_test.go index 16ae62a1cd8c5..5a38c3513c0cb 100644 --- a/comp/otelcol/ddflareextension/impl/factory_test.go +++ b/comp/otelcol/ddflareextension/impl/factory_test.go @@ -10,18 +10,16 @@ import ( "context" "testing" - configstore "github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl" "github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl/internal/metadata" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/extension" ) func getTestFactory(t *testing.T) extension.Factory { - conv, err := configstore.NewConfigStore() - require.NoError(t, err) + factories, err := components() + assert.NoError(t, err) - return NewFactory(conv) + return NewFactory(&factories, newConfigProviderSettings(uriFromFile("config.yaml"), false)) } func TestNewFactory(t *testing.T) { diff --git 
a/comp/otelcol/ddflareextension/impl/go.mod b/comp/otelcol/ddflareextension/impl/go.mod index eba0f9a9e9570..dc737f335465f 100644 --- a/comp/otelcol/ddflareextension/impl/go.mod +++ b/comp/otelcol/ddflareextension/impl/go.mod @@ -3,30 +3,47 @@ module github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl go 1.22.0 replace ( + github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../../cmd/agent/common/path + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../api/api/def github.com/DataDog/datadog-agent/comp/core/config => ../../../core/config github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../core/flare/builder github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../core/flare/types github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface => ../../../core/hostname/hostnameinterface github.com/DataDog/datadog-agent/comp/core/log/def => ../../../core/log/def + github.com/DataDog/datadog-agent/comp/core/log/mock => ../../../core/log/mock github.com/DataDog/datadog-agent/comp/core/secrets => ../../../core/secrets + github.com/DataDog/datadog-agent/comp/core/status => ../../../core/status + github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../core/tagger/types + github.com/DataDog/datadog-agent/comp/core/tagger/utils => ../../../core/tagger/utils github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../def + github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder => ../../../forwarder/defaultforwarder + github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface => ../../../forwarder/orchestrator/orchestratorinterface github.com/DataDog/datadog-agent/comp/logs/agent/config => ../../../logs/agent/config - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => ../../configstore/def - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl => 
../../configstore/impl + github.com/DataDog/datadog-agent/comp/otelcol/converter/def => ../../../otelcol/converter/def + github.com/DataDog/datadog-agent/comp/otelcol/converter/impl => ../../converter/impl github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def => ../../ddflareextension/def github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline => ../../logsagentpipeline github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl => ../../logsagentpipeline/logsagentpipelineimpl + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter => ../../otlp/components/exporter/datadogexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter => ../../otlp/components/exporter/logsagentexporter + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter => ../../otlp/components/exporter/serializerexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient => ../../otlp/components/metricsclient + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor => ../../otlp/components/processor/infraattributesprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor => ../../otlp/components/statsprocessor + github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil => ../../../otelcol/otlp/testutil + github.com/DataDog/datadog-agent/comp/serializer/compression => ../../../serializer/compression + github.com/DataDog/datadog-agent/comp/trace/agent/def => ../../../trace/agent/def github.com/DataDog/datadog-agent/comp/trace/compression/def => ../../../trace/compression/def github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip => ../../../trace/compression/impl-gzip github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd => ../../../trace/compression/impl-zstd + github.com/DataDog/datadog-agent/pkg/aggregator/ckey => 
../../../../pkg/aggregator/ckey github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../../pkg/logs/client @@ -40,116 +57,355 @@ replace ( github.com/DataDog/datadog-agent/pkg/logs/sources => ../../../../pkg/logs/sources github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface => ../../../../pkg/logs/status/statusinterface github.com/DataDog/datadog-agent/pkg/logs/status/utils => ../../../../pkg/logs/status/utils + github.com/DataDog/datadog-agent/pkg/logs/util/testutils => ../../../../pkg/logs/util/testutils + github.com/DataDog/datadog-agent/pkg/metrics => ../../../../pkg/metrics github.com/DataDog/datadog-agent/pkg/obfuscate => ../../../../pkg/obfuscate + github.com/DataDog/datadog-agent/pkg/orchestrator/model => ../../../../pkg/orchestrator/model + github.com/DataDog/datadog-agent/pkg/process/util/api => ../../../../pkg/process/util/api github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto github.com/DataDog/datadog-agent/pkg/remoteconfig/state => ../../../../pkg/remoteconfig/state + github.com/DataDog/datadog-agent/pkg/serializer => ../../../../pkg/serializer github.com/DataDog/datadog-agent/pkg/status/health => ../../../../pkg/status/health + github.com/DataDog/datadog-agent/pkg/tagger/types => ../../../../pkg/tagger/types + 
github.com/DataDog/datadog-agent/pkg/tagset => ../../../../pkg/tagset github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/trace => ../../../../pkg/trace github.com/DataDog/datadog-agent/pkg/util/backoff => ../../../../pkg/util/backoff + github.com/DataDog/datadog-agent/pkg/util/buf => ../../../../pkg/util/buf github.com/DataDog/datadog-agent/pkg/util/cgroups => ../../../../pkg/util/cgroups + github.com/DataDog/datadog-agent/pkg/util/common => ../../../../pkg/util/common + github.com/DataDog/datadog-agent/pkg/util/containers/image => ../../../../pkg/util/containers/image github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http => ../../../../pkg/util/http + github.com/DataDog/datadog-agent/pkg/util/json => ../../../../pkg/util/json github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/sort => ../../../../pkg/util/sort github.com/DataDog/datadog-agent/pkg/util/startstop => ../../../../pkg/util/startstop github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../../../pkg/util/statstracker github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket + 
github.com/DataDog/datadog-agent/pkg/util/tagger => ../../../../pkg/util/tagger + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version + github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea + github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector => github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 + github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e ) require ( - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/otelcol/converter/impl v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.0.0-00010101000000-000000000000 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.104.0 
github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/config/confighttp v0.104.0 go.opentelemetry.io/collector/confmap v0.104.0 + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0 + go.opentelemetry.io/collector/confmap/provider/envprovider v0.104.0 go.opentelemetry.io/collector/confmap/provider/fileprovider v0.104.0 + go.opentelemetry.io/collector/confmap/provider/httpprovider v0.104.0 + go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0 go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0 go.opentelemetry.io/collector/connector v0.104.0 go.opentelemetry.io/collector/exporter v0.104.0 go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 go.opentelemetry.io/collector/extension v0.104.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 go.opentelemetry.io/collector/otelcol v0.104.0 go.opentelemetry.io/collector/processor v0.104.0 go.opentelemetry.io/collector/processor/batchprocessor v0.104.0 go.opentelemetry.io/collector/receiver v0.104.0 go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0 go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 + go.opentelemetry.io/collector/service v0.104.0 go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa + gopkg.in/yaml.v2 v2.4.0 ) require ( + cloud.google.com/go/compute/metadata v0.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + 
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/Code-Hex/go-generics-cache v1.3.1 // indirect + github.com/DataDog/agent-payload/v5 v5.0.123 // indirect + github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor 
v0.0.0-20240525065430-d0b647bcb646 // indirect + github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/trace/agent/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/processor v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect + 
github.com/DataDog/datadog-agent/pkg/metrics v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/process/util/api v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/tagger/types v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/sort 
v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect + github.com/DataDog/datadog-go/v5 v5.5.0 // indirect + github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect + github.com/DataDog/go-sqllexer v0.0.14 // indirect + github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect + github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect + github.com/DataDog/sketches-go v1.4.6 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/DataDog/zstd v1.5.5 // indirect + github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go v1.53.11 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + 
github.com/briandowns/spinner v1.23.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dennwc/varint v1.0.0 // indirect + github.com/digitalocean/godo v1.109.0 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/docker v25.0.6+incompatible // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/envoyproxy/go-control-plane v0.12.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.9 // indirect + github.com/go-resty/resty/v2 v2.12.0 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect + github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/goccy/go-json v0.10.3 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // 
indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.2 // indirect + github.com/gophercloud/gophercloud v1.8.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/hashicorp/consul/api v1.29.1 // indirect + github.com/hashicorp/cronexpr v1.1.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.4 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/hetznercloud/hcloud-go/v2 v2.6.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect + github.com/imdario/mergo 
v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/karrick/godirwalk v1.17.0 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect + github.com/linode/linodego v1.33.0 // indirect github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/miekg/dns v1.1.58 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/mostynb/go-grpc-compression v1.2.3 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.104.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.104.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.104.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect + github.com/outcaste-io/ristretto v0.2.3 // indirect + github.com/ovh/go-ovh v1.4.3 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/philhofer/fwd v1.1.2 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + 
github.com/prometheus/client_golang v1.20.2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e // indirect + github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect github.com/rs/cors v1.11.0 // indirect + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect github.com/shirou/gopsutil/v4 v4.24.5 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/shoenig/test v1.7.1 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stormcat24/protodep v0.1.8 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/tilinna/clock v1.1.0 // indirect + github.com/tinylib/msgp v1.1.9 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect + github.com/twmb/murmur3 v1.1.8 // indirect + github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector v0.104.0 // indirect @@ -163,16 +419,18 @@ require ( go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect go.opentelemetry.io/collector/config/internal v0.104.0 // indirect go.opentelemetry.io/collector/consumer v0.104.0 // indirect + go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 // indirect go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect 
go.opentelemetry.io/collector/featuregate v1.11.0 // indirect + go.opentelemetry.io/collector/otelcol/otelcoltest v0.104.0 // indirect go.opentelemetry.io/collector/pdata v1.11.0 // indirect go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect go.opentelemetry.io/contrib/config v0.7.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/contrib/zpages v0.52.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect @@ -182,21 +440,42 @@ require ( go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.2.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.22.2 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // 
indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.18.0 // indirect + golang.org/x/time v0.6.0 // indirect + golang.org/x/tools v0.24.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/api v0.169.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.2.0 // indirect + k8s.io/api v0.29.3 // indirect + k8s.io/apimachinery v0.29.3 // indirect + k8s.io/client-go v0.29.3 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/comp/otelcol/ddflareextension/impl/go.sum b/comp/otelcol/ddflareextension/impl/go.sum index 8abd0addada8f..7d4fd6329a01a 100644 --- a/comp/otelcol/ddflareextension/impl/go.sum +++ b/comp/otelcol/ddflareextension/impl/go.sum @@ -1,33 +1,239 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= +github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/DataDog/agent-payload/v5 v5.0.123 h1:fc/mME+zXBPo8i8690rVJXeqlZ1o+8ixIzNu43XP+o8= +github.com/DataDog/agent-payload/v5 v5.0.123/go.mod h1:FgVQKmVdqdmZTbxIptqJC/l+xEzdiXsaAOs/vGAvWzs= +github.com/DataDog/datadog-agent/comp/core/log v0.54.0 h1:wP3bJua8qmURqLXkmYxrbELMJQ2oO1MuVNfxHJT4wiQ= 
+github.com/DataDog/datadog-agent/comp/core/log v0.54.0/go.mod h1:mtMxZiwg13b4bHgDf8xE6FHgTcadzI5Cc0lx2MSY1mE= +github.com/DataDog/datadog-api-client-go/v2 v2.26.0 h1:bZr0hu+hx8L91+yU5EGw8wK3FlCVEIashpx+cylWsf0= +github.com/DataDog/datadog-api-client-go/v2 v2.26.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= +github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= +github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= +github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0= +github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= +github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= +github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= +github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= +github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f/go.mod h1:oXfOhM/Kr8OvqS6tVqJwxPBornV0yrx3bc+l0BDr7PQ= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 h1:N4xzkSD2BkRwEZSPf3C2eUZxjS5trpo4gOwRh8mu+BA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod 
h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.53.11 h1:KcmduYvX15rRqt4ZU/7jKkmDxU/G87LJ9MUI0yQJh00= +github.com/aws/aws-sdk-go v1.53.11/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= +github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= +github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v25.0.6+incompatible h1:5cPwbwriIcsua2REJe8HqQV+6WlWc1byg2QSXzBxBGg= +github.com/docker/docker v25.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop 
v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -36,57 +242,236 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA= +github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= 
+github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= +github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp 
v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.5.0 
h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= +github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc= +github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI= +github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg= +github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= +github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= +github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 
h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.2 
h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= 
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= +github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= +github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ionos-cloud/sdk-go/v6 v6.1.11 
h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= +github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= +github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= @@ -97,37 +482,118 @@ github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPgh github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= +github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw= +github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= +github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 
h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod 
h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 h1:Kpfqjwp+nlgqacXkSS8T8iGiTMTFo8NoT8AoRomDOpU= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0/go.mod h1:ymbGC/jEXTq8mgHsxzV1PjVGHmV5hSQXmkYkFfGfuLw= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 h1:6dvpPt8pCcV+TfMnnanFk2NQYf9HN1voSS9iIHdW+L8= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0/go.mod h1:MfSM6mt9qH3vHCaj2rlX6IY/7fN+zCLzNJC25XG9rNU= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.103.0 h1:2XWbSIoIKQyFvn97pS4uc0Pxwe7EWCmZEg2r/+kiL58= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.103.0/go.mod h1:WrnJQRKaivYllAC2B1KeCI5uYiYsZv3Hcbd6iQfr9Jg= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.104.0 h1:lkf7Bof0rbPy2/0+tu+FAgEzwVKmJKcMlx8xsR26TdA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.104.0/go.mod h1:B6dQmrNwW1q7rOadf57fwIaZHYzwrovTSSEEaiFyf0w= github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0 h1:SveJtKEP2pXyCbucjrDzbBGQUUgrU+vBMTyUgy0tplc= github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.104.0/go.mod h1:HdVNjnRruSyRiqXvPBy/ZVumw7zjegmoJmFRgtBnaQU= github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0 h1:dcs3PHXBShL5+DWmDrNXnESlehQjRjIaVE84GPyZL5E= github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.104.0/go.mod h1:Vh707OU/o72qqlDGS+8WVkMCTIlmiTfy3k6PQeq/tgY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.104.0 h1:KmZvS+RN2w4zxMuX1yiobjkN8fvwBUJ+vl5LkO3O7bk= 
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.104.0/go.mod h1:fc7PiNmgpw+RlWzdWcuoiH9mIlDgiryy70ZjEJC+nlY= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0 h1:4ke4j/y7AQnRAyYveB+KGcdjVYEKVrwTxc3BDHagdd0= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.104.0/go.mod h1:I2zX9YBggIum9LAHXN1DqqbYOENrHXbXdkXouhwVCHw= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0 h1:/koTWTWCFF7tBYkDX5UzCaEc/ceTU8jij/Yzuj0So3M= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.104.0/go.mod h1:KWVekIHTPScOrLKVYOiijxfEdGK5OBhD4EFNBh96ESg= github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0 h1:4dU16tXhXWUfOYHoDtpEJHYze1ltgMFWvD1jWVeARRI= github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.104.0/go.mod h1:poM/ch3rxaWlkiGV3ohdEDALhfwx6jaKd1z7xk6iY0o= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.104.0 h1:j5EAcIE5iA03KdrfrmXmplfPc1Lybt6D8RAmuumoq60= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.104.0/go.mod h1:VS66oUydCMwiWl1BFmLs7iNy4lGsfVYsriXr/d1fpAk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.104.0 h1:hB2LSx2h/Xvnfam8jXu8sGy3M6YVSD6bcI5saenp+kY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.104.0/go.mod h1:jTZf5CwMDiILww23FgxvLdIkCPH952ItR/3dJUb/sSk= github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0 h1:dOPRk39L5bwQNbxJ7mSUyHan0un/r9DV9X7G+YrktGk= github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.104.0/go.mod h1:nyUlZ88VgBDoA9SfmUs0RcsVzqts9z0PpLxjFZPjD3w= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.104.0 h1:J4VwD+t7XpMuhdgd5KwhI5f17bOKHDD862szUW2ulVo= @@ -138,56 +604,217 @@ 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.104.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.104.0/go.mod h1:tImy4FWNu1qpaXRVaNi2BU+TmZHtYgLO6LbB6mspZio= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0 h1:Pl4rXXpRG/xJuNWUS3I/w1jViHcrssMf47bGX/Ug/KY= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.104.0/go.mod h1:tP4dyc5+g/qoXYb8lmNj+y+Nhphn4MkL23/np0Zhx2g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 h1:dNDjrDhJmSv2JoK3n2hX/nyf/twTTnLuvAhQTMHGQ5M= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0/go.mod h1:SyCZC+vcI2lnyb7iqH0/6dGgCihuqtCxGmLaZToxSHk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.104.0 h1:iNr5/wS/0Rg4PnPO2Zf3Yj4Qc1RooVQ/7U7jKzocyPo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.104.0/go.mod h1:4bLfc6BnVKRp3yY+ueEUEeyNWjW/InCGbFs9ZA7o/ko= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.104.0 h1:eCceBGaatwEKLiOzYjrYc4zNSMMfb+5Of9VNUnTYU80= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.104.0/go.mod h1:/mkH8dlZxnFRoccQoXkN/XOP6Q7G/1F8XTUO9+xZw7U= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.104.0 h1:avbrkX0c51UjJE13RBqk/Z5QyO/J7J2/O9FIBaJ+Few= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.104.0/go.mod h1:tkmsd1veEEsXtFdYSvoZU7S80INqCbNUGkEGQAivlV0= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.104.0 h1:W2OartqDicbzoLjAp2MCi+FIt2FBy5PyeYce0kIuerc= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.104.0/go.mod h1:I2so4Vn+ROaCECo0bdQXNxyUjY9tbq1JvcyuWPETLcM= 
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.104.0 h1:AnzZUTLBT5kADIOTE3NKqXK214sqnkilQqXkqgLjhJs= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.104.0/go.mod h1:5FQezrJhOulRFWMnrpo3Z9O/qWySgDNniPp0p2mFJs0= github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0 h1:Vwkk+0+cppH+TrmdiVFWcshhdvh2g2IZEj16V8SLjLw= github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.104.0/go.mod h1:QmV2JbLC0lzzi0hMUKv5hJ824wdzvYInjVJsphQQ5Uo= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.104.0 h1:Nwkj5EFH90NxWPcl4qeef5AX+A1COWn1Xy1mkzuyIHE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.104.0/go.mod h1:NJwlpVFJu2Dd1mEqCHzSXSNmd5JDhWGVDqo1Oi3RZKk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= +github.com/openshift/api v3.9.0+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= +github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 h1:ZHRIMCFIJN1p9LsJt4HQ+akDrys4PrYnXzOWI5LK03I= +github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8r9mqDVsPb5td3NehsNOAWa4uiFkYEfVZioQ2gH0= +github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= +github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs 
v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e h1:UmqAuY2OyDoog8+l5FybViJE5B2r+UxVGCUwFTsY5AA= +github.com/prometheus/prometheus v0.51.2-0.20240405174432-b4a973753c6e/go.mod h1:+0ld+ozir7zWFcHA2vVpWAKxXakIioEjPPNOqH+J3ZA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 h1:arwj11zP0yJIxIRiDn22E0H8PxfF7TsTrc2wIPFIsf4= +github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9/go.mod h1:SKZx6stCn03JN3BOWTwvVIO2ajMkb/zQdTceXYhKw/4= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stormcat24/protodep v0.1.8 h1:FOycjjkjZiastf21aRoCjtoVdhsoBE8mZ0RvY6AHqFE= +github.com/stormcat24/protodep v0.1.8/go.mod h1:6OoSZD5GGomKfmH1LvfJxNIRvYhewFXH5+eNv8h4wOM= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= +github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= 
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I= +github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= +github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= +github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= +github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= +github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= +github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/vmihailenco/msgpack/v4 v4.3.12 
h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= @@ -216,8 +843,16 @@ go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4 go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0 h1:7BhJk71V8xhm8wUpuHG4CVRAPu8JajKj8VmGZ6zS7SA= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.104.0/go.mod h1:o2xTZJpc65SyYPOAGOjyvWwQEqYSWT4Q4/gMfOYpAzc= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.104.0 h1:/3iSlUHH1Q3xeZc55oVekd4dibXzqgphXZI7EaYJ+ak= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.104.0/go.mod h1:RZDXvP81JwvIGeq3rvDBrRKMUfn2BeKCmppHm4Qm0D8= go.opentelemetry.io/collector/confmap/provider/fileprovider v0.104.0 h1:B+nMVlIUQxuP52CZSegGuA2z9S+Cv2XwFb2a/TLFPhc= go.opentelemetry.io/collector/confmap/provider/fileprovider v0.104.0/go.mod h1:O0RcaP/I/kn7JHrwohUfj6AwvQYLxjbqg/HnjkvLLTw= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.104.0 h1:6UreSAu64Ft3VfKWE3sjcmf+mWMyWemSsrjS/fjRPpQ= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.104.0/go.mod h1:+vP6R5i9h+oYJNjp4bQHvtSHEu1t+CgSKIeZYZZRQXA= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0 h1:y07I19lmp9VHZ58PJ3nwwd1wqumnIBeMxTNBSh/Vn6k= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.104.0/go.mod h1:WV1HOa0z3Ln5ZkwEW7Cm2pCHkfzYY9kBe0dLy8DqeYA= go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0 h1:itBGhyEbX+iz8kz3nc4PYxQx4bL7y87xXNUcGnbKPuY= 
go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.104.0/go.mod h1:iPVsTBkRFHZ21UEfSGWk8c4maOzTp6BWWpTk+l6PjJI= go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= @@ -226,6 +861,8 @@ go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhR go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= +go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= +go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c= @@ -240,6 +877,8 @@ go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zR go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.104.0 h1:duPbOTahDcDP+XupC/KkHvebb8+NVKh7LzIpiEuKwLU= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.104.0/go.mod h1:cNosA2o77fGp2N4Ofs5h6HBdHhlPQAbKBjBIc1l+8O4= go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= go.opentelemetry.io/collector/pdata v1.11.0/go.mod 
h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= @@ -270,8 +909,8 @@ go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= @@ -290,93 +929,387 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod 
h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod 
h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.26.0 
h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= 
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= 
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d h1:Aqf0fiIdUQEj0Gn9mKFFXoQfTTEaNopWpfVyYADxiSg= -google.golang.org/genproto/googleapis/api v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Od4k8V1LQSizPRUK4OzZ7TBE/20k+jPczUDAEyvn69Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 
h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= 
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -385,16 +1318,68 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/zorkian/go-datadog-api.v2 v2.30.0 h1:umQdVO0Ytx+kYadhuJNjFtDgIsIEBnKrOTvNuu8ClKI= +gopkg.in/zorkian/go-datadog-api.v2 v2.30.0/go.mod h1:kx0CSMRpzEZfx/nFH62GLU4stZjparh/BRpM89t4XCQ= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.2.0 
h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= +gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml new file mode 100644 index 0000000000000..505453b479f2b --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml @@ -0,0 +1,236 @@ +connectors: {} +exporters: + datadog: + api: + fail_on_invalid_key: false + key: "[REDACTED]" + site: datadoghq.com + auth: null + compression: "" + cookies: null + disable_keep_alives: false + endpoint: "" + headers: {} + host_metadata: + enabled: true + hostname_source: config_or_system + tags: [] + hostname: "" + http2_ping_timeout: 0s + http2_read_idle_timeout: 0s + idle_conn_timeout: null + logs: + batch_wait: 5 + compression_level: 6 + dialer: + timeout: 0s + dump_payloads: false + endpoint: https://agent-http-intake.logs.datadoghq.com + use_compression: true + max_conns_per_host: null + max_idle_conns: null + max_idle_conns_per_host: null + metrics: + apm_stats_receiver_addr: "" + delta_ttl: 3600 + dialer: + timeout: 0s + enabled: false + endpoint: https://api.datadoghq.com + histograms: + mode: distributions + send_aggregation_metrics: false + send_count_sum_metrics: false + instrumentation_library_metadata_as_tags: false + 
instrumentation_scope_metadata_as_tags: false + resource_attributes_as_tags: false + summaries: + mode: gauges + sums: + cumulative_monotonic_mode: to_delta + initial_cumulative_monotonic_value: auto + tag_cardinality: "" + tags: "" + only_metadata: false + proxy_url: "" + read_buffer_size: 0 + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 1000 + storage: null + timeout: 15s + tls: + ca_file: "" + ca_pem: "[REDACTED]" + cert_file: "" + cert_pem: "[REDACTED]" + cipher_suites: [] + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + key_pem: "[REDACTED]" + max_version: "" + min_version: "" + reload_interval: 0s + server_name_override: "" + traces: + compute_top_level_by_span_kind: true + dialer: + timeout: 0s + endpoint: https://trace.agent.datadoghq.com + ignore_resources: [] + span_name_as_resource_name: true + span_name_remappings: {} + trace_buffer: 0 + write_buffer_size: 0 +extensions: + ddflare/dd-autoconfigured: + auth: null + compression_algorithms: [] + cors: null + endpoint: localhost:7777 + include_metadata: false + max_request_body_size: 0 + response_headers: {} + tls: null + health_check/dd-autoconfigured: + auth: null + check_collector_pipeline: + enabled: false + exporter_failure_threshold: 5 + interval: 5m + compression_algorithms: [] + cors: null + endpoint: localhost:13133 + include_metadata: false + max_request_body_size: 0 + path: / + response_body: null + response_headers: {} + tls: null + pprof/dd-autoconfigured: + block_profile_fraction: 0 + dialer: + timeout: 0s + endpoint: localhost:1777 + mutex_profile_fraction: 0 + save_to_file: "" + zpages/dd-autoconfigured: + auth: null + compression_algorithms: [] + cors: null + endpoint: localhost:55679 + include_metadata: false + max_request_body_size: 0 + response_headers: {} + tls: null 
+processors: + infraattributes/dd-autoconfigured: + cardinality: 0 + logs: + log: [] + metrics: + metric: [] + traces: + span: [] +receivers: + otlp: + protocols: + grpc: null + http: null + prometheus: + config: + global: + evaluation_interval: 1m + scrape_interval: 1m + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 10s + scrape_configs: + - enable_compression: true + enable_http2: true + follow_redirects: true + honor_timestamps: true + job_name: datadog-agent + metrics_path: /metrics + scheme: http + scrape_interval: 5s + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 5s + static_configs: + - targets: + - 0.0.0.0:8888 + track_timestamps_staleness: false + report_extra_scrape_metrics: false + start_time_metric_regex: "" + target_allocator: null + trim_metric_suffixes: false + use_start_time_metric: false +service: + extensions: + - pprof/dd-autoconfigured + - zpages/dd-autoconfigured + - health_check/dd-autoconfigured + - ddflare/dd-autoconfigured + pipelines: + logs: + exporters: + - datadog + processors: + - infraattributes/dd-autoconfigured + receivers: + - otlp + metrics: + exporters: + - datadog + processors: + - infraattributes/dd-autoconfigured + receivers: + - otlp + - prometheus + traces: + exporters: + - datadog + processors: + - infraattributes/dd-autoconfigured + receivers: + - otlp + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: + - stderr + initial_fields: {} + level: info + output_paths: + - stderr + sampling: + enabled: true + initial: 10 + thereafter: 100 + tick: 10s + metrics: + address: :8888 + level: Normal + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml 
b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml new file mode 100644 index 0000000000000..7e10c57812684 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml @@ -0,0 +1,181 @@ +connectors: {} +exporters: + datadog: + api: + fail_on_invalid_key: false + key: '[REDACTED]' + site: datadoghq.com + auth: null + compression: "" + cookies: null + disable_keep_alives: false + endpoint: "" + headers: {} + host_metadata: + enabled: true + hostname_source: config_or_system + tags: [] + hostname: "" + http2_ping_timeout: 0s + http2_read_idle_timeout: 0s + idle_conn_timeout: null + logs: + batch_wait: 5 + compression_level: 6 + dialer: + timeout: 0s + dump_payloads: false + endpoint: https://agent-http-intake.logs.datadoghq.com + use_compression: true + max_conns_per_host: null + max_idle_conns: null + max_idle_conns_per_host: null + metrics: + apm_stats_receiver_addr: "" + dialer: + timeout: 0s + delta_ttl: 3600 + enabled: false + endpoint: https://api.datadoghq.com + histograms: + mode: distributions + send_aggregation_metrics: false + send_count_sum_metrics: false + instrumentation_library_metadata_as_tags: false + instrumentation_scope_metadata_as_tags: false + resource_attributes_as_tags: false + summaries: + mode: gauges + sums: + cumulative_monotonic_mode: to_delta + initial_cumulative_monotonic_value: auto + tag_cardinality: "" + tags: "" + only_metadata: false + proxy_url: "" + read_buffer_size: 0 + retry_on_failure: + enabled: true + initial_interval: 5s + max_elapsed_time: 5m0s + max_interval: 30s + multiplier: 1.5 + randomization_factor: 0.5 + sending_queue: + enabled: true + num_consumers: 10 + queue_size: 1000 + storage: null + timeout: 15s + tls: + ca_file: "" + ca_pem: '[REDACTED]' + cert_file: "" + cert_pem: '[REDACTED]' + cipher_suites: [] + include_system_ca_certs_pool: false + insecure: false + insecure_skip_verify: false + key_file: "" + key_pem: '[REDACTED]' + max_version: 
"" + min_version: "" + reload_interval: 0s + server_name_override: "" + traces: + compute_top_level_by_span_kind: true + dialer: + timeout: 0s + endpoint: https://trace.agent.datadoghq.com + ignore_resources: [] + span_name_as_resource_name: true + span_name_remappings: {} + trace_buffer: 0 + write_buffer_size: 0 +extensions: {} +processors: {} +receivers: + otlp: + protocols: + grpc: null + http: null + prometheus: + config: + global: + evaluation_interval: 1m + scrape_interval: 1m + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 10s + scrape_configs: + - enable_compression: true + enable_http2: true + follow_redirects: true + honor_timestamps: true + job_name: datadog-agent + metrics_path: /metrics + scheme: http + scrape_interval: 5s + scrape_protocols: + - OpenMetricsText1.0.0 + - OpenMetricsText0.0.1 + - PrometheusText0.0.4 + scrape_timeout: 5s + static_configs: + - targets: + - 0.0.0.0:8888 + track_timestamps_staleness: false + report_extra_scrape_metrics: false + start_time_metric_regex: "" + target_allocator: null + trim_metric_suffixes: false + use_start_time_metric: false +service: + extensions: [] + pipelines: + logs: + exporters: + - datadog + processors: [] + receivers: + - otlp + metrics: + exporters: + - datadog + processors: [] + receivers: + - otlp + - prometheus + traces: + exporters: + - datadog + processors: [] + receivers: + - otlp + telemetry: + logs: + development: false + disable_caller: false + disable_stacktrace: false + encoding: console + error_output_paths: + - stderr + initial_fields: {} + level: info + output_paths: + - stderr + sampling: + enabled: true + initial: 10 + thereafter: 100 + tick: 10s + metrics: + address: :8888 + level: Normal + readers: [] + resource: {} + traces: + processors: [] + propagators: [] diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config.yaml new file mode 
100644 index 0000000000000..6939a55d7d783 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config.yaml @@ -0,0 +1,26 @@ +receivers: + otlp: + prometheus: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 5s + static_configs: + - targets: ['0.0.0.0:8888'] + +exporters: + datadog: + api: + key: '12345' + +service: + pipelines: + traces: + receivers: [otlp] + exporters: [datadog] + metrics: + receivers: [otlp, prometheus] + exporters: [datadog] + logs: + receivers: [otlp] + exporters: [datadog] diff --git a/comp/otelcol/ddflareextension/impl/unmarshaler.go b/comp/otelcol/ddflareextension/impl/unmarshaler.go new file mode 100644 index 0000000000000..9b48c23ea34d0 --- /dev/null +++ b/comp/otelcol/ddflareextension/impl/unmarshaler.go @@ -0,0 +1,115 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package ddflareextensionimpl defines the OpenTelemetry Extension implementation. 
+package ddflareextensionimpl + +import ( + "fmt" + + "golang.org/x/exp/maps" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/service" + "go.opentelemetry.io/collector/service/telemetry" +) + +type configSettings struct { + Receivers *configs[receiver.Factory] `mapstructure:"receivers"` + Processors *configs[processor.Factory] `mapstructure:"processors"` + Exporters *configs[exporter.Factory] `mapstructure:"exporters"` + Connectors *configs[connector.Factory] `mapstructure:"connectors"` + Extensions *configs[extension.Factory] `mapstructure:"extensions"` + Service service.Config `mapstructure:"service"` +} + +// unmarshal the configSettings from a confmap.Conf. +// After the config is unmarshalled, `Validate()` must be called to validate. +func unmarshal(v *confmap.Conf, factories otelcol.Factories) (*configSettings, error) { + + telFactory := telemetry.NewFactory() + defaultTelConfig := *telFactory.CreateDefaultConfig().(*telemetry.Config) + + // Unmarshal top level sections and validate. + cfg := &configSettings{ + Receivers: newConfigs(factories.Receivers), + Processors: newConfigs(factories.Processors), + Exporters: newConfigs(factories.Exporters), + Connectors: newConfigs(factories.Connectors), + Extensions: newConfigs(factories.Extensions), + // TODO: Add a component.ServiceFactory to allow this to be defined by the Service. 
+ Service: service.Config{ + Telemetry: defaultTelConfig, + }, + } + + return cfg, v.Unmarshal(&cfg) +} + +type configs[F component.Factory] struct { + cfgs map[component.ID]component.Config + + factories map[component.Type]F +} + +func newConfigs[F component.Factory](factories map[component.Type]F) *configs[F] { + return &configs[F]{factories: factories} +} + +func (c *configs[F]) Configs() map[component.ID]component.Config { + return c.cfgs +} + +func (c *configs[F]) Unmarshal(conf *confmap.Conf) error { + rawCfgs := make(map[component.ID]map[string]any) + if err := conf.Unmarshal(&rawCfgs); err != nil { + return err + } + + // Prepare resulting map. + c.cfgs = make(map[component.ID]component.Config) + // Iterate over raw configs and create a config for each. + for id := range rawCfgs { + // Find factory based on component kind and type that we read from config source. + factory, ok := c.factories[id.Type()] + if !ok { + return errorUnknownType(id, maps.Keys(c.factories)) + } + + // Get the configuration from the confmap.Conf to preserve internal representation. + sub, err := conf.Sub(id.String()) + if err != nil { + return errorUnmarshalError(id, err) + } + + // Create the default config for this component. + cfg := factory.CreateDefaultConfig() + + // Now that the default config struct is created we can Unmarshal into it, + // and it will apply user-defined config on top of the default. 
+ if err := sub.Unmarshal(&cfg); err != nil { + return errorUnmarshalError(id, err) + } + + c.cfgs[id] = cfg + } + + return nil +} + +func errorUnknownType(id component.ID, factories []component.Type) error { + return fmt.Errorf("unknown type: %q for id: %q (valid values: %v)", id.Type(), id, factories) +} + +func errorUnmarshalError(id component.ID, err error) error { + return fmt.Errorf("error reading configuration for %q: %w", id, err) +} diff --git a/comp/otelcol/logsagentpipeline/go.mod b/comp/otelcol/logsagentpipeline/go.mod index b5adb734302e7..03f9f36a6d637 100644 --- a/comp/otelcol/logsagentpipeline/go.mod +++ b/comp/otelcol/logsagentpipeline/go.mod @@ -20,6 +20,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../pkg/logs/client @@ -61,14 +62,15 @@ require github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.56.0-rc.3 require ( github.com/DataDog/agent-payload/v5 v5.0.106 // indirect github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect @@ -84,20 +86,20 @@ require ( github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod index 471ae801b59bb..1740ca3ca7ede 100644 --- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod +++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod @@ -21,6 +21,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor 
=> ../../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../../pkg/logs/client @@ -64,8 +65,8 @@ require ( github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 @@ -76,7 +77,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 github.com/stretchr/testify v1.9.0 @@ -88,12 +89,13 @@ require ( github.com/DataDog/agent-payload/v5 v5.0.106 // indirect github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/processor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 // indirect @@ -101,18 +103,18 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect 
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect diff --git a/comp/otelcol/otlp/collector.go b/comp/otelcol/otlp/collector.go index 96787d7a53196..2ce66bf173972 100644 --- a/comp/otelcol/otlp/collector.go +++ b/comp/otelcol/otlp/collector.go @@ -33,6 +33,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor" @@ -86,6 +87,11 @@ func (t *tagEnricher) Enrich(_ context.Context, extraTags []string, dimensions * return enrichedTags } +func generateID(group, resource, namespace, name string) string { + + return string(util.GenerateKubeMetadataEntityID(group, resource, namespace, name)) +} + func getComponents(s serializer.MetricSerializer, logsAgentChannel chan *message.Message, tagger tagger.Component) ( otelcol.Factories, error, @@ -121,7 +127,7 @@ func getComponents(s serializer.MetricSerializer, 
logsAgentChannel chan *message processorFactories := []processor.Factory{batchprocessor.NewFactory()} if tagger != nil { - processorFactories = append(processorFactories, infraattributesprocessor.NewFactory(tagger)) + processorFactories = append(processorFactories, infraattributesprocessor.NewFactory(tagger, generateID)) } processors, err := processor.MakeFactoryMap(processorFactories...) if err != nil { diff --git a/comp/otelcol/otlp/collector_test.go b/comp/otelcol/otlp/collector_test.go index 3d9b51062daf5..9da8447cfd4de 100644 --- a/comp/otelcol/otlp/collector_test.go +++ b/comp/otelcol/otlp/collector_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" serializermock "github.com/DataDog/datadog-agent/pkg/serializer/mocks" ) @@ -66,20 +66,21 @@ func AssertFailedRun(t *testing.T, pcfg PipelineConfig, expected string) { require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - assert.ErrorContains(t, p.Run(ctx), expected) + pipelineError := p.Run(ctx) + assert.ErrorContains(t, pipelineError, expected) } func TestStartPipeline(t *testing.T) { - config.Datadog().SetWithoutSource("hostname", "otlp-testhostname") - defer config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "otlp-testhostname") + defer pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") pcfg := getTestPipelineConfig() AssertSucessfulRun(t, pcfg) } func TestStartPipelineFromConfig(t *testing.T) { - config.Datadog().SetWithoutSource("hostname", "otlp-testhostname") - defer config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "otlp-testhostname") + defer 
pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") // TODO (AP-1723): Disable changing the gRPC logger before re-enabling. if runtime.GOOS == "windows" { @@ -103,7 +104,7 @@ func TestStartPipelineFromConfig(t *testing.T) { {path: "receiver/advanced.yaml"}, { path: "receiver/typo.yaml", - err: "error decoding 'receivers': error reading configuration for \"otlp\": 1 error(s) decoding:\n\n* 'protocols' has invalid keys: htttp", + err: "'protocols' has invalid keys: htttp", }, } diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod index 5f48e6da16c46..acbef53a372a3 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod @@ -34,6 +34,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../../../../../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../../../../../../pkg/logs/client @@ -97,10 +98,10 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 + 
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/config/configauth v0.104.0 @@ -127,7 +128,7 @@ require ( github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect @@ -137,11 +138,12 @@ require ( github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure 
v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect @@ -167,29 +169,29 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/DataDog/zstd v1.5.5 // indirect diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum index cdf2e4f5f136d..6c32aacebcc70 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum @@ -14,18 +14,18 @@ github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4ti github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= 
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 h1:px2+7svK86oeCGd+sT1x/9f0pqIJdApGFnWI0AOPXwA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1/go.mod h1:+LijQ2LdlocAQ4WB+7KsoIGe90bfogkRslubd9swVow= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod index e689f59dd1341..726a091dff399 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod @@ -17,6 +17,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => 
../../../../../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../../../../../../pkg/logs/message github.com/DataDog/datadog-agent/pkg/logs/sources => ../../../../../../pkg/logs/sources @@ -44,9 +45,9 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.14.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 github.com/stormcat24/protodep v0.1.8 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 @@ -55,27 +56,28 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/proto v0.55.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.13.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect 
github.com/DataDog/zstd v1.5.2 // indirect diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum index 35e04032d9e4c..4c182e6b228bd 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum @@ -2,12 +2,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-api-client-go/v2 v2.13.0 h1:2c1dXSyUfum2YIVoYlqnBhV5JOG1cLSW+4jB3RrKjLc= github.com/DataDog/datadog-api-client-go/v2 v2.13.0/go.mod h1:kntOqXEh1SmjwSDzW/eJkr9kS7EqttvEkelglWtJRbg= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 h1:1FGr7e8wAebpvpoabdQcRt5WtPCJ2W2kDPzLfOb07/c= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.14.0 h1:nma5ZICTbHZ0YoMu18ziWGSLK1ICzMm6rJTv+IatJ0U= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.14.0/go.mod h1:xUiGj13q5uHPboc0xZ754fyusiF5C2RxNzOFdTbdZFA= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod index 14c69501c50af..ec3b34ee5f4f8 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod @@ -65,9 +65,9 @@ require ( github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.104.0 github.com/stretchr/testify v1.9.0 diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum index cc8e3ecec4203..00e001429e524 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum @@ -4,14 +4,14 
@@ github.com/DataDog/agent-payload/v5 v5.0.114 h1:qg3jfzz2/lOFKbFOw2yM6RM8eyMs4HlE github.com/DataDog/agent-payload/v5 v5.0.114/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 
h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go index e34800eaf21df..5cf3de7117651 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/config_test.go @@ -35,7 +35,8 @@ func TestLoadingConfigStrictLogs(t *testing.T) { for _, tt := range tests { t.Run(tt.id.String(), func(t *testing.T) { tc := newTestTaggerClient() - f := NewFactory(tc) + gc := newTestGenerateIDClient().generateID + f := NewFactory(tc, gc) cfg := f.CreateDefaultConfig() sub, err := cm.Sub(tt.id.String()) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go index 086c89e1bd417..429ee568fb79f 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory.go @@ -18,14 +18,18 @@ import ( var processorCapabilities = consumer.Capabilities{MutatesData: true} +// TODO: Remove tagger and generateID as depenendencies to enable future import of +// infraattributesprocessor by external go packages like ocb type 
factory struct { - tagger taggerClient + tagger taggerClient + generateID GenerateKubeMetadataEntityID } // NewFactory returns a new factory for the InfraAttributes processor. -func NewFactory(tagger taggerClient) processor.Factory { +func NewFactory(tagger taggerClient, generateID GenerateKubeMetadataEntityID) processor.Factory { f := &factory{ - tagger: tagger, + tagger: tagger, + generateID: generateID, } return processor.NewFactory( @@ -49,7 +53,7 @@ func (f *factory) createMetricsProcessor( cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { - iap, err := newInfraAttributesMetricProcessor(set, cfg.(*Config), f.tagger) + iap, err := newInfraAttributesMetricProcessor(set, cfg.(*Config), f.tagger, f.generateID) if err != nil { return nil, err } @@ -68,7 +72,7 @@ func (f *factory) createLogsProcessor( cfg component.Config, nextConsumer consumer.Logs, ) (processor.Logs, error) { - iap, err := newInfraAttributesLogsProcessor(set, cfg.(*Config), f.tagger) + iap, err := newInfraAttributesLogsProcessor(set, cfg.(*Config), f.tagger, f.generateID) if err != nil { return nil, err } @@ -87,7 +91,7 @@ func (f *factory) createTracesProcessor( cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { - iap, err := newInfraAttributesSpanProcessor(set, cfg.(*Config), f.tagger) + iap, err := newInfraAttributesSpanProcessor(set, cfg.(*Config), f.tagger, f.generateID) if err != nil { return nil, err } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go index 5ba0b9eb0ce0e..21ee70fd1874a 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/factory_test.go @@ -21,7 +21,8 @@ import ( func TestType(t *testing.T) { tc := newTestTaggerClient() - factory := NewFactory(tc) + gc := 
newTestGenerateIDClient().generateID + factory := NewFactory(tc, gc) pType := factory.Type() assert.Equal(t, pType, Type) @@ -29,7 +30,8 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { tc := newTestTaggerClient() - factory := NewFactory(tc) + gc := newTestGenerateIDClient().generateID + factory := NewFactory(tc, gc) cfg := factory.CreateDefaultConfig() assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } @@ -52,10 +54,11 @@ func TestCreateProcessors(t *testing.T) { cm, err := confmaptest.LoadConf(filepath.Join("testdata", tt.configName)) require.NoError(t, err) tc := newTestTaggerClient() + gc := newTestGenerateIDClient().generateID for k := range cm.ToStringMap() { // Check if all processor variations that are defined in test config can be actually created - factory := NewFactory(tc) + factory := NewFactory(tc, gc) cfg := factory.CreateDefaultConfig() sub, err := cm.Sub(k) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod new file mode 100644 index 0000000000000..df8797c00d15f --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod @@ -0,0 +1,134 @@ +module github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../../api/api/def + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../../../core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../../../core/flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../../core/secrets + github.com/DataDog/datadog-agent/comp/core/tagger/common => ../../../../../core/tagger/common + github.com/DataDog/datadog-agent/comp/core/tagger/types => ../../../../../core/tagger/types + github.com/DataDog/datadog-agent/comp/core/tagger/utils => 
../../../../../core/tagger/utils + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../../core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../../../def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/model => ../../../../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/tagger => ../../../../../../pkg/util/tagger + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../../../pkg/util/winutil +) + +require ( + github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.0-rc.3 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.104.0 + go.opentelemetry.io/collector/confmap v0.104.0 + 
go.opentelemetry.io/collector/consumer v0.104.0 + go.opentelemetry.io/collector/pdata v1.11.0 + go.opentelemetry.io/collector/processor v0.104.0 + go.opentelemetry.io/collector/semconv v0.104.0 + go.opentelemetry.io/otel/metric v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + 
github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.54.0 // indirect + github.com/prometheus/procfs v0.15.0 // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + 
github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/collector v0.104.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect + go.opentelemetry.io/collector/featuregate v1.11.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.104.0 // indirect + go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect + go.opentelemetry.io/otel/sdk v1.27.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum new file mode 100644 index 0000000000000..32837146844ff --- /dev/null +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum @@ -0,0 +1,422 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod 
h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= 
+github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 
h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod 
h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= +github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= 
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= +go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= +go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= +go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= +go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= +go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= +go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= +go.opentelemetry.io/collector/confmap 
v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= +go.opentelemetry.io/collector/consumer v0.104.0 h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= +go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= +go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= +go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= +go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= +go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= +go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= +go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= +go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= +go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= +go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= +go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= +go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= +go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric 
v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 
v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients.go similarity index 100% rename from comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient.go rename to comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients.go diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go similarity index 74% rename from comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go rename to comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go index 41fc7ee5ca3b8..989623475184a 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/taggerclient_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/helperclients_test.go @@ -6,7 +6,8 @@ package infraattributesprocessor import ( - "github.com/DataDog/datadog-agent/comp/core/tagger/common" + "fmt" + "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -29,5 +30,15 @@ func (t *testTaggerClient) Tag(entityID string, _ types.TagCardinality) ([]strin // GlobalTags mocks taggerimpl.GlobalTags functionality for purpose of 
testing, removing dependency on Taggerimpl func (t *testTaggerClient) GlobalTags(_ types.TagCardinality) ([]string, error) { - return t.tagMap[common.GetGlobalEntityID().String()], nil + return t.tagMap[types.NewEntityID("internal", "global-entity-id").String()], nil +} + +type testGenerateIDClient struct{} + +func newTestGenerateIDClient() *testGenerateIDClient { + return &testGenerateIDClient{} +} + +func (t *testGenerateIDClient) generateID(group, resource, namespace, name string) string { + return fmt.Sprintf("%s/%s/%s/%s", group, resource, namespace, name) } diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go index bd3ef1584890a..8d63b39c3754b 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs.go @@ -19,13 +19,15 @@ type infraAttributesLogProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality + generateID GenerateKubeMetadataEntityID } -func newInfraAttributesLogsProcessor(set processor.Settings, cfg *Config, tagger taggerClient) (*infraAttributesLogProcessor, error) { +func newInfraAttributesLogsProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesLogProcessor, error) { ialp := &infraAttributesLogProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, + generateID: generateID, } set.Logger.Info("Logs Infra Attributes Processor configured") @@ -36,7 +38,7 @@ func (ialp *infraAttributesLogProcessor) processLogs(_ context.Context, ld plog. 
rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { resourceAttributes := rls.At(i).Resource().Attributes() - entityIDs := entityIDsFromAttributes(resourceAttributes) + entityIDs := entityIDsFromAttributes(resourceAttributes, ialp.generateID) tagMap := make(map[string]string) // Get all unique tags from resource attributes and global tags diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go index 054c6667d1fc1..4248ee573697e 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/logs_test.go @@ -14,7 +14,6 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/processor/processortest" - "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -127,9 +126,10 @@ func TestInfraAttributesLogProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} + tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} + gc := newTestGenerateIDClient().generateID - factory := NewFactory(tc) + factory := NewFactory(tc, gc) flp, err := factory.CreateLogsProcessor( context.Background(), processortest.NewNopSettings(), diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go index 5dcbd2e1f974c..e7a8cb3d7a700 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics.go @@ -11,7 +11,6 @@ import ( "strings" 
"github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -24,22 +23,27 @@ type infraAttributesMetricProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality + generateID GenerateKubeMetadataEntityID } -func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagger taggerClient) (*infraAttributesMetricProcessor, error) { +func newInfraAttributesMetricProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesMetricProcessor, error) { iamp := &infraAttributesMetricProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, + generateID: generateID, } set.Logger.Info("Metric Infra Attributes Processor configured") return iamp, nil } +// GenerateKubeMetadataEntityID is a function that generates an entity ID for a Kubernetes resource. +type GenerateKubeMetadataEntityID func(group, resource, namespace, name string) string + // TODO: Replace OriginIDFromAttributes in opentelemetry-mapping-go with this method // entityIDsFromAttributes gets the entity IDs from resource attributes. // If not found, an empty string slice is returned. -func entityIDsFromAttributes(attrs pcommon.Map) []types.EntityID { +func entityIDsFromAttributes(attrs pcommon.Map, generateID GenerateKubeMetadataEntityID) []types.EntityID { entityIDs := make([]types.EntityID, 0, 8) // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers. 
if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok { @@ -61,11 +65,11 @@ func entityIDsFromAttributes(attrs pcommon.Map) []types.EntityID { } } if namespace, ok := attrs.Get(conventions.AttributeK8SNamespaceName); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "namespaces", "", namespace.AsString())))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "namespaces", "", namespace.AsString()))) } if nodeName, ok := attrs.Get(conventions.AttributeK8SNodeName); ok { - entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, string(util.GenerateKubeMetadataEntityID("", "nodes", "", nodeName.AsString())))) + entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesMetadata, generateID("", "nodes", "", nodeName.AsString()))) } if podUID, ok := attrs.Get(conventions.AttributeK8SPodUID); ok { entityIDs = append(entityIDs, types.NewEntityID(types.KubernetesPodUID, podUID.AsString())) @@ -88,7 +92,7 @@ func (iamp *infraAttributesMetricProcessor) processMetrics(_ context.Context, md rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { resourceAttributes := rms.At(i).Resource().Attributes() - entityIDs := entityIDsFromAttributes(resourceAttributes) + entityIDs := entityIDsFromAttributes(resourceAttributes, iamp.generateID) tagMap := make(map[string]string) // Get all unique tags from resource attributes and global tags diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go index 023f60b904edc..01cecbaefcc62 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/metrics_test.go @@ -16,7 +16,6 @@ import ( "go.opentelemetry.io/collector/processor/processortest" conventions 
"go.opentelemetry.io/collector/semconv/v1.21.0" - "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -130,8 +129,10 @@ func TestInfraAttributesMetricProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} - factory := NewFactory(tc) + tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} + gc := newTestGenerateIDClient().generateID + + factory := NewFactory(tc, gc) fmp, err := factory.CreateMetricsProcessor( context.Background(), processortest.NewNopSettings(), @@ -262,10 +263,10 @@ func TestEntityIDsFromAttributes(t *testing.T) { entityIDs: []string{"process://process_pid_goes_here"}, }, } - + gc := newTestGenerateIDClient().generateID for _, testInstance := range tests { t.Run(testInstance.name, func(t *testing.T) { - entityIDs := entityIDsFromAttributes(testInstance.attrs) + entityIDs := entityIDsFromAttributes(testInstance.attrs, gc) entityIDsAsStrings := make([]string, len(entityIDs)) for idx, entityID := range entityIDs { entityIDsAsStrings[idx] = entityID.String() diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go index d35f7b4009208..ce6fe02674e95 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces.go @@ -19,13 +19,15 @@ type infraAttributesSpanProcessor struct { logger *zap.Logger tagger taggerClient cardinality types.TagCardinality + generateID GenerateKubeMetadataEntityID } -func newInfraAttributesSpanProcessor(set processor.Settings, cfg *Config, tagger taggerClient) (*infraAttributesSpanProcessor, error) { +func 
newInfraAttributesSpanProcessor(set processor.Settings, cfg *Config, tagger taggerClient, generateID GenerateKubeMetadataEntityID) (*infraAttributesSpanProcessor, error) { iasp := &infraAttributesSpanProcessor{ logger: set.Logger, tagger: tagger, cardinality: cfg.Cardinality, + generateID: generateID, } set.Logger.Info("Span Infra Attributes Processor configured") return iasp, nil @@ -35,7 +37,7 @@ func (iasp *infraAttributesSpanProcessor) processTraces(_ context.Context, td pt rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { resourceAttributes := rss.At(i).Resource().Attributes() - entityIDs := entityIDsFromAttributes(resourceAttributes) + entityIDs := entityIDsFromAttributes(resourceAttributes, iasp.generateID) tagMap := make(map[string]string) // Get all unique tags from resource attributes and global tags diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go index 53cf8ad88f134..0c9a47324e1df 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/traces_test.go @@ -14,7 +14,6 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor/processortest" - "github.com/DataDog/datadog-agent/comp/core/tagger/common" "github.com/DataDog/datadog-agent/comp/core/tagger/types" ) @@ -127,8 +126,9 @@ func TestInfraAttributesTraceProcessor(t *testing.T) { tc := newTestTaggerClient() tc.tagMap["container_id://test"] = []string{"container:id"} tc.tagMap["deployment://namespace/deployment"] = []string{"deployment:name"} - tc.tagMap[common.GetGlobalEntityID().String()] = []string{"global:tag"} - factory := NewFactory(tc) + tc.tagMap[types.NewEntityID("internal", "global-entity-id").String()] = []string{"global:tag"} + gc := newTestGenerateIDClient().generateID + factory := NewFactory(tc, gc) fmp, err := 
factory.CreateTracesProcessor( context.Background(), processortest.NewNopSettings(), diff --git a/comp/otelcol/otlp/components/statsprocessor/go.mod b/comp/otelcol/otlp/components/statsprocessor/go.mod index b21dae11b5ade..ca6cee287f497 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.mod +++ b/comp/otelcol/otlp/components/statsprocessor/go.mod @@ -23,7 +23,7 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 go.opentelemetry.io/collector/pdata v1.11.0 diff --git a/comp/otelcol/otlp/components/statsprocessor/go.sum b/comp/otelcol/otlp/components/statsprocessor/go.sum index a928af42bb225..8c6d9031f59ae 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.sum +++ b/comp/otelcol/otlp/components/statsprocessor/go.sum @@ -4,8 +4,8 @@ github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/ github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.2 
h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= diff --git a/comp/otelcol/otlp/integrationtest/integration_test.go b/comp/otelcol/otlp/integrationtest/integration_test.go index e073c29ba1f3c..d80e75f2ad6d7 100644 --- a/comp/otelcol/otlp/integrationtest/integration_test.go +++ b/comp/otelcol/otlp/integrationtest/integration_test.go @@ -54,7 +54,6 @@ import ( collectorcontribFx "github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/fx" collectordef "github.com/DataDog/datadog-agent/comp/otelcol/collector/def" collectorfx "github.com/DataDog/datadog-agent/comp/otelcol/collector/fx" - configstorefx "github.com/DataDog/datadog-agent/comp/otelcol/configstore/fx" converter "github.com/DataDog/datadog-agent/comp/otelcol/converter/def" converterfx "github.com/DataDog/datadog-agent/comp/otelcol/converter/fx" "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" @@ -96,7 +95,6 @@ func runTestOTelAgent(ctx context.Context, params *subcommands.GlobalParams) err fx.Provide(func(cp converter.Component) confmap.Converter { return cp }), - configstorefx.Module(), fx.Provide(func() (coreconfig.Component, error) { c, err := agentConfig.NewConfigComponent(context.Background(), "", params.ConfPaths) if err != nil { diff --git a/comp/otelcol/otlp/no_otlp.go b/comp/otelcol/otlp/no_otlp.go index 5f3f5b2ab0929..0800b404fc178 100644 --- a/comp/otelcol/otlp/no_otlp.go +++ b/comp/otelcol/otlp/no_otlp.go @@ -8,11 +8,11 @@ package otlp import ( - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // IsEnabled checks if OTLP pipeline is enabled in a given config. 
-func IsEnabled(_ config.Reader) bool { +func IsEnabled(_ model.Reader) bool { return false } diff --git a/comp/otelcol/otlp/testutil/go.mod b/comp/otelcol/otlp/testutil/go.mod index cd00e6506c335..b198ecd8a455f 100644 --- a/comp/otelcol/otlp/testutil/go.mod +++ b/comp/otelcol/otlp/testutil/go.mod @@ -33,8 +33,8 @@ require ( github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/proto v0.55.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/DataDog/sketches-go v1.4.6 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/pdata v1.9.0 diff --git a/comp/otelcol/otlp/testutil/go.sum b/comp/otelcol/otlp/testutil/go.sum index b4d06b630c428..5d129b48dda03 100644 --- a/comp/otelcol/otlp/testutil/go.sum +++ b/comp/otelcol/otlp/testutil/go.sum @@ -1,9 +1,9 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0 h1:1FGr7e8wAebpvpoabdQcRt5WtPCJ2W2kDPzLfOb07/c= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.17.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod 
h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/comp/process/agent/agentimpl/agent.go b/comp/process/agent/agentimpl/agent.go index 4eb469a1540e4..ff9e5d80eae79 100644 --- a/comp/process/agent/agentimpl/agent.go +++ b/comp/process/agent/agentimpl/agent.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process/runner" submitterComp "github.com/DataDog/datadog-agent/comp/process/submitter" "github.com/DataDog/datadog-agent/comp/process/types" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/checks" processStatsd "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -100,7 +100,7 @@ func newProcessAgent(deps dependencies) (provides, error) { }, nil } - if err := processStatsd.Configure(ddconfig.GetBindHost(), deps.Config.GetInt("dogstatsd_port"), deps.Statsd.CreateForHostPort); err != nil { + if err := processStatsd.Configure(pkgconfigsetup.GetBindHost(pkgconfigsetup.Datadog()), deps.Config.GetInt("dogstatsd_port"), deps.Statsd.CreateForHostPort); err != nil { deps.Log.Criticalf("Error configuring statsd for process-agent: %s", err) return provides{ Comp: processAgent{ diff --git a/comp/process/agent/status.go b/comp/process/agent/status.go index d536af75aeca4..d7666144e7292 100644 --- a/comp/process/agent/status.go +++ b/comp/process/agent/status.go @@ -14,7 
+14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/status" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processStatus "github.com/DataDog/datadog-agent/pkg/process/util/status" ) @@ -63,7 +63,7 @@ func (s StatusProvider) populateStatus() map[string]interface{} { } else { // Get expVar server address - ipcAddr, err := ddconfig.GetIPCAddress() + ipcAddr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { status["error"] = fmt.Sprintf("%v", err.Error()) return status diff --git a/comp/process/apiserver/apiserver.go b/comp/process/apiserver/apiserver.go index 66cf2b1d66851..c216b2a93fef4 100644 --- a/comp/process/apiserver/apiserver.go +++ b/comp/process/apiserver/apiserver.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/process-agent/api" log "github.com/DataDog/datadog-agent/comp/core/log/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var _ Component = (*apiserver)(nil) @@ -40,12 +40,12 @@ func newApiServer(deps dependencies) Component { r := mux.NewRouter() api.SetupAPIServerHandlers(deps.APIServerDeps, r) // Set up routes - addr, err := ddconfig.GetProcessAPIAddressPort() + addr, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return err } deps.Log.Infof("API server listening on %s", addr) - timeout := time.Duration(ddconfig.Datadog().GetInt("server_timeout")) * time.Second + timeout := time.Duration(pkgconfigsetup.Datadog().GetInt("server_timeout")) * time.Second apiserver := &apiserver{ server: &http.Server{ diff --git a/comp/process/expvars/expvarsimpl/expvars.go b/comp/process/expvars/expvarsimpl/expvars.go index 9125e7245d1b0..7d30b58185aee 100644 --- a/comp/process/expvars/expvarsimpl/expvars.go +++ b/comp/process/expvars/expvarsimpl/expvars.go @@ 
-21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/process/expvars" "github.com/DataDog/datadog-agent/comp/process/hostinfo" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" "github.com/DataDog/datadog-agent/pkg/process/status" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -91,8 +91,8 @@ func newExpvarServer(deps dependencies) (expvars.Component, error) { func getExpvarPort(deps dependencies) int { expVarPort := deps.Config.GetInt("process_config.expvar_port") if expVarPort <= 0 { - _ = deps.Log.Warnf("Invalid process_config.expvar_port -- %d, using default port %d", expVarPort, ddconfig.DefaultProcessExpVarPort) - expVarPort = ddconfig.DefaultProcessExpVarPort + _ = deps.Log.Warnf("Invalid process_config.expvar_port -- %d, using default port %d", expVarPort, pkgconfigsetup.DefaultProcessExpVarPort) + expVarPort = pkgconfigsetup.DefaultProcessExpVarPort } return expVarPort } diff --git a/comp/process/forwarders/forwardersimpl/forwarders.go b/comp/process/forwarders/forwardersimpl/forwarders.go index 97c22f21f1480..2e26c4c9f0c79 100644 --- a/comp/process/forwarders/forwardersimpl/forwarders.go +++ b/comp/process/forwarders/forwardersimpl/forwarders.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" "github.com/DataDog/datadog-agent/comp/process/forwarders" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -46,8 +46,8 @@ func newForwarders(deps dependencies) 
(forwarders.Component, error) { config := deps.Config queueBytes := config.GetInt("process_config.process_queue_bytes") if queueBytes <= 0 { - deps.Logger.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, ddconfig.DefaultProcessQueueBytes) - queueBytes = ddconfig.DefaultProcessQueueBytes + deps.Logger.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, pkgconfigsetup.DefaultProcessQueueBytes) + queueBytes = pkgconfigsetup.DefaultProcessQueueBytes } eventsAPIEndpoints, err := endpoint.GetEventsAPIEndpoints(config) diff --git a/comp/process/profiler/profilerimpl/profiler.go b/comp/process/profiler/profilerimpl/profiler.go index 1c6b53d884d38..bdcaa28f2bf71 100644 --- a/comp/process/profiler/profilerimpl/profiler.go +++ b/comp/process/profiler/profilerimpl/profiler.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" profilecomp "github.com/DataDog/datadog-agent/comp/process/profiler" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/profiling" @@ -71,7 +71,7 @@ func getProfilingSettings(cfg config.Component) profiling.Settings { if site == "" { s := cfg.GetString("site") if s == "" { - s = ddconfig.DefaultSite + s = pkgconfigsetup.DefaultSite } site = fmt.Sprintf(profiling.ProfilingURLTemplate, s) } diff --git a/comp/process/status/statusimpl/status.go b/comp/process/status/statusimpl/status.go index f7e8f8b307a3a..7002745a9f6ed 100644 --- a/comp/process/status/statusimpl/status.go +++ b/comp/process/status/statusimpl/status.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/status" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" processStatus "github.com/DataDog/datadog-agent/pkg/process/util/status" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -84,7 +84,7 @@ func (s statusProvider) populateStatus() map[string]interface{} { } else { // Get expVar server address - ipcAddr, err := ddconfig.GetIPCAddress() + ipcAddr, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { status["error"] = fmt.Sprintf("%v", err.Error()) return status @@ -92,7 +92,7 @@ func (s statusProvider) populateStatus() map[string]interface{} { port := s.config.GetInt("process_config.expvar_port") if port <= 0 { - port = ddconfig.DefaultProcessExpVarPort + port = pkgconfigsetup.DefaultProcessExpVarPort } url = fmt.Sprintf("http://%s:%d/debug/vars", ipcAddr, port) } diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient.go b/comp/remote-config/rcclient/rcclientimpl/rcclient.go index bbe779df219cd..e4dd131dea804 100644 --- a/comp/remote-config/rcclient/rcclientimpl/rcclient.go +++ b/comp/remote-config/rcclient/rcclientimpl/rcclient.go @@ -21,10 +21,10 @@ import ( "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/types" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/remote/client" "github.com/DataDog/datadog-agent/pkg/config/remote/data" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" @@ -71,7 +71,7 @@ type dependencies struct { // components that are instantiated last). Remote configuration client is a good candidate for this since it must be // able to interact with any other components (i.e. 
be at the end of the dependency graph). func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } @@ -89,8 +89,8 @@ func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { // We have to create the client in the constructor and set its name later c, err := client.NewUnverifiedGRPCClient( ipcAddress, - config.GetIPCPort(), - func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, + pkgconfigsetup.GetIPCPort(), + func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, optsWithDefault..., ) if err != nil { @@ -98,11 +98,11 @@ func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { } var clientMRF *client.Client - if config.Datadog().GetBool("multi_region_failover.enabled") { + if pkgconfigsetup.Datadog().GetBool("multi_region_failover.enabled") { clientMRF, err = client.NewUnverifiedMRFGRPCClient( ipcAddress, - config.GetIPCPort(), - func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, + pkgconfigsetup.GetIPCPort(), + func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, optsWithDefault..., ) if err != nil { @@ -119,7 +119,7 @@ func newRemoteConfigClient(deps dependencies) (rcclient.Component, error) { settingsComponent: deps.SettingsComponent, } - if config.IsRemoteConfigEnabled(config.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { deps.Lc.Append(fx.Hook{ OnStart: func(context.Context) error { rc.start() @@ -160,19 +160,19 @@ func (rc rcClient) start() { func (rc rcClient) mrfUpdateCallback(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { // If the updates map is empty, we should unset the failover settings if they were set via RC previously if len(updates) == 0 { - 
mrfFailoverMetricsSource := config.Datadog().GetSource("multi_region_failover.failover_metrics") - mrfFailoverLogsSource := config.Datadog().GetSource("multi_region_failover.failover_logs") + mrfFailoverMetricsSource := pkgconfigsetup.Datadog().GetSource("multi_region_failover.failover_metrics") + mrfFailoverLogsSource := pkgconfigsetup.Datadog().GetSource("multi_region_failover.failover_logs") // Unset the RC-sourced failover values regardless of what they are - config.Datadog().UnsetForSource("multi_region_failover.failover_metrics", model.SourceRC) - config.Datadog().UnsetForSource("multi_region_failover.failover_logs", model.SourceRC) + pkgconfigsetup.Datadog().UnsetForSource("multi_region_failover.failover_metrics", model.SourceRC) + pkgconfigsetup.Datadog().UnsetForSource("multi_region_failover.failover_logs", model.SourceRC) // If either of the values were previously set via RC, log the current values now that we've unset them if mrfFailoverMetricsSource == model.SourceRC { - pkglog.Infof("Falling back to `multi_region_failover.failover_metrics: %t`", config.Datadog().GetBool("multi_region_failover.failover_metrics")) + pkglog.Infof("Falling back to `multi_region_failover.failover_metrics: %t`", pkgconfigsetup.Datadog().GetBool("multi_region_failover.failover_metrics")) } if mrfFailoverLogsSource == model.SourceRC { - pkglog.Infof("Falling back to `multi_region_failover.failover_logs: %t`", config.Datadog().GetBool("multi_region_failover.failover_logs")) + pkglog.Infof("Falling back to `multi_region_failover.failover_logs: %t`", pkgconfigsetup.Datadog().GetBool("multi_region_failover.failover_logs")) } return } @@ -261,7 +261,7 @@ func (rc rcClient) agentConfigUpdateCallback(updates map[string]state.RawConfig, } // Checks who (the source) is responsible for the last logLevel change - source := config.Datadog().GetSource("log_level") + source := pkgconfigsetup.Datadog().GetSource("log_level") switch source { case model.SourceRC: @@ -269,8 +269,8 @@ func (rc 
rcClient) agentConfigUpdateCallback(updates map[string]state.RawConfig, // - we want to change (once again) the log level through RC // - we want to fall back to the log level we had saved as fallback (in that case mergedConfig.LogLevel == "") if len(mergedConfig.LogLevel) == 0 { - config.Datadog().UnsetForSource("log_level", model.SourceRC) - pkglog.Infof("Removing remote-config log level override, falling back to '%s'", config.Datadog().Get("log_level")) + pkgconfigsetup.Datadog().UnsetForSource("log_level", model.SourceRC) + pkglog.Infof("Removing remote-config log level override, falling back to '%s'", pkgconfigsetup.Datadog().Get("log_level")) } else { newLevel := mergedConfig.LogLevel pkglog.Infof("Changing log level to '%s' through remote config", newLevel) diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go b/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go index b7ba8ab8ff88d..d31decbceb748 100644 --- a/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go +++ b/comp/remote-config/rcclient/rcclientimpl/rcclient_test.go @@ -16,10 +16,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/settings/settingsimpl" "github.com/DataDog/datadog-agent/comp/remote-config/rcclient" "github.com/DataDog/datadog-agent/pkg/api/security" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" @@ -43,7 +43,7 @@ func (m *mockLogLevelRuntimeSettings) Set(_ config.Component, v interface{}, sou return m.expectedError } m.logLevel = v.(string) - pkgconfig.Datadog().Set(m.Name(), m.logLevel, source) + pkgconfigsetup.Datadog().Set(m.Name(), 
m.logLevel, source) return nil } @@ -122,11 +122,11 @@ func TestAgentConfigCallback(t *testing.T) { structRC := rc.(rcClient) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) assert.NoError(t, err) structRC.client, _ = client.NewUnverifiedGRPCClient( - ipcAddress, pkgconfig.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfig.Datadog()) }, + ipcAddress, pkgconfigsetup.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, client.WithAgent("test-agent", "9.99.9"), client.WithProducts(state.ProductAgentConfig), client.WithPollInterval(time.Hour), @@ -134,15 +134,15 @@ func TestAgentConfigCallback(t *testing.T) { // ----------------- // Test scenario #1: Agent Flare request by RC and the log level hadn't been changed by the user before - assert.Equal(t, model.SourceDefault, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, model.SourceDefault, pkgconfigsetup.Datadog().GetSource("log_level")) // Set log level to debug structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerStartFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "debug", pkgconfig.Datadog().Get("log_level")) - assert.Equal(t, model.SourceRC, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "debug", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceRC, pkgconfigsetup.Datadog().GetSource("log_level")) // Send an empty log level request, as RC would at the end of the Agent Flare request // Should fallback to the default level @@ -150,36 +150,36 @@ func TestAgentConfigCallback(t *testing.T) { "datadog/2/AGENT_CONFIG/layer1/configname": layerEndFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "info", pkgconfig.Datadog().Get("log_level")) - 
assert.Equal(t, model.SourceDefault, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "info", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceDefault, pkgconfigsetup.Datadog().GetSource("log_level")) // ----------------- // Test scenario #2: log level was changed by the user BEFORE Agent Flare request - pkgconfig.Datadog().Set("log_level", "info", model.SourceCLI) + pkgconfigsetup.Datadog().Set("log_level", "info", model.SourceCLI) structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerStartFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) // Log level should still be "info" because it was enforced by the user - assert.Equal(t, "info", pkgconfig.Datadog().Get("log_level")) + assert.Equal(t, "info", pkgconfigsetup.Datadog().Get("log_level")) // Source should still be CLI as it has priority over RC - assert.Equal(t, model.SourceCLI, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, model.SourceCLI, pkgconfigsetup.Datadog().GetSource("log_level")) // ----------------- // Test scenario #3: log level is changed by the user DURING the Agent Flare request - pkgconfig.Datadog().UnsetForSource("log_level", model.SourceCLI) + pkgconfigsetup.Datadog().UnsetForSource("log_level", model.SourceCLI) structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerStartFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "debug", pkgconfig.Datadog().Get("log_level")) - assert.Equal(t, model.SourceRC, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "debug", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceRC, pkgconfigsetup.Datadog().GetSource("log_level")) - pkgconfig.Datadog().Set("log_level", "debug", model.SourceCLI) + pkgconfigsetup.Datadog().Set("log_level", "debug", model.SourceCLI) 
structRC.agentConfigUpdateCallback(map[string]state.RawConfig{ "datadog/2/AGENT_CONFIG/layer1/configname": layerEndFlare, "datadog/2/AGENT_CONFIG/configuration_order/configname": configOrder, }, applyEmpty) - assert.Equal(t, "debug", pkgconfig.Datadog().Get("log_level")) - assert.Equal(t, model.SourceCLI, pkgconfig.Datadog().GetSource("log_level")) + assert.Equal(t, "debug", pkgconfigsetup.Datadog().Get("log_level")) + assert.Equal(t, model.SourceCLI, pkgconfigsetup.Datadog().GetSource("log_level")) } diff --git a/comp/remote-config/rcservice/rcserviceimpl/rcservice.go b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go index d212af87f63ed..48226f0dcb916 100644 --- a/comp/remote-config/rcservice/rcserviceimpl/rcservice.go +++ b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter" - "github.com/DataDog/datadog-agent/pkg/config" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/version" @@ -50,7 +50,7 @@ type dependencies struct { // newRemoteConfigServiceOptional conditionally creates and configures a new remote config service, based on whether RC is enabled. 
func newRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservice.Component] { none := optional.NewNoneOption[rcservice.Component]() - if !config.IsRemoteConfigEnabled(deps.Cfg) { + if !pkgconfigsetup.IsRemoteConfigEnabled(deps.Cfg) { return none } diff --git a/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go b/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go index 46fe18b7e0894..a474da3cfeb89 100644 --- a/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go +++ b/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname" "github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf" "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter" - "github.com/DataDog/datadog-agent/pkg/config" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -49,7 +49,7 @@ type dependencies struct { // newMrfRemoteConfigServiceOptional conditionally creates and configures a new MRF remote config service, based on whether RC is enabled. 
func newMrfRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservicemrf.Component] { none := optional.NewNoneOption[rcservicemrf.Component]() - if !config.IsRemoteConfigEnabled(deps.Cfg) || !deps.Cfg.GetBool("multi_region_failover.enabled") { + if !pkgconfigsetup.IsRemoteConfigEnabled(deps.Cfg) || !deps.Cfg.GetBool("multi_region_failover.enabled") { return none } diff --git a/comp/snmptraps/config/config.go b/comp/snmptraps/config/config.go index 97afe10653c77..418f38a107dd0 100644 --- a/comp/snmptraps/config/config.go +++ b/comp/snmptraps/config/config.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/snmptraps/snmplog" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/snmp/gosnmplib" "github.com/DataDog/datadog-agent/pkg/snmp/utils" ) @@ -51,7 +52,7 @@ type TrapsConfig struct { // ReadConfig builds the traps configuration from the Agent configuration. 
func ReadConfig(host string, conf config.Component) (*TrapsConfig, error) { var c = &TrapsConfig{} - err := conf.UnmarshalKey("network_devices.snmp_traps", &c) + err := structure.UnmarshalKey(conf, "network_devices.snmp_traps", c) if err != nil { return nil, err } diff --git a/comp/systray/systray/systrayimpl/doflare.go b/comp/systray/systray/systrayimpl/doflare.go index dc0c8228011f7..229f1f113f71c 100644 --- a/comp/systray/systray/systrayimpl/doflare.go +++ b/comp/systray/systray/systrayimpl/doflare.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/flare/helpers" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -171,14 +171,14 @@ func requestFlare(s *systrayImpl, caseID, customerEmail string) (response string s.log.Debug("Asking the agent to build the flare archive.") c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } - urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, config.Datadog().GetInt("cmd_port")) + urlstr := fmt.Sprintf("https://%v:%v/agent/flare", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Set session token - e = util.SetAuthToken(config.Datadog()) + e = util.SetAuthToken(pkgconfigsetup.Datadog()) if e != nil { return } diff --git a/comp/trace/agent/def/go.mod b/comp/trace/agent/def/go.mod index 3647b991a6acd..3519642a216f9 100644 --- a/comp/trace/agent/def/go.mod +++ b/comp/trace/agent/def/go.mod @@ -6,7 +6,7 @@ replace github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 + 
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 go.opentelemetry.io/collector/pdata v1.9.0 ) diff --git a/comp/trace/agent/def/go.sum b/comp/trace/agent/def/go.sum index 99ed13405a9d9..d7e46e4a7be46 100644 --- a/comp/trace/agent/def/go.sum +++ b/comp/trace/agent/def/go.sum @@ -1,5 +1,5 @@ -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= diff --git a/comp/trace/agent/impl/run.go b/comp/trace/agent/impl/run.go index d88e015dc4ee7..15bc8ed8ea198 100644 --- a/comp/trace/agent/impl/run.go +++ b/comp/trace/agent/impl/run.go @@ -15,8 +15,8 @@ import ( "github.com/DataDog/datadog-agent/comp/trace/config" "github.com/DataDog/datadog-agent/pkg/api/security" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" rc "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/trace/api" tracecfg "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" @@ -40,13 +40,13 @@ func runAgentSidekicks(ag component) error { defer watchdog.LogOnPanic(ag.Statsd) - if err := util.SetupCoreDump(coreconfig.Datadog()); err != nil { + if err := 
util.SetupCoreDump(pkgconfigsetup.Datadog()); err != nil { log.Warnf("Can't setup core dumps: %v, core dumps might not be available after a crash", err) } rand.Seed(time.Now().UTC().UnixNano()) - if coreconfig.IsRemoteConfigEnabled(coreconfig.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { cf, err := newConfigFetcher() if err != nil { ag.telemetryCollector.SendStartupError(telemetry.CantCreateRCCLient, err) @@ -65,7 +65,7 @@ func runAgentSidekicks(ag component) error { // the trace agent. // pkg/config is not a go-module yet and pulls a large chunk of Agent code base with it. Using it within the // trace-agent would largely increase the number of module pulled by OTEL when using the pkg/trace go-module. - if err := apiutil.CreateAndSetAuthToken(coreconfig.Datadog()); err != nil { + if err := apiutil.CreateAndSetAuthToken(pkgconfigsetup.Datadog()); err != nil { log.Errorf("could not set auth token: %s", err) } else { ag.Agent.DebugServer.AddRoute("/config", ag.config.GetConfigHandler()) @@ -106,37 +106,37 @@ func stopAgentSidekicks(cfg config.Component, statsd statsd.ClientInterface) { } func profilingConfig(tracecfg *tracecfg.AgentConfig) *profiling.Settings { - if !coreconfig.Datadog().GetBool("apm_config.internal_profiling.enabled") { + if !pkgconfigsetup.Datadog().GetBool("apm_config.internal_profiling.enabled") { return nil } - endpoint := coreconfig.Datadog().GetString("internal_profiling.profile_dd_url") + endpoint := pkgconfigsetup.Datadog().GetString("internal_profiling.profile_dd_url") if endpoint == "" { endpoint = fmt.Sprintf(profiling.ProfilingURLTemplate, tracecfg.Site) } - tags := coreconfig.Datadog().GetStringSlice("internal_profiling.extra_tags") + tags := pkgconfigsetup.Datadog().GetStringSlice("internal_profiling.extra_tags") tags = append(tags, fmt.Sprintf("version:%s", version.AgentVersion)) return &profiling.Settings{ ProfilingURL: endpoint, // remaining configuration parameters use the top-level 
`internal_profiling` config - Period: coreconfig.Datadog().GetDuration("internal_profiling.period"), + Period: pkgconfigsetup.Datadog().GetDuration("internal_profiling.period"), Service: "trace-agent", - CPUDuration: coreconfig.Datadog().GetDuration("internal_profiling.cpu_duration"), - MutexProfileFraction: coreconfig.Datadog().GetInt("internal_profiling.mutex_profile_fraction"), - BlockProfileRate: coreconfig.Datadog().GetInt("internal_profiling.block_profile_rate"), - WithGoroutineProfile: coreconfig.Datadog().GetBool("internal_profiling.enable_goroutine_stacktraces"), - WithBlockProfile: coreconfig.Datadog().GetBool("internal_profiling.enable_block_profiling"), - WithMutexProfile: coreconfig.Datadog().GetBool("internal_profiling.enable_mutex_profiling"), + CPUDuration: pkgconfigsetup.Datadog().GetDuration("internal_profiling.cpu_duration"), + MutexProfileFraction: pkgconfigsetup.Datadog().GetInt("internal_profiling.mutex_profile_fraction"), + BlockProfileRate: pkgconfigsetup.Datadog().GetInt("internal_profiling.block_profile_rate"), + WithGoroutineProfile: pkgconfigsetup.Datadog().GetBool("internal_profiling.enable_goroutine_stacktraces"), + WithBlockProfile: pkgconfigsetup.Datadog().GetBool("internal_profiling.enable_block_profiling"), + WithMutexProfile: pkgconfigsetup.Datadog().GetBool("internal_profiling.enable_mutex_profiling"), Tags: tags, } } func newConfigFetcher() (rc.ConfigFetcher, error) { - ipcAddress, err := coreconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } // Auth tokens are handled by the rcClient - return rc.NewAgentGRPCConfigFetcher(ipcAddress, coreconfig.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(coreconfig.Datadog()) }) + return rc.NewAgentGRPCConfigFetcher(ipcAddress, pkgconfigsetup.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }) } diff --git a/comp/trace/config/component.go 
b/comp/trace/config/component.go index 3e34c58739461..6a5b4427d6d30 100644 --- a/comp/trace/config/component.go +++ b/comp/trace/config/component.go @@ -20,7 +20,7 @@ import ( "go.uber.org/fx" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -30,7 +30,7 @@ import ( // Component is the component type. type Component interface { // Warnings returns config warnings collected during setup. - Warnings() *config.Warnings + Warnings() *model.Warnings // SetHandler returns a handler for runtime configuration changes. SetHandler() http.Handler diff --git a/comp/trace/config/config.go b/comp/trace/config/config.go index f00889c39a708..3aca85dc07b32 100644 --- a/comp/trace/config/config.go +++ b/comp/trace/config/config.go @@ -17,9 +17,9 @@ import ( coreconfig "github.com/DataDog/datadog-agent/comp/core/config" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pkgconfigutils "github.com/DataDog/datadog-agent/pkg/config/utils" traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -46,7 +46,7 @@ type cfg struct { coreConfig coreconfig.Component // warnings are the warnings generated during setup - warnings *pkgconfig.Warnings + warnings *model.Warnings } // NewConfig is the default constructor for the component, it returns @@ -70,7 +70,7 @@ func NewConfig(deps Dependencies) (Component, error) { return &c, nil } -func (c *cfg) Warnings() *pkgconfig.Warnings { +func (c *cfg) Warnings() *model.Warnings { return c.warnings } @@ -96,7 +96,7 @@ func (c *cfg) SetHandler() http.Handler { if lvl == "warning" { lvl = "warn" } - 
if err := pkgconfigutils.SetLogLevel(lvl, pkgconfig.Datadog(), model.SourceAgentRuntime); err != nil { + if err := pkgconfigutils.SetLogLevel(lvl, pkgconfigsetup.Datadog(), model.SourceAgentRuntime); err != nil { httpError(w, http.StatusInternalServerError, err) return } diff --git a/comp/trace/config/config_mock.go b/comp/trace/config/config_mock.go index 38d2f01fce4ba..d0ef2dfbc848a 100644 --- a/comp/trace/config/config_mock.go +++ b/comp/trace/config/config_mock.go @@ -11,8 +11,8 @@ package config import ( "testing" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // newMock exported mock builder to allow modifying mocks that might be @@ -24,7 +24,7 @@ func newMock(deps Dependencies, _ testing.TB) (Component, error) { } c := cfg{ - warnings: &pkgconfig.Warnings{}, + warnings: &model.Warnings{}, coreConfig: deps.Config, AgentConfig: traceCfg, } diff --git a/comp/trace/config/config_test.go b/comp/trace/config/config_test.go index 093b90295e106..fcf3e598a8fd1 100644 --- a/comp/trace/config/config_test.go +++ b/comp/trace/config/config_test.go @@ -31,8 +31,8 @@ import ( corecomp "github.com/DataDog/datadog-agent/comp/core/config" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -882,9 +882,9 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) if tt.envNew == "DD_APM_IGNORE_RESOURCES" { - assert.Equal(t, []string{"4", "5", "6"}, coreconfig.Datadog().GetStringSlice(tt.key)) + assert.Equal(t, []string{"4", "5", "6"}, pkgconfigsetup.Datadog().GetStringSlice(tt.key)) } else { - assert.Equal(t, "4,5,6", coreconfig.Datadog().GetString(tt.key)) + 
assert.Equal(t, "4,5,6", pkgconfigsetup.Datadog().GetString(tt.key)) } } }) @@ -1504,7 +1504,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-site.com", coreconfig.Datadog().GetString("apm_config.profiling_dd_url")) + assert.Equal(t, "my-site.com", pkgconfigsetup.Datadog().GetString("apm_config.profiling_dd_url")) }) env = "DD_APM_DEBUGGER_DD_URL" @@ -1522,7 +1522,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-site.com", coreconfig.Datadog().GetString("apm_config.debugger_dd_url")) + assert.Equal(t, "my-site.com", pkgconfigsetup.Datadog().GetString("apm_config.debugger_dd_url")) }) env = "DD_APM_DEBUGGER_API_KEY" @@ -1540,7 +1540,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-key", coreconfig.Datadog().GetString("apm_config.debugger_api_key")) + assert.Equal(t, "my-key", pkgconfigsetup.Datadog().GetString("apm_config.debugger_api_key")) }) env = "DD_APM_DEBUGGER_ADDITIONAL_ENDPOINTS" @@ -1562,7 +1562,7 @@ func TestLoadEnv(t *testing.T) { "url2": {"key3"}, } - actual := coreconfig.Datadog().GetStringMapStringSlice("apm_config.debugger_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.debugger_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -1583,7 +1583,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-diagnostics-site.com", coreconfig.Datadog().GetString("apm_config.debugger_diagnostics_dd_url")) + assert.Equal(t, "my-diagnostics-site.com", pkgconfigsetup.Datadog().GetString("apm_config.debugger_diagnostics_dd_url")) }) env = "DD_APM_DEBUGGER_DIAGNOSTICS_API_KEY" @@ -1601,7 +1601,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.Equal(t, "my-diagnostics-key", coreconfig.Datadog().GetString("apm_config.debugger_diagnostics_api_key")) + assert.Equal(t, 
"my-diagnostics-key", pkgconfigsetup.Datadog().GetString("apm_config.debugger_diagnostics_api_key")) }) env = "DD_APM_DEBUGGER_DIAGNOSTICS_ADDITIONAL_ENDPOINTS" @@ -1623,7 +1623,7 @@ func TestLoadEnv(t *testing.T) { "diagnostics-url2": {"diagnostics-key3"}, } - actual := coreconfig.Datadog().GetStringMapStringSlice("apm_config.debugger_diagnostics_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.debugger_diagnostics_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -1643,7 +1643,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "my-site.com", coreconfig.Datadog().GetString("apm_config.symdb_dd_url")) + assert.Equal(t, "my-site.com", pkgconfigsetup.Datadog().GetString("apm_config.symdb_dd_url")) }) env = "DD_APM_SYMDB_API_KEY" @@ -1660,7 +1660,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "my-key", coreconfig.Datadog().GetString("apm_config.symdb_api_key")) + assert.Equal(t, "my-key", pkgconfigsetup.Datadog().GetString("apm_config.symdb_api_key")) }) env = "DD_APM_SYMDB_ADDITIONAL_ENDPOINTS" @@ -1682,7 +1682,7 @@ func TestLoadEnv(t *testing.T) { "url2": {"key3"}, } - actual := coreconfig.Datadog().GetStringMapStringSlice("apm_config.symdb_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.symdb_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -1703,7 +1703,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) - assert.False(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.credit_cards.enabled")) + assert.False(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.credit_cards.enabled")) assert.False(t, cfg.Obfuscation.CreditCards.Enabled) }) @@ 
-1721,7 +1721,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.False(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.credit_cards.luhn")) + assert.False(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.credit_cards.luhn")) }) env = "DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED" @@ -1738,7 +1738,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled")) assert.True(t, cfg.Obfuscation.ES.Enabled) }) @@ -1757,7 +1757,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"client_id", "product_id"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") actualParsed := cfg.Obfuscation.ES.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -1778,7 +1778,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") actualParsed := cfg.Obfuscation.ES.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -1798,7 +1798,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string")) assert.True(t, cfg.Obfuscation.HTTP.RemoveQueryString) }) @@ -1816,7 +1816,7 @@ func 
TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) assert.True(t, cfg.Obfuscation.Memcached.Enabled) }) @@ -1834,7 +1834,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) assert.True(t, cfg.Obfuscation.Memcached.Enabled) }) @@ -1852,9 +1852,9 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled")) assert.True(t, cfg.Obfuscation.Memcached.Enabled) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command")) assert.True(t, cfg.Obfuscation.Memcached.KeepCommand) }) @@ -1872,7 +1872,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled")) assert.True(t, cfg.Obfuscation.Mongo.Enabled) }) @@ -1891,7 +1891,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"document_id", "template_id"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") actualParsed := cfg.Obfuscation.Mongo.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, 
actualParsed) @@ -1912,7 +1912,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") actualParsed := cfg.Obfuscation.Mongo.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -1932,7 +1932,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.enabled")) assert.True(t, cfg.Obfuscation.Redis.Enabled) }) @@ -1950,7 +1950,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args")) assert.True(t, cfg.Obfuscation.Redis.RemoveAllArgs) }) @@ -1968,7 +1968,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces")) assert.True(t, cfg.Obfuscation.RemoveStackTraces) }) @@ -1986,7 +1986,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled")) assert.True(t, cfg.Obfuscation.SQLExecPlan.Enabled) }) @@ -2005,7 +2005,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"id1", "id2"} - actualConfig := 
coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") actualParsed := cfg.Obfuscation.SQLExecPlan.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2026,7 +2026,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") actualParsed := cfg.Obfuscation.SQLExecPlan.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2046,7 +2046,7 @@ func TestLoadEnv(t *testing.T) { cfg := c.Object() assert.NotNil(t, cfg) - assert.True(t, coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled")) + assert.True(t, pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled")) assert.True(t, cfg.Obfuscation.SQLExecPlanNormalize.Enabled) }) @@ -2065,7 +2065,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"id1", "id2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") + actualConfig := pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") actualParsed := cfg.Obfuscation.SQLExecPlanNormalize.KeepValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2086,7 +2086,7 @@ func TestLoadEnv(t *testing.T) { assert.NotNil(t, cfg) expected := []string{"key1", "key2"} - actualConfig := coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") + actualConfig := 
pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") actualParsed := cfg.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues assert.Equal(t, expected, actualConfig) assert.Equal(t, expected, actualParsed) @@ -2111,7 +2111,7 @@ func TestLoadEnv(t *testing.T) { "url1": {"key1", "key2"}, "url2": {"key3"}, } - actual := coreconfig.Datadog().GetStringMapStringSlice("apm_config.profiling_additional_endpoints") + actual := pkgconfigsetup.Datadog().GetStringMapStringSlice("apm_config.profiling_additional_endpoints") if !reflect.DeepEqual(actual, expected) { t.Fatalf("Failed to process env var %s, expected %v and got %v", env, expected, actual) } @@ -2160,7 +2160,7 @@ func TestLoadEnv(t *testing.T) { )) cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "install_id_foo_bar", coreconfig.Datadog().GetString("apm_config.install_id")) + assert.Equal(t, "install_id_foo_bar", pkgconfigsetup.Datadog().GetString("apm_config.install_id")) assert.Equal(t, "install_id_foo_bar", cfg.InstallSignature.InstallID) assert.True(t, cfg.InstallSignature.Found) }) @@ -2178,7 +2178,7 @@ func TestLoadEnv(t *testing.T) { )) cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, "host_injection", coreconfig.Datadog().GetString("apm_config.install_type")) + assert.Equal(t, "host_injection", pkgconfigsetup.Datadog().GetString("apm_config.install_type")) assert.Equal(t, "host_injection", cfg.InstallSignature.InstallType) assert.True(t, cfg.InstallSignature.Found) }) @@ -2196,7 +2196,7 @@ func TestLoadEnv(t *testing.T) { )) cfg := c.Object() assert.NotNil(t, cfg) - assert.Equal(t, int64(1699621675), coreconfig.Datadog().GetInt64("apm_config.install_time")) + assert.Equal(t, int64(1699621675), pkgconfigsetup.Datadog().GetInt64("apm_config.install_time")) assert.Equal(t, int64(1699621675), cfg.InstallSignature.InstallTime) assert.True(t, cfg.InstallSignature.Found) }) @@ -2476,9 +2476,9 @@ func TestGenerateInstallSignature(t *testing.T) 
{ cfg := c.Object() assert.NotNil(t, cfg) - assert.False(t, coreconfig.Datadog().IsSet("apm_config.install_id")) - assert.False(t, coreconfig.Datadog().IsSet("apm_config.install_type")) - assert.False(t, coreconfig.Datadog().IsSet("apm_config.install_time")) + assert.False(t, pkgconfigsetup.Datadog().IsSet("apm_config.install_id")) + assert.False(t, pkgconfigsetup.Datadog().IsSet("apm_config.install_type")) + assert.False(t, pkgconfigsetup.Datadog().IsSet("apm_config.install_time")) assert.True(t, cfg.InstallSignature.Found) installFilePath := filepath.Join(cfgDir, "install.json") diff --git a/comp/trace/config/hostname.go b/comp/trace/config/hostname.go index 294bee77a5b2a..03264e9c005ec 100644 --- a/comp/trace/config/hostname.go +++ b/comp/trace/config/hostname.go @@ -17,7 +17,7 @@ import ( "strings" "time" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -45,12 +45,12 @@ func acquireHostname(c *config.AgentConfig) error { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - ipcAddress, err := coreconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - client, err := grpc.GetDDAgentClient(ctx, ipcAddress, coreconfig.GetIPCPort()) + client, err := grpc.GetDDAgentClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } diff --git a/comp/trace/config/remote.go b/comp/trace/config/remote.go index 5be3e504a7579..28d01e46ae6aa 100644 --- a/comp/trace/config/remote.go +++ b/comp/trace/config/remote.go @@ -10,8 +10,8 @@ package config import ( corecompcfg "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/pkg/api/security" - coreconfig 
"github.com/DataDog/datadog-agent/pkg/config" rc "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/version" @@ -20,7 +20,7 @@ import ( func remote(c corecompcfg.Component, ipcAddress string) (config.RemoteClient, error) { return rc.NewGRPCClient( ipcAddress, - coreconfig.GetIPCPort(), + pkgconfigsetup.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(c) }, rc.WithAgent(rcClientName, version.AgentVersion), rc.WithProducts(state.ProductAPMSampling, state.ProductAgentConfig), diff --git a/comp/trace/config/setup.go b/comp/trace/config/setup.go index 86f192e10fdfb..df0e554d76262 100644 --- a/comp/trace/config/setup.go +++ b/comp/trace/config/setup.go @@ -25,9 +25,10 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/otelcol/otlp" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" @@ -98,17 +99,17 @@ func prepareConfig(c corecompcfg.Component) (*config.AgentConfig, error) { cfg.LogFilePath = DefaultLogFilePath } - ipcAddress, err := coreconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } orch := fargate.GetOrchestrator() // Needs to be after loading config, because it relies on feature auto-detection cfg.FargateOrchestrator = 
config.FargateOrchestratorName(orch) - if p := coreconfig.Datadog().GetProxies(); p != nil { + if p := pkgconfigsetup.Datadog().GetProxies(); p != nil { cfg.Proxy = httputils.GetProxyTransportFunc(p, c) } - if coreconfig.IsRemoteConfigEnabled(coreConfigObject) && coreConfigObject.GetBool("remote_configuration.apm_sampling.enabled") { + if pkgconfigsetup.IsRemoteConfigEnabled(coreConfigObject) && coreConfigObject.GetBool("remote_configuration.apm_sampling.enabled") { client, err := remote(c, ipcAddress) if err != nil { log.Errorf("Error when subscribing to remote config management %v", err) @@ -129,10 +130,10 @@ func containerTagsFunc(cid string) ([]string, error) { // The format for cfgKey should be a map which has the URL as a key and one or // more API keys as an array value. func appendEndpoints(endpoints []*config.Endpoint, cfgKey string) []*config.Endpoint { - if !coreconfig.Datadog().IsSet(cfgKey) { + if !pkgconfigsetup.Datadog().IsSet(cfgKey) { return endpoints } - for url, keys := range coreconfig.Datadog().GetStringMapStringSlice(cfgKey) { + for url, keys := range pkgconfigsetup.Datadog().GetStringMapStringSlice(cfgKey) { if len(keys) == 0 { log.Errorf("'%s' entries must have at least one API key present", cfgKey) continue @@ -149,7 +150,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Endpoints = []*config.Endpoint{{}} } if core.IsSet("api_key") { - c.Endpoints[0].APIKey = utils.SanitizeAPIKey(coreconfig.Datadog().GetString("api_key")) + c.Endpoints[0].APIKey = utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString("api_key")) } if core.IsSet("hostname") { c.Hostname = core.GetString("hostname") @@ -166,7 +167,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Endpoints[0].Host = host } } else { - c.Endpoints[0].Host = utils.GetMainEndpoint(coreconfig.Datadog(), apiEndpointPrefix, "apm_config.apm_dd_url") + c.Endpoints[0].Host = utils.GetMainEndpoint(pkgconfigsetup.Datadog(), 
apiEndpointPrefix, "apm_config.apm_dd_url") } c.Endpoints = appendEndpoints(c.Endpoints, "apm_config.additional_endpoints") @@ -197,11 +198,11 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if core.IsSet("apm_config.enabled") { c.Enabled = core.GetBool("apm_config.enabled") } - if coreconfig.Datadog().IsSet("apm_config.log_file") { - c.LogFilePath = coreconfig.Datadog().GetString("apm_config.log_file") + if pkgconfigsetup.Datadog().IsSet("apm_config.log_file") { + c.LogFilePath = pkgconfigsetup.Datadog().GetString("apm_config.log_file") } - if env := utils.GetTraceAgentDefaultEnv(coreconfig.Datadog()); env != "" { + if env := utils.GetTraceAgentDefaultEnv(pkgconfigsetup.Datadog()); env != "" { c.DefaultEnv = env } @@ -312,7 +313,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if k := "apm_config.replace_tags"; core.IsSet(k) { rt := make([]*config.ReplaceRule, 0) - if err := coreconfig.Datadog().UnmarshalKey(k, &rt); err != nil { + if err := structure.UnmarshalKey(core, k, &rt); err != nil { log.Errorf("Bad format for %q it should be of the form '[{\"name\": \"tag_name\",\"pattern\":\"pattern\",\"repl\":\"replace_str\"}]', error: %v", "apm_config.replace_tags", err) } else { err := compileReplaceRules(rt) @@ -346,8 +347,8 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.GUIPort = core.GetString("GUI_port") var grpcPort int - if otlp.IsEnabled(coreconfig.Datadog()) { - grpcPort = core.GetInt(coreconfig.OTLPTracePort) + if otlp.IsEnabled(pkgconfigsetup.Datadog()) { + grpcPort = core.GetInt(pkgconfigsetup.OTLPTracePort) } // We use a noop set of telemetry settings. This silences all warnings and metrics from the attributes translator. 
@@ -361,7 +362,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error BindHost: c.ReceiverHost, GRPCPort: grpcPort, MaxRequestBytes: c.MaxRequestBytes, - SpanNameRemappings: coreconfig.Datadog().GetStringMapString("otlp_config.traces.span_name_remappings"), + SpanNameRemappings: pkgconfigsetup.Datadog().GetStringMapString("otlp_config.traces.span_name_remappings"), SpanNameAsResourceName: core.GetBool("otlp_config.traces.span_name_as_resource_name"), ProbabilisticSampling: core.GetFloat64("otlp_config.traces.probabilistic_sampler.sampling_percentage"), AttributesTranslator: attributesTranslator, @@ -384,7 +385,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if core.GetBool("apm_config.telemetry.enabled") { c.TelemetryConfig.Enabled = true c.TelemetryConfig.Endpoints = []*config.Endpoint{{ - Host: utils.GetMainEndpoint(coreconfig.Datadog(), config.TelemetryEndpointPrefix, "apm_config.telemetry.dd_url"), + Host: utils.GetMainEndpoint(pkgconfigsetup.Datadog(), config.TelemetryEndpointPrefix, "apm_config.telemetry.dd_url"), APIKey: c.Endpoints[0].APIKey, }} c.TelemetryConfig.Endpoints = appendEndpoints(c.TelemetryConfig.Endpoints, "apm_config.telemetry.additional_endpoints") @@ -392,7 +393,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Obfuscation = new(config.ObfuscationConfig) if core.IsSet("apm_config.obfuscation") { var o config.ObfuscationConfig - err := coreconfig.Datadog().UnmarshalKey("apm_config.obfuscation", &o) + err := pkgconfigsetup.Datadog().UnmarshalKey("apm_config.obfuscation", &o) if err == nil { c.Obfuscation = &o if o.RemoveStackTraces { @@ -413,7 +414,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.Obfuscation.Redis.Enabled = true c.Obfuscation.CreditCards.Enabled = true - // TODO(x): There is an issue with coreconfig.Datadog().IsSet("apm_config.obfuscation"), probably coming from Viper, + // TODO(x): 
There is an issue with pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation"), probably coming from Viper, // where it returns false even is "apm_config.obfuscation.credit_cards.enabled" is set via an environment // variable, so we need a temporary workaround by specifically setting env. var. accessible fields. if core.IsSet("apm_config.obfuscation.credit_cards.enabled") { @@ -422,71 +423,71 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if core.IsSet("apm_config.obfuscation.credit_cards.luhn") { c.Obfuscation.CreditCards.Luhn = core.GetBool("apm_config.obfuscation.credit_cards.luhn") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.elasticsearch.enabled") { - c.Obfuscation.ES.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.enabled") { + c.Obfuscation.ES.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.elasticsearch.keep_values") { - c.Obfuscation.ES.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.keep_values") { + c.Obfuscation.ES.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") { - c.Obfuscation.ES.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") { + c.Obfuscation.ES.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.opensearch.enabled") { - 
c.Obfuscation.OpenSearch.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.opensearch.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.enabled") { + c.Obfuscation.OpenSearch.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.opensearch.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.opensearch.keep_values") { - c.Obfuscation.OpenSearch.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.keep_values") { + c.Obfuscation.OpenSearch.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.opensearch.obfuscate_sql_values") { - c.Obfuscation.OpenSearch.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.obfuscate_sql_values") { + c.Obfuscation.OpenSearch.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.http.remove_query_string") { - c.Obfuscation.HTTP.RemoveQueryString = coreconfig.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.http.remove_query_string") { + c.Obfuscation.HTTP.RemoveQueryString = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.http.remove_paths_with_digits") { - c.Obfuscation.HTTP.RemovePathDigits = coreconfig.Datadog().GetBool("apm_config.obfuscation.http.remove_paths_with_digits") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.http.remove_paths_with_digits") { + c.Obfuscation.HTTP.RemovePathDigits = 
pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_paths_with_digits") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.memcached.enabled") { - c.Obfuscation.Memcached.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.memcached.enabled") { + c.Obfuscation.Memcached.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.memcached.keep_command") { - c.Obfuscation.Memcached.KeepCommand = coreconfig.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.memcached.keep_command") { + c.Obfuscation.Memcached.KeepCommand = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.mongodb.enabled") { - c.Obfuscation.Mongo.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.enabled") { + c.Obfuscation.Mongo.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.mongodb.keep_values") { - c.Obfuscation.Mongo.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.keep_values") { + c.Obfuscation.Mongo.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.mongodb.obfuscate_sql_values") { - c.Obfuscation.Mongo.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.obfuscate_sql_values") { + 
c.Obfuscation.Mongo.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.redis.enabled") { - c.Obfuscation.Redis.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.redis.enabled") { + c.Obfuscation.Redis.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.redis.remove_all_args") { - c.Obfuscation.Redis.RemoveAllArgs = coreconfig.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.redis.remove_all_args") { + c.Obfuscation.Redis.RemoveAllArgs = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.remove_stack_traces") { - c.Obfuscation.RemoveStackTraces = coreconfig.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.remove_stack_traces") { + c.Obfuscation.RemoveStackTraces = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.enabled") { - c.Obfuscation.SQLExecPlan.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.enabled") { + c.Obfuscation.SQLExecPlan.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.keep_values") { - c.Obfuscation.SQLExecPlan.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") + if 
pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.keep_values") { + c.Obfuscation.SQLExecPlan.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") { - c.Obfuscation.SQLExecPlan.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") { + c.Obfuscation.SQLExecPlan.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.enabled") { - c.Obfuscation.SQLExecPlanNormalize.Enabled = coreconfig.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.enabled") { + c.Obfuscation.SQLExecPlanNormalize.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") { - c.Obfuscation.SQLExecPlanNormalize.KeepValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") + if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") { + c.Obfuscation.SQLExecPlanNormalize.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") } - if coreconfig.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") { - c.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues = coreconfig.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") + if 
pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") { + c.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") } } @@ -503,8 +504,8 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error } } - if coreconfig.Datadog().IsSet("apm_config.filter_tags_regex.require") { - tags := coreconfig.Datadog().GetStringSlice("apm_config.filter_tags_regex.require") + if pkgconfigsetup.Datadog().IsSet("apm_config.filter_tags_regex.require") { + tags := pkgconfigsetup.Datadog().GetStringSlice("apm_config.filter_tags_regex.require") for _, tag := range tags { splitTag := splitTagRegex(tag) if containsKey(c.RequireTags, splitTag.K) { @@ -514,8 +515,8 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.RequireTagsRegex = append(c.RequireTagsRegex, splitTag) } } - if coreconfig.Datadog().IsSet("apm_config.filter_tags_regex.reject") { - tags := coreconfig.Datadog().GetStringSlice("apm_config.filter_tags_regex.reject") + if pkgconfigsetup.Datadog().IsSet("apm_config.filter_tags_regex.reject") { + tags := pkgconfigsetup.Datadog().GetStringSlice("apm_config.filter_tags_regex.reject") for _, tag := range tags { splitTag := splitTagRegex(tag) if containsKey(c.RejectTags, splitTag.K) { @@ -531,7 +532,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error "apm_config.trace_writer": c.TraceWriter, "apm_config.stats_writer": c.StatsWriter, } { - if err := coreconfig.Datadog().UnmarshalKey(key, cfg); err != nil { + if err := pkgconfigsetup.Datadog().UnmarshalKey(key, cfg); err != nil { log.Errorf("Error reading writer config %q: %v", key, err) } } @@ -551,7 +552,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error // undocumented deprecated if core.IsSet("apm_config.analyzed_rate_by_service") { rateByService := 
make(map[string]float64) - if err := coreconfig.Datadog().UnmarshalKey("apm_config.analyzed_rate_by_service", &rateByService); err != nil { + if err := pkgconfigsetup.Datadog().UnmarshalKey("apm_config.analyzed_rate_by_service", &rateByService); err != nil { return err } c.AnalyzedRateByServiceLegacy = rateByService @@ -588,7 +589,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error } c.Site = core.GetString("site") if c.Site == "" { - c.Site = coreconfig.DefaultSite + c.Site = pkgconfigsetup.DefaultSite } if k := "use_dogstatsd"; core.IsSet(k) { c.StatsdEnabled = core.GetBool(k) @@ -661,7 +662,7 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error // backwards compatibility with Agent 5. These should eventually be removed. // TODO(x): remove them gradually or fully in a future release. func loadDeprecatedValues(c *config.AgentConfig) error { - cfg := coreconfig.Datadog() + cfg := pkgconfigsetup.Datadog() if cfg.IsSet("apm_config.api_key") { c.Endpoints[0].APIKey = utils.SanitizeAPIKey(cfg.GetString("apm_config.api_key")) } @@ -842,7 +843,7 @@ func SetHandler() http.Handler { if lvl == "warning" { lvl = "warn" } - if err := utils.SetLogLevel(lvl, coreconfig.Datadog(), model.SourceAgentRuntime); err != nil { + if err := utils.SetLogLevel(lvl, pkgconfigsetup.Datadog(), model.SourceAgentRuntime); err != nil { httpError(w, http.StatusInternalServerError, err) return } diff --git a/devenv/scripts/Install-DevEnv.ps1 b/devenv/scripts/Install-DevEnv.ps1 index d35e0df5b7a65..c9fff932ce2a7 100644 --- a/devenv/scripts/Install-DevEnv.ps1 +++ b/devenv/scripts/Install-DevEnv.ps1 @@ -45,7 +45,7 @@ Write-Host -ForegroundColor Yellow -BackgroundColor DarkGreen '- Installing Gola $ErrorActionPreference = 'Stop' $ProgressPreference = 'SilentlyContinue' -$go_version = "1.22.6" +$go_version = "1.22.7" Write-Host -ForegroundColor Green "Installing go $go_version" $gozip = 
"https://dl.google.com/go/go$go_version.windows-amd64.zip" diff --git a/docs/dev/agent_dev_env.md b/docs/dev/agent_dev_env.md index a4b41fc748379..e41f571d22f63 100644 --- a/docs/dev/agent_dev_env.md +++ b/docs/dev/agent_dev_env.md @@ -138,7 +138,7 @@ This procedure ensures you not only get the correct version of `invoke`, but als ### Golang -You must [install Golang](https://golang.org/doc/install) version `1.22.6` or +You must [install Golang](https://golang.org/doc/install) version `1.22.7` or higher. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. diff --git a/docs/public/setup.md b/docs/public/setup.md index 5f3b8d52f362c..dff2dea51a1c0 100644 --- a/docs/public/setup.md +++ b/docs/public/setup.md @@ -101,7 +101,7 @@ This procedure ensures you not only get the correct version of `invoke`, but als ### Golang -You must [install Golang](https://golang.org/doc/install) version `1.22.6` or higher. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. +You must [install Golang](https://golang.org/doc/install) version `1.22.7` or higher. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. !!! note Versions of Golang that aren't an exact match to the version specified in our build images (see e.g. [here](https://github.com/DataDog/datadog-agent-buildimages/blob/c025473ee467ee6d884d532e4c12c7d982ce8fe1/circleci/Dockerfile#L43)) may not be able to build the agent and/or the [rtloader](https://github.com/DataDog/datadog-agent/tree/main/rtloader) binary properly. diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000000..e4807a8251de5 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,11 @@ +# Datadog Agent Example Configurations + +This is a collection of example `datadog-agent.yaml` files to get you started with Datadog. 
Consult the +[config_template](https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml) for a full list of configuration options. + +To use these add your `api_key` and if necessary update the `site`. Add an `env` +tag to the `env:` key and any other required tags. If these parameters are set +with environment variables, they can be commented out. + +Add any other configuration settings needed, then you can copy the file to `/etc/datadog-agent/datadog.yaml` +for Linux systems or `%ProgramData%\Datadog\datadog.yaml` for Windows and restart the Datadog Agent. \ No newline at end of file diff --git a/examples/agent_apm.yaml b/examples/agent_apm.yaml new file mode 100644 index 0000000000000..df644494b8b76 --- /dev/null +++ b/examples/agent_apm.yaml @@ -0,0 +1,20 @@ +## Minimal configuration to enable Datadog to ship metrics and traces. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +apm_config: + enabled: true \ No newline at end of file diff --git a/examples/agent_apm_liveprocess.yaml b/examples/agent_apm_liveprocess.yaml new file mode 100644 index 0000000000000..b7c53dee72735 --- /dev/null +++ b/examples/agent_apm_liveprocess.yaml @@ -0,0 +1,26 @@ +## Minimal configuration to enable Datadog to ship metrics, traces and Live Processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. 
Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +logs_enabled: true + +process_config: + process_collection: + enabled: true + +apm_config: + enabled: true \ No newline at end of file diff --git a/examples/agent_apm_logs.yaml b/examples/agent_apm_logs.yaml new file mode 100644 index 0000000000000..b264ca42fa1dd --- /dev/null +++ b/examples/agent_apm_logs.yaml @@ -0,0 +1,22 @@ +## Minimal configuration to enable Datadog to ship metrics, logs and traces. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +apm_config: + enabled: true + +logs_enabled: true \ No newline at end of file diff --git a/examples/agent_apm_logs_live_process.yaml b/examples/agent_apm_logs_live_process.yaml new file mode 100644 index 0000000000000..c1773a195577d --- /dev/null +++ b/examples/agent_apm_logs_live_process.yaml @@ -0,0 +1,26 @@ +## Minimal configuration to enable Datadog to ship metrics, logs, APM traces and live processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. 
+env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +apm_config: + enabled: true + +logs_enabled: true + +process_config: + process_collection: + enabled: true \ No newline at end of file diff --git a/examples/agent_liveprocess.yaml b/examples/agent_liveprocess.yaml new file mode 100644 index 0000000000000..abe7f780282e1 --- /dev/null +++ b/examples/agent_liveprocess.yaml @@ -0,0 +1,24 @@ +## Minimal configuration to enable Datadog to ship metrics and live processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +process_config: + process_collection: + enabled: true + +apm_config: + enabled: false \ No newline at end of file diff --git a/examples/agent_logs.yaml b/examples/agent_logs.yaml new file mode 100644 index 0000000000000..e632436f43ac7 --- /dev/null +++ b/examples/agent_logs.yaml @@ -0,0 +1,22 @@ +## Minimal configuration to enable Datadog to ship metrics and logs. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## List of host tags. 
Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +logs_enabled: true + +apm_config: + enabled: false \ No newline at end of file diff --git a/examples/agent_logs_liveprocess.yaml b/examples/agent_logs_liveprocess.yaml new file mode 100644 index 0000000000000..87e9bdf502c12 --- /dev/null +++ b/examples/agent_logs_liveprocess.yaml @@ -0,0 +1,26 @@ +## Minimal configuration to enable Datadog to ship metrics, logs and live processes. + +## @env DD_API_KEY - string - required +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +logs_enabled: true + +process_config: + process_collection: + enabled: true + +apm_config: + enabled: false \ No newline at end of file diff --git a/examples/agent_minimal.yaml b/examples/agent_minimal.yaml new file mode 100644 index 0000000000000..4ae15e33bf5eb --- /dev/null +++ b/examples/agent_minimal.yaml @@ -0,0 +1,19 @@ +## Minimal configuration to enable Datadog to ship metrics only. + +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. 
+tags: + - example:changeme +# - : + +apm_config: + enabled: false diff --git a/examples/agent_otel.yaml b/examples/agent_otel.yaml new file mode 100644 index 0000000000000..a19294ffc98a6 --- /dev/null +++ b/examples/agent_otel.yaml @@ -0,0 +1,27 @@ +## Minimal configuration to enable Datadog to ship metrics and enable OpenTelemetry. + +## The Datadog API key used by your Agent to submit metrics and events to Datadog. +api_key: + +## The environment name where the agent is running. Attached in-app to every +## metric, event, log, trace, and service check emitted by this Agent. +env: + +## The site of the Datadog intake to send Agent data to. +site: datadoghq.com + +## List of host tags. Attached in-app to every metric, event, log, trace, and service check emitted by this Agent. +tags: + - example:changeme +# - : + +otlp_config: + receiver: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 + +apm_config: + enabled: false diff --git a/flakes.yaml b/flakes.yaml index 6887638a60fc5..a3d6f71ed04a2 100644 --- a/flakes.yaml +++ b/flakes.yaml @@ -11,7 +11,6 @@ test/new-e2e/tests/containers: - TestECSSuite/TestCPU/metric___container.cpu.usage{^ecs_container_name:stress-ng$} - TestEKSSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - TestKindSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - - TestECSSuite test/new-e2e/tests/installer: - TestPackages/upgrade_scenario_ubuntu_22_04_x86_64/TestUpgradeSuccessful diff --git a/go.mod b/go.mod index a024f6d9ea5a8..6b3024459c022 100644 --- a/go.mod +++ b/go.mod @@ -47,8 +47,6 @@ replace ( github.com/DataDog/datadog-agent/comp/netflow/payload => ./comp/netflow/payload github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def => ./comp/otelcol/collector-contrib/def github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl => ./comp/otelcol/collector-contrib/impl - 
github.com/DataDog/datadog-agent/comp/otelcol/configstore/def => ./comp/otelcol/configstore/def - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl => ./comp/otelcol/configstore/impl github.com/DataDog/datadog-agent/comp/otelcol/converter/def => ./comp/otelcol/converter/def github.com/DataDog/datadog-agent/comp/otelcol/converter/impl => ./comp/otelcol/converter/impl github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def => ./comp/otelcol/ddflareextension/def/ @@ -59,6 +57,7 @@ replace ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter => ./comp/otelcol/otlp/components/exporter/logsagentexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter => ./comp/otelcol/otlp/components/exporter/serializerexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient => ./comp/otelcol/otlp/components/metricsclient + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor => ./comp/otelcol/otlp/components/processor/infraattributesprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor => ./comp/otelcol/otlp/components/statsprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil => ./comp/otelcol/otlp/testutil github.com/DataDog/datadog-agent/comp/serializer/compression => ./comp/serializer/compression @@ -74,6 +73,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/model => ./pkg/config/model/ github.com/DataDog/datadog-agent/pkg/config/remote => ./pkg/config/remote/ github.com/DataDog/datadog-agent/pkg/config/setup => ./pkg/config/setup/ + github.com/DataDog/datadog-agent/pkg/config/structure => ./pkg/config/structure/ github.com/DataDog/datadog-agent/pkg/config/utils => ./pkg/config/utils/ github.com/DataDog/datadog-agent/pkg/errors => ./pkg/errors github.com/DataDog/datadog-agent/pkg/gohai => ./pkg/gohai @@ -149,21 +149,21 @@ require ( 
github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 github.com/DataDog/datadog-go/v5 v5.5.0 github.com/DataDog/datadog-operator v1.8.0-rc.1 github.com/DataDog/ebpf-manager v0.7.1 github.com/DataDog/gopsutil v1.2.2 - github.com/DataDog/nikos v1.12.4 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 + github.com/DataDog/nikos v1.12.5 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/DataDog/sketches-go v1.4.6 github.com/DataDog/viper v1.13.5 github.com/DataDog/watermarkpodautoscaler v0.6.1 - github.com/DataDog/zstd v1.5.5 + github.com/DataDog/zstd v1.5.6 github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect github.com/Masterminds/semver/v3 v3.3.0 github.com/Masterminds/sprig/v3 v3.3.0 // indirect @@ -190,7 +190,7 @@ require ( github.com/coreos/go-semver v0.3.1 github.com/coreos/go-systemd v22.5.0+incompatible github.com/cri-o/ocicni v0.4.3 - github.com/cyphar/filepath-securejoin v0.3.1 + github.com/cyphar/filepath-securejoin v0.3.2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/docker/docker v25.0.6+incompatible github.com/docker/go-connections v0.5.0 @@ -293,7 +293,7 @@ require ( 
go.opentelemetry.io/collector/processor/batchprocessor v0.104.0 go.opentelemetry.io/collector/receiver v0.104.0 go.opentelemetry.io/collector/receiver/otlpreceiver v0.104.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/dig v1.18.0 @@ -310,8 +310,8 @@ require ( golang.org/x/time v0.6.0 golang.org/x/tools v0.25.0 golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 - google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/grpc v1.66.1 + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.2 google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a google.golang.org/protobuf v1.34.2 gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 @@ -338,10 +338,10 @@ require ( ) require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.8 // indirect - cloud.google.com/go/storage v1.41.0 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.2.0 // indirect + cloud.google.com/go/storage v1.43.0 // indirect code.cloudfoundry.org/cfhttp/v2 v2.0.0 // indirect code.cloudfoundry.org/clock v1.0.0 // indirect code.cloudfoundry.org/consuladapter v0.0.0-20200131002136-ac1daf48ba97 // indirect @@ -354,7 +354,7 @@ require ( github.com/BurntSushi/toml v1.3.2 // indirect github.com/DataDog/aptly v1.5.3 // indirect github.com/DataDog/extendeddaemonset v0.10.0-rc.4 // indirect - github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect + github.com/DataDog/go-tuf v1.1.0-0.5.2 github.com/DataDog/gostackparse v0.7.0 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect 
github.com/DisposaBoy/JsonConfigReader v0.0.0-20201129172854-99cf318d67e7 // indirect @@ -428,7 +428,7 @@ require ( github.com/google/licenseclassifier/v2 v2.0.0 // indirect github.com/google/uuid v1.6.0 github.com/google/wire v0.6.0 // indirect - github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -553,27 +553,27 @@ require ( go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 // indirect go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20220522111935-c3bc4116dcd1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/consumer v0.104.0 + go.opentelemetry.io/collector/consumer v0.104.0 // indirect go.opentelemetry.io/collector/featuregate v1.11.0 - go.opentelemetry.io/collector/semconv v0.104.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/collector/semconv v0.104.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 + go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel/trace v1.29.0 go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/crypto v0.27.0 // indirect golang.org/x/mod v0.21.0 - golang.org/x/oauth2 v0.21.0 // 
indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/term v0.24.0 // indirect gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/api v0.185.0 // indirect + google.golang.org/api v0.197.0 // indirect google.golang.org/appengine v1.6.8 // indirect gopkg.in/Knetic/govaluate.v3 v3.0.0 // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect @@ -599,6 +599,7 @@ require ( github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.56.2 github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 github.com/containerd/containerd/api v1.7.19 github.com/containerd/errdefs v0.1.0 github.com/distribution/reference v0.6.0 @@ -620,7 +621,7 @@ require ( github.com/DataDog/datadog-agent/comp/core/log/impl v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/log/impl-trace v0.0.0-00010101000000-000000000000 github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.2 + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/status/statusimpl v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/tagger/types v0.56.2 @@ -632,8 +633,6 @@ require ( github.com/DataDog/datadog-agent/comp/netflow/payload v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/configstore/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/otelcol/configstore/impl v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 
github.com/DataDog/datadog-agent/comp/otelcol/converter/impl v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.56.0-rc.3 @@ -642,6 +641,7 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/trace/agent/def v0.56.0-rc.3 @@ -650,12 +650,12 @@ require ( github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/api v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.2 - github.com/DataDog/datadog-agent/pkg/config/env v0.56.2 + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel - github.com/DataDog/datadog-agent/pkg/config/model v0.56.2 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/config/remote v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.2 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/errors v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 @@ -685,23 +685,23 @@ require ( github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 
github.com/DataDog/datadog-agent/pkg/util/containers/image v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.2 - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 github.com/DataDog/datadog-agent/pkg/util/flavor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/grpc v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log/setup v1.0.0 - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/system v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 github.com/DataDog/datadog-agent/pkg/util/tagger v0.56.2 // indirect github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/uuid v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.2 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/DataDog/go-libddwaf/v3 v3.3.0 github.com/DataDog/go-sqllexer v0.0.14 @@ -725,7 +725,7 @@ require ( github.com/judwhite/go-svc v1.2.1 github.com/kr/pretty v0.3.1 // todo: update datadog connector with breaking changes from https://github.com/DataDog/datadog-agent/pull/26347. 
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.104.0 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 github.com/prometheus-community/pro-bing v0.4.1 github.com/rickar/props v1.0.0 @@ -750,8 +750,8 @@ require ( ) require ( - cloud.google.com/go/auth v0.5.1 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect code.cloudfoundry.org/go-diodes v0.0.0-20240604201846-c756bfed2ed3 // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible // indirect code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 // indirect @@ -771,11 +771,11 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.2 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.17.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0 // indirect github.com/Intevation/gval v1.3.0 // indirect github.com/Intevation/jsonpath v0.2.1 // indirect @@ -821,7 +821,7 @@ require ( github.com/go-openapi/spec v0.20.14 // 
indirect github.com/go-resty/resty/v2 v2.12.0 // indirect github.com/go-test/deep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/goccy/go-yaml v1.11.0 // indirect @@ -832,8 +832,8 @@ require ( github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/gophercloud/gophercloud v1.8.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect github.com/gorilla/websocket v1.5.1 // indirect @@ -975,8 +975,6 @@ require ( go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 // indirect go.opentelemetry.io/collector/filter v0.104.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect - go.opentelemetry.io/collector/pdata/testdata v0.104.0 // indirect go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.104.0 // indirect go.opentelemetry.io/collector/receiver/nopreceiver v0.104.0 // indirect go.opentelemetry.io/contrib/config v0.7.0 // indirect @@ -990,8 +988,8 @@ require ( golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20240827150818-7e3bb234dfed // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gotest.tools/v3 v3.5.1 // indirect @@ -1015,6 +1013,8 @@ replace github.com/vishvananda/netlink => github.com/DataDog/netlink v1.0.1-0.20 // Cannot be upgraded to 0.26 without lossing CRI API v1alpha2 replace k8s.io/cri-api => k8s.io/cri-api v0.25.5 +replace github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector => github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.103.0 + // Use custom Trivy fork to reduce binary size // Pull in replacements needed by upstream Trivy replace ( diff --git a/go.sum b/go.sum index cc9e6112e673b..a44be83c9d819 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,8 @@ cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFO cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -100,10 +100,10 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo cloud.google.com/go/assuredworkloads v1.8.0/go.mod 
h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= -cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -184,8 +184,8 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod 
h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -320,8 +320,8 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGE cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= -cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -354,6 +354,8 @@ cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeN cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= 
cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -540,8 +542,8 @@ cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeL cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= -cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -728,20 +730,20 @@ github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYx github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= github.com/DataDog/netlink v1.0.1-0.20240223195320-c7a4f832a3d1 h1:HnvrdC79xJ+RPxTQdhDDwxblTNWhJUKeyTPsuyaOnxQ= github.com/DataDog/netlink v1.0.1-0.20240223195320-c7a4f832a3d1/go.mod h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A= -github.com/DataDog/nikos v1.12.4 h1:UBo2v1Std4GvPHalKs22+1kcM4tXvMACREe5k4QMzf0= -github.com/DataDog/nikos v1.12.4/go.mod h1:ovDmd9Jz/ZJwP39wgQmFUXZZzeDYJtFgsY7K1OPstWk= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= 
-github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.17.0 h1:K6SIJy7ECWdQMWJMo60oJNvduOeIJ/t/6VDbHWDd/oM= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.17.0/go.mod h1:L7QrJ1emQ+rcXAs2JW5b+eu72G9A4yku35Ia4kLrdcg= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/nikos v1.12.5 h1:O4kdkUkL0nws9+rdeLKQLIN5+lC542eKeUY1C5utQBA= +github.com/DataDog/nikos v1.12.5/go.mod h1:2KZ5BIt/7gYYg/hI8F1U+Urq+LfeZOeDbFnrrBmoYVg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod 
h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/trivy v0.0.0-20240729123106-0d652d4a5630 h1:GA5L0gJsKLmuzGRGOqC3koKod7fmSL6C8GEDrBceJBI= @@ -752,8 +754,8 @@ github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950 h1:2imDajw3V85w1iqH github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950/go.mod h1:FU+7qU8DeQQgSZDmmThMJi93kPkLFgy0oVAcLxurjIk= github.com/DataDog/watermarkpodautoscaler v0.6.1 h1:KEj10Cm8wO/36lEOgqjgDfIMMpMPReY/+bDacWe7Adw= github.com/DataDog/watermarkpodautoscaler v0.6.1/go.mod h1:iaEXqganxe2zHi3pyJFuf59X8srmGFoikPtjWxMW9mU= -github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= -github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd 
v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f/go.mod h1:oXfOhM/Kr8OvqS6tVqJwxPBornV0yrx3bc+l0BDr7PQ= github.com/Datadog/dublin-traceroute v0.0.1 h1:xh5xfA25gjrpRK72lQotL79S4vAvxpc4UOQdR22p2IY= @@ -1120,8 +1122,8 @@ github.com/cri-o/ocicni v0.4.3 h1:BfnrZrtr/F+o+b+yOguB1o6I4OzjieF3k3dN4MrsCJA= github.com/cri-o/ocicni v0.4.3/go.mod h1:RzIKSln5AT65hyyfGj3/gsfCpjiY1Y6rVK51Uc5YNzk= github.com/csaf-poc/csaf_distribution/v3 v3.0.0 h1:ob9+Fmpff0YWgTP3dYaw7G2hKQ9cegh9l3zksc+q3sM= github.com/csaf-poc/csaf_distribution/v3 v3.0.0/go.mod h1:uilCTiNKivq+6zrDvjtZaUeLk70oe21iwKivo6ILwlQ= -github.com/cyphar/filepath-securejoin v0.3.1 h1:1V7cHiaW+C+39wEfpH6XlLBQo3j/PciWFrgfCLS8XrE= -github.com/cyphar/filepath-securejoin v0.3.1/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= +github.com/cyphar/filepath-securejoin v0.3.2 h1:QhZu5AxQ+o1XZH0Ye05YzvJ0kAdK6VQc0z9NNMek7gc= +github.com/cyphar/filepath-securejoin v0.3.2/go.mod h1:F7i41x/9cBF7lzCrVsYs9fuzwRZm4NQsGTBdpp6mETc= github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f h1:IFB3J+f0m2e7nZjPTqvzLrrb6dVU6BQrsGx/7Tmm8Xk= github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f/go.mod h1:cj9/QmD9N3OZnKQMp+/DvdV+ym3HyIkd4e+F0ZM3ZGs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -1364,8 +1366,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 
v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -1522,8 +1524,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1538,8 +1540,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 
h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1554,8 +1556,8 @@ github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38 github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -2739,16 +2741,16 @@ go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5 go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= go.opentelemetry.io/contrib/config v0.7.0 
h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/bridge/opencensus v1.27.0 
h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= @@ -2767,14 +2769,14 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod 
h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -3041,8 +3043,8 @@ golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4 golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3408,8 +3410,8 @@ google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjY google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.185.0 h1:ENEKk1k4jW8SmmaT6RE+ZasxmxezCrD5Vw4npvr+pAU= -google.golang.org/api v0.185.0/go.mod h1:HNfvIkJGlgrIlrbYkAm9W9IdkmKZjOTVh33YltygGbg= +google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3559,21 +3561,21 @@ google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 h1:CUiCqkPw1nNrNQzCCG4WA65m0nAmQiwXHpub3dNyruU= -google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4/go.mod h1:EvuUDCulqGgV80RvP1BHuom+smhX4qtlhnNatHuroGQ= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= 
+google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -3620,8 +3622,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.66.1 
h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM= -google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a h1:p51n6zkL483uumoZhCSGtHCem9kDeU05G5jX/wYI9gw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= diff --git a/omnibus/config/projects/agent-binaries.rb b/omnibus/config/projects/agent-binaries.rb index d401fffcf1364..bd9e9c790a975 100644 --- a/omnibus/config/projects/agent-binaries.rb +++ b/omnibus/config/projects/agent-binaries.rb @@ -20,7 +20,7 @@ install_dir "C:/opt/datadog-agent/" maintainer 'Datadog Inc.' # Windows doesn't want our e-mail address :( else - install_dir '/opt/datadog-agent' + install_dir ENV["INSTALL_DIR"] || '/opt/datadog-agent' maintainer 'Datadog Packages ' end diff --git a/omnibus/config/projects/agent.rb b/omnibus/config/projects/agent.rb index c5067c017b312..c2421a99bffb6 100644 --- a/omnibus/config/projects/agent.rb +++ b/omnibus/config/projects/agent.rb @@ -4,6 +4,7 @@ # Copyright 2016-present Datadog, Inc. require "./lib/ostools.rb" flavor = ENV['AGENT_FLAVOR'] +output_config_dir = ENV["OUTPUT_CONFIG_DIR"] if flavor.nil? || flavor == 'base' name 'agent' @@ -280,7 +281,7 @@ end if linux_target? 
- extra_package_file '/etc/datadog-agent/' + extra_package_file "#{output_config_dir}/etc/datadog-agent/" extra_package_file '/usr/bin/dd-agent' extra_package_file '/var/log/datadog/' end diff --git a/omnibus/config/projects/dogstatsd.rb b/omnibus/config/projects/dogstatsd.rb index 76800d8018b3b..43b5adadd94ad 100644 --- a/omnibus/config/projects/dogstatsd.rb +++ b/omnibus/config/projects/dogstatsd.rb @@ -29,7 +29,7 @@ install_dir "C:/opt/datadog-dogstatsd/" maintainer 'Datadog Inc.' # Windows doesn't want our e-mail address :( else - install_dir '/opt/datadog-dogstatsd' + install_dir ENV["INSTALL_DIR"] || '/opt/datadog-dogstatsd' if redhat_target? || suse_target? maintainer 'Datadog, Inc ' diff --git a/omnibus/config/projects/iot-agent.rb b/omnibus/config/projects/iot-agent.rb index f2e35368aab95..b14eace6f0540 100644 --- a/omnibus/config/projects/iot-agent.rb +++ b/omnibus/config/projects/iot-agent.rb @@ -20,7 +20,7 @@ install_dir "C:/opt/datadog-agent/" maintainer 'Datadog Inc.' # Windows doesn't want our e-mail address :( else - install_dir '/opt/datadog-agent' + install_dir ENV["INSTALL_DIR"] || '/opt/datadog-agent' if redhat_target? || suse_target? maintainer 'Datadog, Inc ' diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb index 2d545f58b498b..f69104e560a0d 100644 --- a/omnibus/config/software/datadog-agent-finalize.rb +++ b/omnibus/config/software/datadog-agent-finalize.rb @@ -14,11 +14,13 @@ skip_transitive_dependency_licensing true + always_build true build do license :project_license + output_config_dir = ENV["OUTPUT_CONFIG_DIR"] flavor_arg = ENV['AGENT_FLAVOR'] # TODO too many things done here, should be split block do @@ -95,37 +97,37 @@ if linux_target? 
# Move configuration files - mkdir "/etc/datadog-agent" + mkdir "#{output_config_dir}/etc/datadog-agent" move "#{install_dir}/bin/agent/dd-agent", "/usr/bin/dd-agent" - move "#{install_dir}/etc/datadog-agent/datadog.yaml.example", "/etc/datadog-agent" - move "#{install_dir}/etc/datadog-agent/conf.d", "/etc/datadog-agent", :force=>true + move "#{install_dir}/etc/datadog-agent/datadog.yaml.example", "#{output_config_dir}/etc/datadog-agent" + move "#{install_dir}/etc/datadog-agent/conf.d", "#{output_config_dir}/etc/datadog-agent", :force=>true unless heroku_target? - move "#{install_dir}/etc/datadog-agent/system-probe.yaml.example", "/etc/datadog-agent" - move "#{install_dir}/etc/datadog-agent/security-agent.yaml.example", "/etc/datadog-agent", :force=>true - move "#{install_dir}/etc/datadog-agent/runtime-security.d", "/etc/datadog-agent", :force=>true - move "#{install_dir}/etc/datadog-agent/compliance.d", "/etc/datadog-agent" + move "#{install_dir}/etc/datadog-agent/system-probe.yaml.example", "#{output_config_dir}/etc/datadog-agent" + move "#{install_dir}/etc/datadog-agent/security-agent.yaml.example", "#{output_config_dir}/etc/datadog-agent", :force=>true + move "#{install_dir}/etc/datadog-agent/runtime-security.d", "#{output_config_dir}/etc/datadog-agent", :force=>true + move "#{install_dir}/etc/datadog-agent/compliance.d", "#{output_config_dir}/etc/datadog-agent" # Move SELinux policy if debian_target? || redhat_target? - move "#{install_dir}/etc/datadog-agent/selinux", "/etc/datadog-agent/selinux" + move "#{install_dir}/etc/datadog-agent/selinux", "#{output_config_dir}/etc/datadog-agent/selinux" end end if ot_target? 
- move "#{install_dir}/etc/datadog-agent/otel-config.yaml.example", "/etc/datadog-agent" + move "#{install_dir}/etc/datadog-agent/otel-config.yaml.example", "#{output_config_dir}/etc/datadog-agent" end # Create empty directories so that they're owned by the package # (also requires `extra_package_file` directive in project def) - mkdir "/etc/datadog-agent/checks.d" + mkdir "#{output_config_dir}/etc/datadog-agent/checks.d" mkdir "/var/log/datadog" # remove unused configs - delete "/etc/datadog-agent/conf.d/apm.yaml.default" - delete "/etc/datadog-agent/conf.d/process_agent.yaml.default" + delete "#{output_config_dir}/etc/datadog-agent/conf.d/apm.yaml.default" + delete "#{output_config_dir}/etc/datadog-agent/conf.d/process_agent.yaml.default" # remove windows specific configs - delete "/etc/datadog-agent/conf.d/winproc.d" + delete "#{output_config_dir}/etc/datadog-agent/conf.d/winproc.d" # cleanup clutter delete "#{install_dir}/etc" @@ -177,6 +179,9 @@ # Most postgres binaries are removed in postgres' own software # recipe, but we need pg_config to build psycopq. delete "#{install_dir}/embedded/bin/pg_config" + + # Edit rpath from a true path to relative path for each binary + command "inv omnibus.rpath-edit #{install_dir} #{install_dir}", cwd: Dir.pwd end if osx_target? 
@@ -189,6 +194,9 @@ # remove docker configuration delete "#{install_dir}/etc/conf.d/docker.d" + # Edit rpath from a true path to relative path for each binary + command "inv omnibus.rpath-edit #{install_dir} #{install_dir} --platform=macos", cwd: Dir.pwd + if ENV['HARDENED_RUNTIME_MAC'] == 'true' hardened_runtime = "-o runtime --entitlements #{entitlements_file} " else diff --git a/omnibus/config/software/datadog-agent.rb b/omnibus/config/software/datadog-agent.rb index ef24f0b1940ed..0162daef390fe 100644 --- a/omnibus/config/software/datadog-agent.rb +++ b/omnibus/config/software/datadog-agent.rb @@ -233,6 +233,7 @@ # The file below is touched by software builds that don't put anything in the installation # directory (libgcc right now) so that the git_cache gets updated let's remove it from the # final package + # Change RPATH from the install_dir to relative RPATH unless windows_target? delete "#{install_dir}/uselessfile" end diff --git a/omnibus/config/software/init-scripts-agent.rb b/omnibus/config/software/init-scripts-agent.rb index b425b8bd4f334..7add284c1ba38 100644 --- a/omnibus/config/software/init-scripts-agent.rb +++ b/omnibus/config/software/init-scripts-agent.rb @@ -5,8 +5,9 @@ always_build true build do + output_config_dir = ENV["OUTPUT_CONFIG_DIR"] || "" if linux_target? - etc_dir = "/etc/datadog-agent" + etc_dir = "#{output_config_dir}/etc/datadog-agent" mkdir "/etc/init" if debian_target? # sysvinit support for debian only for now diff --git a/omnibus/config/software/installer.rb b/omnibus/config/software/installer.rb index fee51d35e8084..99708622641fb 100644 --- a/omnibus/config/software/installer.rb +++ b/omnibus/config/software/installer.rb @@ -32,7 +32,7 @@ env = with_embedded_path(env) if linux_target? 
- command "invoke installer.build --rebuild --run-path=/opt/datadog-packages/run --install-path=#{install_dir}", env: env + command "invoke installer.build --rebuild --no-cgo --run-path=/opt/datadog-packages/run --install-path=#{install_dir}", env: env mkdir "#{install_dir}/bin" copy 'bin/installer', "#{install_dir}/bin/" elsif windows_target? diff --git a/omnibus/lib/project_extension.rb b/omnibus/lib/project_extension.rb index 50c6ca2b70e20..50def10f56ec5 100644 --- a/omnibus/lib/project_extension.rb +++ b/omnibus/lib/project_extension.rb @@ -41,26 +41,48 @@ def package_me def ddwcssign(file) log.info(self.class.name) { "Signing #{file}" } - cmd = Array.new.tap do |arr| + + # Signing is inherently flaky as the timestamp server may not be available + # retry a few times + max_retries = 3 + attempts = 0 + delay = 2 + + begin + attempts += 1 + cmd = Array.new.tap do |arr| arr << "dd-wcs" arr << "sign" arr << "\"#{file}\"" - end.join(" ") - status = shellout(cmd) - if status.exitstatus != 0 - log.warn(self.class.name) do - <<-EOH.strip - Failed to sign with dd-wcs - - STDOUT - ------ - #{status.stdout} - - STDERR - ------ - #{status.stderr} - EOH + end.join(" ") + + status = shellout(cmd) + if status.exitstatus != 0 + log.warn(self.class.name) do + <<-EOH.strip + Failed to sign with dd-wcs (Attempt #{attempts} of #{max_retries}) + + STDOUT + ------ + #{status.stdout} + + STDERR + ------ + #{status.stderr} + EOH + end + raise "Failed to sign with dd-wcs" + else + log.info(self.class.name) { "Successfully signed #{file} after #{attempts} attempt(s)" } + end + rescue => e + # Retry logic: raise error after 3 attempts + if attempts < max_retries + log.info(self.class.name) { "Retrying signing #{file} (Attempt #{attempts + 1})" } + sleep(delay) + retry end + raise "Failed to sign with dd-wcs: #{e.message}" end end @@ -78,4 +100,4 @@ class Project expose :inspect_binary expose :sign_file end -end \ No newline at end of file +end diff --git a/pkg/aggregator/aggregator.go 
b/pkg/aggregator/aggregator.go index 41fbeb1f93ad4..f8cac0884df2d 100644 --- a/pkg/aggregator/aggregator.go +++ b/pkg/aggregator/aggregator.go @@ -18,7 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -258,7 +259,7 @@ type FlushAndSerializeInParallel struct { } // NewFlushAndSerializeInParallel creates a new instance of FlushAndSerializeInParallel. -func NewFlushAndSerializeInParallel(config config.Config) FlushAndSerializeInParallel { +func NewFlushAndSerializeInParallel(config model.Config) FlushAndSerializeInParallel { return FlushAndSerializeInParallel{ BufferSize: config.GetInt("aggregator_flush_metrics_and_serialize_in_parallel_buffer_size"), ChannelSize: config.GetInt("aggregator_flush_metrics_and_serialize_in_parallel_chan_size"), @@ -267,21 +268,21 @@ func NewFlushAndSerializeInParallel(config config.Config) FlushAndSerializeInPar // NewBufferedAggregator instantiates a BufferedAggregator func NewBufferedAggregator(s serializer.MetricSerializer, eventPlatformForwarder eventplatform.Component, hostname string, flushInterval time.Duration) *BufferedAggregator { - bufferSize := config.Datadog().GetInt("aggregator_buffer_size") + bufferSize := pkgconfigsetup.Datadog().GetInt("aggregator_buffer_size") agentName := flavor.GetFlavor() - if agentName == flavor.IotAgent && !config.Datadog().GetBool("iot_host") { + if agentName == flavor.IotAgent && !pkgconfigsetup.Datadog().GetBool("iot_host") { agentName = flavor.DefaultAgent - } else if config.Datadog().GetBool("iot_host") { + } else 
if pkgconfigsetup.Datadog().GetBool("iot_host") { // Override the agentName if this Agent is configured to report as IotAgent agentName = flavor.IotAgent } - if config.Datadog().GetBool("heroku_dyno") { + if pkgconfigsetup.Datadog().GetBool("heroku_dyno") { // Override the agentName if this Agent is configured to report as Heroku Dyno agentName = flavor.HerokuAgent } - if config.Datadog().GetBool("djm_config.enabled") { + if pkgconfigsetup.Datadog().GetBool("djm_config.enabled") { AddRecurrentSeries(&metrics.Serie{ Name: "datadog.djm.agent_host", Points: []metrics.Point{{Value: 1.0}}, @@ -289,7 +290,7 @@ func NewBufferedAggregator(s serializer.MetricSerializer, eventPlatformForwarder }) } - tagsStore := tags.NewStore(config.Datadog().GetBool("aggregator_use_tags_store"), "aggregator") + tagsStore := tags.NewStore(pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store"), "aggregator") aggregator := &BufferedAggregator{ bufferedServiceCheckIn: make(chan []*servicecheck.ServiceCheck, bufferSize), @@ -316,10 +317,10 @@ func NewBufferedAggregator(s serializer.MetricSerializer, eventPlatformForwarder stopChan: make(chan struct{}), health: health.RegisterLiveness("aggregator"), agentName: agentName, - tlmContainerTagsEnabled: config.Datadog().GetBool("basic_telemetry_add_container_tags"), + tlmContainerTagsEnabled: pkgconfigsetup.Datadog().GetBool("basic_telemetry_add_container_tags"), agentTags: tagger.AgentTags, globalTags: tagger.GlobalTags, - flushAndSerializeInParallel: NewFlushAndSerializeInParallel(config.Datadog()), + flushAndSerializeInParallel: NewFlushAndSerializeInParallel(pkgconfigsetup.Datadog()), } return aggregator @@ -642,7 +643,7 @@ func (agg *BufferedAggregator) flushServiceChecks(start time.Time, waitForSerial addFlushCount("ServiceChecks", int64(len(serviceChecks))) // For debug purposes print out all serviceCheck/tag combinations - if config.Datadog().GetBool("log_payloads") { + if pkgconfigsetup.Datadog().GetBool("log_payloads") { 
log.Debug("Flushing the following Service Checks:") for _, sc := range serviceChecks { log.Debugf("%s", sc) @@ -699,7 +700,7 @@ func (agg *BufferedAggregator) flushEvents(start time.Time, waitForSerializer bo addFlushCount("Events", int64(len(events))) // For debug purposes print out all Event/tag combinations - if config.Datadog().GetBool("log_payloads") { + if pkgconfigsetup.Datadog().GetBool("log_payloads") { log.Debug("Flushing the following Events:") for _, event := range events { log.Debugf("%s", event) @@ -932,10 +933,10 @@ func (agg *BufferedAggregator) handleRegisterSampler(id checkid.ID) { return } agg.checkSamplers[id] = newCheckSampler( - config.Datadog().GetInt("check_sampler_bucket_commits_count_expiry"), - config.Datadog().GetBool("check_sampler_expire_metrics"), - config.Datadog().GetBool("check_sampler_context_metrics"), - config.Datadog().GetDuration("check_sampler_stateful_metric_expiration_time"), + pkgconfigsetup.Datadog().GetInt("check_sampler_bucket_commits_count_expiry"), + pkgconfigsetup.Datadog().GetBool("check_sampler_expire_metrics"), + pkgconfigsetup.Datadog().GetBool("check_sampler_context_metrics"), + pkgconfigsetup.Datadog().GetDuration("check_sampler_stateful_metric_expiration_time"), agg.tagsStore, id, ) diff --git a/pkg/aggregator/aggregator_test.go b/pkg/aggregator/aggregator_test.go index 74a4fb14ffa79..cf577d0f8397a 100644 --- a/pkg/aggregator/aggregator_test.go +++ b/pkg/aggregator/aggregator_test.go @@ -28,7 +28,7 @@ import ( orchestratorforwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" 
"github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" @@ -575,8 +575,8 @@ func TestTags(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - defer pkgconfig.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", nil) - pkgconfig.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", tt.tlmContainerTagsEnabled) + defer pkgconfigsetup.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", nil) + pkgconfigsetup.Datadog().SetWithoutSource("basic_telemetry_add_container_tags", tt.tlmContainerTagsEnabled) agg := NewBufferedAggregator(nil, nil, tt.hostname, time.Second) agg.agentTags = tt.agentTags agg.globalTags = tt.globalTags @@ -586,9 +586,9 @@ func TestTags(t *testing.T) { } func TestTimeSamplerFlush(t *testing.T) { - pc := pkgconfig.Datadog().GetInt("dogstatsd_pipeline_count") - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 1) - defer pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) + pc := pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 1) + defer pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) s := &MockSerializerIterableSerie{} s.On("AreSeriesEnabled").Return(true) @@ -607,9 +607,9 @@ func TestAddDJMRecurrentSeries(t *testing.T) { // this test IS USING globals (recurrentSeries) // - - djmEnabled := pkgconfig.Datadog().GetBool("djm_config.enabled") - pkgconfig.Datadog().SetWithoutSource("djm_config.enabled", true) - defer pkgconfig.Datadog().SetWithoutSource("djm_config.enabled", djmEnabled) + djmEnabled := pkgconfigsetup.Datadog().GetBool("djm_config.enabled") + pkgconfigsetup.Datadog().SetWithoutSource("djm_config.enabled", true) + defer pkgconfigsetup.Datadog().SetWithoutSource("djm_config.enabled", djmEnabled) s := &MockSerializerIterableSerie{} // NewBufferedAggregator with DJM enable will create a new recurrentSeries diff --git 
a/pkg/aggregator/check_sampler.go b/pkg/aggregator/check_sampler.go index 729ee6153d341..0efa8238da958 100644 --- a/pkg/aggregator/check_sampler.go +++ b/pkg/aggregator/check_sampler.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -54,7 +54,7 @@ func (cs *CheckSampler) addSample(metricSample *metrics.MetricSample) { return } - if err := cs.metrics.AddSample(contextKey, metricSample, metricSample.Timestamp, 1, config.Datadog()); err != nil { + if err := cs.metrics.AddSample(contextKey, metricSample, metricSample.Timestamp, 1, pkgconfigsetup.Datadog()); err != nil { log.Debugf("Ignoring sample '%s' on host '%s' and tags '%s': %s", metricSample.Name, metricSample.Host, metricSample.Tags, err) } } diff --git a/pkg/aggregator/check_sampler_bench_test.go b/pkg/aggregator/check_sampler_bench_test.go index 71a32170b8575..4c731fd090390 100644 --- a/pkg/aggregator/check_sampler_bench_test.go +++ b/pkg/aggregator/check_sampler_bench_test.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -43,10 +43,10 @@ func benchmarkAddBucket(bucketValue int64, b *testing.B) { // For some reasons using InitAggregator[WithInterval] doesn't fix the problem, // but this do. 
deps := fxutil.Test[benchmarkDeps](b, core.MockBundle()) - forwarderOpts := forwarder.NewOptionsWithResolvers(config.Datadog(), deps.Log, resolver.NewSingleDomainResolvers(map[string][]string{"hello": {"world"}})) + forwarderOpts := forwarder.NewOptionsWithResolvers(pkgconfigsetup.Datadog(), deps.Log, resolver.NewSingleDomainResolvers(map[string][]string{"hello": {"world"}})) options := DefaultAgentDemultiplexerOptions() options.DontStartForwarders = true - sharedForwarder := forwarder.NewDefaultForwarder(config.Datadog(), deps.Log, forwarderOpts) + sharedForwarder := forwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), deps.Log, forwarderOpts) orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(deps.Hostname)) demux := InitAndStartAgentDemultiplexer(deps.Log, sharedForwarder, &orchestratorForwarder, options, eventPlatformForwarder, deps.Compressor, "hostname") diff --git a/pkg/aggregator/demultiplexer.go b/pkg/aggregator/demultiplexer.go index 1896c60711127..5a9b76c2b5811 100644 --- a/pkg/aggregator/demultiplexer.go +++ b/pkg/aggregator/demultiplexer.go @@ -8,7 +8,7 @@ package aggregator import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" @@ -146,8 +146,8 @@ func GetDogStatsDWorkerAndPipelineCount() (int, int) { func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { var dsdWorkerCount int var pipelineCount int - autoAdjust := config.Datadog().GetBool("dogstatsd_pipeline_autoadjust") - autoAdjustStrategy := config.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") + autoAdjust := pkgconfigsetup.Datadog().GetBool("dogstatsd_pipeline_autoadjust") + autoAdjustStrategy := 
pkgconfigsetup.Datadog().GetString("dogstatsd_pipeline_autoadjust_strategy") if autoAdjustStrategy != AutoAdjustStrategyMaxThroughput && autoAdjustStrategy != AutoAdjustStrategyPerOrigin { log.Warnf("Invalid value for 'dogstatsd_pipeline_autoadjust_strategy', using default value: %s", AutoAdjustStrategyMaxThroughput) @@ -160,7 +160,7 @@ func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { // ------------------------------------ if !autoAdjust { - pipelineCount = config.Datadog().GetInt("dogstatsd_pipeline_count") + pipelineCount = pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") if pipelineCount <= 0 { // guard against configuration mistakes pipelineCount = 1 } @@ -199,7 +199,7 @@ func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { pipelineCount = 1 } - if config.Datadog().GetInt("dogstatsd_pipeline_count") > 1 { + if pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") > 1 { log.Warn("DogStatsD pipeline count value ignored since 'dogstatsd_pipeline_autoadjust' is enabled.") } } else if autoAdjustStrategy == AutoAdjustStrategyPerOrigin { @@ -216,7 +216,7 @@ func getDogStatsDWorkerAndPipelineCount(vCPUs int) (int, int) { dsdWorkerCount = 2 } - pipelineCount = config.Datadog().GetInt("dogstatsd_pipeline_count") + pipelineCount = pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") if pipelineCount <= 0 { // guard against configuration mistakes pipelineCount = vCPUs * 2 } diff --git a/pkg/aggregator/demultiplexer_agent.go b/pkg/aggregator/demultiplexer_agent.go index fb077e54df6b3..fcd44e572e45c 100644 --- a/pkg/aggregator/demultiplexer_agent.go +++ b/pkg/aggregator/demultiplexer_agent.go @@ -20,8 +20,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -139,15 +139,15 @@ func initAgentDemultiplexer( hostname string) *AgentDemultiplexer { // prepare the multiple forwarders // ------------------------------- - if config.Datadog().GetBool("telemetry.enabled") && config.Datadog().GetBool("telemetry.dogstatsd_origin") && !config.Datadog().GetBool("aggregator_use_tags_store") { + if pkgconfigsetup.Datadog().GetBool("telemetry.enabled") && pkgconfigsetup.Datadog().GetBool("telemetry.dogstatsd_origin") && !pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store") { log.Warn("DogStatsD origin telemetry is not supported when aggregator_use_tags_store is disabled.") - config.Datadog().Set("telemetry.dogstatsd_origin", false, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("telemetry.dogstatsd_origin", false, model.SourceAgentRuntime) } // prepare the serializer // ---------------------- - sharedSerializer := serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, config.Datadog(), hostname) + sharedSerializer := serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, pkgconfigsetup.Datadog(), hostname) // prepare the embedded aggregator // -- @@ -157,8 +157,8 @@ func initAgentDemultiplexer( // statsd samplers // --------------- - bufferSize := config.Datadog().GetInt("aggregator_buffer_size") - metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(config.Datadog())) + bufferSize := pkgconfigsetup.Datadog().GetInt("aggregator_buffer_size") + metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(pkgconfigsetup.Datadog())) _, statsdPipelinesCount := GetDogStatsDWorkerAndPipelineCount() log.Debug("the Demultiplexer will use", statsdPipelinesCount, "pipelines") @@ -167,7 +167,7 @@ 
func initAgentDemultiplexer( for i := 0; i < statsdPipelinesCount; i++ { // the sampler - tagsStore := tags.NewStore(config.Datadog().GetBool("aggregator_use_tags_store"), fmt.Sprintf("timesampler #%d", i)) + tagsStore := tags.NewStore(pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store"), fmt.Sprintf("timesampler #%d", i)) statsdSampler := NewTimeSampler(TimeSamplerID(i), bucketSize, tagsStore, agg.hostname) @@ -180,9 +180,9 @@ func initAgentDemultiplexer( var noAggWorker *noAggregationStreamWorker var noAggSerializer serializer.MetricSerializer if options.EnableNoAggregationPipeline { - noAggSerializer = serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, config.Datadog(), hostname) + noAggSerializer = serializer.NewSerializer(sharedForwarder, orchestratorForwarder, compressor, pkgconfigsetup.Datadog(), hostname) noAggWorker = newNoAggregationStreamWorker( - config.Datadog().GetInt("dogstatsd_no_aggregation_pipeline_batch_size"), + pkgconfigsetup.Datadog().GetInt("dogstatsd_no_aggregation_pipeline_batch_size"), metricSamplePool, noAggSerializer, agg.flushAndSerializeInParallel, @@ -312,7 +312,7 @@ func (d *AgentDemultiplexer) flushLoop() { // Stop stops the demultiplexer. // Resources are released, the instance should not be used after a call to `Stop()`. 
func (d *AgentDemultiplexer) Stop(flush bool) { - timeout := config.Datadog().GetDuration("aggregator_stop_timeout") * time.Second + timeout := pkgconfigsetup.Datadog().GetDuration("aggregator_stop_timeout") * time.Second if d.noAggStreamWorker != nil { d.noAggStreamWorker.stop(flush) @@ -399,7 +399,7 @@ func (d *AgentDemultiplexer) flushToSerializer(start time.Time, waitForSerialize return } - logPayloads := config.Datadog().GetBool("log_payloads") + logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads") series, sketches := createIterableMetrics(d.aggregator.flushAndSerializeInParallel, d.sharedSerializer, logPayloads, false) metrics.Serialize( diff --git a/pkg/aggregator/demultiplexer_serverless.go b/pkg/aggregator/demultiplexer_serverless.go index d76d89309d5c4..97573531f5a9d 100644 --- a/pkg/aggregator/demultiplexer_serverless.go +++ b/pkg/aggregator/demultiplexer_serverless.go @@ -15,7 +15,7 @@ import ( forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer" @@ -42,16 +42,16 @@ type ServerlessDemultiplexer struct { // InitAndStartServerlessDemultiplexer creates and starts new Demultiplexer for the serverless agent. 
func InitAndStartServerlessDemultiplexer(keysPerDomain map[string][]string, forwarderTimeout time.Duration) *ServerlessDemultiplexer { - bufferSize := config.Datadog().GetInt("aggregator_buffer_size") + bufferSize := pkgconfigsetup.Datadog().GetInt("aggregator_buffer_size") logger := logimpl.NewTemporaryLoggerWithoutInit() - forwarder := forwarder.NewSyncForwarder(config.Datadog(), logger, keysPerDomain, forwarderTimeout) + forwarder := forwarder.NewSyncForwarder(pkgconfigsetup.Datadog(), logger, keysPerDomain, forwarderTimeout) h, _ := hostname.Get(context.Background()) - serializer := serializer.NewSerializer(forwarder, nil, compressionimpl.NewCompressor(config.Datadog()), config.Datadog(), h) - metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(config.Datadog())) - tagsStore := tags.NewStore(config.Datadog().GetBool("aggregator_use_tags_store"), "timesampler") + serializer := serializer.NewSerializer(forwarder, nil, compressionimpl.NewCompressor(pkgconfigsetup.Datadog()), pkgconfigsetup.Datadog(), h) + metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(pkgconfigsetup.Datadog())) + tagsStore := tags.NewStore(pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store"), "timesampler") statsdSampler := NewTimeSampler(TimeSamplerID(0), bucketSize, tagsStore, "") - flushAndSerializeInParallel := NewFlushAndSerializeInParallel(config.Datadog()) + flushAndSerializeInParallel := NewFlushAndSerializeInParallel(pkgconfigsetup.Datadog()) statsdWorker := newTimeSamplerWorker(statsdSampler, DefaultFlushInterval, bufferSize, metricSamplePool, flushAndSerializeInParallel, tagsStore) demux := &ServerlessDemultiplexer{ @@ -104,7 +104,7 @@ func (d *ServerlessDemultiplexer) ForceFlushToSerializer(start time.Time, waitFo d.flushLock.Lock() defer d.flushLock.Unlock() - logPayloads := config.Datadog().GetBool("log_payloads") + logPayloads := 
pkgconfigsetup.Datadog().GetBool("log_payloads") series, sketches := createIterableMetrics(d.flushAndSerializeInParallel, d.serializer, logPayloads, true) metrics.Serialize( diff --git a/pkg/aggregator/demultiplexer_test.go b/pkg/aggregator/demultiplexer_test.go index efd4ebccebd14..244565ddfd428 100644 --- a/pkg/aggregator/demultiplexer_test.go +++ b/pkg/aggregator/demultiplexer_test.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/comp/serializer/compression" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" @@ -96,17 +96,17 @@ func TestDemuxForwardersCreated(t *testing.T) { // now, simulate a cluster-agent environment and enabled the orchestrator feature - oee := pkgconfig.Datadog().Get("orchestrator_explorer.enabled") - cre := pkgconfig.Datadog().Get("clc_runner_enabled") - ecp := pkgconfig.Datadog().Get("extra_config_providers") + oee := pkgconfigsetup.Datadog().Get("orchestrator_explorer.enabled") + cre := pkgconfigsetup.Datadog().Get("clc_runner_enabled") + ecp := pkgconfigsetup.Datadog().Get("extra_config_providers") defer func() { - pkgconfig.Datadog().SetWithoutSource("orchestrator_explorer.enabled", oee) - pkgconfig.Datadog().SetWithoutSource("clc_runner_enabled", cre) - pkgconfig.Datadog().SetWithoutSource("extra_config_providers", ecp) + pkgconfigsetup.Datadog().SetWithoutSource("orchestrator_explorer.enabled", oee) + pkgconfigsetup.Datadog().SetWithoutSource("clc_runner_enabled", cre) + pkgconfigsetup.Datadog().SetWithoutSource("extra_config_providers", ecp) }() - pkgconfig.Datadog().SetWithoutSource("orchestrator_explorer.enabled", true) - pkgconfig.Datadog().SetWithoutSource("clc_runner_enabled", true) - 
pkgconfig.Datadog().SetWithoutSource("extra_config_providers", []string{"clusterchecks"}) + pkgconfigsetup.Datadog().SetWithoutSource("orchestrator_explorer.enabled", true) + pkgconfigsetup.Datadog().SetWithoutSource("clc_runner_enabled", true) + pkgconfigsetup.Datadog().SetWithoutSource("extra_config_providers", []string{"clusterchecks"}) // since we're running the tests with -tags orchestrator and we've enabled the // needed feature above, we should have an orchestrator forwarder instantiated now @@ -203,18 +203,18 @@ func TestDemuxFlushAggregatorToSerializer(t *testing.T) { } func TestGetDogStatsDWorkerAndPipelineCount(t *testing.T) { - pc := pkgconfig.Datadog().GetInt("dogstatsd_pipeline_count") - aa := pkgconfig.Datadog().GetInt("dogstatsd_pipeline_autoadjust") + pc := pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_count") + aa := pkgconfigsetup.Datadog().GetInt("dogstatsd_pipeline_autoadjust") defer func() { - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", aa) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", aa) }() assert := assert.New(t) // auto-adjust - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", true) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", true) dsdWorkers, pipelines := getDogStatsDWorkerAndPipelineCount(16) assert.Equal(8, dsdWorkers) @@ -234,8 +234,8 @@ func TestGetDogStatsDWorkerAndPipelineCount(t *testing.T) { // no auto-adjust - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) // default value + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", pc) // default value dsdWorkers, pipelines = 
getDogStatsDWorkerAndPipelineCount(16) assert.Equal(14, dsdWorkers) @@ -255,8 +255,8 @@ func TestGetDogStatsDWorkerAndPipelineCount(t *testing.T) { // no auto-adjust + pipeline count - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) - pkgconfig.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 4) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_autoadjust", false) + pkgconfigsetup.Datadog().SetWithoutSource("dogstatsd_pipeline_count", 4) dsdWorkers, pipelines = getDogStatsDWorkerAndPipelineCount(16) assert.Equal(11, dsdWorkers) @@ -290,8 +290,7 @@ func createDemuxDepsWithOrchestratorFwd( modules := fx.Options( defaultforwarder.MockModule(), core.MockBundle(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorParams), + orchestratorForwarderImpl.Module(orchestratorParams), eventplatformimpl.Module(eventPlatformParams), eventplatformreceiverimpl.Module(), compressionimpl.MockModule(), diff --git a/pkg/aggregator/main_test.go b/pkg/aggregator/main_test.go index d13f64c63786f..0befde08183ee 100644 --- a/pkg/aggregator/main_test.go +++ b/pkg/aggregator/main_test.go @@ -11,11 +11,11 @@ import ( "os" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestMain(m *testing.M) { - checker := config.NewChangeChecker() + checker := pkgconfigsetup.NewChangeChecker() exit := m.Run() if checker.HasChanged() { os.Exit(1) diff --git a/pkg/aggregator/mocksender/mocksender.go b/pkg/aggregator/mocksender/mocksender.go index 41e7514787d3d..85420860a4672 100644 --- a/pkg/aggregator/mocksender/mocksender.go +++ b/pkg/aggregator/mocksender/mocksender.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -40,7 +40,7 @@ func CreateDefaultDemultiplexer() *aggregator.AgentDemultiplexer { opts.FlushInterval = 1 * time.Hour opts.DontStartForwarders = true log := logimpl.NewTemporaryLoggerWithoutInit() - sharedForwarder := forwarder.NewDefaultForwarder(config.Datadog(), log, forwarder.NewOptions(config.Datadog(), log, nil)) + sharedForwarder := forwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, forwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil)) orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostnameimpl.NewHostnameService())) return aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, compressionimpl.NewMockCompressor(), "") diff --git a/pkg/aggregator/no_aggregation_stream_worker.go b/pkg/aggregator/no_aggregation_stream_worker.go index 530b2fc58799c..38324f7d7dd01 100644 --- a/pkg/aggregator/no_aggregation_stream_worker.go +++ b/pkg/aggregator/no_aggregation_stream_worker.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/tagset" @@ -93,7 +93,7 @@ func newNoAggregationStreamWorker(maxMetricsPerPayload int, _ *metrics.MetricSam metricBuffer: tagset.NewHashlessTagsAccumulator(), stopChan: make(chan trigger), - samplesChan: make(chan metrics.MetricSampleBatch, config.Datadog().GetInt("dogstatsd_queue_size")), + samplesChan: make(chan metrics.MetricSampleBatch, 
pkgconfigsetup.Datadog().GetInt("dogstatsd_queue_size")), // warning for the unsupported metric types should appear maximum 200 times // every 5 minutes. @@ -144,7 +144,7 @@ func (w *noAggregationStreamWorker) run() { ticker := time.NewTicker(noAggWorkerStreamCheckFrequency) defer ticker.Stop() - logPayloads := config.Datadog().GetBool("log_payloads") + logPayloads := pkgconfigsetup.Datadog().GetBool("log_payloads") w.seriesSink, w.sketchesSink = createIterableMetrics(w.flushConfig, w.serializer, logPayloads, false) stopped := false diff --git a/pkg/aggregator/test_common.go b/pkg/aggregator/test_common.go index c7bfba146a245..53e634a14cba5 100644 --- a/pkg/aggregator/test_common.go +++ b/pkg/aggregator/test_common.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // PeekSender returns a Sender with passed ID or an error if the sender is not registered @@ -34,5 +34,5 @@ func (d *AgentDemultiplexer) PeekSender(cid checkid.ID) (sender.Sender, error) { //nolint:revive // TODO(AML) Fix revive linter func NewForwarderTest(log log.Component) defaultforwarder.Forwarder { - return defaultforwarder.NewDefaultForwarder(config.Datadog(), log, defaultforwarder.NewOptions(config.Datadog(), log, nil)) + return defaultforwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, defaultforwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil)) } diff --git a/pkg/aggregator/time_sampler.go b/pkg/aggregator/time_sampler.go index 30f41aec5726f..08a4c17eda40b 100644 --- a/pkg/aggregator/time_sampler.go +++ b/pkg/aggregator/time_sampler.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,8 +52,8 @@ func NewTimeSampler(id TimeSamplerID, interval int64, cache *tags.Store, hostnam idString := strconv.Itoa(int(id)) log.Infof("Creating TimeSampler #%s", idString) - contextExpireTime := config.Datadog().GetInt64("dogstatsd_context_expiry_seconds") - counterExpireTime := contextExpireTime + config.Datadog().GetInt64("dogstatsd_expiry_seconds") + contextExpireTime := pkgconfigsetup.Datadog().GetInt64("dogstatsd_context_expiry_seconds") + counterExpireTime := contextExpireTime + pkgconfigsetup.Datadog().GetInt64("dogstatsd_expiry_seconds") s := &TimeSampler{ interval: interval, @@ -97,7 +97,7 @@ func (s *TimeSampler) sample(metricSample *metrics.MetricSample, timestamp float s.metricsByTimestamp[bucketStart] = bucketMetrics } // Add sample to bucket - if err := bucketMetrics.AddSample(contextKey, metricSample, timestamp, s.interval, nil, config.Datadog()); err != nil { + if err := bucketMetrics.AddSample(contextKey, metricSample, timestamp, s.interval, nil, pkgconfigsetup.Datadog()); err != nil { log.Debugf("TimeSampler #%d Ignoring sample '%s' on host '%s' and tags '%s': %s", s.id, metricSample.Name, metricSample.Host, metricSample.Tags, err) } } @@ -264,7 +264,7 @@ func (s *TimeSampler) flushContextMetrics(contextMetricsFlusher *metrics.Context } func (s *TimeSampler) countersSampleZeroValue(timestamp int64, contextMetrics metrics.ContextMetrics) { - expirySeconds := config.Datadog().GetInt64("dogstatsd_expiry_seconds") + expirySeconds := pkgconfigsetup.Datadog().GetInt64("dogstatsd_expiry_seconds") for counterContext, entry := range s.contextResolver.resolver.contextsByKey { if entry.lastSeen+expirySeconds > timestamp && entry.context.mtype == metrics.CounterType { sample := &metrics.MetricSample{ @@ -279,13 +279,13 @@ func (s 
*TimeSampler) countersSampleZeroValue(timestamp int64, contextMetrics me } // Add a zero value sample to the counter // It is ok to add a 0 sample to a counter that was already sampled in the bucket, it won't change its value - contextMetrics.AddSample(counterContext, sample, float64(timestamp), s.interval, nil, config.Datadog()) //nolint:errcheck + contextMetrics.AddSample(counterContext, sample, float64(timestamp), s.interval, nil, pkgconfigsetup.Datadog()) //nolint:errcheck } } } func (s *TimeSampler) sendTelemetry(timestamp float64, series metrics.SerieSink) { - if !config.Datadog().GetBool("telemetry.enabled") { + if !pkgconfigsetup.Datadog().GetBool("telemetry.enabled") { return } @@ -296,7 +296,7 @@ func (s *TimeSampler) sendTelemetry(timestamp float64, series metrics.SerieSink) fmt.Sprintf("sampler_id:%d", s.id), } - if config.Datadog().GetBool("telemetry.dogstatsd_origin") { + if pkgconfigsetup.Datadog().GetBool("telemetry.dogstatsd_origin") { s.contextResolver.sendOriginTelemetry(timestamp, series, s.hostname, tags) } } diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index f971da2048b6c..0220d71a19ea6 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -75,8 +75,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stats" "github.com/DataDog/datadog-agent/pkg/collector/python" "github.com/DataDog/datadog-agent/pkg/commonchecks" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" statuscollector "github.com/DataDog/datadog-agent/pkg/status/collector" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -184,8 +184,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { fx.Provide(func() serializer.MetricSerializer { return nil }), compressionimpl.Module(), 
demultiplexerimpl.Module(), - orchestratorForwarderImpl.Module(), - fx.Supply(orchestratorForwarderImpl.NewNoopParams()), + orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewNoopParams()), eventplatformimpl.Module(eventplatforParams), eventplatformreceiverimpl.Module(), fx.Provide(func() demultiplexerimpl.Params { @@ -275,15 +274,15 @@ func run( previousIntegrationTracing := false previousIntegrationTracingExhaustive := false if cliParams.generateIntegrationTraces { - if pkgconfig.Datadog().IsSet("integration_tracing") { - previousIntegrationTracing = pkgconfig.Datadog().GetBool("integration_tracing") + if pkgconfigsetup.Datadog().IsSet("integration_tracing") { + previousIntegrationTracing = pkgconfigsetup.Datadog().GetBool("integration_tracing") } - if pkgconfig.Datadog().IsSet("integration_tracing_exhaustive") { - previousIntegrationTracingExhaustive = pkgconfig.Datadog().GetBool("integration_tracing_exhaustive") + if pkgconfigsetup.Datadog().IsSet("integration_tracing_exhaustive") { + previousIntegrationTracingExhaustive = pkgconfigsetup.Datadog().GetBool("integration_tracing_exhaustive") } - pkgconfig.Datadog().Set("integration_tracing", true, model.SourceAgentRuntime) - pkgconfig.Datadog().Set("integration_tracing_exhaustive", true, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing", true, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing_exhaustive", true, model.SourceAgentRuntime) } if len(cliParams.args) != 0 { @@ -298,7 +297,7 @@ func run( pkgcollector.InitPython(common.GetPythonPaths()...) 
commonchecks.RegisterChecks(wmeta, config, telemetry) - common.LoadComponents(secretResolver, wmeta, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmeta, ac, pkgconfigsetup.Datadog().GetString("confd_path")) ac.LoadAndRun(context.Background()) // Create the CheckScheduler, but do not attach it to @@ -624,8 +623,8 @@ func run( } if cliParams.generateIntegrationTraces { - pkgconfig.Datadog().Set("integration_tracing", previousIntegrationTracing, model.SourceAgentRuntime) - pkgconfig.Datadog().Set("integration_tracing_exhaustive", previousIntegrationTracingExhaustive, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing", previousIntegrationTracing, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("integration_tracing_exhaustive", previousIntegrationTracingExhaustive, model.SourceAgentRuntime) } return nil diff --git a/pkg/cli/subcommands/clusterchecks/command.go b/pkg/cli/subcommands/clusterchecks/command.go index 6a29ac12c7cd3..1b41623dd4a04 100644 --- a/pkg/cli/subcommands/clusterchecks/command.go +++ b/pkg/cli/subcommands/clusterchecks/command.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -130,7 +130,7 @@ func rebalance(_ log.Component, config config.Component, cliParams *cliParams) e fmt.Println("Requesting a cluster check rebalance...") c := util.GetClient(false) // FIX: get certificates right then make this true - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/rebalance", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := 
fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/rebalance", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) // Set session token err := util.SetAuthToken(config) @@ -183,7 +183,7 @@ func isolate(_ log.Component, config config.Component, cliParams *cliParams) err if cliParams.checkID == "" { return fmt.Errorf("checkID must be specified") } - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/isolate/check/%s", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), cliParams.checkID) + urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks/isolate/check/%s", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), cliParams.checkID) // Set session token err := util.SetAuthToken(config) diff --git a/pkg/cli/subcommands/dcaflare/command.go b/pkg/cli/subcommands/dcaflare/command.go index 102261f10664c..30a00d7f19f3d 100644 --- a/pkg/cli/subcommands/dcaflare/command.go +++ b/pkg/cli/subcommands/dcaflare/command.go @@ -23,9 +23,9 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/input" @@ -110,7 +110,7 @@ func readProfileData(seconds int) (flare.ProfileData, error) { c := util.GetClient(false) fmt.Fprintln(color.Output, color.BlueString("Getting a %ds profile snapshot from datadog-cluster-agent.", seconds)) - pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfig.Datadog().GetInt("expvar_port")) + pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", 
pkgconfigsetup.Datadog().GetInt("expvar_port")) for _, prof := range []struct{ name, URL string }{ { @@ -156,9 +156,9 @@ func run(cliParams *cliParams, _ config.Component) error { e error ) c := util.GetClient(false) // FIX: get certificates right then make this true - urlstr := fmt.Sprintf("https://localhost:%v/flare", pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := fmt.Sprintf("https://localhost:%v/flare", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) - logFile := pkgconfig.Datadog().GetString("log_file") + logFile := pkgconfigsetup.Datadog().GetString("log_file") if logFile == "" { logFile = path.DefaultDCALogFile } @@ -189,7 +189,7 @@ func run(cliParams *cliParams, _ config.Component) error { return nil } - if e = util.SetAuthToken(pkgconfig.Datadog()); e != nil { + if e = util.SetAuthToken(pkgconfigsetup.Datadog()); e != nil { return e } @@ -226,7 +226,7 @@ func run(cliParams *cliParams, _ config.Component) error { } } - response, e := flare.SendFlare(pkgconfig.Datadog(), filePath, cliParams.caseID, cliParams.email, helpers.NewLocalFlareSource()) + response, e := flare.SendFlare(pkgconfigsetup.Datadog(), filePath, cliParams.caseID, cliParams.email, helpers.NewLocalFlareSource()) fmt.Println(response) if e != nil { return e @@ -239,7 +239,7 @@ func newSettingsClient() (settings.Client, error) { apiConfigURL := fmt.Sprintf( "https://localhost:%v/config", - pkgconfig.Datadog().GetInt("cluster_agent.cmd_port"), + pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"), ) return settingshttp.NewClient(c, apiConfigURL, "datadog-cluster-agent", settingshttp.NewHTTPClientOptions(util.LeaveConnectionOpen)), nil diff --git a/pkg/cli/subcommands/health/command.go b/pkg/cli/subcommands/health/command.go index f4d1233d61d99..21e0c0efb48c8 100644 --- a/pkg/cli/subcommands/health/command.go +++ b/pkg/cli/subcommands/health/command.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log 
"github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -73,16 +73,16 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { func requestHealth(_ log.Component, config config.Component, cliParams *cliParams) error { c := util.GetClient(false) // FIX: get certificates right then make this true - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } var urlstr string if flavor.GetFlavor() == flavor.ClusterAgent { - urlstr = fmt.Sprintf("https://%v:%v/status/health", ipcAddress, pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/status/health", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - urlstr = fmt.Sprintf("https://%v:%v/agent/status/health", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/agent/status/health", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) } // Set session token diff --git a/pkg/cli/subcommands/taggerlist/command.go b/pkg/cli/subcommands/taggerlist/command.go index 3ff65b7305f17..7b1e239c30574 100644 --- a/pkg/cli/subcommands/taggerlist/command.go +++ b/pkg/cli/subcommands/taggerlist/command.go @@ -16,7 +16,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl/api" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/flavor" 
"github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -85,16 +85,16 @@ func taggerList(_ log.Component, config config.Component, _ *cliParams) error { } func getTaggerURL(_ config.Component) (string, error) { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } var urlstr string if flavor.GetFlavor() == flavor.ClusterAgent { - urlstr = fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - urlstr = fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + urlstr = fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) } return urlstr, nil diff --git a/pkg/cli/subcommands/workloadlist/command.go b/pkg/cli/subcommands/workloadlist/command.go index d3576bad8c509..931f698c7aebd 100644 --- a/pkg/cli/subcommands/workloadlist/command.go +++ b/pkg/cli/subcommands/workloadlist/command.go @@ -17,7 +17,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/util" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -112,16 +112,16 @@ func workloadList(_ log.Component, config config.Component, cliParams *cliParams } func workloadURL(verbose bool) (string, error) { - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } var prefix string if flavor.GetFlavor() == flavor.ClusterAgent { - prefix = 
fmt.Sprintf("https://%v:%v/workload-list", ipcAddress, pkgconfig.Datadog().GetInt("cluster_agent.cmd_port")) + prefix = fmt.Sprintf("https://%v:%v/workload-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) } else { - prefix = fmt.Sprintf("https://%v:%v/agent/workload-list", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + prefix = fmt.Sprintf("https://%v:%v/agent/workload-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) } if verbose { diff --git a/pkg/cloudfoundry/containertagger/container_tagger.go b/pkg/cloudfoundry/containertagger/container_tagger.go index 6915d0f10d694..a5f1805e6f86a 100644 --- a/pkg/cloudfoundry/containertagger/container_tagger.go +++ b/pkg/cloudfoundry/containertagger/container_tagger.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/utils" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/cloudfoundry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -47,8 +47,8 @@ func NewContainerTagger(wmeta workloadmeta.Component) (*ContainerTagger, error) return nil, err } - retryCount := config.Datadog().GetInt("cloud_foundry_container_tagger.retry_count") - retryInterval := time.Second * time.Duration(config.Datadog().GetInt("cloud_foundry_container_tagger.retry_interval")) + retryCount := pkgconfigsetup.Datadog().GetInt("cloud_foundry_container_tagger.retry_count") + retryInterval := time.Second * time.Duration(pkgconfigsetup.Datadog().GetInt("cloud_foundry_container_tagger.retry_interval")) return &ContainerTagger{ gardenUtil: gu, @@ -106,7 +106,7 @@ func (c *ContainerTagger) processEvent(ctx context.Context, evt workloadmeta.Eve log.Debugf("Processing Event (id %s): %+v", eventID, 
storeContainer) // extract tags - hostTags := hostMetadataUtils.Get(ctx, true, config.Datadog()) + hostTags := hostMetadataUtils.Get(ctx, true, pkgconfigsetup.Datadog()) tags := storeContainer.CollectorTags tags = append(tags, hostTags.System...) tags = append(tags, hostTags.GoogleCloudPlatform...) @@ -161,7 +161,7 @@ func (c *ContainerTagger) processEvent(ctx context.Context, evt workloadmeta.Eve // updateTagsInContainer runs a script inside the container that handles updating the agent with the given tags func updateTagsInContainer(container garden.Container, tags []string) (int, error) { //nolint:revive // TODO(PLINT) Fix revive linter - shell_path := config.Datadog().GetString("cloud_foundry_container_tagger.shell_path") + shell_path := pkgconfigsetup.Datadog().GetString("cloud_foundry_container_tagger.shell_path") process, err := container.Run(garden.ProcessSpec{ Path: shell_path, Args: []string{"/home/vcap/app/.datadog/scripts/update_agent_config.sh"}, diff --git a/pkg/clusteragent/admission/controllers/webhook/config.go b/pkg/clusteragent/admission/controllers/webhook/config.go index 477a41c787439..697dd68a8805b 100644 --- a/pkg/clusteragent/admission/controllers/webhook/config.go +++ b/pkg/clusteragent/admission/controllers/webhook/config.go @@ -13,7 +13,7 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" ) @@ -35,16 +35,16 @@ type Config struct { // NewConfig creates a webhook controller configuration func NewConfig(admissionV1Enabled, namespaceSelectorEnabled bool) Config { return Config{ - webhookName: config.Datadog().GetString("admission_controller.webhook_name"), - secretName: config.Datadog().GetString("admission_controller.certificate.secret_name"), + webhookName: pkgconfigsetup.Datadog().GetString("admission_controller.webhook_name"), + secretName: 
pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name"), namespace: common.GetResourcesNamespace(), admissionV1Enabled: admissionV1Enabled, namespaceSelectorEnabled: namespaceSelectorEnabled, - svcName: config.Datadog().GetString("admission_controller.service_name"), + svcName: pkgconfigsetup.Datadog().GetString("admission_controller.service_name"), svcPort: int32(443), - timeout: config.Datadog().GetInt32("admission_controller.timeout_seconds"), - failurePolicy: config.Datadog().GetString("admission_controller.failure_policy"), - reinvocationPolicy: config.Datadog().GetString("admission_controller.reinvocation_policy"), + timeout: pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds"), + failurePolicy: pkgconfigsetup.Datadog().GetString("admission_controller.failure_policy"), + reinvocationPolicy: pkgconfigsetup.Datadog().GetString("admission_controller.reinvocation_policy"), } } diff --git a/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go b/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go index b4c990f55459a..b8cc5e2d1ed5d 100644 --- a/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go +++ b/pkg/clusteragent/admission/controllers/webhook/controller_v1_test.go @@ -31,8 +31,9 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" ) @@ -233,7 +234,7 @@ func TestGenerateTemplatesV1(t *testing.T) { matchPolicy := 
admiv1.Exact sideEffects := admiv1.SideEffectClassNone port := int32(443) - timeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + timeout := pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") webhook := func(name, path string, objSelector, nsSelector *metav1.LabelSelector, operations []admiv1.OperationType, resources []string) admiv1.MutatingWebhook { return admiv1.MutatingWebhook{ Name: name, @@ -267,13 +268,13 @@ func TestGenerateTemplatesV1(t *testing.T) { } tests := []struct { name string - setupConfig func(config.Config) + setupConfig func(model.Config) configFunc func() Config want func() []admiv1.MutatingWebhook }{ { name: "config injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -296,7 +297,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "config injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -315,7 +316,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -338,7 +339,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { 
name: "tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -357,7 +358,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "lib injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -380,7 +381,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "lib injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -399,7 +400,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "config and tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) mockConfig.SetWithoutSource("admission_controller.auto_instrumentation.enabled", false) @@ -422,7 +423,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "config and tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", 
true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -454,7 +455,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -479,7 +480,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "AKS-specific label selector without namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -528,7 +529,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "AKS-specific label selector with namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -574,7 +575,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ 
-615,7 +616,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -652,7 +653,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation, namespace selector", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -693,7 +694,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "cws instrumentation, namespace selector, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -730,7 +731,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, misconfigured profiles, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -750,7 +751,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, no 
selectors specified, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -782,7 +783,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -802,7 +803,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -822,7 +823,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, only single namespace selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -852,7 +853,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, valid selector 
specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -872,7 +873,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, only single object selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -900,7 +901,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, one object selector and one namespace selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -928,7 +929,7 @@ func TestGenerateTemplatesV1(t *testing.T) { }, { name: "agent sidecar injection, multiple selectors (should refuse to create webhook), provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -977,7 +978,7 @@ func TestGetWebhookSkeletonV1(t *testing.T) { sideEffects := 
admiv1.SideEffectClassNone port := int32(443) path := "/bar" - defaultTimeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + defaultTimeout := pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") customTimeout := int32(2) namespaceSelector, _ := common.DefaultLabelSelectors(true) _, objectSelector := common.DefaultLabelSelectors(false) diff --git a/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go b/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go index 540ce5fbc44d6..3a90da5138556 100644 --- a/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go +++ b/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1_test.go @@ -31,8 +31,9 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" ) @@ -228,7 +229,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { matchPolicy := admiv1beta1.Exact sideEffects := admiv1beta1.SideEffectClassNone port := int32(443) - timeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + timeout := pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") webhook := func(name, path string, objSelector, nsSelector *metav1.LabelSelector, operations []admiv1beta1.OperationType, resources []string) admiv1beta1.MutatingWebhook { return admiv1beta1.MutatingWebhook{ Name: name, @@ -262,13 +263,13 @@ func TestGenerateTemplatesV1beta1(t 
*testing.T) { } tests := []struct { name string - setupConfig func(config.Config) + setupConfig func(model.Config) configFunc func() Config want func() []admiv1beta1.MutatingWebhook }{ { name: "config injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -291,7 +292,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "config injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -310,7 +311,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -333,7 +334,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -352,7 +353,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "lib injection, mutate 
all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -375,7 +376,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "lib injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", false) @@ -394,7 +395,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "config and tags injection, mutate labelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) mockConfig.SetWithoutSource("admission_controller.auto_instrumentation.enabled", false) @@ -417,7 +418,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "config and tags injection, mutate all", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -449,7 +450,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) 
mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) mockConfig.SetWithoutSource("admission_controller.inject_tags.enabled", true) @@ -474,7 +475,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "AKS-specific label selector without namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -522,7 +523,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "AKS-specific label selector with namespace selector enabled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.add_aks_selectors", true) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", true) @@ -567,7 +568,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -608,7 +609,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) 
mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -645,7 +646,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation, namespace selector", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -686,7 +687,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "cws instrumentation, namespace selector, mutate unlabelled", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", true) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -723,7 +724,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, misconfigured profiles, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -743,7 +744,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, supported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) 
mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -775,7 +776,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -795,7 +796,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, no selectors specified, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -815,7 +816,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, only single namespace selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -845,7 +846,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, valid selector specified, unsupported provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) 
mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -865,7 +866,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, only single object selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -893,7 +894,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, one object selector and one namespace selector, no provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -921,7 +922,7 @@ func TestGenerateTemplatesV1beta1(t *testing.T) { }, { name: "agent sidecar injection, multiple selectors (should refuse to create webhook), provider specified", - setupConfig: func(mockConfig config.Config) { + setupConfig: func(mockConfig model.Config) { mockConfig.SetWithoutSource("admission_controller.mutate_unlabelled", false) mockConfig.SetWithoutSource("admission_controller.namespace_selector_fallback", false) mockConfig.SetWithoutSource("admission_controller.inject_config.enabled", false) @@ -970,7 +971,7 @@ func TestGetWebhookSkeletonV1beta1(t *testing.T) { defaultReinvocationPolicy := admiv1beta1.IfNeededReinvocationPolicy port := int32(443) path := "/bar" - defaultTimeout := config.Datadog().GetInt32("admission_controller.timeout_seconds") + defaultTimeout := 
pkgconfigsetup.Datadog().GetInt32("admission_controller.timeout_seconds") customTimeout := int32(2) namespaceSelector, _ := common.DefaultLabelSelectors(true) _, objectSelector := common.DefaultLabelSelectors(false) diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go index c63ac2b995d3b..544a0b0a3ec55 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/agent_sidecar.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/admission" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" apiCommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -60,8 +60,8 @@ func NewWebhook() *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("admission_controller.agent_sidecar.enabled"), - endpoint: config.Datadog().GetString("admission_controller.agent_sidecar.endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.agent_sidecar.enabled"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, namespaceSelector: nsSelector, @@ -157,11 +157,11 @@ func (w *Webhook) injectAgentSidecar(pod *corev1.Pod, _ string, _ dynamic.Interf func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { ddSite := os.Getenv("DD_SITE") if ddSite == "" { - ddSite = config.DefaultSite + ddSite = pkgconfigsetup.DefaultSite } - imageName := 
config.Datadog().GetString("admission_controller.agent_sidecar.image_name") - imageTag := config.Datadog().GetString("admission_controller.agent_sidecar.image_tag") + imageName := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.image_name") + imageTag := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.image_tag") agentContainer := &corev1.Container{ Env: []corev1.EnvVar{ @@ -195,7 +195,7 @@ func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { }, { Name: "DD_LANGUAGE_DETECTION_ENABLED", - Value: strconv.FormatBool(config.Datadog().GetBool("language_detection.enabled") && config.Datadog().GetBool("language_detection.reporting.enabled")), + Value: strconv.FormatBool(pkgconfigsetup.Datadog().GetBool("language_detection.enabled") && pkgconfigsetup.Datadog().GetBool("language_detection.reporting.enabled")), }, }, Image: fmt.Sprintf("%s/%s:%s", containerRegistry, imageName, imageTag), @@ -213,11 +213,11 @@ func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { }, } - clusterAgentEnabled := config.Datadog().GetBool("admission_controller.agent_sidecar.cluster_agent.enabled") + clusterAgentEnabled := pkgconfigsetup.Datadog().GetBool("admission_controller.agent_sidecar.cluster_agent.enabled") if clusterAgentEnabled { - clusterAgentCmdPort := config.Datadog().GetInt("cluster_agent.cmd_port") - clusterAgentServiceName := config.Datadog().GetString("cluster_agent.kubernetes_service_name") + clusterAgentCmdPort := pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port") + clusterAgentServiceName := pkgconfigsetup.Datadog().GetString("cluster_agent.kubernetes_service_name") _, _ = withEnvOverrides(agentContainer, corev1.EnvVar{ Name: "DD_CLUSTER_AGENT_ENABLED", @@ -247,7 +247,7 @@ func getDefaultSidecarTemplate(containerRegistry string) *corev1.Container { // labelSelectors returns the mutating webhooks object selectors based on the configuration func labelSelectors() (namespaceSelector, 
objectSelector *metav1.LabelSelector) { // Read and parse selectors - selectorsJSON := config.Datadog().GetString("admission_controller.agent_sidecar.selectors") + selectorsJSON := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.selectors") // Get sidecar profiles _, err := loadSidecarProfiles() @@ -269,7 +269,7 @@ func labelSelectors() (namespaceSelector, objectSelector *metav1.LabelSelector) return nil, nil } - provider := config.Datadog().GetString("admission_controller.agent_sidecar.provider") + provider := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.provider") if !providerIsSupported(provider) { log.Errorf("agent sidecar provider is not supported: %v", provider) return nil, nil diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go b/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go index 628d75e76450f..c7367f62919d1 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/profiles.go @@ -13,7 +13,7 @@ import ( corev1 "k8s.io/api/core/v1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) //////////////////////////////// @@ -34,7 +34,7 @@ type ProfileOverride struct { // one profile is configured func loadSidecarProfiles() ([]ProfileOverride, error) { // Read and parse profiles - profilesJSON := config.Datadog().GetString("admission_controller.agent_sidecar.profiles") + profilesJSON := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.profiles") var profiles []ProfileOverride diff --git a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go index d05d123a852f2..0f03cadbb8509 100644 --- a/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go +++ b/pkg/clusteragent/admission/mutate/agent_sidecar/providers.go @@ -15,7 +15,7 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" configWebhook "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/config" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -56,7 +56,7 @@ func providerIsSupported(provider string) bool { // applyProviderOverrides applies the necessary overrides for the provider // configured. It returns a boolean that indicates if the pod was mutated. func applyProviderOverrides(pod *corev1.Pod) (bool, error) { - provider := config.Datadog().GetString("admission_controller.agent_sidecar.provider") + provider := pkgconfigsetup.Datadog().GetString("admission_controller.agent_sidecar.provider") if !providerIsSupported(provider) { return false, fmt.Errorf("unsupported provider: %v", provider) diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go index 2ea1076879239..909612cdd3b11 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation.go @@ -29,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" mutatecommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -41,7 +41,7 @@ const ( // defaultMilliCPURequest defines default milli cpu request number. defaultMilliCPURequest int64 = 50 // 0.05 core // defaultMemoryRequest defines default memory request size. 
- defaultMemoryRequest int64 = 20 * 1024 * 1024 // 20 MB + defaultMemoryRequest int64 = 100 * 1024 * 1024 // 100 MB (recommended minimum by Alpine) webhookName = "lib_injection" ) @@ -84,13 +84,13 @@ func NewWebhook(wmeta workloadmeta.Component, filter mutatecommon.InjectionFilte return nil, err } - v, err := instrumentationVersion(config.Datadog().GetString("apm_config.instrumentation.version")) + v, err := instrumentationVersion(pkgconfigsetup.Datadog().GetString("apm_config.instrumentation.version")) if err != nil { return nil, fmt.Errorf("invalid version for key apm_config.instrumentation.version: %w", err) } var ( - isEnabled = config.Datadog().GetBool("admission_controller.auto_instrumentation.enabled") + isEnabled = pkgconfigsetup.Datadog().GetBool("admission_controller.auto_instrumentation.enabled") containerRegistry = mutatecommon.ContainerRegistry("admission_controller.auto_instrumentation.container_registry") pinnedLibraries []libInfo ) @@ -102,14 +102,14 @@ func NewWebhook(wmeta workloadmeta.Component, filter mutatecommon.InjectionFilte return &Webhook{ name: webhookName, isEnabled: isEnabled, - endpoint: config.Datadog().GetString("admission_controller.auto_instrumentation.endpoint"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, initSecurityContext: initSecurityContext, initResourceRequirements: initResourceRequirements, injectionFilter: filter, containerRegistry: containerRegistry, - injectorImageTag: config.Datadog().GetString("apm_config.instrumentation.injector_image_tag"), + injectorImageTag: pkgconfigsetup.Datadog().GetString("apm_config.instrumentation.injector_image_tag"), pinnedLibraries: pinnedLibraries, version: v, wmeta: wmeta, @@ -228,7 +228,7 @@ func (w *Webhook) inject(pod *corev1.Pod, ns string, _ dynamic.Interface) (bool, // * false - product disactivated, not overridable remotely func 
securityClientLibraryConfigMutators() []podMutator { boolVal := func(key string) string { - return strconv.FormatBool(config.Datadog().GetBool(key)) + return strconv.FormatBool(pkgconfigsetup.Datadog().GetBool(key)) } return []podMutator{ configKeyEnvVarMutator{ @@ -259,7 +259,7 @@ func profilingClientLibraryConfigMutators() []podMutator { configKeyEnvVarMutator{ envKey: "DD_PROFILING_ENABLED", configKey: "admission_controller.auto_instrumentation.profiling.enabled", - getVal: config.Datadog().GetString, + getVal: pkgconfigsetup.Datadog().GetString, }, } } @@ -289,7 +289,7 @@ func injectApmTelemetryConfig(pod *corev1.Pod) { func getPinnedLibraries(registry string) []libInfo { // If APM Instrumentation is enabled and configuration apm_config.instrumentation.lib_versions specified, // inject only the libraries from the configuration - singleStepLibraryVersions := config.Datadog(). + singleStepLibraryVersions := pkgconfigsetup.Datadog(). GetStringMapString("apm_config.instrumentation.lib_versions") var res []libInfo @@ -351,14 +351,14 @@ func (l *libInfoLanguageDetection) containerMutator(v version) containerMutator // The languages information is available in workloadmeta-store // and attached on the pod's owner. 
func (w *Webhook) getLibrariesLanguageDetection(pod *corev1.Pod) *libInfoLanguageDetection { - if !config.Datadog().GetBool("language_detection.enabled") || - !config.Datadog().GetBool("language_detection.reporting.enabled") { + if !pkgconfigsetup.Datadog().GetBool("language_detection.enabled") || + !pkgconfigsetup.Datadog().GetBool("language_detection.reporting.enabled") { return nil } return &libInfoLanguageDetection{ libs: w.getAutoDetectedLibraries(pod), - injectionEnabled: config.Datadog().GetBool("admission_controller.auto_instrumentation.inject_auto_detected_libraries"), + injectionEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.auto_instrumentation.inject_auto_detected_libraries"), } } @@ -651,8 +651,8 @@ func initResources() (corev1.ResourceRequirements, error) { var resources = corev1.ResourceRequirements{Limits: corev1.ResourceList{}, Requests: corev1.ResourceList{}} - if config.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.cpu") { - quantity, err := resource.ParseQuantity(config.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.cpu")) + if pkgconfigsetup.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.cpu") { + quantity, err := resource.ParseQuantity(pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.cpu")) if err != nil { return resources, err } @@ -663,8 +663,8 @@ func initResources() (corev1.ResourceRequirements, error) { resources.Limits[corev1.ResourceCPU] = *resource.NewMilliQuantity(defaultMilliCPURequest, resource.DecimalSI) } - if config.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.memory") { - quantity, err := resource.ParseQuantity(config.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.memory")) + if pkgconfigsetup.Datadog().IsSet("admission_controller.auto_instrumentation.init_resources.memory") { + quantity, err := 
resource.ParseQuantity(pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.init_resources.memory")) if err != nil { return resources, err } @@ -682,8 +682,8 @@ func parseInitSecurityContext() (*corev1.SecurityContext, error) { securityContext := corev1.SecurityContext{} confKey := "admission_controller.auto_instrumentation.init_security_context" - if config.Datadog().IsSet(confKey) { - confValue := config.Datadog().GetString(confKey) + if pkgconfigsetup.Datadog().IsSet(confKey) { + confValue := pkgconfigsetup.Datadog().GetString(confKey) err := json.Unmarshal([]byte(confValue), &securityContext) if err != nil { return nil, fmt.Errorf("failed to get init security context from configuration, %s=`%s`: %v", confKey, confValue, err) diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go index d7b217f755020..2e776c6c78b3b 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go @@ -640,7 +640,7 @@ func TestExtractLibInfo(t *testing.T) { }, { lang: "dotnet", - image: "registry/dd-lib-dotnet-init:v2", + image: "registry/dd-lib-dotnet-init:v3", }, { lang: "ruby", @@ -1098,7 +1098,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: false, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{}, }, { @@ -1121,7 +1121,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: false, wantCPU: "200m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{}, }, { @@ -1143,7 +1143,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: true, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{}, }, { @@ -1153,7 +1153,7 @@ func TestInjectLibInitContainer(t *testing.T) { 
lang: java, wantErr: false, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ Add: []corev1.Capability{"NET_ADMIN", "SYS_TIME"}, @@ -1191,7 +1191,7 @@ func TestInjectLibInitContainer(t *testing.T) { lang: java, wantErr: false, wantCPU: "50m", - wantMem: "20Mi", + wantMem: "100Mi", secCtx: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ Drop: []corev1.Capability{"ALL"}, @@ -1358,7 +1358,7 @@ func TestInjectAutoInstrumentation(t *testing.T) { "java": "v1", "python": "v2", "ruby": "v2", - "dotnet": "v2", + "dotnet": "v3", "js": "v5", } diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go index 8f2c542e72705..de75b2b4e290e 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/injection_filter.go @@ -11,7 +11,7 @@ import ( "fmt" mutatecommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" apiServerCommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -45,7 +45,7 @@ type injectionFilter struct { // This DOES NOT respect `mutate_unlabelled` since it is a namespace // specific check. 
func (f *injectionFilter) IsNamespaceEligible(ns string) bool { - apmInstrumentationEnabled := config.Datadog().GetBool("apm_config.instrumentation.enabled") + apmInstrumentationEnabled := pkgconfigsetup.Datadog().GetBool("apm_config.instrumentation.enabled") if !apmInstrumentationEnabled { log.Debugf("APM Instrumentation is disabled") @@ -85,8 +85,8 @@ func (f *injectionFilter) Err() error { // are not one of the ones disabled by default. // - Enabled and disabled namespaces: return error. func makeAPMSSINamespaceFilter() (*containers.Filter, error) { - apmEnabledNamespaces := config.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces") - apmDisabledNamespaces := config.Datadog().GetStringSlice("apm_config.instrumentation.disabled_namespaces") + apmEnabledNamespaces := pkgconfigsetup.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces") + apmDisabledNamespaces := pkgconfigsetup.Datadog().GetStringSlice("apm_config.instrumentation.disabled_namespaces") if len(apmEnabledNamespaces) > 0 && len(apmDisabledNamespaces) > 0 { return nil, fmt.Errorf("apm.instrumentation.enabled_namespaces and apm.instrumentation.disabled_namespaces configuration cannot be set together") diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go index dccaf9fa8c9f3..7ddaace808661 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go @@ -113,7 +113,7 @@ func (l language) isSupported() bool { // If this language does not appear in supportedLanguages, it will not be injected. 
var languageVersions = map[language]string{ java: "v1", // https://datadoghq.atlassian.net/browse/APMON-1064 - dotnet: "v2", // https://datadoghq.atlassian.net/browse/APMON-1067 + dotnet: "v3", // https://datadoghq.atlassian.net/browse/APMON-1390 python: "v2", // https://datadoghq.atlassian.net/browse/APMON-1068 ruby: "v2", // https://datadoghq.atlassian.net/browse/APMON-1066 js: "v5", // https://datadoghq.atlassian.net/browse/APMON-1065 diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go index ecd3119fd6b0b..9debc63664d05 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // containerMutator describes something that can mutate a container. 
@@ -166,7 +166,7 @@ type configKeyEnvVarMutator struct { } func (c configKeyEnvVarMutator) mutatePod(pod *corev1.Pod) error { - if config.Datadog().IsSet(c.configKey) { + if pkgconfigsetup.Datadog().IsSet(c.configKey) { _ = common.InjectEnv(pod, corev1.EnvVar{Name: c.envKey, Value: c.getVal(c.configKey)}) } diff --git a/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go b/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go index 9f4bfa158dfad..feefdef468844 100644 --- a/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go +++ b/pkg/clusteragent/admission/mutate/autoscaling/autoscaling.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/cluster-agent/admission" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" admiv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" @@ -39,7 +39,7 @@ type Webhook struct { func NewWebhook(patcher workload.PodPatcher) *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("autoscaling.workload.enabled"), + isEnabled: pkgconfigsetup.Datadog().GetBool("autoscaling.workload.enabled"), endpoint: webhookEndpoint, resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, diff --git a/pkg/clusteragent/admission/mutate/common/common.go b/pkg/clusteragent/admission/mutate/common/common.go index ba60a05f472b8..7ce0baa5bea05 100644 --- a/pkg/clusteragent/admission/mutate/common/common.go +++ b/pkg/clusteragent/admission/mutate/common/common.go @@ -20,7 +20,7 @@ import ( "k8s.io/client-go/dynamic" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -181,11 +181,11 @@ func containsVolumeMount(volumeMounts []corev1.VolumeMount, element corev1.Volum // config option, and falls back to the default container registry if no // webhook-specific container registry is set. func ContainerRegistry(specificConfigOpt string) string { - if config.Datadog().IsSet(specificConfigOpt) { - return config.Datadog().GetString(specificConfigOpt) + if pkgconfigsetup.Datadog().IsSet(specificConfigOpt) { + return pkgconfigsetup.Datadog().GetString(specificConfigOpt) } - return config.Datadog().GetString("admission_controller.container_registry") + return pkgconfigsetup.Datadog().GetString("admission_controller.container_registry") } // MarkVolumeAsSafeToEvictForAutoscaler adds the Kubernetes cluster-autoscaler diff --git a/pkg/clusteragent/admission/mutate/common/label_selectors.go b/pkg/clusteragent/admission/mutate/common/label_selectors.go index d305427ab782b..2e69d9dcecdb2 100644 --- a/pkg/clusteragent/admission/mutate/common/label_selectors.go +++ b/pkg/clusteragent/admission/mutate/common/label_selectors.go @@ -11,16 +11,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // DefaultLabelSelectors returns the mutating webhooks object selector based on the configuration func DefaultLabelSelectors(useNamespaceSelector bool) (namespaceSelector, objectSelector *metav1.LabelSelector) { var labelSelector metav1.LabelSelector - if config.Datadog().GetBool("admission_controller.mutate_unlabelled") || - config.Datadog().GetBool("apm_config.instrumentation.enabled") || - len(config.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces")) > 0 { + if pkgconfigsetup.Datadog().GetBool("admission_controller.mutate_unlabelled") || + 
pkgconfigsetup.Datadog().GetBool("apm_config.instrumentation.enabled") || + len(pkgconfigsetup.Datadog().GetStringSlice("apm_config.instrumentation.enabled_namespaces")) > 0 { // Accept all, ignore pods if they're explicitly filtered-out labelSelector = metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ @@ -40,7 +40,7 @@ func DefaultLabelSelectors(useNamespaceSelector bool) (namespaceSelector, object } } - if config.Datadog().GetBool("admission_controller.add_aks_selectors") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.add_aks_selectors") { return aksSelectors(useNamespaceSelector, labelSelector) } diff --git a/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go b/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go index 068e3b4ebaf31..94e05458c551c 100644 --- a/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go +++ b/pkg/clusteragent/admission/mutate/common/ns_injection_filter.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -36,7 +36,7 @@ func (f InjectionFilter) ShouldMutatePod(pod *corev1.Pod) bool { return true } - return config.Datadog().GetBool("admission_controller.mutate_unlabelled") + return pkgconfigsetup.Datadog().GetBool("admission_controller.mutate_unlabelled") } type podMutationLabelFlag int diff --git a/pkg/clusteragent/admission/mutate/config/config.go b/pkg/clusteragent/admission/mutate/config/config.go index 8321f8f531e15..874d61eab8f04 100644 --- a/pkg/clusteragent/admission/mutate/config/config.go +++ b/pkg/clusteragent/admission/mutate/config/config.go @@ -24,7 +24,7 @@ import ( admCommon "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" 
"github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" apiCommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -75,7 +75,7 @@ var ( agentHostServiceEnvVar = corev1.EnvVar{ Name: agentHostEnvVarName, - Value: config.Datadog().GetString("admission_controller.inject_config.local_service_name") + "." + apiCommon.GetMyNamespace() + ".svc.cluster.local", + Value: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.local_service_name") + "." + apiCommon.GetMyNamespace() + ".svc.cluster.local", } defaultDdEntityIDEnvVar = corev1.EnvVar{ @@ -90,12 +90,12 @@ var ( traceURLSocketEnvVar = corev1.EnvVar{ Name: traceURLEnvVarName, - Value: config.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), + Value: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), } dogstatsdURLSocketEnvVar = corev1.EnvVar{ Name: dogstatsdURLEnvVarName, - Value: config.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), + Value: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), } ) @@ -115,11 +115,11 @@ type Webhook struct { func NewWebhook(wmeta workloadmeta.Component, injectionFilter common.InjectionFilter) *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("admission_controller.inject_config.enabled"), - endpoint: config.Datadog().GetString("admission_controller.inject_config.endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.inject_config.enabled"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.endpoint"), resources: []string{"pods"}, operations: 
[]admiv1.OperationType{admiv1.Create}, - mode: config.Datadog().GetString("admission_controller.inject_config.mode"), + mode: pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.mode"), wmeta: wmeta, injectionFilter: injectionFilter, } @@ -282,13 +282,13 @@ func buildVolume(volumeName, path string, hostpathType corev1.HostPathType, read func injectSocketVolumes(pod *corev1.Pod) bool { var injectedVolNames []string - if config.Datadog().GetBool("admission_controller.inject_config.type_socket_volumes") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.inject_config.type_socket_volumes") { volumes := map[string]string{ DogstatsdSocketVolumeName: strings.TrimPrefix( - config.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), "unix://", + pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.dogstatsd_socket"), "unix://", ), TraceAgentSocketVolumeName: strings.TrimPrefix( - config.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), "unix://", + pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.trace_agent_socket"), "unix://", ), } @@ -302,7 +302,7 @@ func injectSocketVolumes(pod *corev1.Pod) bool { } else { volume, volumeMount := buildVolume( DatadogVolumeName, - config.Datadog().GetString("admission_controller.inject_config.socket_path"), + pkgconfigsetup.Datadog().GetString("admission_controller.inject_config.socket_path"), corev1.HostPathDirectoryOrCreate, true, ) diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go index 86c6c3ecf30c2..91cec20c7c9d2 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go @@ -36,7 +36,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" 
"github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation/k8scp" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation/k8sexec" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/resolvers/usersessions" "github.com/DataDog/datadog-agent/pkg/util/containers" apiserverUtils "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -96,9 +96,9 @@ type WebhookForPods struct { func newWebhookForPods(admissionFunc admission.WebhookFunc) *WebhookForPods { return &WebhookForPods{ name: webhookForPodsName, - isEnabled: config.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && - len(config.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, - endpoint: config.Datadog().GetString("admission_controller.cws_instrumentation.pod_endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && + len(pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.pod_endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, admissionFunc: admissionFunc, @@ -156,9 +156,9 @@ type WebhookForCommands struct { func newWebhookForCommands(admissionFunc admission.WebhookFunc) *WebhookForCommands { return &WebhookForCommands{ name: webhookForCommandsName, - isEnabled: config.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && - len(config.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, - endpoint: config.Datadog().GetString("admission_controller.cws_instrumentation.command_endpoint"), + isEnabled: pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.enabled") && + 
len(pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_name")) > 0, + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.command_endpoint"), resources: []string{"pods/exec"}, operations: []admiv1.OperationType{admiv1.Connect}, admissionFunc: admissionFunc, @@ -205,7 +205,7 @@ func (w *WebhookForCommands) MutateFunc() admission.WebhookFunc { func parseCWSInitContainerResources() (*corev1.ResourceRequirements, error) { var resources = &corev1.ResourceRequirements{Limits: corev1.ResourceList{}, Requests: corev1.ResourceList{}} - if cpu := config.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.cpu"); len(cpu) > 0 { + if cpu := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.cpu"); len(cpu) > 0 { quantity, err := resource.ParseQuantity(cpu) if err != nil { return nil, err @@ -214,7 +214,7 @@ func parseCWSInitContainerResources() (*corev1.ResourceRequirements, error) { resources.Limits[corev1.ResourceCPU] = quantity } - if mem := config.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.memory"); len(mem) > 0 { + if mem := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.init_resources.memory"); len(mem) > 0 { quantity, err := resource.ParseQuantity(mem) if err != nil { return nil, err @@ -291,16 +291,16 @@ func NewCWSInstrumentation(wmeta workloadmeta.Component) (*CWSInstrumentation, e // Parse filters ci.filter, err = containers.NewFilter( containers.GlobalFilter, - config.Datadog().GetStringSlice("admission_controller.cws_instrumentation.include"), - config.Datadog().GetStringSlice("admission_controller.cws_instrumentation.exclude"), + pkgconfigsetup.Datadog().GetStringSlice("admission_controller.cws_instrumentation.include"), + pkgconfigsetup.Datadog().GetStringSlice("admission_controller.cws_instrumentation.exclude"), ) if err != nil { return nil, fmt.Errorf("couldn't 
initialize filter: %w", err) } // Parse init container image - cwsInjectorImageName := config.Datadog().GetString("admission_controller.cws_instrumentation.image_name") - cwsInjectorImageTag := config.Datadog().GetString("admission_controller.cws_instrumentation.image_tag") + cwsInjectorImageName := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_name") + cwsInjectorImageTag := pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.image_tag") cwsInjectorContainerRegistry := common.ContainerRegistry("admission_controller.cws_instrumentation.container_registry") @@ -317,16 +317,16 @@ func NewCWSInstrumentation(wmeta workloadmeta.Component) (*CWSInstrumentation, e } // parse mode - ci.mode, err = ParseInstrumentationMode(config.Datadog().GetString("admission_controller.cws_instrumentation.mode")) + ci.mode, err = ParseInstrumentationMode(pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.mode")) if err != nil { return nil, fmt.Errorf("can't initiatilize CWS Instrumentation: %v", err) } - ci.mountVolumeForRemoteCopy = config.Datadog().GetBool("admission_controller.cws_instrumentation.remote_copy.mount_volume") - ci.directoryForRemoteCopy = config.Datadog().GetString("admission_controller.cws_instrumentation.remote_copy.directory") + ci.mountVolumeForRemoteCopy = pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.remote_copy.mount_volume") + ci.directoryForRemoteCopy = pkgconfigsetup.Datadog().GetString("admission_controller.cws_instrumentation.remote_copy.directory") if ci.mode == RemoteCopy { // build the cluster agent service account - serviceAccountName := config.Datadog().GetString("cluster_agent.service_account_name") + serviceAccountName := pkgconfigsetup.Datadog().GetString("cluster_agent.service_account_name") if len(serviceAccountName) == 0 { return nil, fmt.Errorf("can't initialize CWS Instrumentation in %s mode without providing a service account name 
in config (cluster_agent.service_account_name)", RemoteCopy) } @@ -764,8 +764,8 @@ func injectCWSInitContainer(pod *corev1.Pod, resources *corev1.ResourceRequireme func labelSelectors(useNamespaceSelector bool) (namespaceSelector, objectSelector *metav1.LabelSelector) { var labelSelector metav1.LabelSelector - if config.Datadog().GetBool("admission_controller.cws_instrumentation.mutate_unlabelled") || - config.Datadog().GetBool("admission_controller.mutate_unlabelled") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.cws_instrumentation.mutate_unlabelled") || + pkgconfigsetup.Datadog().GetBool("admission_controller.mutate_unlabelled") { // Accept all, ignore pods if they're explicitly filtered-out labelSelector = metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ diff --git a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go index b48138d2d2ed0..2fd2279eaeaa8 100644 --- a/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go +++ b/pkg/clusteragent/admission/mutate/tagsfromlabels/tags.go @@ -26,7 +26,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/metrics" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -56,8 +56,8 @@ type Webhook struct { func NewWebhook(wmeta workloadmeta.Component, injectionFilter common.InjectionFilter) *Webhook { return &Webhook{ name: webhookName, - isEnabled: config.Datadog().GetBool("admission_controller.inject_tags.enabled"), - endpoint: config.Datadog().GetString("admission_controller.inject_tags.endpoint"), + isEnabled: 
pkgconfigsetup.Datadog().GetBool("admission_controller.inject_tags.enabled"), + endpoint: pkgconfigsetup.Datadog().GetString("admission_controller.inject_tags.endpoint"), resources: []string{"pods"}, operations: []admiv1.OperationType{admiv1.Create}, ownerCacheTTL: ownerCacheTTL(), @@ -270,9 +270,9 @@ func (w *Webhook) getAndCacheOwner(info *ownerInfo, ns string, dc dynamic.Interf } func ownerCacheTTL() time.Duration { - if config.Datadog().IsSet("admission_controller.pod_owners_cache_validity") { // old option. Kept for backwards compatibility - return config.Datadog().GetDuration("admission_controller.pod_owners_cache_validity") * time.Minute + if pkgconfigsetup.Datadog().IsSet("admission_controller.pod_owners_cache_validity") { // old option. Kept for backwards compatibility + return pkgconfigsetup.Datadog().GetDuration("admission_controller.pod_owners_cache_validity") * time.Minute } - return config.Datadog().GetDuration("admission_controller.inject_tags.pod_owners_cache_validity") * time.Minute + return pkgconfigsetup.Datadog().GetDuration("admission_controller.inject_tags.pod_owners_cache_validity") * time.Minute } diff --git a/pkg/clusteragent/admission/patch/provider.go b/pkg/clusteragent/admission/patch/provider.go index 10ff1593b61fb..2b1469533c38a 100644 --- a/pkg/clusteragent/admission/patch/provider.go +++ b/pkg/clusteragent/admission/patch/provider.go @@ -11,8 +11,8 @@ import ( "errors" "github.com/DataDog/datadog-agent/pkg/clusteragent/telemetry" - "github.com/DataDog/datadog-agent/pkg/config" rcclient "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) type patchProvider interface { @@ -21,12 +21,12 @@ type patchProvider interface { } func newPatchProvider(rcClient *rcclient.Client, isLeaderNotif <-chan struct{}, telemetryCollector telemetry.TelemetryCollector, clusterName string) (patchProvider, error) { - if config.IsRemoteConfigEnabled(config.Datadog()) { + if 
pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { return newRemoteConfigProvider(rcClient, isLeaderNotif, telemetryCollector, clusterName) } - if config.Datadog().GetBool("admission_controller.auto_instrumentation.patcher.fallback_to_file_provider") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.auto_instrumentation.patcher.fallback_to_file_provider") { // Use the file config provider for e2e testing only (it replaces RC as a source of configs) - file := config.Datadog().GetString("admission_controller.auto_instrumentation.patcher.file_provider_path") + file := pkgconfigsetup.Datadog().GetString("admission_controller.auto_instrumentation.patcher.file_provider_path") return newfileProvider(file, isLeaderNotif, clusterName), nil } return nil, errors.New("remote config is disabled") diff --git a/pkg/clusteragent/admission/start.go b/pkg/clusteragent/admission/start.go index 596ed8caa4d10..6da39581700fc 100644 --- a/pkg/clusteragent/admission/start.go +++ b/pkg/clusteragent/admission/start.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/controllers/secret" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/controllers/webhook" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -37,18 +37,18 @@ type ControllerContext struct { // StartControllers starts the secret and webhook controllers func StartControllers(ctx ControllerContext, wmeta workloadmeta.Component, pa workload.PodPatcher) ([]webhook.MutatingWebhook, error) { - if !config.Datadog().GetBool("admission_controller.enabled") { + if !pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") { 
log.Info("Admission controller is disabled") return nil, nil } certConfig := secret.NewCertConfig( - config.Datadog().GetDuration("admission_controller.certificate.expiration_threshold")*time.Hour, - config.Datadog().GetDuration("admission_controller.certificate.validity_bound")*time.Hour) + pkgconfigsetup.Datadog().GetDuration("admission_controller.certificate.expiration_threshold")*time.Hour, + pkgconfigsetup.Datadog().GetDuration("admission_controller.certificate.validity_bound")*time.Hour) secretConfig := secret.NewConfig( common.GetResourcesNamespace(), - config.Datadog().GetString("admission_controller.certificate.secret_name"), - config.Datadog().GetString("admission_controller.service_name"), + pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name"), + pkgconfigsetup.Datadog().GetString("admission_controller.service_name"), certConfig) secretController := secret.NewController( ctx.Client, diff --git a/pkg/clusteragent/admission/status.go b/pkg/clusteragent/admission/status.go index 3492640df251b..315f30ea26190 100644 --- a/pkg/clusteragent/admission/status.go +++ b/pkg/clusteragent/admission/status.go @@ -16,7 +16,7 @@ import ( "strconv" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/certificate" @@ -29,14 +29,14 @@ import ( // GetStatus returns status info for the secret and webhook controllers. 
func GetStatus(apiCl kubernetes.Interface) map[string]interface{} { status := make(map[string]interface{}) - if !config.Datadog().GetBool("admission_controller.enabled") { + if !pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") { status["Disabled"] = "The admission controller is not enabled on the Cluster Agent" return status } ns := common.GetResourcesNamespace() - webhookName := config.Datadog().GetString("admission_controller.webhook_name") - secretName := config.Datadog().GetString("admission_controller.certificate.secret_name") + webhookName := pkgconfigsetup.Datadog().GetString("admission_controller.webhook_name") + secretName := pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name") status["WebhookName"] = webhookName status["SecretName"] = fmt.Sprintf("%s/%s", ns, secretName) diff --git a/pkg/clusteragent/admission/util.go b/pkg/clusteragent/admission/util.go index 4f385ba151b64..90f0f893add60 100644 --- a/pkg/clusteragent/admission/util.go +++ b/pkg/clusteragent/admission/util.go @@ -12,7 +12,7 @@ import ( "strconv" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -24,7 +24,7 @@ import ( // Returns true if `namespace_selector_fallback` is enabled and k8s version is between 1.10 and 1.14 (included). // Kubernetes 1.15+ supports object selectors. 
func useNamespaceSelector(discoveryCl discovery.DiscoveryInterface) (bool, error) { - if !config.Datadog().GetBool("admission_controller.namespace_selector_fallback") { + if !pkgconfigsetup.Datadog().GetBool("admission_controller.namespace_selector_fallback") { return false, nil } diff --git a/pkg/clusteragent/api/leader_forwarder.go b/pkg/clusteragent/api/leader_forwarder.go index d77d0bb22e901..a5989f62d112b 100644 --- a/pkg/clusteragent/api/leader_forwarder.go +++ b/pkg/clusteragent/api/leader_forwarder.go @@ -20,7 +20,7 @@ import ( "github.com/cihub/seelog" - "github.com/DataDog/datadog-agent/pkg/config" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) const ( @@ -43,7 +43,7 @@ type LeaderForwarder struct { // NewLeaderForwarder initializes a new LeaderForwarder instance and is used for test purposes func NewLeaderForwarder(apiPort, maxConnections int) *LeaderForwarder { // Use a stack depth of 4 on top of the default one to get a relevant filename in the stdlib - logWriter, _ := config.NewLogWriter(4, seelog.DebugLvl) + logWriter, _ := pkglogsetup.NewLogWriter(4, seelog.DebugLvl) return &LeaderForwarder{ apiPort: strconv.Itoa(apiPort), transport: &http.Transport{ diff --git a/pkg/clusteragent/api/leader_handler.go b/pkg/clusteragent/api/leader_handler.go index 04f744bc6d763..b87e5197d7240 100644 --- a/pkg/clusteragent/api/leader_handler.go +++ b/pkg/clusteragent/api/leader_handler.go @@ -12,7 +12,7 @@ package api import ( "net/http" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -61,7 +61,7 @@ func WithLeaderProxyHandler(handlerName string, preHandler RequestPreHandler, le lph := LeaderProxyHandler{ handlerName: handlerName, leaderForwarder: GetGlobalLeaderForwarder(), - leaderElectionEnabled: 
config.Datadog().GetBool("leader_election"), + leaderElectionEnabled: pkgconfigsetup.Datadog().GetBool("leader_election"), preHandler: preHandler, leaderHandler: leaderHandler, } diff --git a/pkg/clusteragent/autoscaling/custommetrics/provider.go b/pkg/clusteragent/autoscaling/custommetrics/provider.go index 84041c0269f26..5d50c7960e00d 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/provider.go +++ b/pkg/clusteragent/autoscaling/custommetrics/provider.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/custom-metrics-apiserver/pkg/provider" "sigs.k8s.io/custom-metrics-apiserver/pkg/provider/defaults" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -53,7 +53,7 @@ type datadogProvider struct { // NewDatadogProvider creates a Custom Metrics and External Metrics Provider. func NewDatadogProvider(ctx context.Context, client dynamic.Interface, mapper apimeta.RESTMapper, store Store) provider.ExternalMetricsProvider { - maxAge := config.Datadog().GetInt64("external_metrics_provider.local_copy_refresh_rate") + maxAge := pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.local_copy_refresh_rate") d := &datadogProvider{ client: client, mapper: mapper, diff --git a/pkg/clusteragent/autoscaling/custommetrics/status.go b/pkg/clusteragent/autoscaling/custommetrics/status.go index a31e98e525169..72da47185b143 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/status.go +++ b/pkg/clusteragent/autoscaling/custommetrics/status.go @@ -12,19 +12,19 @@ import ( "k8s.io/client-go/kubernetes" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" ) // GetStatus returns status info for the Custom Metrics Server. 
func GetStatus(apiCl kubernetes.Interface) map[string]interface{} { status := make(map[string]interface{}) - if !config.Datadog().GetBool("external_metrics_provider.enabled") { + if !pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enabled") { status["Disabled"] = "The external metrics provider is not enabled on the Cluster Agent" return status } - if config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") { status["NoStatus"] = "External metrics provider uses DatadogMetric - Check status directly from Kubernetes with: `kubectl get datadogmetric`" return status } diff --git a/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go b/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go index b88453e1a08c6..9dfbe88ae11d8 100644 --- a/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go +++ b/pkg/clusteragent/autoscaling/custommetrics/store_configmap.go @@ -14,7 +14,7 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -40,7 +40,7 @@ type configMapStore struct { // GetConfigmapName returns the name of the ConfigMap used to store the state of the Custom Metrics Provider func GetConfigmapName() string { - return config.Datadog().GetString("hpa_configmap_name") + return pkgconfigsetup.Datadog().GetString("hpa_configmap_name") } // NewConfigMapStore returns a new store backed by a configmap. 
The configmap will be created diff --git a/pkg/clusteragent/autoscaling/externalmetrics/provider.go b/pkg/clusteragent/autoscaling/externalmetrics/provider.go index 06f8a7d960dd6..bf07e1251413e 100644 --- a/pkg/clusteragent/autoscaling/externalmetrics/provider.go +++ b/pkg/clusteragent/autoscaling/externalmetrics/provider.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/custom-metrics-apiserver/pkg/provider/defaults" datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" @@ -59,17 +59,17 @@ func NewDatadogMetricProvider(ctx context.Context, apiCl *apiserver.APIClient, d return nil, fmt.Errorf("Unable to create DatadogMetricProvider as LeaderElection failed with: %v", err) } - aggregator := config.Datadog().GetString("external_metrics.aggregator") - rollup := config.Datadog().GetInt("external_metrics_provider.rollup") + aggregator := pkgconfigsetup.Datadog().GetString("external_metrics.aggregator") + rollup := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.rollup") setQueryConfigValues(aggregator, rollup) - refreshPeriod := config.Datadog().GetInt64("external_metrics_provider.refresh_period") - metricsMaxAge = int64(math.Max(config.Datadog().GetFloat64("external_metrics_provider.max_age"), float64(3*rollup))) - metricsQueryValidityPeriod = int64(config.Datadog().GetFloat64("external_metrics_provider.query_validity_period")) - splitBatchBackoffOnErrors := config.Datadog().GetBool("external_metrics_provider.split_batches_with_backoff") + refreshPeriod := pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.refresh_period") + metricsMaxAge = 
int64(math.Max(pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.max_age"), float64(3*rollup))) + metricsQueryValidityPeriod = int64(pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.query_validity_period")) + splitBatchBackoffOnErrors := pkgconfigsetup.Datadog().GetBool("external_metrics_provider.split_batches_with_backoff") autogenNamespace := common.GetResourcesNamespace() - autogenEnabled := config.Datadog().GetBool("external_metrics_provider.enable_datadogmetric_autogen") - wpaEnabled := config.Datadog().GetBool("external_metrics_provider.wpa_controller") + autogenEnabled := pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enable_datadogmetric_autogen") + wpaEnabled := pkgconfigsetup.Datadog().GetBool("external_metrics_provider.wpa_controller") provider := &datadogMetricProvider{ apiCl: apiCl, diff --git a/pkg/clusteragent/autoscaling/workload/controller.go b/pkg/clusteragent/autoscaling/workload/controller.go index 27ae6f77070aa..d6d8b9dd22b41 100644 --- a/pkg/clusteragent/autoscaling/workload/controller.go +++ b/pkg/clusteragent/autoscaling/workload/controller.go @@ -14,6 +14,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" scaleclient "k8s.io/client-go/scale" @@ -241,13 +242,13 @@ func (c *Controller) syncPodAutoscaler(ctx context.Context, key, ns, name string // and compare it with the one in the PodAutoscaler. 
If they differ, we should update the PodAutoscaler // otherwise store the Generation if podAutoscalerInternal.Generation() != podAutoscaler.Generation { - localHash, err := autoscaling.ObjectHash(podAutoscalerInternal.Spec) + localHash, err := autoscaling.ObjectHash(podAutoscalerInternal.Spec()) if err != nil { c.store.Unlock(key) return autoscaling.Requeue, fmt.Errorf("Failed to compute Spec hash for PodAutoscaler: %s/%s, err: %v", ns, name, err) } - remoteHash, err := autoscaling.ObjectHash(podAutoscaler.Spec) + remoteHash, err := autoscaling.ObjectHash(&podAutoscaler.Spec) if err != nil { c.store.Unlock(key) return autoscaling.Requeue, fmt.Errorf("Failed to compute Spec hash for PodAutoscaler: %s/%s, err: %v", ns, name, err) @@ -282,12 +283,31 @@ func (c *Controller) syncPodAutoscaler(ctx context.Context, key, ns, name string return autoscaling.NoRequeue, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, validationErr, podAutoscalerInternal, podAutoscaler) } + // Get autoscaler target + targetGVK, targetErr := podAutoscalerInternal.TargetGVK() + if targetErr != nil { + podAutoscalerInternal.SetError(targetErr) + return autoscaling.NoRequeue, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, targetErr, podAutoscalerInternal, podAutoscaler) + } + target := NamespacedPodOwner{ + Namespace: podAutoscalerInternal.Namespace(), + Name: podAutoscalerInternal.Spec().TargetRef.Name, + Kind: targetGVK.Kind, + } + // Now that everything is synced, we can perform the actual processing - result, scalingErr := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal) + result, scalingErr := c.handleScaling(ctx, podAutoscaler, &podAutoscalerInternal, targetGVK, target) + + // Update current replicas + pods := c.podWatcher.GetPodsForOwner(target) + currentReplicas := len(pods) + podAutoscalerInternal.SetCurrentReplicas(int32(currentReplicas)) + + // Update status based on latest state return result, c.updateAutoscalerStatusAndUnlock(ctx, key, ns, name, scalingErr, 
podAutoscalerInternal, podAutoscaler) } -func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, podAutoscalerInternal *model.PodAutoscalerInternal) (autoscaling.ProcessResult, error) { +func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, podAutoscalerInternal *model.PodAutoscalerInternal, targetGVK schema.GroupVersionKind, target NamespacedPodOwner) (autoscaling.ProcessResult, error) { // TODO: While horizontal scaling is in progress we should not start vertical scaling // While vertical scaling is in progress we should only allow horizontal upscale horizontalRes, err := c.horizontalController.sync(ctx, podAutoscaler, podAutoscalerInternal) @@ -295,7 +315,7 @@ func (c *Controller) handleScaling(ctx context.Context, podAutoscaler *datadoghq return horizontalRes, err } - verticalRes, err := c.verticalController.sync(ctx, podAutoscaler, podAutoscalerInternal) + verticalRes, err := c.verticalController.sync(ctx, podAutoscaler, podAutoscalerInternal, targetGVK, target) if err != nil { return verticalRes, err } diff --git a/pkg/clusteragent/autoscaling/workload/controller_horizontal.go b/pkg/clusteragent/autoscaling/workload/controller_horizontal.go index 9c3c043d02c21..3a9c0ac675033 100644 --- a/pkg/clusteragent/autoscaling/workload/controller_horizontal.go +++ b/pkg/clusteragent/autoscaling/workload/controller_horizontal.go @@ -79,9 +79,6 @@ func (hr *horizontalController) sync(ctx context.Context, podAutoscaler *datadog return autoscaling.Requeue, err } - // Update current replicas - autoscalerInternal.SetCurrentReplicas(scale.Status.Replicas) - return hr.performScaling(ctx, podAutoscaler, autoscalerInternal, gr, scale) } diff --git a/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go b/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go index 009811fbf4a39..f306f680bbc34 100644 --- 
a/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go +++ b/pkg/clusteragent/autoscaling/workload/controller_horizontal_test.go @@ -111,7 +111,6 @@ func (f *horizontalControllerFixture) testScalingDecision(args horizontalScaling f.scaler.AssertNumberOfCalls(f.t, "get", 1) f.scaler.AssertNumberOfCalls(f.t, "update", expectedUpdateCalls) - args.fakePai.CurrentReplicas = pointer.Ptr[int32](args.statusReplicas) if scaleActionExpected && args.scaleError == nil { // Update fakePai with the new expected state action := &datadoghq.DatadogPodAutoscalerHorizontalAction{ @@ -142,8 +141,9 @@ func TestHorizontalControllerSyncPrerequisites(t *testing.T) { autoscalerName := "test" fakePai := &model.FakePodAutoscalerInternal{ - Namespace: autoscalerNamespace, - Name: autoscalerName, + Namespace: autoscalerNamespace, + Name: autoscalerName, + CurrentReplicas: pointer.Ptr[int32](5), } // Test case: no Spec, no action taken @@ -165,7 +165,7 @@ func TestHorizontalControllerSyncPrerequisites(t *testing.T) { model.AssertPodAutoscalersEqual(t, fakePai.Build(), autoscaler) // Test case: Correct Spec and GVK, but no scaling values - // Should only update replica count + // Should do nothing expectedGVK := schema.GroupVersionKind{ Group: "apps", Version: "v1", @@ -304,7 +304,8 @@ func TestHorizontalControllerSyncScaleDecisions(t *testing.T) { Replicas: 5, }, }, - TargetGVK: expectedGVK, + TargetGVK: expectedGVK, + CurrentReplicas: pointer.Ptr[int32](5), } // Step: same number of replicas, no action taken, only updating status diff --git a/pkg/clusteragent/autoscaling/workload/controller_vertical.go b/pkg/clusteragent/autoscaling/workload/controller_vertical.go index 5ea9d8d4f300a..2a940490d875a 100644 --- a/pkg/clusteragent/autoscaling/workload/controller_vertical.go +++ b/pkg/clusteragent/autoscaling/workload/controller_vertical.go @@ -56,7 +56,7 @@ func newVerticalController(clock clock.Clock, eventRecorder record.EventRecorder return res } -func (u *verticalController) 
sync(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, autoscalerInternal *model.PodAutoscalerInternal) (autoscaling.ProcessResult, error) { +func (u *verticalController) sync(ctx context.Context, podAutoscaler *datadoghq.DatadogPodAutoscaler, autoscalerInternal *model.PodAutoscalerInternal, targetGVK schema.GroupVersionKind, target NamespacedPodOwner) (autoscaling.ProcessResult, error) { scalingValues := autoscalerInternal.ScalingValues() // Check if the autoscaler has a vertical scaling recommendation @@ -67,18 +67,6 @@ func (u *verticalController) sync(ctx context.Context, podAutoscaler *datadoghq. } recomendationID := scalingValues.Vertical.ResourcesHash - targetGVK, err := autoscalerInternal.TargetGVK() - if err != nil { - autoscalerInternal.SetError(err) - return autoscaling.NoRequeue, err - } - - // Get the pod owner from the workload - target := NamespacedPodOwner{ - Namespace: autoscalerInternal.Namespace(), - Name: autoscalerInternal.Spec().TargetRef.Name, - Kind: targetGVK.Kind, - } // Get the pods for the pod owner pods := u.podWatcher.GetPodsForOwner(target) diff --git a/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go b/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go index ece79222c8c3d..609d4264fa968 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_isolate_test.go @@ -10,19 +10,20 @@ package clusterchecks import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" - "github.com/stretchr/testify/assert" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestIsolateCheckSuccessful(t *testing.T) { testDispatcher := newDispatcher() testDispatcher.store.nodes["A"] = 
newNodeStore("A", "") - testDispatcher.store.nodes["A"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["A"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["B"] = newNodeStore("B", "") - testDispatcher.store.nodes["B"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["B"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["A"].clcRunnerStats = map[string]types.CLCRunnerStats{ "checkA0": { @@ -100,9 +101,9 @@ func TestIsolateCheckSuccessful(t *testing.T) { func TestIsolateNonExistentCheckFails(t *testing.T) { testDispatcher := newDispatcher() testDispatcher.store.nodes["A"] = newNodeStore("A", "") - testDispatcher.store.nodes["A"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["A"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["B"] = newNodeStore("B", "") - testDispatcher.store.nodes["B"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["B"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["A"].clcRunnerStats = map[string]types.CLCRunnerStats{ "checkA0": { @@ -178,7 +179,7 @@ func TestIsolateNonExistentCheckFails(t *testing.T) { func TestIsolateCheckOnlyOneRunnerFails(t *testing.T) { testDispatcher := newDispatcher() testDispatcher.store.nodes["A"] = newNodeStore("A", "") - testDispatcher.store.nodes["A"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["A"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["A"].clcRunnerStats = map[string]types.CLCRunnerStats{ "checkA0": { diff --git a/pkg/clusteragent/clusterchecks/dispatcher_main.go b/pkg/clusteragent/clusterchecks/dispatcher_main.go index 63048637bb61a..398bf21d2c5c5 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_main.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_main.go @@ -13,7 +13,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -38,10 +38,10 @@ func newDispatcher() *dispatcher { d := &dispatcher{ store: newClusterStore(), } - d.nodeExpirationSeconds = config.Datadog().GetInt64("cluster_checks.node_expiration_timeout") - d.extraTags = config.Datadog().GetStringSlice("cluster_checks.extra_tags") + d.nodeExpirationSeconds = pkgconfigsetup.Datadog().GetInt64("cluster_checks.node_expiration_timeout") + d.extraTags = pkgconfigsetup.Datadog().GetStringSlice("cluster_checks.extra_tags") - excludedChecks := config.Datadog().GetStringSlice("cluster_checks.exclude_checks") + excludedChecks := pkgconfigsetup.Datadog().GetStringSlice("cluster_checks.exclude_checks") // This option will almost always be empty if len(excludedChecks) > 0 { d.excludedChecks = make(map[string]struct{}, len(excludedChecks)) @@ -50,7 +50,7 @@ func newDispatcher() *dispatcher { } } - excludedChecksFromDispatching := config.Datadog().GetStringSlice("cluster_checks.exclude_checks_from_dispatching") + excludedChecksFromDispatching := pkgconfigsetup.Datadog().GetStringSlice("cluster_checks.exclude_checks_from_dispatching") // This option will almost always be empty if len(excludedChecksFromDispatching) > 0 { d.excludedChecksFromDispatching = make(map[string]struct{}, len(excludedChecksFromDispatching)) @@ -59,20 +59,20 @@ func newDispatcher() *dispatcher { } } - d.rebalancingPeriod = config.Datadog().GetDuration("cluster_checks.rebalance_period") + d.rebalancingPeriod = pkgconfigsetup.Datadog().GetDuration("cluster_checks.rebalance_period") hname, _ := hostname.Get(context.TODO()) clusterTagValue := clustername.GetClusterName(context.TODO(), hname) - clusterTagName := config.Datadog().GetString("cluster_checks.cluster_tag_name") + clusterTagName := 
pkgconfigsetup.Datadog().GetString("cluster_checks.cluster_tag_name") if clusterTagValue != "" { - if clusterTagName != "" && !config.Datadog().GetBool("disable_cluster_name_tag_key") { + if clusterTagName != "" && !pkgconfigsetup.Datadog().GetBool("disable_cluster_name_tag_key") { d.extraTags = append(d.extraTags, fmt.Sprintf("%s:%s", clusterTagName, clusterTagValue)) log.Info("Adding both tags cluster_name and kube_cluster_name. You can use 'disable_cluster_name_tag_key' in the Agent config to keep the kube_cluster_name tag only") } d.extraTags = append(d.extraTags, fmt.Sprintf("kube_cluster_name:%s", clusterTagValue)) } - d.advancedDispatching = config.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") + d.advancedDispatching = pkgconfigsetup.Datadog().GetBool("cluster_checks.advanced_dispatching_enabled") if !d.advancedDispatching { return d } diff --git a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go index 6b03a704d5953..2e7dd891ab912 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -198,12 +198,12 @@ func (d *dispatcher) updateRunnersStats() { ip := node.clientIP node.RUnlock() - if config.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { workers, err := d.clcRunnersClient.GetRunnerWorkers(ip) if err != nil { // This can happen in 
old versions of the runners that do not expose this information. log.Debugf("Cannot get number of workers for node %s with IP %s. Assuming default. Error: %v", name, node.clientIP, err) - node.workers = config.DefaultNumWorkers + node.workers = pkgconfigsetup.DefaultNumWorkers } else { node.workers = workers.Count } diff --git a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go index 981640d079eb0..c7da3d88d96eb 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_rebalance.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -172,7 +172,7 @@ func (d *dispatcher) moveCheck(src, dest, checkID string) error { } func (d *dispatcher) rebalance(force bool) []types.RebalanceResponse { - if config.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.rebalance_with_utilization") { return d.rebalanceUsingUtilization(force) } @@ -339,7 +339,7 @@ func (d *dispatcher) rebalanceUsingUtilization(force bool) []types.RebalanceResp // checks. 
currentUtilizationStdDev := currentChecksDistribution.utilizationStdDev() proposedUtilizationStdDev := proposedDistribution.utilizationStdDev() - minPercImprovement := config.Datadog().GetInt("cluster_checks.rebalance_min_percentage_improvement") + minPercImprovement := pkgconfigsetup.Datadog().GetInt("cluster_checks.rebalance_min_percentage_improvement") if force || rebalanceIsWorthIt(currentChecksDistribution, proposedDistribution, minPercImprovement) { diff --git a/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go b/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go index 9550adc53b2f1..09747e5c82abf 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_rebalance_test.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestRebalance(t *testing.T) { @@ -1522,9 +1522,9 @@ func TestRebalanceUsingUtilization(t *testing.T) { testDispatcher.store.active = true testDispatcher.store.nodes["node1"] = newNodeStore("node1", "") - testDispatcher.store.nodes["node1"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["node1"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["node2"] = newNodeStore("node2", "") - testDispatcher.store.nodes["node2"].workers = config.DefaultNumWorkers + testDispatcher.store.nodes["node2"].workers = pkgconfigsetup.DefaultNumWorkers testDispatcher.store.nodes["node1"].clcRunnerStats = map[string]types.CLCRunnerStats{ // This is the check with the highest utilization. 
The code will try to diff --git a/pkg/clusteragent/clusterchecks/dispatcher_test.go b/pkg/clusteragent/clusterchecks/dispatcher_test.go index 12ffdb069206d..f2c31184ff8b7 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_test.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_test.go @@ -17,7 +17,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" @@ -469,7 +468,7 @@ func TestReset(t *testing.T) { } func TestPatchConfiguration(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) checkConfig := integration.Config{ Name: "test", @@ -507,7 +506,7 @@ func TestPatchConfiguration(t *testing.T) { } func TestPatchEndpointsConfiguration(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) checkConfig := integration.Config{ Name: "test", @@ -540,7 +539,7 @@ func TestPatchEndpointsConfiguration(t *testing.T) { } func TestExtraTags(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) for _, tc := range []struct { extraTagsConfig []string diff --git a/pkg/clusteragent/clusterchecks/handler.go b/pkg/clusteragent/clusterchecks/handler.go index ce2a51c252fe7..fff45ba64e3d1 100644 --- a/pkg/clusteragent/clusterchecks/handler.go +++ b/pkg/clusteragent/clusterchecks/handler.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/scheduler" "github.com/DataDog/datadog-agent/pkg/clusteragent/api" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -66,12 +66,12 @@ func NewHandler(ac pluggableAutoConfig) (*Handler, error) { h := &Handler{ autoconfig: ac, leaderStatusFreq: 5 * time.Second, - warmupDuration: config.Datadog().GetDuration("cluster_checks.warmup_duration") * time.Second, + warmupDuration: pkgconfigsetup.Datadog().GetDuration("cluster_checks.warmup_duration") * time.Second, leadershipChan: make(chan state, 1), dispatcher: newDispatcher(), } - if config.Datadog().GetBool("leader_election") { + if pkgconfigsetup.Datadog().GetBool("leader_election") { h.leaderForwarder = api.GetGlobalLeaderForwarder() callback, err := getLeaderIPCallback() if err != nil { diff --git a/pkg/clusteragent/clusterchecks/status.go b/pkg/clusteragent/clusterchecks/status.go index 858569baaf865..7877b2e8f932a 100644 --- a/pkg/clusteragent/clusterchecks/status.go +++ b/pkg/clusteragent/clusterchecks/status.go @@ -12,7 +12,7 @@ import ( "io" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Provider provides the functionality to populate the status output @@ -49,7 +49,7 @@ func (Provider) HTML(_ bool, _ io.Writer) error { } func populateStatus(stats map[string]interface{}) { - if config.Datadog().GetBool("cluster_checks.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") { if cchecks, err := GetStats(); err == nil { stats["clusterchecks"] = cchecks } diff --git a/pkg/clusteragent/languagedetection/patcher.go b/pkg/clusteragent/languagedetection/patcher.go index 3d8379a5a71a5..e1171876c65c3 100644 --- a/pkg/clusteragent/languagedetection/patcher.go +++ b/pkg/clusteragent/languagedetection/patcher.go @@ -26,7 +26,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -66,8 +66,8 @@ func newLanguagePatcher(ctx context.Context, store workloadmeta.Component, logge logger: logger, queue: workqueue.NewRateLimitingQueueWithConfig( workqueue.NewItemExponentialFailureRateLimiter( - config.Datadog().GetDuration("cluster_agent.language_detection.patcher.base_backoff"), - config.Datadog().GetDuration("cluster_agent.language_detection.patcher.max_backoff"), + pkgconfigsetup.Datadog().GetDuration("cluster_agent.language_detection.patcher.base_backoff"), + pkgconfigsetup.Datadog().GetDuration("cluster_agent.language_detection.patcher.max_backoff"), ), workqueue.RateLimitingQueueConfig{ Name: subsystem, diff --git a/pkg/clusteragent/orchestrator/status.go b/pkg/clusteragent/orchestrator/status.go index 27d56a681a88f..26ea3dab2d428 100644 --- a/pkg/clusteragent/orchestrator/status.go +++ b/pkg/clusteragent/orchestrator/status.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/orchestrator" orchcfg "github.com/DataDog/datadog-agent/pkg/orchestrator/config" pkgorchestratormodel "github.com/DataDog/datadog-agent/pkg/orchestrator/model" @@ -41,12 +41,12 @@ type stats struct { // GetStatus returns status info for the orchestrator explorer. 
func GetStatus(ctx context.Context, apiCl kubernetes.Interface) map[string]interface{} { status := make(map[string]interface{}) - if !config.Datadog().GetBool("orchestrator_explorer.enabled") { + if !pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.enabled") { status["Disabled"] = "The orchestrator explorer is not enabled on the Cluster Agent" return status } - if !config.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { status["Disabled"] = "Leader election is not enabled on the Cluster Agent. The orchestrator explorer needs leader election for resource collection." return status } @@ -81,7 +81,7 @@ func GetStatus(ctx context.Context, apiCl kubernetes.Interface) map[string]inter setSkippedResourcesInformationDCAMode(status) // rewriting DCA Mode in case we are running in cluster check mode. - if orchestrator.KubernetesResourceCache.ItemCount() == 0 && config.Datadog().GetBool("cluster_checks.enabled") { + if orchestrator.KubernetesResourceCache.ItemCount() == 0 && pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") { // we need to check first whether we have dispatched checks to CLC stats, err := clusterchecks.GetStats() if err != nil { @@ -102,11 +102,11 @@ func GetStatus(ctx context.Context, apiCl kubernetes.Interface) map[string]inter } // get options - if config.Datadog().GetBool("orchestrator_explorer.container_scrubbing.enabled") { + if pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.container_scrubbing.enabled") { status["ContainerScrubbing"] = "Container scrubbing: enabled" } - if config.Datadog().GetBool("orchestrator_explorer.manifest_collection.enabled") { + if pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.manifest_collection.enabled") { status["ManifestCollection"] = "Manifest collection: enabled" } @@ -252,7 +252,7 @@ func (Provider) HTML(_ bool, _ io.Writer) error { func populateStatus(stats map[string]interface{}) { apiCl, apiErr := apiserver.GetAPIClient() - if 
config.Datadog().GetBool("orchestrator_explorer.enabled") { + if pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.enabled") { if apiErr != nil { stats["orchestrator"] = map[string]string{"Error": apiErr.Error()} } else { diff --git a/pkg/clusteragent/telemetry/collector.go b/pkg/clusteragent/telemetry/collector.go index 801d8b8e21fd2..9e772bd44b9b8 100644 --- a/pkg/clusteragent/telemetry/collector.go +++ b/pkg/clusteragent/telemetry/collector.go @@ -16,7 +16,7 @@ import ( "strconv" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -101,7 +101,7 @@ func httpClientFactory(timeout time.Duration) func() *http.Client { return &http.Client{ Timeout: timeout, // reusing core agent HTTP transport to benefit from proxy settings. - Transport: httputils.CreateHTTPTransport(config.Datadog()), + Transport: httputils.CreateHTTPTransport(pkgconfigsetup.Datadog()), } } } @@ -112,7 +112,7 @@ func httpClientFactory(timeout time.Duration) func() *http.Client { func NewCollector(rcClientId string, kubernetesClusterId string) TelemetryCollector { return &telemetryCollector{ client: httputils.NewResetClient(httpClientResetInterval, httpClientFactory(httpClientTimeout)), - host: utils.GetMainEndpoint(config.Datadog(), mainEndpointPrefix, mainEndpointUrlKey), + host: utils.GetMainEndpoint(pkgconfigsetup.Datadog(), mainEndpointPrefix, mainEndpointUrlKey), userAgent: "Datadog Cluster Agent", rcClientId: rcClientId, kubernetesClusterId: kubernetesClusterId, @@ -154,12 +154,12 @@ func (tc *telemetryCollector) sendRemoteConfigEvent(eventName string, event ApmR log.Errorf("Error while trying to create a web request for a remote config event: %v", err) return } - if !config.Datadog().IsSet("api_key") { + if !pkgconfigsetup.Datadog().IsSet("api_key") { 
return } req.Header.Add("Content-Type", "application/json") req.Header.Add("User-Agent", tc.userAgent) - req.Header.Add("DD-API-KEY", config.Datadog().GetString("api_key")) + req.Header.Add("DD-API-KEY", pkgconfigsetup.Datadog().GetString("api_key")) req.Header.Add("Content-Length", bodyLen) resp, err := tc.client.Do(req) diff --git a/pkg/clusteragent/telemetry/collector_test.go b/pkg/clusteragent/telemetry/collector_test.go index 9835c3a1caaca..5e9a085d8bc47 100644 --- a/pkg/clusteragent/telemetry/collector_test.go +++ b/pkg/clusteragent/telemetry/collector_test.go @@ -14,7 +14,7 @@ import ( "net/http/httptest" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" ) @@ -57,7 +57,7 @@ func TestTelemetryPath(t *testing.T) { collector := NewCollector(testRcClientId, testKubernetesClusterId) collector.SetTestHost(server.URL) - config.Datadog().SetWithoutSource("api_key", "dummy") + pkgconfigsetup.Datadog().SetWithoutSource("api_key", "dummy") var reqCount int var path string diff --git a/pkg/collector/check/jmx.go b/pkg/collector/check/jmx.go index 9cf15a9aad5ac..ab88b4eb98288 100644 --- a/pkg/collector/check/jmx.go +++ b/pkg/collector/check/jmx.go @@ -9,7 +9,7 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - agentconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // IsJMXConfig checks if a certain YAML config contains at least one instance of a JMX config @@ -25,7 +25,7 @@ func IsJMXConfig(config integration.Config) bool { // IsJMXInstance checks if a certain YAML instance is a JMX config func IsJMXInstance(name string, instance integration.Data, initConfig integration.Data) bool { - if _, ok := agentconfig.StandardJMXIntegrations[name]; ok { + if _, ok := pkgconfigsetup.StandardJMXIntegrations[name]; ok { return true } diff 
--git a/pkg/collector/check/stats/stats.go b/pkg/collector/check/stats/stats.go index d7a78363271d2..5270dd79c0051 100644 --- a/pkg/collector/check/stats/stats.go +++ b/pkg/collector/check/stats/stats.go @@ -13,7 +13,7 @@ import ( "github.com/mitchellh/mapstructure" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -145,14 +145,14 @@ func NewStats(c StatsCheck) *Stats { CheckVersion: c.Version(), CheckConfigSource: c.ConfigSource(), Interval: c.Interval(), - telemetry: utils.IsCheckTelemetryEnabled(c.String(), config.Datadog()), + telemetry: utils.IsCheckTelemetryEnabled(c.String(), pkgconfigsetup.Datadog()), EventPlatformEvents: make(map[string]int64), TotalEventPlatformEvents: make(map[string]int64), } // We are interested in a check's run state values even when they are 0 so we // initialize them here explicitly - if stats.telemetry && utils.IsTelemetryEnabled(config.Datadog()) { + if stats.telemetry && utils.IsTelemetryEnabled(pkgconfigsetup.Datadog()) { tlmRuns.InitializeToZero(stats.CheckName, runCheckFailureTag) tlmRuns.InitializeToZero(stats.CheckName, runCheckSuccessTag) } diff --git a/pkg/collector/corechecks/checkbase.go b/pkg/collector/corechecks/checkbase.go index 68d410e5d7bbc..39a6a5b8df53e 100644 --- a/pkg/collector/corechecks/checkbase.go +++ b/pkg/collector/corechecks/checkbase.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" 
"github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -64,7 +64,7 @@ func NewCheckBaseWithInterval(name string, defaultInterval time.Duration) CheckB checkName: name, checkID: checkid.ID(name), checkInterval: defaultInterval, - telemetry: utils.IsCheckTelemetryEnabled(name, config.Datadog()), + telemetry: utils.IsCheckTelemetryEnabled(name, pkgconfigsetup.Datadog()), } } diff --git a/pkg/collector/corechecks/cluster/helm/helm.go b/pkg/collector/corechecks/cluster/helm/helm.go index 11e42d1c4b0e2..d9392b85873dd 100644 --- a/pkg/collector/corechecks/cluster/helm/helm.go +++ b/pkg/collector/corechecks/cluster/helm/helm.go @@ -27,7 +27,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -95,7 +95,7 @@ func newCheck() check.Check { CheckBase: core.NewCheckBase(CheckName), instance: &checkConfig{}, store: newReleasesStore(), - runLeaderElection: !config.IsCLCRunner(), + runLeaderElection: !pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), eventsManager: &eventsManager{}, } } @@ -456,7 +456,7 @@ func isManagedByHelm(object metav1.Object) bool { } func isLeader() (bool, error) { - if !config.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return false, errors.New("leader election not enabled. 
The check will not run") } diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go index ef141ec4f61ef..367bedd234317 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go @@ -24,7 +24,8 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/ksm/customresources" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" kubestatemetrics "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/builder" ksmstore "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/store" @@ -190,7 +191,7 @@ type KSMConfig struct { // KSMCheck wraps the config and the metric stores needed to run the check type KSMCheck struct { core.CheckBase - agentConfig ddconfig.Config + agentConfig model.Config instance *KSMConfig allStores [][]cache.Store telemetry *telemetryCache @@ -240,7 +241,7 @@ func init() { // Configure prepares the configuration of the KSM check instance func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConfigDigest uint64, config, initConfig integration.Data, source string) error { k.BuildID(integrationConfigDigest, config, initConfig) - k.agentConfig = ddconfig.Datadog() + k.agentConfig = pkgconfigsetup.Datadog() err := k.CommonConfigure(senderManager, initConfig, config, source) if err != nil { @@ -344,7 +345,7 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf resyncPeriod := k.instance.ResyncPeriod if resyncPeriod == 0 { - resyncPeriod = ddconfig.Datadog().GetInt("kubernetes_informers_resync_period") + resyncPeriod = 
pkgconfigsetup.Datadog().GetInt("kubernetes_informers_resync_period") } builder.WithResync(time.Duration(resyncPeriod) * time.Second) @@ -526,7 +527,7 @@ func (k *KSMCheck) Run() error { // we also do a safety check for dedicated runners to avoid trying the leader election if (!k.isCLCRunner || !k.instance.LeaderSkip) && !podsFromKubeletInNodeAgent { // Only run if Leader Election is enabled. - if !ddconfig.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return log.Error("Leader Election not enabled. The cluster-agent will not run the kube-state-metrics core check.") } @@ -952,8 +953,8 @@ func newKSMCheck(base core.CheckBase, instance *KSMConfig) *KSMCheck { CheckBase: base, instance: instance, telemetry: newTelemetryCache(), - isCLCRunner: ddconfig.IsCLCRunner(), - isRunningOnNodeAgent: flavor.GetFlavor() != flavor.ClusterAgent && !ddconfig.IsCLCRunner(), + isCLCRunner: pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), + isRunningOnNodeAgent: flavor.GetFlavor() != flavor.ClusterAgent && !pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), metricNamesMapper: defaultMetricNamesMapper(), metricAggregators: defaultMetricAggregators(), metricTransformers: defaultMetricTransformers(), diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go index 7d04037fa97bf..001d20167f084 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ksmstore "github.com/DataDog/datadog-agent/pkg/kubestatemetrics/store" ) @@ -1644,7 +1644,7 @@ func 
TestKSMCheckInitTags(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - conf := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + conf := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) conf.SetWithoutSource("tags", tt.tagsInConfig) k := &KSMCheck{ diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go index 7054a232e7282..dfc6d051d3dea 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/events_common.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/kubetags" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/util" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -413,7 +413,7 @@ func init() { } func getEventSource(controllerName string, sourceComponent string) string { - if !ddConfig.Datadog().GetBool("kubernetes_events_source_detection.enabled") { + if !pkgconfigsetup.Datadog().GetBool("kubernetes_events_source_detection.enabled") { return kubernetesEventSource } diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go index ee6a132f7f9c5..5bb3ec879312b 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -124,7 +124,7 @@ type KubeASCheck struct { func (c *KubeASConfig) parse(data []byte) error { // default values - c.CollectEvent = ddConfig.Datadog().GetBool("collect_kubernetes_events") + c.CollectEvent = pkgconfigsetup.Datadog().GetBool("collect_kubernetes_events") c.CollectOShiftQuotas = true c.ResyncPeriodEvents = defaultResyncPeriodInSecond c.UseComponentStatus = true @@ -174,7 +174,7 @@ func (k *KubeASCheck) Configure(senderManager sender.SenderManager, _ uint64, co clusterName := clustername.GetRFC1123CompliantClusterName(context.TODO(), hostnameDetected) // Automatically add events based on activated Datadog products - if ddConfig.Datadog().GetBool("autoscaling.workload.enabled") { + if pkgconfigsetup.Datadog().GetBool("autoscaling.workload.enabled") { k.instance.CollectedEventTypes = append(k.instance.CollectedEventTypes, collectedEventType{ Source: "datadog-workload-autoscaler", }) @@ -204,7 +204,7 @@ func (k *KubeASCheck) Run() error { } defer sender.Commit() - if ddConfig.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { log.Debug("Cluster agent is enabled. Not running Kubernetes API Server check or collecting Kubernetes Events.") return nil } @@ -212,7 +212,7 @@ func (k *KubeASCheck) Run() error { // The Cluster Agent will passed in the `skip_leader_election` bool. if !k.instance.LeaderSkip { // Only run if Leader Election is enabled. 
- if !ddConfig.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return log.Error("Leader Election not enabled. Not running Kubernetes API Server check or collecting Kubernetes Events.") } leader, errLeader := cluster.RunLeaderElection() diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go index b20bca4044e61..d410ec83c7724 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_openshift_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestReportClusterQuotas(t *testing.T) { @@ -29,9 +29,9 @@ func TestReportClusterQuotas(t *testing.T) { json.Unmarshal(raw, &list) require.Len(t, list.Items, 1) - prevClusterName := config.Datadog().GetString("cluster_name") - config.Datadog().SetWithoutSource("cluster_name", "test-cluster-name") - defer config.Datadog().SetWithoutSource("cluster_name", prevClusterName) + prevClusterName := pkgconfigsetup.Datadog().GetString("cluster_name") + pkgconfigsetup.Datadog().SetWithoutSource("cluster_name", "test-cluster-name") + defer pkgconfigsetup.Datadog().SetWithoutSource("cluster_name", prevClusterName) instanceCfg := []byte("") initCfg := []byte("") diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go index 9e583aaeabade..e528141618f03 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go +++ b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go @@ -20,7 
+20,7 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/orchestrator" orchcfg "github.com/DataDog/datadog-agent/pkg/orchestrator/config" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -89,7 +89,7 @@ func newOrchestratorCheck(base core.CheckBase, instance *OrchestratorInstance) * instance: instance, stopCh: make(chan struct{}), groupID: atomic.NewInt32(rand.Int31()), - isCLCRunner: config.IsCLCRunner(), + isCLCRunner: pkgconfigsetup.IsCLCRunner(pkgconfigsetup.Datadog()), } } @@ -176,7 +176,7 @@ func (o *OrchestratorCheck) Run() error { // we also do a safety check for dedicated runners to avoid trying the leader election if !o.isCLCRunner || !o.instance.LeaderSkip { // Only run if Leader Election is enabled. - if !config.Datadog().GetBool("leader_election") { + if !pkgconfigsetup.Datadog().GetBool("leader_election") { return log.Errorc("Leader Election not enabled. The cluster-agent will not run the check.", orchestrator.ExtraLogContext...) 
} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go index 43315ca94520e..fe0974a49fac2 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go @@ -11,7 +11,7 @@ package transformers import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -37,7 +37,7 @@ func RetrieveUnifiedServiceTags(labels map[string]string) []string { if tagValue, found := labels[kubernetes.EnvTagLabelKey]; found { tags = append(tags, fmt.Sprintf("%s:%s", labelToTagKeys[kubernetes.EnvTagLabelKey], tagValue)) } else { - if envTag := config.Datadog().GetString("env"); envTag != "" { + if envTag := pkgconfigsetup.Datadog().GetString("env"); envTag != "" { tags = append(tags, fmt.Sprintf("%s:%s", tagKeyEnv, envTag)) } } diff --git a/pkg/collector/corechecks/containerimage/check.go b/pkg/collector/corechecks/containerimage/check.go index bf953b83a8d2c..791373763743e 100644 --- a/pkg/collector/corechecks/containerimage/check.go +++ b/pkg/collector/corechecks/containerimage/check.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -106,7 +106,7 @@ func Factory(store workloadmeta.Component) optional.Option[func() check.Check] { // Configure parses the check configuration and initializes the container_image check func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, config, initConfig 
integration.Data, source string) error { - if !ddConfig.Datadog().GetBool("container_image.enabled") { + if !pkgconfigsetup.Datadog().GetBool("container_image.enabled") { return errors.New("collection of container images is disabled") } diff --git a/pkg/collector/corechecks/containerlifecycle/check.go b/pkg/collector/corechecks/containerlifecycle/check.go index 05347475beb57..b7a20adaa4c90 100644 --- a/pkg/collector/corechecks/containerlifecycle/check.go +++ b/pkg/collector/corechecks/containerlifecycle/check.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -53,7 +53,7 @@ type Check struct { // Configure parses the check configuration and initializes the container_lifecycle check func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, config, initConfig integration.Data, source string) error { - if !ddConfig.Datadog().GetBool("container_lifecycle.enabled") { + if !pkgconfigsetup.Datadog().GetBool("container_lifecycle.enabled") { return errors.New("collection of container lifecycle events is disabled") } @@ -117,7 +117,7 @@ func (c *Check) Run() error { ) var taskEventsCh chan workloadmeta.EventBundle - if ddConfig.Datadog().GetBool("ecs_task_collection_enabled") { + if pkgconfigsetup.Datadog().GetBool("ecs_task_collection_enabled") { taskFilter := workloadmeta.NewFilterBuilder(). SetSource(workloadmeta.SourceNodeOrchestrator). 
@@ -186,7 +186,7 @@ func Factory(store workloadmeta.Component) optional.Option[func() check.Check] { // sendFargateTaskEvent sends Fargate task lifecycle event at the end of the check func (c *Check) sendFargateTaskEvent() { - if !ddConfig.Datadog().GetBool("ecs_task_collection_enabled") || + if !pkgconfigsetup.Datadog().GetBool("ecs_task_collection_enabled") || !env.IsECSFargate() { return } diff --git a/pkg/collector/corechecks/containers/containerd/events.go b/pkg/collector/corechecks/containers/containerd/events.go index 8e024071a19b8..71c14aa585ac5 100644 --- a/pkg/collector/corechecks/containers/containerd/events.go +++ b/pkg/collector/corechecks/containers/containerd/events.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" ctrUtil "github.com/DataDog/datadog-agent/pkg/util/containerd" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -174,7 +174,7 @@ func (s *subscriber) run(ctx context.Context) error { return fmt.Errorf("subscriber is already running the event listener routine") } - excludePauseContainers := config.Datadog().GetBool("exclude_pause_container") + excludePauseContainers := pkgconfigsetup.Datadog().GetBool("exclude_pause_container") // Only used when excludePauseContainers is true var pauseContainers setPauseContainers diff --git a/pkg/collector/corechecks/containers/containerd/events_test.go b/pkg/collector/corechecks/containers/containerd/events_test.go index 53aae7473ff70..a4cc07881af0a 100644 --- a/pkg/collector/corechecks/containers/containerd/events_test.go +++ b/pkg/collector/corechecks/containers/containerd/events_test.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" containerdutil "github.com/DataDog/datadog-agent/pkg/util/containerd" "github.com/DataDog/datadog-agent/pkg/util/containerd/fake" @@ -246,8 +246,8 @@ func TestCheckEvents_PauseContainers(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - defaultExcludePauseContainers := config.Datadog().GetBool("exclude_pause_container") - config.Datadog().SetWithoutSource("exclude_pause_container", test.excludePauseContainers) + defaultExcludePauseContainers := pkgconfigsetup.Datadog().GetBool("exclude_pause_container") + pkgconfigsetup.Datadog().SetWithoutSource("exclude_pause_container", test.excludePauseContainers) if test.generateCreateEvent { eventCreateContainer, err := createContainerEvent(testNamespace, test.containerID) @@ -276,7 +276,7 @@ func TestCheckEvents_PauseContainers(t *testing.T) { assert.Empty(t, sub.Flush(time.Now().Unix())) } - config.Datadog().SetWithoutSource("exclude_pause_container", defaultExcludePauseContainers) + pkgconfigsetup.Datadog().SetWithoutSource("exclude_pause_container", defaultExcludePauseContainers) }) } diff --git a/pkg/collector/corechecks/containers/docker/check_network.go b/pkg/collector/corechecks/containers/docker/check_network.go index e8dbc5133f49f..3e89621573063 100644 --- a/pkg/collector/corechecks/containers/docker/check_network.go +++ b/pkg/collector/corechecks/containers/docker/check_network.go @@ -20,8 +20,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/generic" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -38,7 +38,7 @@ func (d *DockerCheck) configureNetworkProcessor(processor *generic.Processor) { switch runtime.GOOS { case "linux": if env.IsHostProcAvailable() { - d.networkProcessorExtension = &dockerNetworkExtension{procPath: config.Datadog().GetString("container_proc_root")} + d.networkProcessorExtension = &dockerNetworkExtension{procPath: pkgconfigsetup.Datadog().GetString("container_proc_root")} } case "windows": d.networkProcessorExtension = &dockerNetworkExtension{} diff --git a/pkg/collector/corechecks/containers/generic/processor_network.go b/pkg/collector/corechecks/containers/generic/processor_network.go index 12d9538d4b5cf..d83fd0f215043 100644 --- a/pkg/collector/corechecks/containers/generic/processor_network.go +++ b/pkg/collector/corechecks/containers/generic/processor_network.go @@ -87,6 +87,7 @@ func (pn *ProcessorNetwork) processGroupedContainerNetwork() { for _, containerNetwork := range pn.ungroupedContainerNetwork { pn.generateNetworkMetrics(containerNetwork.tags, containerNetwork.stats) } + pn.ungroupedContainerNetwork = nil for _, containerNetworks := range pn.groupedContainerNetwork { // If we have multiple containers, tagging with container tag is incorrect as the metrics refer to whole isolation group. diff --git a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go index 5afbb23e3891f..f6f1f2ae62255 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider.go @@ -67,7 +67,7 @@ func (p *Provider) proberProbeTotal(metricFam *prom.MetricFamily, sender sender. 
metricSuffix = "startup_probe" default: log.Debugf("Unsupported probe type %s", probeType) - return + continue } result := metric.Metric["result"] @@ -80,17 +80,17 @@ func (p *Provider) proberProbeTotal(metricFam *prom.MetricFamily, sender sender. metricSuffix += ".unknown.total" default: log.Debugf("Unsupported probe result %s", result) - return + continue } cID, _ := common.GetContainerID(p.store, metric.Metric, p.filter) if cID == "" { - return + continue } tags, _ := tagger.Tag(cID, types.HighCardinality) if len(tags) == 0 { - return + continue } tags = utils.ConcatenateTags(tags, p.Config.Tags) diff --git a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go index ac9d134faf7db..4dfbf97124cce 100644 --- a/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go +++ b/pkg/collector/corechecks/containers/kubelet/provider/probe/provider_test.go @@ -99,11 +99,13 @@ func TestProvider_Provide(t *testing.T) { value: 281049, tags: []string{"instance_tag:something", "kube_namespace:kube-system", "pod_name:fluentbit-gke-45gvm", "kube_container_name:fluentbit"}, }, + /* Excluded container is not expected, see containers.Filter in the test { name: common.KubeletMetricsPrefix + "liveness_probe.success.total", value: 281049, tags: []string{"instance_tag:something", "kube_namespace:kube-system", "pod_name:fluentbit-gke-45gvm", "kube_container_name:fluentbit-gke"}, }, + */ { name: common.KubeletMetricsPrefix + "liveness_probe.success.total", value: 1686298, @@ -304,7 +306,7 @@ func TestProvider_Provide(t *testing.T) { p, err := NewProvider( &containers.Filter{ Enabled: true, - NameExcludeList: []*regexp.Regexp{regexp.MustCompile("agent-excluded")}, + NameExcludeList: []*regexp.Regexp{regexp.MustCompile("fluentbit-gke")}, }, config, store, diff --git a/pkg/collector/corechecks/ebpf/ebpf.go b/pkg/collector/corechecks/ebpf/ebpf.go index 
688a781d50252..4e9e6fe65a3cf 100644 --- a/pkg/collector/corechecks/ebpf/ebpf.go +++ b/pkg/collector/corechecks/ebpf/ebpf.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" ebpfcheck "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/ebpfcheck/model" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processnet "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -68,7 +68,7 @@ func (m *EBPFCheck) Configure(senderManager sender.SenderManager, _ uint64, conf if err := m.config.Parse(config); err != nil { return fmt.Errorf("ebpf check config: %s", err) } - if err := processnet.CheckPath(ddconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket")); err != nil { + if err := processnet.CheckPath(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")); err != nil { return fmt.Errorf("sysprobe socket: %s", err) } @@ -80,7 +80,7 @@ func (m *EBPFCheck) Run() error { if m.sysProbeUtil == nil { var err error m.sysProbeUtil, err = processnet.GetRemoteSystemProbeUtil( - ddconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket"), + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket"), ) if err != nil { return fmt.Errorf("sysprobe connection: %s", err) diff --git a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go index f7ff6312e769b..0a5b506ce1e7b 100644 --- a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/oomkill/model" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/cgroups" @@ -87,7 +87,7 @@ func (m *OOMKillCheck) Run() error { } sysProbeUtil, err := process_net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { return err } diff --git a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go index 526b1b0220d4b..d3cd6aab4cf12 100644 --- a/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go +++ b/pkg/collector/corechecks/ebpf/probe/ebpfcheck/probe.go @@ -28,7 +28,7 @@ import ( "golang.org/x/sys/unix" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/ebpfcheck/model" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" ddmaps "github.com/DataDog/datadog-agent/pkg/ebpf/maps" @@ -86,17 +86,17 @@ func NewProbe(cfg *ddebpf.Config) (*Probe, error) { return nil, err } - if ddconfig.SystemProbe().GetBool("ebpf_check.kernel_bpf_stats") { + if pkgconfigsetup.SystemProbe().GetBool("ebpf_check.kernel_bpf_stats") { probe.statsFD, err = ebpf.EnableStats(unix.BPF_STATS_RUN_TIME) if err != nil { log.Warnf("kernel ebpf stats failed to enable, program runtime and run count will be unavailable: %s", err) } } - probe.mapBuffers.keysBufferSizeLimit = uint32(ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.max_keys_buffer_size_bytes")) - probe.mapBuffers.valuesBufferSizeLimit = 
uint32(ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.max_values_buffer_size_bytes")) - probe.mapBuffers.iterationRestartDetectionEntries = ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.entries_for_iteration_restart_detection") - probe.entryCountMaxRestarts = ddconfig.SystemProbe().GetInt("ebpf_check.entry_count.max_restarts") + probe.mapBuffers.keysBufferSizeLimit = uint32(pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.max_keys_buffer_size_bytes")) + probe.mapBuffers.valuesBufferSizeLimit = uint32(pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.max_values_buffer_size_bytes")) + probe.mapBuffers.iterationRestartDetectionEntries = pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.entries_for_iteration_restart_detection") + probe.entryCountMaxRestarts = pkgconfigsetup.SystemProbe().GetInt("ebpf_check.entry_count.max_restarts") if isForEachElemHelperAvailable() { probe.mphCache = newMapProgHelperCache() diff --git a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go index 30fa918eb37f3..126f949876c87 100644 --- a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go +++ b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/tcpqueuelength/model" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -83,7 +83,7 @@ func (t *TCPQueueLengthCheck) Run() error { } sysProbeUtil, err := process_net.GetRemoteSystemProbeUtil( - 
dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { return err } diff --git a/pkg/collector/corechecks/embed/apm/apm.go b/pkg/collector/corechecks/embed/apm/apm.go index af0d2d4afa7b9..28162165eee6d 100644 --- a/pkg/collector/corechecks/embed/apm/apm.go +++ b/pkg/collector/corechecks/embed/apm/apm.go @@ -25,7 +25,7 @@ import ( checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/embed/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -96,10 +96,10 @@ func (c *APMCheck) run() error { hname, _ := hostname.Get(context.TODO()) env := os.Environ() - env = append(env, fmt.Sprintf("DD_API_KEY=%s", utils.SanitizeAPIKey(config.Datadog().GetString("api_key")))) + env = append(env, fmt.Sprintf("DD_API_KEY=%s", utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString("api_key")))) env = append(env, fmt.Sprintf("DD_HOSTNAME=%s", hname)) - env = append(env, fmt.Sprintf("DD_DOGSTATSD_PORT=%s", config.Datadog().GetString("dogstatsd_port"))) - env = append(env, fmt.Sprintf("DD_LOG_LEVEL=%s", config.Datadog().GetString("log_level"))) + env = append(env, fmt.Sprintf("DD_DOGSTATSD_PORT=%s", pkgconfigsetup.Datadog().GetString("dogstatsd_port"))) + env = append(env, fmt.Sprintf("DD_LOG_LEVEL=%s", pkgconfigsetup.Datadog().GetString("log_level"))) cmd.Env = env // forward the standard output to the Agent logger @@ -176,7 +176,7 @@ func (c *APMCheck) Configure(_ sender.SenderManager, _ uint64, data integration. 
c.binPath = defaultBinPath } - configFile := config.Datadog().ConfigFileUsed() + configFile := pkgconfigsetup.Datadog().ConfigFileUsed() c.commandOpts = []string{} @@ -186,7 +186,7 @@ func (c *APMCheck) Configure(_ sender.SenderManager, _ uint64, data integration. } c.source = source - c.telemetry = utils.IsCheckTelemetryEnabled("apm", config.Datadog()) + c.telemetry = utils.IsCheckTelemetryEnabled("apm", pkgconfigsetup.Datadog()) c.initConfig = string(initConfig) c.instanceConfig = string(data) return nil diff --git a/pkg/collector/corechecks/embed/process/process_agent.go b/pkg/collector/corechecks/embed/process/process_agent.go index 1aef66f233b8a..c27d5a4b8f975 100644 --- a/pkg/collector/corechecks/embed/process/process_agent.go +++ b/pkg/collector/corechecks/embed/process/process_agent.go @@ -25,7 +25,7 @@ import ( checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/embed/common" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/executable" @@ -158,7 +158,7 @@ func (c *ProcessAgentCheck) run() error { func (c *ProcessAgentCheck) Configure(senderManager sender.SenderManager, _ uint64, data integration.Data, initConfig integration.Data, source string) error { // only log whether process check is enabled or not but don't return early, because we still need to initialize "binPath", "source" and // start up process-agent. 
Ultimately it's up to process-agent to decide whether to run or not based on the config - if enabled := config.Datadog().GetBool("process_config.process_collection.enabled"); !enabled { + if enabled := pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled"); !enabled { log.Info("live process monitoring is disabled through main configuration file") } @@ -185,14 +185,14 @@ func (c *ProcessAgentCheck) Configure(senderManager sender.SenderManager, _ uint } // be explicit about the config file location - configFile := config.Datadog().ConfigFileUsed() + configFile := pkgconfigsetup.Datadog().ConfigFileUsed() c.commandOpts = []string{} if _, err := os.Stat(configFile); !os.IsNotExist(err) { c.commandOpts = append(c.commandOpts, fmt.Sprintf("-config=%s", configFile)) } c.source = source - c.telemetry = utils.IsCheckTelemetryEnabled("process_agent", config.Datadog()) + c.telemetry = utils.IsCheckTelemetryEnabled("process_agent", pkgconfigsetup.Datadog()) c.initConfig = string(initConfig) c.instanceConfig = string(data) return nil diff --git a/pkg/collector/corechecks/net/ntp/ntp_test.go b/pkg/collector/corechecks/net/ntp/ntp_test.go index 5e2959c7e3c24..d6035f396642d 100644 --- a/pkg/collector/corechecks/net/ntp/ntp_test.go +++ b/pkg/collector/corechecks/net/ntp/ntp_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" ) @@ -361,7 +361,7 @@ func TestDefaultHostConfig(t *testing.T) { expectedHosts := []string{"0.datadog.pool.ntp.org", "1.datadog.pool.ntp.org", "2.datadog.pool.ntp.org", "3.datadog.pool.ntp.org"} testedConfig := []byte(``) - 
config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) ntpCheck := new(NTPCheck) ntpCheck.Configure(aggregator.NewNoOpSenderManager(), integration.FakeConfigHash, testedConfig, []byte(""), "test") diff --git a/pkg/collector/corechecks/networkpath/config.go b/pkg/collector/corechecks/networkpath/config.go index 60b805d076c22..f59acbb12301c 100644 --- a/pkg/collector/corechecks/networkpath/config.go +++ b/pkg/collector/corechecks/networkpath/config.go @@ -10,11 +10,11 @@ import ( "strings" "time" + "gopkg.in/yaml.v2" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" - "gopkg.in/yaml.v2" ) const ( @@ -117,7 +117,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data ) c.Tags = instance.Tags - c.Namespace = coreconfig.Datadog().GetString("network_devices.namespace") + c.Namespace = setup.Datadog().GetString("network_devices.namespace") return c, nil } diff --git a/pkg/collector/corechecks/networkpath/config_test.go b/pkg/collector/corechecks/networkpath/config_test.go index bc1999876a46d..c4d7a591deb5f 100644 --- a/pkg/collector/corechecks/networkpath/config_test.go +++ b/pkg/collector/corechecks/networkpath/config_test.go @@ -9,15 +9,15 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" - "github.com/stretchr/testify/assert" ) func TestNewCheckConfig(t *testing.T) { - coreconfig.Datadog().SetDefault("network_devices.namespace", "my-namespace") + 
setup.Datadog().SetDefault("network_devices.namespace", "my-namespace") tests := []struct { name string rawInstance integration.Data diff --git a/pkg/collector/corechecks/networkpath/networkpath.go b/pkg/collector/corechecks/networkpath/networkpath.go index e7ba9465f268b..8c810bc423509 100644 --- a/pkg/collector/corechecks/networkpath/networkpath.go +++ b/pkg/collector/corechecks/networkpath/networkpath.go @@ -67,12 +67,12 @@ func (c *Check) Run() error { return fmt.Errorf("failed to trace path: %w", err) } path.Namespace = c.config.Namespace + path.Origin = payload.PathOriginNetworkPathIntegration // Add tags to path - commonTags := append(utils.GetCommonAgentTags(), c.config.Tags...) path.Source.Service = c.config.SourceService path.Destination.Service = c.config.DestinationService - path.Tags = commonTags + path.Tags = c.config.Tags // send to EP err = c.SendNetPathMDToEP(senderInstance, path) @@ -80,7 +80,8 @@ func (c *Check) Run() error { return fmt.Errorf("failed to send network path metadata: %w", err) } - c.submitTelemetry(metricSender, path, commonTags, startTime) + metricTags := append(utils.GetCommonAgentTags(), c.config.Tags...) 
+ c.submitTelemetry(metricSender, path, metricTags, startTime) senderInstance.Commit() return nil diff --git a/pkg/collector/corechecks/orchestrator/pod/pod_test.go b/pkg/collector/corechecks/orchestrator/pod/pod_test.go index f803c7534f784..c3ef7a1abee43 100644 --- a/pkg/collector/corechecks/orchestrator/pod/pod_test.go +++ b/pkg/collector/corechecks/orchestrator/pod/pod_test.go @@ -22,12 +22,13 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/config/setup/constants" oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config" "github.com/DataDog/datadog-agent/pkg/serializer/types" "github.com/DataDog/datadog-agent/pkg/util/cache" @@ -150,7 +151,7 @@ func TestPodTestSuite(t *testing.T) { } func (suite *PodTestSuite) TestPodCheck() { - cacheKey := cache.BuildAgentKey(config.ClusterIDCacheKey) + cacheKey := cache.BuildAgentKey(constants.ClusterIDCacheKey) cachedClusterID, found := cache.Cache.Get(cacheKey) if !found { cache.Cache.Set(cacheKey, strings.Repeat("1", 36), cache.NoExpiration) diff --git a/pkg/collector/corechecks/sbom/processor.go b/pkg/collector/corechecks/sbom/processor.go index 637bf4ecdfbe8..91bfeddccf531 100644 --- a/pkg/collector/corechecks/sbom/processor.go +++ b/pkg/collector/corechecks/sbom/processor.go @@ -10,9 +10,6 @@ package sbom import ( "context" "errors" - "io/fs" - "os" - "path/filepath" "strings" "time" @@ -21,9 +18,8 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" 
"github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/config/env" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host" sbomscanner "github.com/DataDog/datadog-agent/pkg/sbom/scanner" @@ -38,7 +34,7 @@ import ( ) var /* const */ ( - envVarEnv = ddConfig.Datadog().GetString("env") + envVarEnv = pkgconfigsetup.Datadog().GetString("env") sourceAgent = "agent" ) @@ -222,39 +218,13 @@ func (p *processor) processHostScanResult(result sbom.ScanResult) { p.queue <- sbom } -type relFS struct { - root string - fs fs.FS -} - -func newFS(root string) fs.FS { - fs := os.DirFS(root) - return &relFS{root: "/", fs: fs} -} - -func (f *relFS) Open(name string) (fs.File, error) { - if filepath.IsAbs(name) { - var err error - name, err = filepath.Rel(f.root, name) - if err != nil { - return nil, err - } - } - - return f.fs.Open(name) -} - func (p *processor) triggerHostScan() { if !p.hostSBOM { return } log.Debugf("Triggering host SBOM refresh") - scanPath := "/" - if hostRoot := os.Getenv("HOST_ROOT"); env.IsContainerized() && hostRoot != "" { - scanPath = hostRoot - } - scanRequest := host.NewScanRequest(scanPath, newFS("/")) + scanRequest := host.NewHostScanRequest() if err := p.sbomScanner.Scan(scanRequest); err != nil { log.Errorf("Failed to trigger SBOM generation for host: %s", err) diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect.go b/pkg/collector/corechecks/servicediscovery/apm/detect.go index 289e6fc42ee96..a1ad2d303fe62 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect.go @@ -19,7 +19,6 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language" - 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/language/reader" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/usm" "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" "github.com/DataDog/datadog-agent/pkg/util/kernel" @@ -48,13 +47,6 @@ var ( language.Python: pythonDetector, language.Go: goDetector, } - // For now, only allow a subset of the above detectors to actually run. - allowedLangs = map[language.Language]struct{}{ - language.Java: {}, - language.Node: {}, - language.Python: {}, - language.Go: {}, - } nodeAPMCheckRegex = regexp.MustCompile(`"dd-trace"`) ) @@ -66,10 +58,6 @@ func Detect(pid int, args []string, envs map[string]string, lang language.Langua return Injected } - if _, ok := allowedLangs[lang]; !ok { - return None - } - // different detection for provided instrumentation for each if detect, ok := detectorMap[lang]; ok { return detect(pid, args, envs, contextMap) @@ -250,93 +238,44 @@ func javaDetector(_ int, args []string, envs map[string]string, _ usm.DetectorCo return None } -func findFile(fileName string) (io.ReadCloser, bool) { - f, err := os.Open(fileName) - if err != nil { - return nil, false - } - return f, true -} - -const datadogDotNetInstrumented = "Datadog.Trace.ClrProfiler.Native" +func dotNetDetectorFromMapsReader(reader io.Reader) Instrumentation { + scanner := bufio.NewScanner(bufio.NewReader(reader)) + for scanner.Scan() { + line := scanner.Text() -func dotNetDetector(_ int, args []string, envs map[string]string, _ usm.DetectorContextMap) Instrumentation { - // if it's just the word `dotnet` by itself, don't instrument - if len(args) == 1 && args[0] == "dotnet" { - return None + if strings.HasSuffix(line, "Datadog.Trace.dll") { + return Provided + } } - /* - From Kevin Gosse: - - CORECLR_ENABLE_PROFILING=1 - - CORECLR_PROFILER_PATH environment variables set - (it means that a profiler is attached, it doesn't really matter if it's ours or another vendor) - */ - // 
don't instrument if the tracer is already installed - foundFlags := 0 - if _, ok := envs["CORECLR_PROFILER_PATH"]; ok { - foundFlags |= 1 - } + return None +} + +// dotNetDetector detects instrumentation in .NET applications. +// +// The primary check is for the environment variables which enables .NET +// profiling. This is required for auto-instrumentation, and besides that custom +// instrumentation using version 3.0.0 or later of Datadog.Trace requires +// auto-instrumentation. It is also set if some third-party +// profiling/instrumentation is active. +// +// The secondary check is to detect cases where an older version of +// Datadog.Trace is used for manual instrumentation without enabling +// auto-instrumentation. For this, we check for the presence of the DLL in the +// maps file. Note that this does not work for single-file deployments. +// +// 785c8a400000-785c8aaeb000 r--s 00000000 fc:06 12762267 /home/foo/.../publish/Datadog.Trace.dll +func dotNetDetector(pid int, _ []string, envs map[string]string, _ usm.DetectorContextMap) Instrumentation { if val, ok := envs["CORECLR_ENABLE_PROFILING"]; ok && val == "1" { - foundFlags |= 2 - } - if foundFlags == 3 { return Provided } - ignoreArgs := map[string]bool{ - "build": true, - "clean": true, - "restore": true, - "publish": true, - } - - if len(args) > 1 { - // Ignore only if the first arg match with the ignore list - if ignoreArgs[args[1]] { - return None - } - // Check to see if there's a DLL on the command line that contain the string Datadog.Trace.ClrProfiler.Native - // If so, it's already instrumented with Datadog, ignore the process - for _, v := range args[1:] { - if strings.HasSuffix(v, ".dll") { - if f, ok := findFile(v); ok { - defer f.Close() - offset, err := reader.Index(f, datadogDotNetInstrumented) - if offset != -1 && err == nil { - return Provided - } - } - } - } - } - - // does the binary contain the string Datadog.Trace.ClrProfiler.Native (this should cover all single-file deployments) - // 
if so, it's already instrumented with Datadog, ignore the process - if f, ok := findFile(args[0]); ok { - defer f.Close() - offset, err := reader.Index(f, datadogDotNetInstrumented) - if offset != -1 && err == nil { - return Provided - } - } - - // check if there's a .dll in the directory with the same name as the binary used to launch it - // if so, check if it has the Datadog.Trace.ClrProfiler.Native string - // if so, it's already instrumented with Datadog, ignore the process - if f, ok := findFile(args[0] + ".dll"); ok { - defer f.Close() - offset, err := reader.Index(f, datadogDotNetInstrumented) - if offset != -1 && err == nil { - return Provided - } + mapsPath := kernel.HostProc(strconv.Itoa(pid), "maps") + mapsFile, err := os.Open(mapsPath) + if err != nil { + return None } + defer mapsFile.Close() - // does the application folder contain the file Datadog.Trace.dll (this should cover "classic" deployments) - // if so, it's already instrumented with Datadog, ignore the process - if f, ok := findFile("Datadog.Trace.dll"); ok { - f.Close() - return Provided - } - return None + return dotNetDetectorFromMapsReader(mapsFile) } diff --git a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go index b2e551d63fb5a..79570d4159deb 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go +++ b/pkg/collector/corechecks/servicediscovery/apm/detect_nix_test.go @@ -173,6 +173,72 @@ func Test_pythonDetector(t *testing.T) { } } +func TestDotNetDetector(t *testing.T) { + for _, test := range []struct { + name string + env map[string]string + maps string + result Instrumentation + }{ + { + name: "no env, no maps", + result: None, + }, + { + name: "profiling disabled", + env: map[string]string{ + "CORECLR_ENABLE_PROFILING": "0", + }, + result: None, + }, + { + name: "profiling enabled", + env: map[string]string{ + "CORECLR_ENABLE_PROFILING": "1", + }, + result: Provided, + }, + { 
+ name: "not in maps", + maps: ` +785c8ab24000-785c8ab2c000 r--s 00000000 fc:06 12762114 /home/foo/hello/bin/release/net8.0/linux-x64/publish/System.Diagnostics.StackTrace.dll +785c8ab2c000-785c8acce000 r--s 00000000 fc:06 12762148 /home/foo/hello/bin/release/net8.0/linux-x64/publish/System.Net.Http.dll + `, + result: None, + }, + { + name: "in maps, no env", + maps: ` +785c89c00000-785c8a400000 rw-p 00000000 00:00 0 +785c8a400000-785c8aaeb000 r--s 00000000 fc:06 12762267 /home/foo/hello/bin/release/net8.0/linux-x64/publish/Datadog.Trace.dll +785c8aaec000-785c8ab0d000 rw-p 00000000 00:00 0 +785c8ab0d000-785c8ab24000 r--s 00000000 fc:06 12761829 /home/foo/hello/bin/release/net8.0/linux-x64/publish/System.Collections.Specialized.dll + `, + result: Provided, + }, + { + name: "in maps, env misleading", + env: map[string]string{ + "CORECLR_ENABLE_PROFILING": "0", + }, + maps: ` +785c8a400000-785c8aaeb000 r--s 00000000 fc:06 12762267 /home/foo/hello/bin/release/net8.0/linux-x64/publish/Datadog.Trace.dll + `, + result: Provided, + }, + } { + t.Run(test.name, func(t *testing.T) { + var result Instrumentation + if test.maps == "" { + result = dotNetDetector(0, nil, test.env, nil) + } else { + result = dotNetDetectorFromMapsReader(strings.NewReader(test.maps)) + } + assert.Equal(t, test.result, result) + }) + } +} + func TestGoDetector(t *testing.T) { curDir, err := testutil.CurDir() require.NoError(t, err) diff --git a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore index fbce16df4eb9d..be71b320651da 100644 --- a/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore +++ b/pkg/collector/corechecks/servicediscovery/apm/testutil/instrumented/.gitignore @@ -1 +1,2 @@ instrumented +instrumented-nosymbols diff --git a/pkg/collector/corechecks/servicediscovery/events.go b/pkg/collector/corechecks/servicediscovery/events.go index 
aa02df577a7e9..ae25068e956b0 100644 --- a/pkg/collector/corechecks/servicediscovery/events.go +++ b/pkg/collector/corechecks/servicediscovery/events.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -58,7 +58,7 @@ type telemetrySender struct { func (ts *telemetrySender) newEvent(t eventType, svc serviceInfo) *event { host := ts.hostname.GetSafe(context.Background()) - env := pkgconfig.Datadog().GetString("env") + env := pkgconfigsetup.Datadog().GetString("env") nameSource := "" if svc.service.DDService != "" { diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index 27ef6aa36cc66..5478b38f507d1 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/servicetype" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processnet "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -161,6 +161,6 @@ type systemProbeClient interface { func getSysProbeClient() (systemProbeClient, error) { return processnet.GetRemoteSystemProbeUtil( - ddconfig.SystemProbe().GetString("system_probe_config.sysprobe_socket"), + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket"), ) } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go 
b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index ba93cf883c933..6ee4f77ddffed 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -419,7 +419,7 @@ func buildFakeServer(t *testing.T) string { serverBin, err := usmtestutil.BuildGoBinaryWrapper(filepath.Join(curDir, "testutil"), "fake_server") require.NoError(t, err) - for _, alias := range []string{"java", "node", "sshd"} { + for _, alias := range []string{"java", "node", "sshd", "dotnet"} { makeAlias(t, alias, serverBin) } @@ -493,7 +493,15 @@ func TestAPMInstrumentationProvided(t *testing.T) { testCases := map[string]struct { commandline []string // The command line of the fake server language language.Language + env []string }{ + "dotnet": { + commandline: []string{"dotnet", "foo.dll"}, + language: language.DotNet, + env: []string{ + "CORECLR_ENABLE_PROFILING=1", + }, + }, "java": { commandline: []string{"java", "-javaagent:/path/to/dd-java-agent.jar", "-jar", "foo.jar"}, language: language.Java, @@ -514,6 +522,7 @@ func TestAPMInstrumentationProvided(t *testing.T) { bin := filepath.Join(serverDir, test.commandline[0]) cmd := exec.CommandContext(ctx, bin, test.commandline[1:]...) + cmd.Env = append(cmd.Env, test.env...) err := cmd.Start() require.NoError(t, err) @@ -551,7 +560,19 @@ func assertStat(t assert.TestingT, svc model.Service) { return } - assert.Equal(t, uint64(createTimeMs/1000), svc.StartTimeSecs) + // The value returned by proc.CreateTime() can vary between invocations + // since the BootTime (used internally in proc.CreateTime()) can vary when + // the version of BootTimeWithContext which uses /proc/uptime is active in + // gopsutil (either on Docker, or even outside of it due to a bug fixed in + // v4.24.8: + // https://github.com/shirou/gopsutil/commit/aa0b73dc6d5669de5bc9483c0655b1f9446317a9). 
+ // + // This is due to an inherent race since the code in BootTimeWithContext + // substracts the uptime of the host from the current time, and there can be + // in theory an unbounded amount of time between the read of /proc/uptime + // and the retrieval of the current time. Allow a 10 second diff as a + // reasonable value. + assert.InDelta(t, uint64(createTimeMs/1000), svc.StartTimeSecs, 10) } func assertCPU(t *testing.T, url string, pid int) { @@ -654,40 +675,60 @@ func TestNodeDocker(t *testing.T) { }, 30*time.Second, 100*time.Millisecond) } -func TestAPMInstrumentationProvidedPython(t *testing.T) { +func TestAPMInstrumentationProvidedWithMaps(t *testing.T) { curDir, err := testutil.CurDir() require.NoError(t, err) - fmapper := fileopener.BuildFmapper(t) - fakePython := makeAlias(t, "python", fmapper) + for _, test := range []struct { + alias string + lib string + language language.Language + }{ + { + alias: "python", + // We need the process to map something in a directory called + // "site-packages/ddtrace". The actual mapped file does not matter. + lib: filepath.Join(curDir, + "..", "..", "..", "..", + "network", "usm", "testdata", + "site-packages", "ddtrace", + fmt.Sprintf("libssl.so.%s", runtime.GOARCH)), + language: language.Python, + }, + { + alias: "dotnet", + lib: filepath.Join(curDir, "testdata", "Datadog.Trace.dll"), + language: language.DotNet, + }, + } { + t.Run(test.alias, func(t *testing.T) { + fmapper := fileopener.BuildFmapper(t) + fake := makeAlias(t, test.alias, fmapper) - // We need the process to map something in a directory called - // "site-packages/ddtrace". The actual mapped file does not matter. 
- ddtrace := filepath.Join(curDir, "..", "..", "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") - lib := filepath.Join(ddtrace, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) + // Give the process a listening socket + listener, err := net.Listen("tcp", "") + require.NoError(t, err) + f, err := listener.(*net.TCPListener).File() + listener.Close() + require.NoError(t, err) + t.Cleanup(func() { f.Close() }) + disableCloseOnExec(t, f) - // Give the process a listening socket - listener, err := net.Listen("tcp", "") - require.NoError(t, err) - f, err := listener.(*net.TCPListener).File() - listener.Close() - require.NoError(t, err) - t.Cleanup(func() { f.Close() }) - disableCloseOnExec(t, f) + cmd, err := fileopener.OpenFromProcess(t, fake, test.lib) + require.NoError(t, err) - cmd, err := fileopener.OpenFromProcess(t, fakePython, lib) - require.NoError(t, err) + url := setupDiscoveryModule(t) - url := setupDiscoveryModule(t) - - pid := cmd.Process.Pid - require.EventuallyWithT(t, func(collect *assert.CollectT) { - portMap := getServicesMap(t, url) - assert.Contains(collect, portMap, pid) - assert.Equal(collect, string(language.Python), portMap[pid].Language) - assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) - assertStat(collect, portMap[pid]) - }, 30*time.Second, 100*time.Millisecond) + pid := cmd.Process.Pid + require.EventuallyWithT(t, func(collect *assert.CollectT) { + portMap := getServicesMap(t, url) + assert.Contains(collect, portMap, pid) + assert.Equal(collect, string(test.language), portMap[pid].Language) + assert.Equal(collect, string(apm.Provided), portMap[pid].APMInstrumentation) + assertStat(collect, portMap[pid]) + }, 30*time.Second, 100*time.Millisecond) + }) + } } // Check that we can get listening processes in other namespaces. 
diff --git a/pkg/collector/corechecks/servicediscovery/module/testdata/Datadog.Trace.dll b/pkg/collector/corechecks/servicediscovery/module/testdata/Datadog.Trace.dll new file mode 100644 index 0000000000000..421376db9e8ae --- /dev/null +++ b/pkg/collector/corechecks/servicediscovery/module/testdata/Datadog.Trace.dll @@ -0,0 +1 @@ +dummy diff --git a/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore b/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore index 16df22f27b688..eaaf842eef4d4 100644 --- a/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore +++ b/pkg/collector/corechecks/servicediscovery/module/testutil/fake_server/.gitignore @@ -1,4 +1,6 @@ fake_server +dotnet +python java node sshd diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go index d395f28599b6b..1cdd101ebeb1e 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" ) @@ -132,7 +132,7 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instance // Run executes the check. 
func (c *Check) Run() error { - if !pkgconfig.SystemProbe().GetBool("discovery.enabled") { + if !pkgconfigsetup.SystemProbe().GetBool("discovery.enabled") { return nil } diff --git a/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go b/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go index d1481d8d27538..ad5dd0e5d0909 100644 --- a/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go +++ b/pkg/collector/corechecks/snmp/integration_profile_bundle_test.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/common" @@ -26,7 +26,7 @@ func TestProfileBundleJsonZip(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "zipprofiles.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go b/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go index fdd891c5bc67d..c73e5a06b3561 100644 --- a/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go +++ b/pkg/collector/corechecks/snmp/integration_profile_metadata_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" 
"github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" @@ -33,7 +33,7 @@ func TestProfileMetadata_f5(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/integration_topology_test.go b/pkg/collector/corechecks/snmp/integration_topology_test.go index 8aea54e5244c6..1945a8362cad1 100644 --- a/pkg/collector/corechecks/snmp/integration_topology_test.go +++ b/pkg/collector/corechecks/snmp/integration_topology_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" @@ -33,7 +33,7 @@ func TestTopologyPayload_LLDP(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := 
func(*checkconfig.CheckConfig) (session.Session, error) { @@ -735,7 +735,7 @@ func TestTopologyPayload_CDP(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -1428,7 +1428,7 @@ func TestTopologyPayload_LLDP_CDP(t *testing.T) { timeNow = common.MockTimeNow aggregator.NewBufferedAggregator(nil, nil, "", 1*time.Hour) invalidPath, _ := filepath.Abs(filepath.Join("internal", "test", "metadata.d")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go index 2fd5d6bba5591..1dc318f384825 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" coreutil "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -512,7 +512,7 @@ func NewCheckConfig(rawInstance integration.Data, rawInitConfig integration.Data } else if initConfig.Namespace != "" { c.Namespace = initConfig.Namespace } else { - c.Namespace = 
coreconfig.Datadog().GetString("network_devices.namespace") + c.Namespace = pkgconfigsetup.Datadog().GetString("network_devices.namespace") } c.Namespace, err = utils.NormalizeNamespace(c.Namespace) diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go index 67c3d49600777..49bfd183f77a0 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/pkg/aggregator" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkdevice/pinger" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -1430,7 +1430,7 @@ collect_topology: true } func Test_buildConfig_namespace(t *testing.T) { - defer coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "default") + defer pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "default") // Should use namespace defined in instance config // language=yaml @@ -1475,7 +1475,7 @@ ip_address: 1.2.3.4 community_string: "abc" `) rawInitConfig = []byte(``) - coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "totoro") + pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "totoro") conf, err = NewCheckConfig(rawInstanceConfig, rawInitConfig) assert.Nil(t, err) assert.Equal(t, "totoro", conf.Namespace) @@ -1503,7 +1503,7 @@ community_string: "abc" `) rawInitConfig = []byte(` namespace: `) - coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "mononoke") + pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "mononoke") conf, err = NewCheckConfig(rawInstanceConfig, rawInitConfig) assert.Nil(t, err) assert.Equal(t, "mononoke", 
conf.Namespace) @@ -1515,7 +1515,7 @@ ip_address: 1.2.3.4 community_string: "abc" `) rawInitConfig = []byte(``) - coreconfig.Datadog().SetWithoutSource("network_devices.namespace", "") + pkgconfigsetup.Datadog().SetWithoutSource("network_devices.namespace", "") _, err = NewCheckConfig(rawInstanceConfig, rawInitConfig) assert.EqualError(t, err, "namespace cannot be empty") } @@ -2442,7 +2442,7 @@ func TestCheckConfig_getResolvedSubnetName(t *testing.T) { } func TestCheckConfig_GetStaticTags(t *testing.T) { - coreconfig.Datadog().SetWithoutSource("hostname", "my-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "my-hostname") tests := []struct { name string config CheckConfig diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go index 08a03370e2b7d..8737e5845378f 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck.go @@ -19,7 +19,7 @@ import ( "go.uber.org/atomic" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" @@ -252,7 +252,7 @@ func (d *DeviceCheck) setDeviceHostExternalTags() { if deviceHostname == "" || err != nil { return } - agentTags := configUtils.GetConfiguredTags(config.Datadog(), false) + agentTags := configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false) log.Debugf("Set external tags for device host, host=`%s`, agentTags=`%v`", deviceHostname, agentTags) externalhost.SetExternalTags(deviceHostname, common.SnmpExternalTagsSourceType, agentTags) } diff --git a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go 
b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go index 4661a9f28689b..028e233687aca 100644 --- a/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go +++ b/pkg/collector/corechecks/snmp/internal/devicecheck/devicecheck_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/version" @@ -242,7 +242,7 @@ func TestDetectMetricsToCollect(t *testing.T) { defer func() { timeNow = time.Now }() profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "detectmetr.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) sess := session.CreateFakeSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go index b9f9a0735afdb..9d59def5890d0 100644 --- a/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go +++ b/pkg/collector/corechecks/snmp/internal/discovery/discovery_test.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/session" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func waitForDiscoveredDevices(discovery *Discovery, expectedDeviceCount int, timeout time.Duration) error { @@ -34,7 +34,7 @@ func waitForDiscoveredDevices(discovery *Discovery, expectedDeviceCount int, tim func TestDiscovery(t 
*testing.T) { path, _ := filepath.Abs(filepath.Join(".", "test", "run_path", "TestDiscovery")) - config.Datadog().SetWithoutSource("run_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", path) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -85,7 +85,7 @@ func TestDiscovery(t *testing.T) { func TestDiscoveryCache(t *testing.T) { path, _ := filepath.Abs(filepath.Join(".", "test", "run_path", "TestDiscoveryCache")) - config.Datadog().SetWithoutSource("run_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", path) sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { diff --git a/pkg/collector/corechecks/snmp/internal/discovery/testing.go b/pkg/collector/corechecks/snmp/internal/discovery/testing.go index 10f0fc4286488..820bc1747acd7 100644 --- a/pkg/collector/corechecks/snmp/internal/discovery/testing.go +++ b/pkg/collector/corechecks/snmp/internal/discovery/testing.go @@ -10,11 +10,11 @@ package discovery import ( "path/filepath" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // SetTestRunPath sets run_path for testing func SetTestRunPath() { path, _ := filepath.Abs(filepath.Join(".", "test", "run_path")) - config.Datadog().SetWithoutSource("run_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", path) } diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go index 0aea0a7b22401..7ca8bcbbe3172 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_json_bundle_test.go @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + 
pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func Test_loadBundleJSONProfiles(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "zipprofiles.d")) SetGlobalProfileConfigMap(nil) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) pth := findProfileBundleFilePath() require.FileExists(t, pth) resolvedProfiles, err := loadBundleJSONProfiles(pth) diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go index b3868a66cb60f..d0e03a880b928 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_resolver_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -26,18 +26,18 @@ import ( func Test_resolveProfiles(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "conf.d")) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) defaultTestConfdProfiles := ProfileConfigMap{} userTestConfdProfiles, err := getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "invalid_ext.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) profilesWithInvalidExtendProfiles, err := 
getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) invalidCyclicConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "invalid_cyclic.d")) - config.Datadog().SetWithoutSource("confd_path", invalidCyclicConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidCyclicConfdPath) invalidCyclicProfiles, err := getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) @@ -50,7 +50,7 @@ func Test_resolveProfiles(t *testing.T) { require.NoError(t, err) userProfilesCaseConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "user_profiles.d")) - config.Datadog().SetWithoutSource("confd_path", userProfilesCaseConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", userProfilesCaseConfdPath) userProfilesCaseUserProfiles, err := getProfileDefinitions(userProfilesFolder, true) require.NoError(t, err) userProfilesCaseDefaultProfiles, err := getProfileDefinitions(defaultProfilesFolder, true) diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go index b924661ae37df..9b6b4fdfed4de 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" ) @@ -109,7 +109,7 @@ func Test_getProfiles(t *testing.T) { t.Run(tt.name, func(t *testing.T) { SetGlobalProfileConfigMap(nil) path, _ := filepath.Abs(filepath.Join("..", "test", tt.mockConfd)) - coreconfig.Datadog().SetWithoutSource("confd_path", path) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", path) actualProfiles, err := GetProfiles(tt.profiles) if tt.expectedErr != "" { diff --git 
a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go index fc1cc11d0948b..03d7be924a9f3 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go @@ -14,7 +14,7 @@ import ( "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -109,7 +109,7 @@ func resolveProfileDefinitionPath(definitionFile string) string { } func getProfileConfdRoot(profileFolderName string) string { - confdPath := config.Datadog().GetString("confd_path") + confdPath := pkgconfigsetup.Datadog().GetString("confd_path") return filepath.Join(confdPath, "snmp.d", profileFolderName) } diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go index 55c629b9b8d8d..81d7513ab9fa7 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml_test.go @@ -16,7 +16,7 @@ import ( "github.com/cihub/seelog" assert "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -33,7 +33,7 @@ func getMetricFromProfile(p profiledefinition.ProfileDefinition, metricName stri func Test_resolveProfileDefinitionPath(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "user_profiles.d")) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) absPath, _ := 
filepath.Abs(filepath.Join("tmp", "myfile.yaml")) tests := []struct { @@ -49,17 +49,17 @@ func Test_resolveProfileDefinitionPath(t *testing.T) { { name: "relative path with default profile", definitionFilePath: "p2.yaml", - expectedPath: filepath.Join(config.Datadog().Get("confd_path").(string), "snmp.d", "default_profiles", "p2.yaml"), + expectedPath: filepath.Join(pkgconfigsetup.Datadog().Get("confd_path").(string), "snmp.d", "default_profiles", "p2.yaml"), }, { name: "relative path with user profile", definitionFilePath: "p3.yaml", - expectedPath: filepath.Join(config.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p3.yaml"), + expectedPath: filepath.Join(pkgconfigsetup.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p3.yaml"), }, { name: "relative path with user profile precedence", definitionFilePath: "p1.yaml", - expectedPath: filepath.Join(config.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p1.yaml"), + expectedPath: filepath.Join(pkgconfigsetup.Datadog().Get("confd_path").(string), "snmp.d", "profiles", "p1.yaml"), }, } for _, tt := range tests { @@ -84,7 +84,7 @@ func Test_loadYamlProfiles(t *testing.T) { func Test_loadYamlProfiles_withUserProfiles(t *testing.T) { defaultTestConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "user_profiles.d")) SetGlobalProfileConfigMap(nil) - config.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", defaultTestConfdPath) defaultProfiles, err := loadYamlProfiles() assert.Nil(t, err) @@ -113,7 +113,7 @@ func Test_loadYamlProfiles_withUserProfiles(t *testing.T) { func Test_loadYamlProfiles_invalidDir(t *testing.T) { invalidPath, _ := filepath.Abs(filepath.Join(".", "tmp", "invalidPath")) - config.Datadog().SetWithoutSource("confd_path", invalidPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", invalidPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() @@ -129,7 +129,7 
@@ func Test_loadYamlProfiles_invalidExtendProfile(t *testing.T) { log.SetupLogger(l, "debug") profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "invalid_ext.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() @@ -150,7 +150,7 @@ func Test_loadYamlProfiles_userAndDefaultProfileFolderDoesNotExist(t *testing.T) log.SetupLogger(l, "debug") profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "does-not-exist.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() @@ -173,7 +173,7 @@ func Test_loadYamlProfiles_validAndInvalidProfiles(t *testing.T) { log.SetupLogger(l, "debug") profilesWithInvalidExtendConfdPath, _ := filepath.Abs(filepath.Join("..", "test", "valid_invalid.d")) - config.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", profilesWithInvalidExtendConfdPath) SetGlobalProfileConfigMap(nil) defaultProfiles, err := loadYamlProfiles() diff --git a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go index 144a7a3abcbd1..6831ad07c2827 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go +++ b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go @@ -13,7 +13,7 @@ import ( "github.com/mohae/deepcopy" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" ) @@ -33,7 +33,7 
@@ func SetConfdPathAndCleanProfiles() { if !pathExists(file) { file, _ = filepath.Abs(filepath.Join(".", "internal", "test", "conf.d")) } - config.Datadog().SetWithoutSource("confd_path", file) + pkgconfigsetup.Datadog().SetWithoutSource("confd_path", file) } // FixtureProfileDefinitionMap returns a fixture of ProfileConfigMap with `f5-big-ip` profile diff --git a/pkg/collector/corechecks/snmp/snmp_test.go b/pkg/collector/corechecks/snmp/snmp_test.go index ce0ea597cc889..0306d9889fcc9 100644 --- a/pkg/collector/corechecks/snmp/snmp_test.go +++ b/pkg/collector/corechecks/snmp/snmp_test.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/comp/serializer/compression/compressionimpl" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -58,7 +58,7 @@ func createDeps(t *testing.T) deps { func Test_Run_simpleCase(t *testing.T) { // We cache the run_path directory because the chk.Run() method will write in cache testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() @@ -339,7 +339,7 @@ tags: func Test_Run_customIfSpeed(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) report.TimeNow = common.MockTimeNow deps := createDeps(t) profile.SetConfdPathAndCleanProfiles() @@ -485,7 +485,7 @@ metrics: func TestSupportedMetricTypes(t *testing.T) { testDir := t.TempDir() - 
coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -560,7 +560,7 @@ metrics: func TestProfile(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) timeNow = common.MockTimeNow deps := createDeps(t) @@ -958,7 +958,7 @@ profiles: func TestServiceCheckFailures(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() sessionFactory := func(*checkconfig.CheckConfig) (session.Session, error) { @@ -1041,7 +1041,7 @@ namespace: nsSubnet func TestCheck_Run(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) sysObjectIDPacketInvalidSysObjectIDMock := gosnmp.SnmpPacket{ Variables: []gosnmp.SnmpPDU{ { @@ -1259,7 +1259,7 @@ namespace: '%s' func TestCheck_Run_sessionCloseError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) profile.SetConfdPathAndCleanProfiles() sess := session.CreateMockSession() @@ -1305,7 +1305,7 @@ metrics: func TestReportDeviceMetadataEvenOnProfileError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) timeNow = common.MockTimeNow @@ -1618,7 +1618,7 @@ tags: func TestReportDeviceMetadataWithFetchError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + 
pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) timeNow = common.MockTimeNow deps := createDeps(t) senderManager := deps.Demultiplexer @@ -1732,7 +1732,7 @@ tags: func TestDiscovery(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) timeNow = common.MockTimeNow profile.SetConfdPathAndCleanProfiles() @@ -2078,7 +2078,7 @@ metric_tags: func TestDiscovery_CheckError(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) profile.SetConfdPathAndCleanProfiles() @@ -2156,7 +2156,7 @@ metric_tags: func TestDeviceIDAsHostname(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) cache.Cache.Delete(cache.BuildAgentKey("hostname")) // clean existing hostname cache @@ -2166,8 +2166,8 @@ func TestDeviceIDAsHostname(t *testing.T) { return sess, nil } chk := Check{sessionFactory: sessionFactory} - coreconfig.Datadog().SetWithoutSource("hostname", "test-hostname") - coreconfig.Datadog().SetWithoutSource("tags", []string{"agent_tag1:val1", "agent_tag2:val2"}) + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("tags", []string{"agent_tag1:val1", "agent_tag2:val2"}) senderManager := deps.Demultiplexer // language=yaml @@ -2349,7 +2349,7 @@ use_device_id_as_hostname: true func TestDiscoveryDeviceIDAsHostname(t *testing.T) { testDir := t.TempDir() - coreconfig.Datadog().SetWithoutSource("run_path", testDir) + pkgconfigsetup.Datadog().SetWithoutSource("run_path", testDir) deps := createDeps(t) cache.Cache.Delete(cache.BuildAgentKey("hostname")) // clean existing hostname cache timeNow = common.MockTimeNow @@ 
-2360,7 +2360,7 @@ func TestDiscoveryDeviceIDAsHostname(t *testing.T) { } chk := Check{sessionFactory: sessionFactory} - coreconfig.Datadog().SetWithoutSource("hostname", "my-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "my-hostname") senderManager := deps.Demultiplexer // language=yaml diff --git a/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go b/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go index 6fce0b646f394..97dd5eab205e5 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go +++ b/pkg/collector/corechecks/system/cpu/cpu/cpu_ctx_switches_linux.go @@ -15,7 +15,7 @@ import ( "strings" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func readCtxSwitches(procStatPath string) (ctxSwitches int64, err error) { @@ -43,8 +43,8 @@ func readCtxSwitches(procStatPath string) (ctxSwitches int64, err error) { func collectCtxSwitches(sender sender.Sender) error { procfsPath := "/proc" - if config.Datadog().IsSet("procfs_path") { - procfsPath = config.Datadog().GetString("procfs_path") + if pkgconfigsetup.Datadog().IsSet("procfs_path") { + procfsPath = pkgconfigsetup.Datadog().GetString("procfs_path") } ctxSwitches, err := readCtxSwitches(filepath.Join(procfsPath, "/stat")) if err != nil { diff --git a/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go b/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go index cd1ffec2b0863..d5124637d6c4a 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go +++ b/pkg/collector/corechecks/system/cpu/cpu/cpu_test.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/metrics" "github.com/shirou/gopsutil/v3/cpu" @@ -174,7 +174,7 @@ func TestCPUCheckLinuxErrorStoppedSender(t *testing.T) { } func TestCPUCheckLinuxErrorProcFsPathNoExists(t *testing.T) { - config.Datadog().SetDefault("procfs_path", "/tmp") + pkgconfigsetup.Datadog().SetDefault("procfs_path", "/tmp") cpuInfoFunc = func() ([]cpu.InfoStat, error) { return cpuInfo, nil } @@ -201,7 +201,7 @@ func TestCPUCheckLinuxErrorProcFsPathEmptyFile(t *testing.T) { t.Fatal("Error creating temporary file:", err) } defer os.Remove(tempFile.Name()) - config.Datadog().SetDefault("procfs_path", os.TempDir()) + pkgconfigsetup.Datadog().SetDefault("procfs_path", os.TempDir()) cpuInfoFunc = func() ([]cpu.InfoStat, error) { return cpuInfo, nil } @@ -232,7 +232,7 @@ func TestCPUCheckLinuxErrorProcFsPathWrongFormat(t *testing.T) { if err != nil { t.Fatal("Error writing to temporary file:", err) } - config.Datadog().SetDefault("procfs_path", os.TempDir()) + pkgconfigsetup.Datadog().SetDefault("procfs_path", os.TempDir()) cpuInfoFunc = func() ([]cpu.InfoStat, error) { return cpuInfo, nil } diff --git a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go index b4a7de8bfba57..8f2efd2520802 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go +++ b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/utils" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/system/wincrashdetect/probe" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/event" //process_net "github.com/DataDog/datadog-agent/pkg/process/net" @@ -61,7 +61,7 @@ func 
TestWinCrashReporting(t *testing.T) { listener, closefunc := createSystemProbeListener() defer closefunc() - config.InitSystemProbeConfig(config.SystemProbe()) + pkgconfigsetup.InitSystemProbeConfig(pkgconfigsetup.SystemProbe()) mux := http.NewServeMux() server := http.Server{ @@ -70,7 +70,7 @@ func TestWinCrashReporting(t *testing.T) { defer server.Close() sock := fmt.Sprintf("localhost:%d", listener.Addr().(*net.TCPAddr).Port) - config.SystemProbe().SetWithoutSource("system_probe_config.sysprobe_socket", sock) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.sysprobe_socket", sock) /* * the underlying system probe connector is a singleton. Therefore, we can't set up different diff --git a/pkg/collector/corechecks/telemetry/check.go b/pkg/collector/corechecks/telemetry/check.go index 91ca99075be10..34299831a4412 100644 --- a/pkg/collector/corechecks/telemetry/check.go +++ b/pkg/collector/corechecks/telemetry/check.go @@ -11,13 +11,14 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + dto "github.com/prometheus/client_model/go" + + "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" - dto "github.com/prometheus/client_model/go" ) const ( @@ -28,10 +29,11 @@ const ( type checkImpl struct { corechecks.CheckBase + telemetry telemetry.Component } func (c *checkImpl) Run() error { - mfs, err := telemetryimpl.GetCompatComponent().Gather(true) + mfs, err := c.telemetry.Gather(true) if err != nil { return err } @@ -105,12 +107,11 @@ func (c *checkImpl) buildTags(lps []*dto.LabelPair) []string { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) -} - 
-func newCheck() check.Check { - return &checkImpl{ - CheckBase: corechecks.NewCheckBase(CheckName), - } +func Factory(telemetry telemetry.Component) optional.Option[func() check.Check] { + return optional.NewOption(func() check.Check { + return &checkImpl{ + CheckBase: corechecks.NewCheckBase(CheckName), + telemetry: telemetry, + } + }) } diff --git a/pkg/collector/embed_python.go b/pkg/collector/embed_python.go index b8f35b2722b88..c20edcf3b7ae7 100644 --- a/pkg/collector/embed_python.go +++ b/pkg/collector/embed_python.go @@ -9,7 +9,7 @@ package collector import ( "github.com/DataDog/datadog-agent/pkg/collector/python" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -38,8 +38,8 @@ func pySetup(paths ...string) (pythonVersion, pythonHome, pythonPath string) { } func pyPrepareEnv() error { - if config.Datadog().IsSet("procfs_path") { - procfsPath := config.Datadog().GetString("procfs_path") + if pkgconfigsetup.Datadog().IsSet("procfs_path") { + procfsPath := pkgconfigsetup.Datadog().GetString("procfs_path") return python.SetPythonPsutilProcPath(procfsPath) } return nil diff --git a/pkg/collector/python/check.go b/pkg/collector/python/check.go index 304014df9ea49..a1e1c1d7644ae 100644 --- a/pkg/collector/python/check.go +++ b/pkg/collector/python/check.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/check/stats" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -81,7 +81,7 @@ func NewPythonCheck(senderManager sender.SenderManager, name string, class *C.rt class: 
class, interval: defaults.DefaultCheckInterval, lastWarnings: []error{}, - telemetry: utils.IsCheckTelemetryEnabled(name, config.Datadog()), + telemetry: utils.IsCheckTelemetryEnabled(name, pkgconfigsetup.Datadog()), } runtime.SetFinalizer(pyCheck, pythonCheckFinalizer) @@ -308,7 +308,7 @@ func (c *PythonCheck) Configure(senderManager sender.SenderManager, integrationC log.Warnf("could not get a '%s' check instance with the new api: %s", c.ModuleName, rtLoaderError) log.Warn("trying to instantiate the check with the old api, passing agentConfig to the constructor") - allSettings := config.Datadog().AllSettings() + allSettings := pkgconfigsetup.Datadog().AllSettings() agentConfig, err := yaml.Marshal(allSettings) if err != nil { log.Errorf("error serializing agent config: %s", err) diff --git a/pkg/collector/python/datadog_agent.go b/pkg/collector/python/datadog_agent.go index f9c3c52b2493a..c64eaa5f35b12 100644 --- a/pkg/collector/python/datadog_agent.go +++ b/pkg/collector/python/datadog_agent.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/obfuscate" "github.com/DataDog/datadog-agent/pkg/persistentcache" "github.com/DataDog/datadog-agent/pkg/util" @@ -63,7 +63,7 @@ func GetHostname(hostname **C.char) { // //export GetHostTags func GetHostTags(hostTags **C.char) { - tags := hosttags.Get(context.Background(), true, config.Datadog()) + tags := hosttags.Get(context.Background(), true, pkgconfigsetup.Datadog()) tagsBytes, err := json.Marshal(tags) if err != nil { log.Warnf("Error getting host tags: %v. 
Invalid tags: %v", err, tags) @@ -85,7 +85,7 @@ func GetClusterName(clusterName **C.char) { // //export TracemallocEnabled func TracemallocEnabled() C.bool { - return C.bool(config.Datadog().GetBool("tracemalloc_debug")) + return C.bool(pkgconfigsetup.Datadog().GetBool("tracemalloc_debug")) } // Headers returns a basic set of HTTP headers that can be used by clients in Python checks. @@ -110,12 +110,12 @@ func Headers(yamlPayload **C.char) { //export GetConfig func GetConfig(key *C.char, yamlPayload **C.char) { goKey := C.GoString(key) - if !config.Datadog().IsSet(goKey) { + if !pkgconfigsetup.Datadog().IsSet(goKey) { *yamlPayload = nil return } - value := config.Datadog().Get(goKey) + value := pkgconfigsetup.Datadog().Get(goKey) data, err := yaml.Marshal(value) if err != nil { log.Errorf("could not convert configuration value '%v' to YAML: %s", value, err) @@ -245,12 +245,12 @@ var ( ) // lazyInitObfuscator initializes the obfuscator the first time it is used. We can't initialize during the package init -// because the obfuscator depends on config.Datadog and it isn't guaranteed to be initialized during package init, but +// because the obfuscator depends on pkgconfigsetup.Datadog and it isn't guaranteed to be initialized during package init, but // will definitely be initialized by the time one of the python checks runs func lazyInitObfuscator() *obfuscate.Obfuscator { obfuscatorLoader.Do(func() { var cfg obfuscate.Config - if err := config.Datadog().UnmarshalKey("apm_config.obfuscation", &cfg); err != nil { + if err := pkgconfigsetup.Datadog().UnmarshalKey("apm_config.obfuscation", &cfg); err != nil { log.Errorf("Failed to unmarshal apm_config.obfuscation: %s", err.Error()) cfg = obfuscate.Config{} } @@ -586,7 +586,7 @@ var defaultMongoObfuscateSettings = obfuscate.JSONConfig{ //export getProcessStartTime func getProcessStartTime() float64 { - return float64(config.StartTime.Unix()) + return float64(pkgconfigsetup.StartTime.Unix()) } // ObfuscateMongoDBString 
obfuscates the MongoDB query diff --git a/pkg/collector/python/init.go b/pkg/collector/python/init.go index 5b905debba7ba..04217d158e9a4 100644 --- a/pkg/collector/python/init.go +++ b/pkg/collector/python/init.go @@ -21,7 +21,7 @@ import ( "unsafe" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -376,11 +376,11 @@ func resolvePythonExecPath(pythonVersion string, ignoreErrors bool) (string, err //nolint:revive // TODO(AML) Fix revive linter func Initialize(paths ...string) error { - pythonVersion := config.Datadog().GetString("python_version") - allowPathHeuristicsFailure := config.Datadog().GetBool("allow_python_path_heuristics_failure") + pythonVersion := pkgconfigsetup.Datadog().GetString("python_version") + allowPathHeuristicsFailure := pkgconfigsetup.Datadog().GetBool("allow_python_path_heuristics_failure") // Memory related RTLoader-global initialization - if config.Datadog().GetBool("memtrack_enabled") { + if pkgconfigsetup.Datadog().GetBool("memtrack_enabled") { C.initMemoryTracker() } @@ -426,7 +426,7 @@ func Initialize(paths ...string) error { return err } - if config.Datadog().GetBool("telemetry.enabled") && config.Datadog().GetBool("telemetry.python_memory") { + if pkgconfigsetup.Datadog().GetBool("telemetry.enabled") && pkgconfigsetup.Datadog().GetBool("telemetry.python_memory") { initPymemTelemetry() } diff --git a/pkg/collector/python/init_nix.go b/pkg/collector/python/init_nix.go index ddfee4f741edd..52cd96fb9657d 100644 --- a/pkg/collector/python/init_nix.go +++ b/pkg/collector/python/init_nix.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) /* 
@@ -26,10 +26,10 @@ import "C" // Any platform-specific initialization belongs here. func initializePlatform() error { // Setup crash handling specifics - *NIX-only - if config.Datadog().GetBool("c_stacktrace_collection") { + if pkgconfigsetup.Datadog().GetBool("c_stacktrace_collection") { var cCoreDump int - if config.Datadog().GetBool("c_core_dump") { + if pkgconfigsetup.Datadog().GetBool("c_core_dump") { cCoreDump = 1 } diff --git a/pkg/collector/python/init_windows.go b/pkg/collector/python/init_windows.go index 48a31a741125d..04d18175df1f7 100644 --- a/pkg/collector/python/init_windows.go +++ b/pkg/collector/python/init_windows.go @@ -10,14 +10,14 @@ package python import ( "os" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Any platform-specific initialization belongs here. func initializePlatform() error { // On Windows, it's not uncommon to have a system-wide PYTHONPATH env var set. // Unset it, so our embedded python doesn't try to load things from the system. 
- if !config.Datadog().GetBool("windows_use_pythonpath") { + if !pkgconfigsetup.Datadog().GetBool("windows_use_pythonpath") { os.Unsetenv("PYTHONPATH") } diff --git a/pkg/collector/python/loader.go b/pkg/collector/python/loader.go index 969a3e10c07f9..66df21aaf6944 100644 --- a/pkg/collector/python/loader.go +++ b/pkg/collector/python/loader.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/loaders" - agentConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -129,7 +129,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int defer glock.unlock() // Platform-specific preparation - if !agentConfig.Datadog().GetBool("win_skip_com_init") { + if !pkgconfigsetup.Datadog().GetBool("win_skip_com_init") { log.Debugf("Performing platform loading prep") err = platformLoaderPrep() if err != nil { @@ -186,7 +186,7 @@ func (cl *PythonCheckLoader) Load(senderManager sender.SenderManager, config int log.Debugf("python check '%s' doesn't have a '__version__' attribute: %s", config.Name, getRtLoaderError()) } - if !agentConfig.Datadog().GetBool("disable_py3_validation") && !loadedAsWheel { + if !pkgconfigsetup.Datadog().GetBool("disable_py3_validation") && !loadedAsWheel { // Customers, though unlikely might version their custom checks. 
// Let's use the module namespace to try to decide if this was a // custom check, check for py3 compatibility @@ -288,7 +288,7 @@ func reportPy3Warnings(checkName string, checkFilePath string) { checkFilePath = checkFilePath[:len(checkFilePath)-1] } - if strings.TrimSpace(agentConfig.Datadog().GetString("python_version")) == "3" { + if strings.TrimSpace(pkgconfigsetup.Datadog().GetString("python_version")) == "3" { // the linter used by validatePython3 doesn't work when run from python3 status = a7TagPython3 metricValue = 1.0 diff --git a/pkg/collector/python/memory.go b/pkg/collector/python/memory.go index 5d6fdb403de62..38060ef37b462 100644 --- a/pkg/collector/python/memory.go +++ b/pkg/collector/python/memory.go @@ -16,7 +16,7 @@ import ( "github.com/cihub/seelog" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -118,7 +118,7 @@ func TrackedCString(str string) *C.char { cstr := C.CString(str) // TODO(memory-tracking): track the origin of the string (for example check name) - if config.Datadog().GetBool("memtrack_enabled") { + if pkgconfigsetup.Datadog().GetBool("memtrack_enabled") { MemoryTracker(unsafe.Pointer(cstr), C.size_t(len(str)+1), C.DATADOG_AGENT_RTLOADER_ALLOCATION) } diff --git a/pkg/collector/python/py3_checker.go b/pkg/collector/python/py3_checker.go index a72e31463b3c9..131dfb73ee7c5 100644 --- a/pkg/collector/python/py3_checker.go +++ b/pkg/collector/python/py3_checker.go @@ -16,11 +16,11 @@ import ( "path/filepath" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var ( - linterTimeout = time.Duration(config.Datadog().GetInt("python3_linter_timeout")) * time.Second + linterTimeout = time.Duration(pkgconfigsetup.Datadog().GetInt("python3_linter_timeout")) * time.Second ) type warning struct { diff --git 
a/pkg/collector/runner/runner.go b/pkg/collector/runner/runner.go index e8be2f5cef27a..e6ba495f02f2b 100644 --- a/pkg/collector/runner/runner.go +++ b/pkg/collector/runner/runner.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/runner/tracker" "github.com/DataDog/datadog-agent/pkg/collector/scheduler" "github.com/DataDog/datadog-agent/pkg/collector/worker" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -53,7 +53,7 @@ type Runner struct { // NewRunner takes the number of desired goroutines processing incoming checks. func NewRunner(senderManager sender.SenderManager) *Runner { - numWorkers := config.Datadog().GetInt("check_runners") + numWorkers := pkgconfigsetup.Datadog().GetInt("check_runners") r := &Runner{ senderManager: senderManager, @@ -66,7 +66,7 @@ func NewRunner(senderManager sender.SenderManager) *Runner { } if !r.isStaticWorkerCount { - numWorkers = config.DefaultNumWorkers + numWorkers = pkgconfigsetup.DefaultNumWorkers } r.ensureMinWorkers(numWorkers) @@ -164,7 +164,7 @@ func (r *Runner) UpdateNumWorkers(numChecks int64) { case numChecks <= 25: desiredNumWorkers = 20 default: - desiredNumWorkers = config.MaxNumWorkers + desiredNumWorkers = pkgconfigsetup.MaxNumWorkers } r.ensureMinWorkers(desiredNumWorkers) diff --git a/pkg/collector/runner/runner_test.go b/pkg/collector/runner/runner_test.go index 5771b886e0ba7..6ae3cd335c81b 100644 --- a/pkg/collector/runner/runner_test.go +++ b/pkg/collector/runner/runner_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stub" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" "github.com/DataDog/datadog-agent/pkg/collector/scheduler" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Fixtures @@ -143,14 +143,14 @@ func assertAsyncBool(t 
*testing.T, actualValueFunc func() bool, expectedValue bo func testSetUp(t *testing.T) { assertAsyncWorkerCount(t, 0) expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") } // Tests func TestNewRunner(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") r := NewRunner(aggregator.NewNoOpSenderManager()) require.NotNil(t, r) @@ -164,7 +164,7 @@ func TestNewRunner(t *testing.T) { func TestRunnerAddWorker(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "1") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "1") r := NewRunner(aggregator.NewNoOpSenderManager()) require.NotNil(t, r) @@ -179,7 +179,7 @@ func TestRunnerAddWorker(t *testing.T) { func TestRunnerStaticUpdateNumWorkers(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "2") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "2") r := NewRunner(aggregator.NewNoOpSenderManager()) require.NotNil(t, r) @@ -198,14 +198,14 @@ func TestRunnerStaticUpdateNumWorkers(t *testing.T) { func TestRunnerDynamicUpdateNumWorkers(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "0") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "0") testCases := [][]int{ {0, 10, 4}, {11, 15, 10}, {16, 20, 15}, {21, 25, 20}, - {26, 35, config.MaxNumWorkers}, + {26, 35, pkgconfigsetup.MaxNumWorkers}, } for _, testCase := range testCases { @@ -251,7 +251,7 @@ func TestRunner(t *testing.T) { func TestRunnerStop(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "10") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "10") numChecks := 8 checks := make([]*testCheck, numChecks) @@ -304,7 +304,7 @@ func TestRunnerStop(t *testing.T) { func TestRunnerStopWithStuckCheck(t *testing.T) { testSetUp(t) 
- config.Datadog().SetWithoutSource("check_runners", "10") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "10") numChecks := 8 checks := make([]*testCheck, numChecks) @@ -360,7 +360,7 @@ func TestRunnerStopWithStuckCheck(t *testing.T) { func TestRunnerStopCheck(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") testCheck := newCheck(t, "mycheck:123", false, nil) blockedCheck := newCheck(t, "mycheck2:123", false, nil) @@ -408,7 +408,7 @@ func TestRunnerStopCheck(t *testing.T) { func TestRunnerScheduler(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") sched1 := newScheduler() sched2 := newScheduler() @@ -428,7 +428,7 @@ func TestRunnerScheduler(t *testing.T) { func TestRunnerShouldAddCheckStats(t *testing.T) { testSetUp(t) - config.Datadog().SetWithoutSource("check_runners", "3") + pkgconfigsetup.Datadog().SetWithoutSource("check_runners", "3") testCheck := newCheck(t, "test", false, nil) sched := newScheduler() diff --git a/pkg/collector/worker/check_logger.go b/pkg/collector/worker/check_logger.go index 6ce024c3b20ea..c26cb4e6ef2f3 100644 --- a/pkg/collector/worker/check_logger.go +++ b/pkg/collector/worker/check_logger.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,7 +52,7 @@ func (cl *CheckLogger) CheckFinished() { if cl.lastVerboseLog { message += fmt.Sprintf( ", next runs will be logged every %v runs", - config.Datadog().GetInt64(loggingFrequencyConfigKey), + 
pkgconfigsetup.Datadog().GetInt64(loggingFrequencyConfigKey), ) } @@ -79,7 +79,7 @@ func (cl *CheckLogger) Debug(message string) { // shouldLogCheck returns if we should log the check start/stop message with higher // verbosity and if this is the end of the initial series of check log statements func shouldLogCheck(id checkid.ID) (shouldLog, lastVerboseLog bool) { - loggingFrequency := uint64(config.Datadog().GetInt64(loggingFrequencyConfigKey)) + loggingFrequency := uint64(pkgconfigsetup.Datadog().GetInt64(loggingFrequencyConfigKey)) // If this is the first time we see the check, log it stats, idFound := expvars.CheckStats(id) diff --git a/pkg/collector/worker/check_logger_test.go b/pkg/collector/worker/check_logger_test.go index 9c0431dd472ec..41ad99b777f89 100644 --- a/pkg/collector/worker/check_logger_test.go +++ b/pkg/collector/worker/check_logger_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stats" "github.com/DataDog/datadog-agent/pkg/collector/check/stub" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) type stubCheck struct { @@ -36,7 +36,7 @@ func addExpvarsCheckStats(c check.Check) { } func setUp() { - config.Datadog().SetWithoutSource(loggingFrequencyConfigKey, "20") + pkgconfigsetup.Datadog().SetWithoutSource(loggingFrequencyConfigKey, "20") expvars.Reset() } diff --git a/pkg/collector/worker/worker.go b/pkg/collector/worker/worker.go index da60890471f06..7a366434e317a 100644 --- a/pkg/collector/worker/worker.go +++ b/pkg/collector/worker/worker.go @@ -15,7 +15,7 @@ import ( checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" "github.com/DataDog/datadog-agent/pkg/collector/runner/tracker" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -179,7 +179,7 @@ func (w *Worker) Run() { } if sender != nil && !longRunning { - if config.Datadog().GetBool("integration_check_status_enabled") { + if pkgconfigsetup.Datadog().GetBool("integration_check_status_enabled") { sender.ServiceCheck(serviceCheckStatusKey, serviceCheckStatus, hname, serviceCheckTags, "") } // FIXME(remy): this `Commit()` should be part of the `if` above, we keep diff --git a/pkg/collector/worker/worker_test.go b/pkg/collector/worker/worker_test.go index c31e4613be7ae..129ed30a499e8 100644 --- a/pkg/collector/worker/worker_test.go +++ b/pkg/collector/worker/worker_test.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stub" "github.com/DataDog/datadog-agent/pkg/collector/runner/expvars" "github.com/DataDog/datadog-agent/pkg/collector/runner/tracker" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" ) @@ -182,7 +182,7 @@ func TestWorkerName(t *testing.T) { func TestWorker(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") var wg sync.WaitGroup @@ -258,7 +258,7 @@ func TestWorker(t *testing.T) { func TestWorkerUtilizationExpvars(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") var wg sync.WaitGroup @@ -328,7 +328,7 @@ func TestWorkerUtilizationExpvars(t *testing.T) { func TestWorkerErrorAndWarningHandling(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") var 
wg sync.WaitGroup @@ -336,7 +336,7 @@ func TestWorkerErrorAndWarningHandling(t *testing.T) { pendingChecksChan := make(chan check.Check, 10) mockShouldAddStatsFunc := func(checkid.ID) bool { return true } - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") testCheck1 := newCheck(t, "testing:123", true, nil) testCheck2 := newCheck(t, "testing2:234", true, nil) @@ -383,13 +383,13 @@ func TestWorkerErrorAndWarningHandling(t *testing.T) { func TestWorkerConcurrentCheckScheduling(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) mockShouldAddStatsFunc := func(checkid.ID) bool { return true } - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") testCheck := newCheck(t, "testing:123", true, nil) @@ -412,7 +412,7 @@ func TestWorkerConcurrentCheckScheduling(t *testing.T) { func TestWorkerStatsAddition(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) @@ -421,7 +421,7 @@ func TestWorkerStatsAddition(t *testing.T) { return string(id) != "squelched:123" } - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") longRunningCheckNoErrorNoWarning := &testCheck{ t: t, @@ -471,8 +471,8 @@ func TestWorkerStatsAddition(t *testing.T) { func TestWorkerServiceCheckSending(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") - config.Datadog().SetWithoutSource("integration_check_status_enabled", "true") + 
pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("integration_check_status_enabled", "true") var wg sync.WaitGroup @@ -557,7 +557,7 @@ func TestWorkerServiceCheckSending(t *testing.T) { func TestWorkerSenderNil(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) @@ -588,7 +588,7 @@ func TestWorkerSenderNil(t *testing.T) { func TestWorkerServiceCheckSendingLongRunningTasks(t *testing.T) { expvars.Reset() - config.Datadog().SetWithoutSource("hostname", "myhost") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "myhost") checksTracker := tracker.NewRunningChecksTracker() pendingChecksChan := make(chan check.Check, 10) diff --git a/pkg/commonchecks/corechecks.go b/pkg/commonchecks/corechecks.go index 4bc7dd5649d2d..9cebc3657119a 100644 --- a/pkg/commonchecks/corechecks.go +++ b/pkg/commonchecks/corechecks.go @@ -58,7 +58,7 @@ func RegisterChecks(store workloadmeta.Component, cfg config.Component, telemetr corecheckLoader.RegisterCheck(cpu.CheckName, cpu.Factory()) corecheckLoader.RegisterCheck(memory.CheckName, memory.Factory()) corecheckLoader.RegisterCheck(uptime.CheckName, uptime.Factory()) - corecheckLoader.RegisterCheck(telemetryCheck.CheckName, telemetryCheck.Factory()) + corecheckLoader.RegisterCheck(telemetryCheck.CheckName, telemetryCheck.Factory(telemetry)) corecheckLoader.RegisterCheck(ntp.CheckName, ntp.Factory()) corecheckLoader.RegisterCheck(snmp.CheckName, snmp.Factory()) corecheckLoader.RegisterCheck(networkpath.CheckName, networkpath.Factory(telemetry)) diff --git a/pkg/compliance/agent.go b/pkg/compliance/agent.go index d7d2dfb9acae6..1c73f1b3f5321 100644 --- a/pkg/compliance/agent.go +++ b/pkg/compliance/agent.go @@ -23,19 +23,20 @@ import ( "sync" "time" + 
"github.com/shirou/gopsutil/v3/process" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/compliance/aptconfig" "github.com/DataDog/datadog-agent/pkg/compliance/dbconfig" "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" "github.com/DataDog/datadog-agent/pkg/compliance/metrics" "github.com/DataDog/datadog-agent/pkg/compliance/utils" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/rules" secl "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/shirou/gopsutil/v3/process" ) const containersCountMetricName = "datadog.security_agent.compliance.containers_running" @@ -129,7 +130,7 @@ type Agent struct { } func xccdfEnabled() bool { - return config.Datadog().GetBool("compliance_config.xccdf.enabled") || config.Datadog().GetBool("compliance_config.host_benchmarks.enabled") + return pkgconfigsetup.Datadog().GetBool("compliance_config.xccdf.enabled") || pkgconfigsetup.Datadog().GetBool("compliance_config.host_benchmarks.enabled") } // DefaultRuleFilter implements the default filtering of benchmarks' rules. 
It diff --git a/pkg/compliance/reporter.go b/pkg/compliance/reporter.go index 2bbf3a39f4c27..501ca6f10bc43 100644 --- a/pkg/compliance/reporter.go +++ b/pkg/compliance/reporter.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -44,7 +44,7 @@ func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *c auditor.Start() // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), coreconfig.Datadog()) + pipelineProvider := pipeline.NewProvider(config.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) pipelineProvider.Start() logSource := sources.NewLogSource( @@ -62,7 +62,7 @@ func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *c } // merge tags from config - for _, tag := range configUtils.GetConfiguredTags(coreconfig.Datadog(), true) { + for _, tag := range configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true) { if strings.HasPrefix(tag, "host") { continue } diff --git a/pkg/config/aliases.go b/pkg/config/aliases.go deleted file mode 100644 index 8114edc845796..0000000000000 --- a/pkg/config/aliases.go +++ /dev/null @@ -1,217 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are 
licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package config defines the configuration of the agent -package config - -import ( - "context" - - slog "github.com/cihub/seelog" - - "github.com/DataDog/datadog-agent/comp/core/secrets" - "github.com/DataDog/datadog-agent/pkg/config/env" - "github.com/DataDog/datadog-agent/pkg/config/model" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" -) - -// Aliases to conf package -type ( - // Proxy alias to model.Proxy - Proxy = model.Proxy - // Reader is alias to model.Reader - Reader = model.Reader - // Writer is alias to model.Reader - Writer = model.Writer - // ReaderWriter is alias to model.ReaderWriter - ReaderWriter = model.ReaderWriter - // Config is alias to model.Config - Config = model.Config -) - -// NewConfig is alias for Config object. 
-var NewConfig = model.NewConfig - -// Warnings represent the warnings in the config -type Warnings = model.Warnings - -var ( - // Datadog Alias - Datadog = pkgconfigsetup.Datadog - // SystemProbe Alias - SystemProbe = pkgconfigsetup.SystemProbe -) - -// IsAutoconfigEnabled is alias for model.IsAutoconfigEnabled -func IsAutoconfigEnabled() bool { - return env.IsAutoconfigEnabled(Datadog()) -} - -// Aliases for config overrides -var ( - AddOverride = model.AddOverride - AddOverrides = model.AddOverrides - AddOverrideFunc = model.AddOverrideFunc -) - -// LoggerName Alias -type LoggerName = pkglogsetup.LoggerName - -// Aliases for logs -var ( - NewLogWriter = pkglogsetup.NewLogWriter - NewTLSHandshakeErrorWriter = pkglogsetup.NewTLSHandshakeErrorWriter -) - -// SetupLogger Alias using Datadog config -func SetupLogger(loggerName LoggerName, logLevel, logFile, syslogURI string, syslogRFC, logToConsole, jsonFormat bool) error { - return pkglogsetup.SetupLogger(loggerName, logLevel, logFile, syslogURI, syslogRFC, logToConsole, jsonFormat, Datadog()) -} - -// SetupJMXLogger Alias using Datadog config -func SetupJMXLogger(logFile, syslogURI string, syslogRFC, logToConsole, jsonFormat bool) error { - return pkglogsetup.SetupJMXLogger(logFile, syslogURI, syslogRFC, logToConsole, jsonFormat, Datadog()) -} - -// GetSyslogURI Alias using Datadog config -func GetSyslogURI() string { - return pkglogsetup.GetSyslogURI(Datadog()) -} - -// SetupDogstatsdLogger Alias using Datadog config -func SetupDogstatsdLogger(logFile string) (slog.LoggerInterface, error) { - return pkglogsetup.SetupDogstatsdLogger(logFile, Datadog()) -} - -// IsCloudProviderEnabled Alias using Datadog config -func IsCloudProviderEnabled(cloudProvider string) bool { - return pkgconfigsetup.IsCloudProviderEnabled(cloudProvider, Datadog()) -} - -// GetIPCAddress Alias using Datadog config -func GetIPCAddress() (string, error) { - return pkgconfigsetup.GetIPCAddress(Datadog()) -} - -// Datatype Aliases -const ( - 
Metrics = pkgconfigsetup.Metrics - Traces = pkgconfigsetup.Traces - Logs = pkgconfigsetup.Logs -) - -// Aliases for config defaults -const ( - DefaultForwarderRecoveryInterval = pkgconfigsetup.DefaultForwarderRecoveryInterval - DefaultAPIKeyValidationInterval = pkgconfigsetup.DefaultAPIKeyValidationInterval - DefaultBatchWait = pkgconfigsetup.DefaultBatchWait - DefaultInputChanSize = pkgconfigsetup.DefaultInputChanSize - DefaultBatchMaxConcurrentSend = pkgconfigsetup.DefaultBatchMaxConcurrentSend - DefaultBatchMaxContentSize = pkgconfigsetup.DefaultBatchMaxContentSize - DefaultLogsSenderBackoffRecoveryInterval = pkgconfigsetup.DefaultLogsSenderBackoffRecoveryInterval - DefaultLogsSenderBackoffMax = pkgconfigsetup.DefaultLogsSenderBackoffMax - DefaultLogsSenderBackoffFactor = pkgconfigsetup.DefaultLogsSenderBackoffFactor - DefaultLogsSenderBackoffBase = pkgconfigsetup.DefaultLogsSenderBackoffBase - DefaultBatchMaxSize = pkgconfigsetup.DefaultBatchMaxSize - DefaultNumWorkers = pkgconfigsetup.DefaultNumWorkers - MaxNumWorkers = pkgconfigsetup.MaxNumWorkers - DefaultSite = pkgconfigsetup.DefaultSite - OTLPTracePort = pkgconfigsetup.OTLPTracePort - DefaultAuditorTTL = pkgconfigsetup.DefaultAuditorTTL - DefaultMaxMessageSizeBytes = pkgconfigsetup.DefaultMaxMessageSizeBytes - DefaultProcessEntityStreamPort = pkgconfigsetup.DefaultProcessEntityStreamPort - DefaultProcessEventsCheckInterval = pkgconfigsetup.DefaultProcessEventsCheckInterval - DefaultProcessEventsMinCheckInterval = pkgconfigsetup.DefaultProcessEventsMinCheckInterval - ProcessMaxPerMessageLimit = pkgconfigsetup.ProcessMaxPerMessageLimit - DefaultProcessMaxPerMessage = pkgconfigsetup.DefaultProcessMaxPerMessage - ProcessMaxMessageBytesLimit = pkgconfigsetup.ProcessMaxMessageBytesLimit - DefaultProcessDiscoveryHintFrequency = pkgconfigsetup.DefaultProcessDiscoveryHintFrequency - DefaultProcessMaxMessageBytes = pkgconfigsetup.DefaultProcessMaxMessageBytes - DefaultProcessExpVarPort = 
pkgconfigsetup.DefaultProcessExpVarPort - DefaultProcessQueueBytes = pkgconfigsetup.DefaultProcessQueueBytes - DefaultProcessQueueSize = pkgconfigsetup.DefaultProcessQueueSize - DefaultProcessRTQueueSize = pkgconfigsetup.DefaultProcessRTQueueSize - DefaultRuntimePoliciesDir = pkgconfigsetup.DefaultRuntimePoliciesDir - DefaultGRPCConnectionTimeoutSecs = pkgconfigsetup.DefaultGRPCConnectionTimeoutSecs - DefaultProcessEndpoint = pkgconfigsetup.DefaultProcessEndpoint - DefaultProcessEventsEndpoint = pkgconfigsetup.DefaultProcessEventsEndpoint -) - -type ( - // ConfigurationProviders Alias - ConfigurationProviders = pkgconfigsetup.ConfigurationProviders - // Listeners Alias - Listeners = pkgconfigsetup.Listeners -) - -// GetObsPipelineURL Alias using Datadog config -func GetObsPipelineURL(datatype pkgconfigsetup.DataType) (string, error) { - return pkgconfigsetup.GetObsPipelineURL(datatype, Datadog()) -} - -// LoadCustom Alias -func LoadCustom(config model.Config, additionalKnownEnvVars []string) error { - return pkgconfigsetup.LoadCustom(config, additionalKnownEnvVars) -} - -// LoadDatadogCustom Alias -func LoadDatadogCustom(config model.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*model.Warnings, error) { - return pkgconfigsetup.LoadDatadogCustom(config, origin, secretResolver, additionalKnownEnvVars) -} - -// GetValidHostAliases Alias using Datadog config -func GetValidHostAliases(ctx context.Context) ([]string, error) { - return pkgconfigsetup.GetValidHostAliases(ctx, Datadog()) -} - -// IsCLCRunner Alias using Datadog config -func IsCLCRunner() bool { - return pkgconfigsetup.IsCLCRunner(Datadog()) -} - -// GetBindHostFromConfig Alias using Datadog config -func GetBindHostFromConfig(config model.Reader) string { - return pkgconfigsetup.GetBindHostFromConfig(config) -} - -// GetBindHost Alias using Datadog config -func GetBindHost() string { - return pkgconfigsetup.GetBindHost(Datadog()) -} - -var ( - // 
IsRemoteConfigEnabled Alias - IsRemoteConfigEnabled = pkgconfigsetup.IsRemoteConfigEnabled - // StartTime Alias - StartTime = pkgconfigsetup.StartTime - // StandardJMXIntegrations Alias - StandardJMXIntegrations = pkgconfigsetup.StandardJMXIntegrations - // SetupOTLP Alias - SetupOTLP = pkgconfigsetup.OTLP - // InitSystemProbeConfig Alias - InitSystemProbeConfig = pkgconfigsetup.InitSystemProbeConfig - // InitConfig Alias - InitConfig = pkgconfigsetup.InitConfig - - // GetRemoteConfigurationAllowedIntegrations Alias - GetRemoteConfigurationAllowedIntegrations = pkgconfigsetup.GetRemoteConfigurationAllowedIntegrations - // LoadProxyFromEnv Alias - LoadProxyFromEnv = pkgconfigsetup.LoadProxyFromEnv - - // GetIPCPort Alias - GetIPCPort = pkgconfigsetup.GetIPCPort -) - -// LoadWithoutSecret Alias using Datadog config -func LoadWithoutSecret() (*model.Warnings, error) { - return pkgconfigsetup.LoadDatadogCustom(Datadog(), "datadog.yaml", optional.NewNoneOption[secrets.Component](), SystemProbe().GetEnvVars()) -} - -// GetProcessAPIAddressPort Alias using Datadog config -func GetProcessAPIAddressPort() (string, error) { - return pkgconfigsetup.GetProcessAPIAddressPort(Datadog()) -} diff --git a/pkg/config/aliases_darwin.go b/pkg/config/aliases_darwin.go deleted file mode 100644 index 17dc1cb3a0f76..0000000000000 --- a/pkg/config/aliases_darwin.go +++ /dev/null @@ -1,18 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package config - -import ( - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// Aliases to setup package -const ( - DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile - DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile - DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile - DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin -) diff --git a/pkg/config/aliases_nix.go b/pkg/config/aliases_nix.go deleted file mode 100644 index 4bbce899be6bd..0000000000000 --- a/pkg/config/aliases_nix.go +++ /dev/null @@ -1,20 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build linux || freebsd || netbsd || openbsd || solaris || dragonfly || aix - -package config - -import ( - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// Aliases to setup package -var ( - DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile - DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile - DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile - DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin -) diff --git a/pkg/config/aliases_windows.go b/pkg/config/aliases_windows.go deleted file mode 100644 index cab0f23bfedbd..0000000000000 --- a/pkg/config/aliases_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package config - -import ( - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// Aliases to setup package -var ( - DefaultUpdaterLogFile = pkgconfigsetup.DefaultUpdaterLogFile - DefaultSecurityAgentLogFile = pkgconfigsetup.DefaultSecurityAgentLogFile - DefaultProcessAgentLogFile = pkgconfigsetup.DefaultProcessAgentLogFile - DefaultDDAgentBin = pkgconfigsetup.DefaultDDAgentBin -) diff --git a/pkg/config/autodiscovery/autodiscovery.go b/pkg/config/autodiscovery/autodiscovery.go index ccd4ae2930eb4..1f1455c39761e 100644 --- a/pkg/config/autodiscovery/autodiscovery.go +++ b/pkg/config/autodiscovery/autodiscovery.go @@ -11,32 +11,32 @@ package autodiscovery import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" snmplistener "github.com/DataDog/datadog-agent/pkg/snmp" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" ) // DiscoverComponentsFromConfig returns a list of AD Providers and Listeners based on the agent configuration -func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.Listeners) { - detectedProviders := []config.ConfigurationProviders{} - detectedListeners := []config.Listeners{} +func DiscoverComponentsFromConfig() ([]pkgconfigsetup.ConfigurationProviders, []pkgconfigsetup.Listeners) { + detectedProviders := []pkgconfigsetup.ConfigurationProviders{} + detectedListeners := []pkgconfigsetup.Listeners{} // Auto-add Prometheus config provider based on `prometheus_scrape.enabled` - if config.Datadog().GetBool("prometheus_scrape.enabled") { - var prometheusProvider config.ConfigurationProviders + if pkgconfigsetup.Datadog().GetBool("prometheus_scrape.enabled") { + var prometheusProvider 
pkgconfigsetup.ConfigurationProviders if flavor.GetFlavor() == flavor.ClusterAgent { - prometheusProvider = config.ConfigurationProviders{Name: "prometheus_services", Polling: true} + prometheusProvider = pkgconfigsetup.ConfigurationProviders{Name: "prometheus_services", Polling: true} } else { - prometheusProvider = config.ConfigurationProviders{Name: "prometheus_pods", Polling: true} + prometheusProvider = pkgconfigsetup.ConfigurationProviders{Name: "prometheus_pods", Polling: true} } log.Infof("Prometheus scraping is enabled: Adding the Prometheus config provider '%s'", prometheusProvider.Name) detectedProviders = append(detectedProviders, prometheusProvider) } // Add database-monitoring aurora listener if the feature is enabled - if config.Datadog().GetBool("database_monitoring.autodiscovery.aurora.enabled") { - detectedListeners = append(detectedListeners, config.Listeners{Name: "database-monitoring-aurora"}) + if pkgconfigsetup.Datadog().GetBool("database_monitoring.autodiscovery.aurora.enabled") { + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "database-monitoring-aurora"}) log.Info("Database monitoring aurora discovery is enabled: Adding the aurora listener") } @@ -55,7 +55,7 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L log.Info("Configs with advanced kube service identifiers detected: Adding the 'kube service file' config provider") // Polling is set to false because kube_services_file is a static config provider. 
// It generates entity IDs based on the provided advanced config: kube_service:/// - detectedProviders = append(detectedProviders, config.ConfigurationProviders{Name: names.KubeServicesFileRegisterName, Polling: false}) + detectedProviders = append(detectedProviders, pkgconfigsetup.ConfigurationProviders{Name: names.KubeServicesFileRegisterName, Polling: false}) } if !epFound && !adv.KubeEndpoints.IsEmpty() { @@ -64,7 +64,7 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L // Polling is set to true because kube_endpoints_file is a dynamic config provider. // It generates entity IDs based on the provided advanced config + the IPs found in the corresponding Endpoints object: kube_endpoint://// // The generated entity IDs are subject to change, thus the continuous polling. - detectedProviders = append(detectedProviders, config.ConfigurationProviders{Name: names.KubeEndpointsFileRegisterName, Polling: true}) + detectedProviders = append(detectedProviders, pkgconfigsetup.ConfigurationProviders{Name: names.KubeEndpointsFileRegisterName, Polling: true}) } } @@ -76,10 +76,10 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L // Auto-activate autodiscovery without listeners: - snmp configs := []snmplistener.Config{} - err := config.Datadog().UnmarshalKey("network_devices.autodiscovery.configs", &configs) + err := pkgconfigsetup.Datadog().UnmarshalKey("network_devices.autodiscovery.configs", &configs) if err == nil && len(configs) > 0 { - detectedListeners = append(detectedListeners, config.Listeners{Name: "snmp"}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "snmp"}) log.Info("Configs for autodiscovery detected: Adding the snmp listener") } @@ -87,14 +87,14 @@ func DiscoverComponentsFromConfig() ([]config.ConfigurationProviders, []config.L } // DiscoverComponentsFromEnv returns a list of AD Providers and Listeners based on environment characteristics -func 
DiscoverComponentsFromEnv() ([]config.ConfigurationProviders, []config.Listeners) { - detectedProviders := []config.ConfigurationProviders{} - detectedListeners := []config.Listeners{} +func DiscoverComponentsFromEnv() ([]pkgconfigsetup.ConfigurationProviders, []pkgconfigsetup.Listeners) { + detectedProviders := []pkgconfigsetup.ConfigurationProviders{} + detectedListeners := []pkgconfigsetup.Listeners{} // When using automatic discovery of providers/listeners // We automatically activate the environment and static config listener - detectedListeners = append(detectedListeners, config.Listeners{Name: "environment"}) - detectedListeners = append(detectedListeners, config.Listeners{Name: "static config"}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "environment"}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: "static config"}) // Automatic handling of AD providers/listeners should only run in the core or process agent. if flavor.GetFlavor() != flavor.DefaultAgent && flavor.GetFlavor() != flavor.ProcessAgent { @@ -108,17 +108,17 @@ func DiscoverComponentsFromEnv() ([]config.ConfigurationProviders, []config.List isKubeEnv := env.IsFeaturePresent(env.Kubernetes) if isContainerEnv || isKubeEnv { - detectedProviders = append(detectedProviders, config.ConfigurationProviders{Name: names.KubeContainer}) + detectedProviders = append(detectedProviders, pkgconfigsetup.ConfigurationProviders{Name: names.KubeContainer}) log.Info("Adding KubeContainer provider from environment") } if isContainerEnv && !isKubeEnv { - detectedListeners = append(detectedListeners, config.Listeners{Name: names.Container}) + detectedListeners = append(detectedListeners, pkgconfigsetup.Listeners{Name: names.Container}) log.Info("Adding Container listener from environment") } if isKubeEnv { - detectedListeners = append(detectedListeners, config.Listeners{Name: "kubelet"}) + detectedListeners = append(detectedListeners, 
pkgconfigsetup.Listeners{Name: "kubelet"}) log.Info("Adding Kubelet listener from environment") } diff --git a/pkg/config/autodiscovery/autodiscovery_test.go b/pkg/config/autodiscovery/autodiscovery_test.go index 45499431766f9..09a7f28ae1a04 100644 --- a/pkg/config/autodiscovery/autodiscovery_test.go +++ b/pkg/config/autodiscovery/autodiscovery_test.go @@ -9,14 +9,15 @@ import ( "strings" "testing" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/stretchr/testify/assert" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestDiscoverComponentsFromConfigForSnmp(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -27,7 +28,7 @@ network_devices: assert.Len(t, configListeners, 1) assert.Equal(t, "snmp", configListeners[0].Name) - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -36,7 +37,7 @@ network_devices: _, configListeners = DiscoverComponentsFromConfig() assert.Empty(t, len(configListeners)) - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - network: 127.0.0.1/30 diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml index 979f769cfff8e..14cf6d8c013d0 100644 --- a/pkg/config/config_template.yaml +++ b/pkg/config/config_template.yaml @@ -885,6 +885,8 @@ api_key: # enabled: false {{ end }} + + {{ end -}} {{- if .LogsAgent }} @@ -1483,6 +1485,15 @@ api_key: # # process_config: + {{- if (eq .OS "linux")}} + ## @param run_in_core_agent - custom object - optional + ## Controls whether the process Agent or core Agent collects process and/or container information (Linux only). 
+ # run_in_core_agent: + ## @param enabled - boolean - optional - default: false + ## Enables process/container collection on the core Agent instead of the process Agent. + # enabled: false + {{ end }} + ## @param process_collection - custom object - optional ## Specifies settings for collecting processes. # process_collection: @@ -1708,6 +1719,28 @@ api_key: ## # check_max_events_per_run: 100 {{ end -}} + +{{- if .SBOM }} +## @param sbom - custom object - optional +## Enter specific configuration for the Cloud Security Management Vulnerability Management feature +# sbom: + ## @param enabled - boolean - optional - default: false + ## set to true to enable Cloud Security Management Vulnerability Management + # enabled: false + + ## uncomment the sections below to enable where the vulnerability scanning is done + + ## @param enabled - boolean - optional - default: false + ## set to true to enable Infrastructure Vulnerabiltilies + # host: + # enabled: false +{{- if (eq .OS "linux")}} + + + # container_image: + # enabled: false +{{ end -}} +{{ end -}} {{- if .SystemProbe }} ################################## diff --git a/pkg/config/legacy/converter.go b/pkg/config/legacy/converter.go index be8fce6d99443..f64ca2eb1f19f 100644 --- a/pkg/config/legacy/converter.go +++ b/pkg/config/legacy/converter.go @@ -13,13 +13,30 @@ import ( "strconv" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" ) +// ConfigConverter is used in the legacy package +// to convert A5 config to A6 +type ConfigConverter struct { + model.Config +} + +// Set is used for setting configuration from A5 config +func (c *ConfigConverter) Set(key string, value interface{}) { + c.Config.Set(key, value, model.SourceAgentRuntime) +} + +// NewConfigConverter is creating and returning a config converter +func 
NewConfigConverter() *ConfigConverter { + return &ConfigConverter{pkgconfigsetup.Datadog()} +} + // FromAgentConfig reads the old agentConfig configuration, converts and merges // the values into the current configuration object -func FromAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter) error { +func FromAgentConfig(agentConfig Config, converter *ConfigConverter) error { if err := extractURLAPIKeys(agentConfig, converter); err != nil { return err } @@ -84,8 +101,8 @@ func FromAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter if agentConfig["service_discovery_backend"] == "docker" { // `docker` is the only possible value also on the Agent v5 - dockerListener := config.Listeners{Name: "docker"} - converter.Set("listeners", []config.Listeners{dockerListener}) + dockerListener := pkgconfigsetup.Listeners{Name: "docker"} + converter.Set("listeners", []pkgconfigsetup.Listeners{dockerListener}) } if providers, err := buildConfigProviders(agentConfig); err == nil { @@ -160,7 +177,7 @@ func FromAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter return extractTraceAgentConfig(agentConfig, converter) } -func extractTraceAgentConfig(agentConfig Config, converter *config.LegacyConfigConverter) error { +func extractTraceAgentConfig(agentConfig Config, converter *ConfigConverter) error { for iniKey, yamlKey := range map[string]string{ "trace.api.api_key": "apm_config.api_key", "trace.api.endpoint": "apm_config.apm_dd_url", @@ -233,7 +250,7 @@ func isAffirmative(value string) (bool, error) { return v == "true" || v == "yes" || v == "1", nil } -func extractURLAPIKeys(agentConfig Config, converter *config.LegacyConfigConverter) error { +func extractURLAPIKeys(agentConfig Config, converter *ConfigConverter) error { urls := strings.Split(agentConfig["dd_url"], ",") keys := strings.Split(agentConfig["api_key"], ",") @@ -323,7 +340,7 @@ func buildSyslogURI(agentConfig Config) string { return host } -func 
buildConfigProviders(agentConfig Config) ([]config.ConfigurationProviders, error) { +func buildConfigProviders(agentConfig Config) ([]pkgconfigsetup.ConfigurationProviders, error) { // the list of SD_CONFIG_BACKENDS supported in v5 SdConfigBackends := map[string]struct{}{ "etcd": {}, @@ -340,7 +357,7 @@ func buildConfigProviders(agentConfig Config) ([]config.ConfigurationProviders, url = url + ":" + agentConfig["sd_backend_port"] } - cp := config.ConfigurationProviders{ + cp := pkgconfigsetup.ConfigurationProviders{ Username: agentConfig["sd_backend_username"], Password: agentConfig["sd_backend_password"], TemplateURL: url, @@ -358,7 +375,7 @@ func buildConfigProviders(agentConfig Config) ([]config.ConfigurationProviders, cp.Name = "zookeeper" // name is different in v6 } - return []config.ConfigurationProviders{cp}, nil + return []pkgconfigsetup.ConfigurationProviders{cp}, nil } func buildHistogramAggregates(agentConfig Config) []string { diff --git a/pkg/config/legacy/converter_test.go b/pkg/config/legacy/converter_test.go index c1eb5ab27ec14..30a02255a8a30 100644 --- a/pkg/config/legacy/converter_test.go +++ b/pkg/config/legacy/converter_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestIsAffirmative(t *testing.T) { @@ -200,15 +200,15 @@ func TestBuildHistogramPercentiles(t *testing.T) { } func TestDefaultValues(t *testing.T) { - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() agentConfig := make(Config) FromAgentConfig(agentConfig, configConverter) - assert.Equal(t, true, config.Datadog().GetBool("hostname_fqdn")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("hostname_fqdn")) } func TestTraceIgnoreResources(t *testing.T) { require := require.New(t) - configConverter := config.NewConfigConverter() + configConverter := 
NewConfigConverter() cases := []struct { config string @@ -225,19 +225,19 @@ func TestTraceIgnoreResources(t *testing.T) { cfg["trace.ignore.resource"] = c.config err := FromAgentConfig(cfg, configConverter) require.NoError(err) - require.Equal(c.expected, config.Datadog().GetStringSlice("apm_config.ignore_resources")) + require.Equal(c.expected, pkgconfigsetup.Datadog().GetStringSlice("apm_config.ignore_resources")) } } func TestConverter(t *testing.T) { require := require.New(t) - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() cfg, err := GetAgentConfig("./tests/datadog.conf") require.NoError(err) err = FromAgentConfig(cfg, configConverter) require.NoError(err) - c := config.Datadog() + c := pkgconfigsetup.Datadog() require.Equal([]string{ "GET|POST /healthcheck", "GET /V1", @@ -317,7 +317,7 @@ func TestConverter(t *testing.T) { } func TestExtractURLAPIKeys(t *testing.T) { - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() defer func() { configConverter.Set("dd_url", "") configConverter.Set("api_key", "") @@ -330,28 +330,28 @@ func TestExtractURLAPIKeys(t *testing.T) { agentConfig["api_key"] = "" err := extractURLAPIKeys(agentConfig, configConverter) assert.NoError(t, err) - assert.Equal(t, "", config.Datadog().GetString("dd_url")) - assert.Equal(t, "", config.Datadog().GetString("api_key")) - assert.Empty(t, config.Datadog().GetStringMapStringSlice("additional_endpoints")) + assert.Equal(t, "", pkgconfigsetup.Datadog().GetString("dd_url")) + assert.Equal(t, "", pkgconfigsetup.Datadog().GetString("api_key")) + assert.Empty(t, pkgconfigsetup.Datadog().GetStringMapStringSlice("additional_endpoints")) // one url and one key agentConfig["dd_url"] = "https://datadoghq.com" agentConfig["api_key"] = "123456789" err = extractURLAPIKeys(agentConfig, configConverter) assert.NoError(t, err) - assert.Equal(t, "https://datadoghq.com", config.Datadog().GetString("dd_url")) - assert.Equal(t, 
"123456789", config.Datadog().GetString("api_key")) - assert.Empty(t, config.Datadog().GetStringMapStringSlice("additional_endpoints")) + assert.Equal(t, "https://datadoghq.com", pkgconfigsetup.Datadog().GetString("dd_url")) + assert.Equal(t, "123456789", pkgconfigsetup.Datadog().GetString("api_key")) + assert.Empty(t, pkgconfigsetup.Datadog().GetStringMapStringSlice("additional_endpoints")) // multiple dd_url and api_key agentConfig["dd_url"] = "https://datadoghq.com,https://datadoghq.com,https://datadoghq.com,https://staging.com" agentConfig["api_key"] = "123456789,abcdef,secret_key,secret_key2" err = extractURLAPIKeys(agentConfig, configConverter) assert.NoError(t, err) - assert.Equal(t, "https://datadoghq.com", config.Datadog().GetString("dd_url")) - assert.Equal(t, "123456789", config.Datadog().GetString("api_key")) + assert.Equal(t, "https://datadoghq.com", pkgconfigsetup.Datadog().GetString("dd_url")) + assert.Equal(t, "123456789", pkgconfigsetup.Datadog().GetString("api_key")) - endpoints := config.Datadog().GetStringMapStringSlice("additional_endpoints") + endpoints := pkgconfigsetup.Datadog().GetStringMapStringSlice("additional_endpoints") assert.Equal(t, 2, len(endpoints)) assert.Equal(t, []string{"abcdef", "secret_key"}, endpoints["https://datadoghq.com"]) assert.Equal(t, []string{"secret_key2"}, endpoints["https://staging.com"]) diff --git a/pkg/config/legacy/docker.go b/pkg/config/legacy/docker.go index 567bd07966f6c..3c8b7ab49f49d 100644 --- a/pkg/config/legacy/docker.go +++ b/pkg/config/legacy/docker.go @@ -15,7 +15,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/docker" - "github.com/DataDog/datadog-agent/pkg/config" yaml "gopkg.in/yaml.v2" ) @@ -64,7 +63,7 @@ type legacyDockerInstance struct { // ImportDockerConf read the configuration from docker_daemon check (agent5) // and create the configuration for the new docker check (agent 6) and move 
// needed option to datadog.yaml -func ImportDockerConf(src, dst string, overwrite bool, converter *config.LegacyConfigConverter) error { +func ImportDockerConf(src, dst string, overwrite bool, converter *ConfigConverter) error { fmt.Printf("%s\n", warningNewCheck) // read docker_daemon.yaml diff --git a/pkg/config/legacy/docker_test.go b/pkg/config/legacy/docker_test.go index 161f14c308fab..0163b4a5ed9d5 100644 --- a/pkg/config/legacy/docker_test.go +++ b/pkg/config/legacy/docker_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ( @@ -89,7 +89,7 @@ func TestConvertDocker(t *testing.T) { err := os.WriteFile(src, []byte(dockerDaemonLegacyConf), 0640) require.NoError(t, err) - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() err = ImportDockerConf(src, dst, true, configConverter) require.NoError(t, err) @@ -98,15 +98,15 @@ func TestConvertDocker(t *testing.T) { assert.Equal(t, dockerNewConf, string(newConf)) - assert.Equal(t, true, config.Datadog().GetBool("exclude_pause_container")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("exclude_pause_container")) assert.Equal(t, []string{"name:test", "name:some_image.*", "image:some_image_2", "image:some_image_3"}, - config.Datadog().GetStringSlice("ac_exclude")) - assert.Equal(t, []string{"image:some_image_3"}, config.Datadog().GetStringSlice("ac_include")) + pkgconfigsetup.Datadog().GetStringSlice("ac_exclude")) + assert.Equal(t, []string{"image:some_image_3"}, pkgconfigsetup.Datadog().GetStringSlice("ac_include")) - assert.Equal(t, "/host/test/proc", config.Datadog().GetString("container_proc_root")) - assert.Equal(t, "/host/test/sys/fs/cgroup", config.Datadog().GetString("container_cgroup_root")) + assert.Equal(t, "/host/test/proc", 
pkgconfigsetup.Datadog().GetString("container_proc_root")) + assert.Equal(t, "/host/test/sys/fs/cgroup", pkgconfigsetup.Datadog().GetString("container_cgroup_root")) assert.Equal(t, map[string]string{"test1": "test1", "test2": "test2"}, - config.Datadog().GetStringMapString("docker_labels_as_tags")) + pkgconfigsetup.Datadog().GetStringMapString("docker_labels_as_tags")) // test overwrite err = ImportDockerConf(src, dst, false, configConverter) diff --git a/pkg/config/legacy/kubernetes.go b/pkg/config/legacy/kubernetes.go index fdda56b95a00b..92be3537ac353 100644 --- a/pkg/config/legacy/kubernetes.go +++ b/pkg/config/legacy/kubernetes.go @@ -13,7 +13,6 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" - "github.com/DataDog/datadog-agent/pkg/config" yaml "gopkg.in/yaml.v2" ) @@ -83,13 +82,13 @@ func (k kubeDeprecations) print() { // ImportKubernetesConf reads the configuration from the kubernetes check (agent5) // and create the configuration for the new kubelet check (agent 6) and moves // relevant options to datadog.yaml -func ImportKubernetesConf(src, dst string, overwrite bool, converter *config.LegacyConfigConverter) error { +func ImportKubernetesConf(src, dst string, overwrite bool, converter *ConfigConverter) error { _, err := importKubernetesConfWithDeprec(src, dst, overwrite, converter) return err } // Deprecated options are listed in the kubeDeprecations return value, for testing -func importKubernetesConfWithDeprec(src, dst string, overwrite bool, converter *config.LegacyConfigConverter) (kubeDeprecations, error) { +func importKubernetesConfWithDeprec(src, dst string, overwrite bool, converter *ConfigConverter) (kubeDeprecations, error) { fmt.Printf("%s\n", warningNewKubeCheck) deprecations := make(kubeDeprecations) diff --git a/pkg/config/legacy/kubernetes_test.go b/pkg/config/legacy/kubernetes_test.go index 6facf05d09564..0e8495f097fc8 100644 --- a/pkg/config/legacy/kubernetes_test.go +++ 
b/pkg/config/legacy/kubernetes_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ( @@ -118,7 +118,7 @@ func TestConvertKubernetes(t *testing.T) { err = os.WriteFile(srcEmpty, []byte(kubernetesLegacyEmptyConf), 0640) require.NoError(t, err) - configConverter := config.NewConfigConverter() + configConverter := NewConfigConverter() deprecations, err := importKubernetesConfWithDeprec(src, dst, true, configConverter) require.NoError(t, err) require.EqualValues(t, expectedKubeDeprecations, deprecations) @@ -127,26 +127,26 @@ func TestConvertKubernetes(t *testing.T) { require.NoError(t, err) assert.Equal(t, kubeletNewConf, string(newConf)) - assert.Equal(t, 1234, config.Datadog().GetInt("kubernetes_http_kubelet_port")) - assert.Equal(t, 1234, config.Datadog().GetInt("kubernetes_https_kubelet_port")) - assert.Equal(t, "localhost", config.Datadog().GetString("kubernetes_kubelet_host")) - assert.Equal(t, "/path/to/client.crt", config.Datadog().GetString("kubelet_client_crt")) - assert.Equal(t, "/path/to/client.key", config.Datadog().GetString("kubelet_client_key")) - assert.Equal(t, "/path/to/ca.pem", config.Datadog().GetString("kubelet_client_ca")) - assert.Equal(t, "/path/to/token", config.Datadog().GetString("kubelet_auth_token_path")) - assert.EqualValues(t, expectedHostTags, config.Datadog().GetStringMapString("kubernetes_node_labels_as_tags")) - assert.Equal(t, false, config.Datadog().GetBool("kubelet_tls_verify")) - - assert.Equal(t, true, config.Datadog().GetBool("kubernetes_collect_service_tags")) - assert.Equal(t, true, config.Datadog().GetBool("collect_kubernetes_events")) - assert.Equal(t, true, config.Datadog().GetBool("leader_election")) - assert.Equal(t, 1200, config.Datadog().GetInt("leader_lease_duration")) - assert.Equal(t, 3000, 
config.Datadog().GetInt("kubernetes_service_tag_update_freq")) + assert.Equal(t, 1234, pkgconfigsetup.Datadog().GetInt("kubernetes_http_kubelet_port")) + assert.Equal(t, 1234, pkgconfigsetup.Datadog().GetInt("kubernetes_https_kubelet_port")) + assert.Equal(t, "localhost", pkgconfigsetup.Datadog().GetString("kubernetes_kubelet_host")) + assert.Equal(t, "/path/to/client.crt", pkgconfigsetup.Datadog().GetString("kubelet_client_crt")) + assert.Equal(t, "/path/to/client.key", pkgconfigsetup.Datadog().GetString("kubelet_client_key")) + assert.Equal(t, "/path/to/ca.pem", pkgconfigsetup.Datadog().GetString("kubelet_client_ca")) + assert.Equal(t, "/path/to/token", pkgconfigsetup.Datadog().GetString("kubelet_auth_token_path")) + assert.EqualValues(t, expectedHostTags, pkgconfigsetup.Datadog().GetStringMapString("kubernetes_node_labels_as_tags")) + assert.Equal(t, false, pkgconfigsetup.Datadog().GetBool("kubelet_tls_verify")) + + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("kubernetes_collect_service_tags")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("collect_kubernetes_events")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("leader_election")) + assert.Equal(t, 1200, pkgconfigsetup.Datadog().GetInt("leader_lease_duration")) + assert.Equal(t, 3000, pkgconfigsetup.Datadog().GetInt("kubernetes_service_tag_update_freq")) configConverter.Set("kubelet_tls_verify", true) deprecations, err = importKubernetesConfWithDeprec(srcEmpty, dstEmpty, true, configConverter) require.NoError(t, err) - assert.Equal(t, true, config.Datadog().GetBool("kubelet_tls_verify")) + assert.Equal(t, true, pkgconfigsetup.Datadog().GetBool("kubelet_tls_verify")) assert.Equal(t, 0, len(deprecations)) newEmptyConf, err := os.ReadFile(dstEmpty) require.NoError(t, err) diff --git a/pkg/config/legacy/no_docker.go b/pkg/config/legacy/no_docker.go index 3932b6e5cc658..6941bdac3bfbf 100644 --- a/pkg/config/legacy/no_docker.go +++ b/pkg/config/legacy/no_docker.go @@ -9,12 +9,10 @@ 
package legacy import ( "fmt" - - "github.com/DataDog/datadog-agent/pkg/config" ) // ImportDockerConf is a place holder if the agent is built without the docker flag -func ImportDockerConf(_, _ string, _ bool, _ *config.LegacyConfigConverter) error { +func ImportDockerConf(_, _ string, _ bool, _ *ConfigConverter) error { fmt.Println("This agent was build without docker support: could not convert docker_daemon.yaml") return nil } diff --git a/pkg/config/legacy_converter.go b/pkg/config/legacy_converter.go deleted file mode 100644 index f0b173070893a..0000000000000 --- a/pkg/config/legacy_converter.go +++ /dev/null @@ -1,27 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package config - -import ( - "github.com/DataDog/datadog-agent/pkg/config/model" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" -) - -// LegacyConfigConverter is used in the legacy package -// to convert A5 config to A6 -type LegacyConfigConverter struct { - Config -} - -// Set is used for setting configuration from A5 config -func (c *LegacyConfigConverter) Set(key string, value interface{}) { - c.Config.Set(key, value, model.SourceAgentRuntime) -} - -// NewConfigConverter is creating and returning a config converter -func NewConfigConverter() *LegacyConfigConverter { - return &LegacyConfigConverter{pkgconfigsetup.Datadog()} -} diff --git a/pkg/config/model/go.mod b/pkg/config/model/go.mod index 6d714751b915c..beb9f68bd7c75 100644 --- a/pkg/config/model/go.mod +++ b/pkg/config/model/go.mod @@ -3,6 +3,7 @@ module github.com/DataDog/datadog-agent/pkg/config/model go 1.22.0 replace ( + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure/ github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log/ 
github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../util/system/socket/ diff --git a/pkg/config/remote/go.mod b/pkg/config/remote/go.mod index 87dbbcf5ebb2a..241aca51f7c21 100644 --- a/pkg/config/remote/go.mod +++ b/pkg/config/remote/go.mod @@ -6,6 +6,7 @@ replace ( github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../comp/def github.com/DataDog/datadog-agent/pkg/config/model => ../model + github.com/DataDog/datadog-agent/pkg/obfuscate => ../../obfuscate github.com/DataDog/datadog-agent/pkg/proto => ../../proto github.com/DataDog/datadog-agent/pkg/remoteconfig/state => ../../remoteconfig/state github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry @@ -36,14 +37,27 @@ require ( go.etcd.io/bbolt v1.3.7 go.uber.org/atomic v1.11.0 google.golang.org/protobuf v1.33.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 ) require ( + github.com/DataDog/appsec-internal-go v1.7.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-go/v5 v5.5.0 // indirect + github.com/DataDog/go-libddwaf/v3 v3.3.0 // indirect + github.com/DataDog/go-sqllexer v0.0.14 // indirect + github.com/DataDog/sketches-go v1.4.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/ebitengine/purego v0.6.0-alpha.5 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/google/uuid v1.5.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/outcaste-io/ristretto v0.2.3 // indirect github.com/patrickmn/go-cache 
v2.1.0+incompatible // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect @@ -53,6 +67,11 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.24.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect ) require ( @@ -60,17 +79,17 @@ require ( github.com/DataDog/go-tuf v1.1.0-0.5.2 github.com/DataDog/viper v1.13.5 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/fsnotify/fsnotify v1.4.7 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/magiconair/properties v1.8.1 // indirect - github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pelletier/go-toml v1.2.0 // indirect github.com/philhofer/fwd v1.1.2 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.3.0 // indirect github.com/spf13/jwalterweatherman v1.0.0 // indirect diff --git a/pkg/config/remote/go.sum b/pkg/config/remote/go.sum index 61c0ef33e4625..bd75efb7f9d96 100644 --- a/pkg/config/remote/go.sum +++ b/pkg/config/remote/go.sum @@ -44,12 +44,27 @@ dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/appsec-internal-go v1.7.0 h1:iKRNLih83dJeVya3IoUfK+6HLD/hQsIbyBlfvLmAeb0= +github.com/DataDog/appsec-internal-go v1.7.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/go-libddwaf/v3 v3.3.0 h1:jS72fuQpFgJZEdEJDmHJCPAgNTEMZoz1EUvimPUOiJ4= +github.com/DataDog/go-libddwaf/v3 v3.3.0/go.mod h1:Bz/0JkpGf689mzbUjKJeheJINqsyyhM8p9PDuHdK2Ec= +github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q= +github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= +github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= +github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE= +github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= 
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -66,6 +81,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -85,10 +102,21 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg= +github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds= +github.com/ebitengine/purego v0.6.0-alpha.5 h1:EYID3JOAdmQ4SNZYJHu9V6IqOeRQDBYxqKAg9PyoHFY= +github.com/ebitengine/purego v0.6.0-alpha.5/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -98,6 +126,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/fsnotify/fsnotify v1.4.7 
h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= +github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -123,6 +153,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -170,8 +201,12 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b 
h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -185,10 +220,17 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -215,9 +257,12 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -227,6 +272,10 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwd github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= @@ -239,8 +288,9 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -260,12 +310,18 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY= +github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod 
h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg= github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI= github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= @@ -276,10 +332,13 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= @@ -304,6 +363,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= @@ -325,6 +385,7 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= @@ -339,8 +400,11 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= @@ -392,8 +456,11 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -430,6 +497,7 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= @@ -456,8 +524,11 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -473,6 +544,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -495,11 +567,15 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -525,6 +601,8 @@ golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -577,12 +655,17 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -687,6 +770,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/DataDog/dd-trace-go.v1 v1.67.0 h1:3Cb46zyKIlEWac21tvDF2O4KyMlOHQxrQkyiaUpdwM0= +gopkg.in/DataDog/dd-trace-go.v1 v1.67.0/go.mod h1:6DdiJPKOeJfZyd/IUGCAd5elY8qPGkztK6wbYYsMjag= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -714,6 +799,14 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= +modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod 
h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= +modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/config/remote/service/service.go b/pkg/config/remote/service/service.go index 61cc0e5d21e72..f09c59df87d88 100644 --- a/pkg/config/remote/service/service.go +++ b/pkg/config/remote/service/service.go @@ -17,6 +17,8 @@ import ( "errors" "expvar" "fmt" + "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "net/url" "path" "strconv" @@ -65,6 +67,12 @@ const ( initialFetchErrorLog uint64 = 5 ) +const ( + // the minimum amount of time that must pass before a new cache + // bypass request is allowed for the CDN client + maxCDNUpdateFrequency = 50 * time.Second +) + var ( exportedMapStatus = expvar.NewMap("remoteConfigStatus") // Status expvar exported @@ -83,6 +91,75 @@ type Service struct { // via logs. 
rcType string + db *bbolt.DB +} + +func (s *Service) getNewDirectorRoots(uptane uptaneClient, currentVersion uint64, newVersion uint64) ([][]byte, error) { + var roots [][]byte + for i := currentVersion + 1; i <= newVersion; i++ { + root, err := uptane.DirectorRoot(i) + if err != nil { + return nil, err + } + canonicalRoot, err := enforceCanonicalJSON(root) + if err != nil { + return nil, err + } + roots = append(roots, canonicalRoot) + } + return roots, nil +} + +func (s *Service) getTargetFiles(uptane uptaneClient, products []rdata.Product, cachedTargetFiles []*pbgo.TargetFileMeta) ([]*pbgo.File, error) { + productSet := make(map[rdata.Product]struct{}) + for _, product := range products { + productSet[product] = struct{}{} + } + targets, err := uptane.Targets() + if err != nil { + return nil, err + } + cachedTargets := make(map[string]data.FileMeta) + for _, cachedTarget := range cachedTargetFiles { + hashes := make(data.Hashes) + for _, hash := range cachedTarget.Hashes { + h, err := hex.DecodeString(hash.Hash) + if err != nil { + return nil, err + } + hashes[hash.Algorithm] = h + } + cachedTargets[cachedTarget.Path] = data.FileMeta{ + Hashes: hashes, + Length: cachedTarget.Length, + } + } + var configFiles []*pbgo.File + for targetPath, targetMeta := range targets { + configPathMeta, err := rdata.ParseConfigPath(targetPath) + if err != nil { + return nil, err + } + if _, inClientProducts := productSet[rdata.Product(configPathMeta.Product)]; inClientProducts { + if notEqualErr := tufutil.FileMetaEqual(cachedTargets[targetPath], targetMeta.FileMeta); notEqualErr == nil { + continue + } + fileContents, err := uptane.TargetFile(targetPath) + if err != nil { + return nil, err + } + configFiles = append(configFiles, &pbgo.File{ + Path: targetPath, + Raw: fileContents, + }) + } + } + return configFiles, nil +} + +// CoreAgentService fetches Remote Configurations from the RC backend +type CoreAgentService struct { + Service firstUpdate bool defaultRefreshInterval 
time.Duration @@ -101,8 +178,7 @@ type Service struct { hostname string tagsGetter func() []string traceAgentEnv string - db *bbolt.DB - uptane uptaneClient + uptane coreAgentUptaneClient api api.API products map[rdata.Product]struct{} @@ -125,9 +201,8 @@ type Service struct { agentVersion string } -// uptaneClient is used to mock the uptane component for testing +// uptaneClient provides functions to get TUF/uptane repo data. type uptaneClient interface { - Update(response *pbgo.LatestConfigsResponse) error State() (uptane.State, error) DirectorRoot(version uint64) ([]byte, error) StoredOrgUUID() (string, error) @@ -138,6 +213,18 @@ type uptaneClient interface { TUFVersionState() (uptane.TUFVersions, error) } +// coreAgentUptaneClient provides functions to get TUF/uptane repo data and update the agent's state via the RC backend. +type coreAgentUptaneClient interface { + uptaneClient + Update(response *pbgo.LatestConfigsResponse) error +} + +// cdnUptaneClient provides functions to get TUF/uptane repo data and update the agent's state via the CDN. 
+type cdnUptaneClient interface { + uptaneClient + Update(ctx context.Context) error +} + // RcTelemetryReporter should be implemented by the agent to publish metrics on exceptional cache bypass request events type RcTelemetryReporter interface { // IncRateLimit is invoked when a cache bypass request is prevented due to rate limiting @@ -286,7 +373,7 @@ func WithClientTTL(interval time.Duration, cfgPath string) func(s *options) { } // NewService instantiates a new remote configuration management service -func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGetter func() []string, telemetryReporter RcTelemetryReporter, agentVersion string, opts ...Option) (*Service, error) { +func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGetter func() []string, telemetryReporter RcTelemetryReporter, agentVersion string, opts ...Option) (*CoreAgentService, error) { options := defaultOptions for _, opt := range opts { opt(&options) @@ -337,7 +424,7 @@ func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGette if authKeys.rcKeySet { opt = append(opt, uptane.WithOrgIDCheck(authKeys.rcKey.OrgID)) } - uptaneClient, err := uptane.NewClient( + uptaneClient, err := uptane.NewCoreAgentClient( db, newRCBackendOrgUUIDProvider(http), opt..., @@ -349,8 +436,11 @@ func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGette clock := clock.New() - return &Service{ - rcType: rcType, + return &CoreAgentService{ + Service: Service{ + rcType: rcType, + db: db, + }, firstUpdate: true, defaultRefreshInterval: options.refresh, refreshIntervalOverrideAllowed: options.refreshIntervalOverrideAllowed, @@ -362,7 +452,6 @@ func NewService(cfg model.Reader, rcType, baseRawURL, hostname string, tagsGette tagsGetter: tagsGetter, clock: clock, traceAgentEnv: options.traceAgentEnv, - db: db, api: http, uptane: uptaneClient, clients: newClients(clock, options.clientTTL), @@ -393,7 +482,7 @@ func 
newRCBackendOrgUUIDProvider(http api.API) uptane.OrgUUIDProvider { } // Start the remote configuration management service -func (s *Service) Start() { +func (s *CoreAgentService) Start() { go func() { s.pollOrgStatus() for { @@ -452,7 +541,7 @@ func (s *Service) Start() { } // Stop stops the refresh loop and closes the on-disk DB cache -func (s *Service) Stop() error { +func (s *CoreAgentService) Stop() error { if s.stopConfigPoller != nil { close(s.stopConfigPoller) } @@ -460,7 +549,7 @@ func (s *Service) Stop() error { return s.db.Close() } -func (s *Service) pollOrgStatus() { +func (s *CoreAgentService) pollOrgStatus() { response, err := s.api.FetchOrgStatus(context.Background()) if err != nil { // Unauthorized and proxy error are caught by the main loop requesting the latest config, @@ -498,13 +587,13 @@ func (s *Service) pollOrgStatus() { exportedStatusKeyAuthorized.Set(strconv.FormatBool(response.Authorized)) } -func (s *Service) calculateRefreshInterval() time.Duration { +func (s *CoreAgentService) calculateRefreshInterval() time.Duration { backoffTime := s.backoffPolicy.GetBackoffDuration(s.backoffErrorCount) return s.defaultRefreshInterval + backoffTime } -func (s *Service) refresh() error { +func (s *CoreAgentService) refresh() error { s.Lock() activeClients := s.clients.activeClients() s.refreshProducts(activeClients) @@ -585,11 +674,11 @@ func (s *Service) refresh() error { return nil } -func (s *Service) forceRefresh() bool { +func (s *CoreAgentService) forceRefresh() bool { return s.firstUpdate } -func (s *Service) refreshProducts(activeClients []*pbgo.Client) { +func (s *CoreAgentService) refreshProducts(activeClients []*pbgo.Client) { for _, client := range activeClients { for _, product := range client.Products { if _, hasProduct := s.products[rdata.Product(product)]; !hasProduct { @@ -599,7 +688,7 @@ func (s *Service) refreshProducts(activeClients []*pbgo.Client) { } } -func (s *Service) getClientState() ([]byte, error) { +func (s 
*CoreAgentService) getClientState() ([]byte, error) { rawTargetsCustom, err := s.uptane.TargetsCustom() if err != nil { return nil, err @@ -611,7 +700,7 @@ func (s *Service) getClientState() ([]byte, error) { return custom.OpaqueBackendState, nil } -func (s *Service) getRefreshInterval() (time.Duration, error) { +func (s *CoreAgentService) getRefreshInterval() (time.Duration, error) { rawTargetsCustom, err := s.uptane.TargetsCustom() if err != nil { return 0, err @@ -633,7 +722,7 @@ func (s *Service) getRefreshInterval() (time.Duration, error) { // ClientGetConfigs is the polling API called by tracers and agents to get the latest configurations // //nolint:revive // TODO(RC) Fix revive linter -func (s *Service) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) { +func (s *CoreAgentService) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetConfigsRequest) (*pbgo.ClientGetConfigsResponse, error) { s.Lock() defer s.Unlock() err := validateRequest(request) @@ -682,7 +771,7 @@ func (s *Service) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetCon if tufVersions.DirectorTargets == request.Client.State.TargetsVersion { return &pbgo.ClientGetConfigsResponse{}, nil } - roots, err := s.getNewDirectorRoots(request.Client.State.RootVersion, tufVersions.DirectorRoot) + roots, err := s.getNewDirectorRoots(s.uptane, request.Client.State.RootVersion, tufVersions.DirectorRoot) if err != nil { return nil, err } @@ -690,7 +779,7 @@ func (s *Service) ClientGetConfigs(_ context.Context, request *pbgo.ClientGetCon if err != nil { return nil, err } - targetFiles, err := s.getTargetFiles(rdata.StringListToProduct(request.Client.Products), request.CachedTargetFiles) + targetFiles, err := s.getTargetFiles(s.uptane, rdata.StringListToProduct(request.Client.Products), request.CachedTargetFiles) if err != nil { return nil, err } @@ -730,7 +819,7 @@ func (s *Service) ClientGetConfigs(_ context.Context, request 
*pbgo.ClientGetCon } // ConfigGetState returns the state of the configuration and the director repos in the local store -func (s *Service) ConfigGetState() (*pbgo.GetStateConfigResponse, error) { +func (s *CoreAgentService) ConfigGetState() (*pbgo.GetStateConfigResponse, error) { state, err := s.uptane.State() if err != nil { return nil, err @@ -758,69 +847,6 @@ func (s *Service) ConfigGetState() (*pbgo.GetStateConfigResponse, error) { return response, nil } -func (s *Service) getNewDirectorRoots(currentVersion uint64, newVersion uint64) ([][]byte, error) { - var roots [][]byte - for i := currentVersion + 1; i <= newVersion; i++ { - root, err := s.uptane.DirectorRoot(i) - if err != nil { - return nil, err - } - canonicalRoot, err := enforceCanonicalJSON(root) - if err != nil { - return nil, err - } - roots = append(roots, canonicalRoot) - } - return roots, nil -} - -func (s *Service) getTargetFiles(products []rdata.Product, cachedTargetFiles []*pbgo.TargetFileMeta) ([]*pbgo.File, error) { - productSet := make(map[rdata.Product]struct{}) - for _, product := range products { - productSet[product] = struct{}{} - } - targets, err := s.uptane.Targets() - if err != nil { - return nil, err - } - cachedTargets := make(map[string]data.FileMeta) - for _, cachedTarget := range cachedTargetFiles { - hashes := make(data.Hashes) - for _, hash := range cachedTarget.Hashes { - h, err := hex.DecodeString(hash.Hash) - if err != nil { - return nil, err - } - hashes[hash.Algorithm] = h - } - cachedTargets[cachedTarget.Path] = data.FileMeta{ - Hashes: hashes, - Length: cachedTarget.Length, - } - } - var configFiles []*pbgo.File - for targetPath, targetMeta := range targets { - configPathMeta, err := rdata.ParseConfigPath(targetPath) - if err != nil { - return nil, err - } - if _, inClientProducts := productSet[rdata.Product(configPathMeta.Product)]; inClientProducts { - if notEqualErr := tufutil.FileMetaEqual(cachedTargets[targetPath], targetMeta.FileMeta); notEqualErr == nil { - 
continue - } - fileContents, err := s.uptane.TargetFile(targetPath) - if err != nil { - return nil, err - } - configFiles = append(configFiles, &pbgo.File{ - Path: targetPath, - Raw: fileContents, - }) - } - } - return configFiles, nil -} - func validateRequest(request *pbgo.ClientGetConfigsRequest) error { if request.Client == nil { return status.Error(codes.InvalidArgument, "client is a required field for client config update requests") @@ -925,3 +951,179 @@ func enforceCanonicalJSON(raw []byte) ([]byte, error) { return canonical, nil } + +// HTTPClient fetches Remote Configurations from an HTTP(s)-based backend +type HTTPClient struct { + Service + lastUpdate time.Time + uptane cdnUptaneClient +} + +// NewHTTPClient creates a new HTTPClient that can be used to fetch Remote Configurations from an HTTP(s)-based backend +// It uses a local db to cache the fetched configurations. Only one HTTPClient should be created per agent. +// An HTTPClient must be closed via HTTPClient.Close() before creating a new one. +func NewHTTPClient(runPath, site, apiKey, agentVersion string) (*HTTPClient, error) { + dbPath := path.Join(runPath, "remote-config-cdn.db") + db, err := openCacheDB(dbPath, agentVersion, apiKey) + if err != nil { + return nil, err + } + + uptaneCDNClient, err := uptane.NewCDNClient(db, site, apiKey) + if err != nil { + return nil, err + } + + return &HTTPClient{ + Service: Service{ + rcType: "CDN", + db: db, + }, + uptane: uptaneCDNClient, + }, nil +} + +// Close closes the HTTPClient and cleans up any resources. Close must be called +// before any other HTTPClients are instantiated via NewHTTPClient +func (c *HTTPClient) Close() error { + return c.db.Close() +} + +// GetCDNConfigUpdate returns any updated configs. If multiple requests have been made +// in a short amount of time, a cached response is returned. If RC has been disabled, +// an error is returned. 
If there is no update (the targets version is up-to-date) nil +// is returned for both the update and error. +func (c *HTTPClient) GetCDNConfigUpdate( + ctx context.Context, + products []string, + currentTargetsVersion, currentRootVersion uint64, + cachedTargetFiles []*pbgo.TargetFileMeta, +) (*state.Update, error) { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "HTTPClient.GetCDNConfigUpdate") + defer span.Finish(tracer.WithError(err)) + if !c.shouldUpdate() { + span.SetTag("use_cache", true) + return c.getUpdate(ctx, products, currentTargetsVersion, currentRootVersion, cachedTargetFiles) + } + + err = c.update(ctx) + if err != nil { + span.SetTag("cache_update_error", true) + _ = log.Warn(fmt.Sprintf("Error updating CDN config repo: %v", err)) + } + + u, err := c.getUpdate(ctx, products, currentTargetsVersion, currentRootVersion, cachedTargetFiles) + return u, err +} + +func (c *HTTPClient) update(ctx context.Context) error { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "HTTPClient.update") + defer span.Finish(tracer.WithError(err)) + c.Lock() + defer c.Unlock() + + err = c.uptane.Update(ctx) + if err != nil { + return err + } + + return nil +} + +func (c *HTTPClient) shouldUpdate() bool { + c.Lock() + defer c.Unlock() + if time.Since(c.lastUpdate) > maxCDNUpdateFrequency { + c.lastUpdate = time.Now() + return true + } + return false +} + +func (c *HTTPClient) getUpdate( + ctx context.Context, + products []string, + currentTargetsVersion, currentRootVersion uint64, + cachedTargetFiles []*pbgo.TargetFileMeta, +) (*state.Update, error) { + c.Lock() + defer c.Unlock() + span, _ := tracer.StartSpanFromContext(ctx, "HTTPClient.getUpdate") + defer span.Finish() + span.SetTag("products", products) + span.SetTag("current_targets_version", currentTargetsVersion) + span.SetTag("current_root_version", currentRootVersion) + span.SetTag("cached_target_files", cachedTargetFiles) + + tufVersions, err := c.uptane.TUFVersionState() + if err != 
nil { + return nil, err + } + if tufVersions.DirectorTargets == currentTargetsVersion { + return nil, nil + } + roots, err := c.getNewDirectorRoots(c.uptane, currentRootVersion, tufVersions.DirectorRoot) + if err != nil { + return nil, err + } + targetsRaw, err := c.uptane.TargetsMeta() + if err != nil { + return nil, err + } + targetFiles, err := c.getTargetFiles(c.uptane, rdata.StringListToProduct(products), cachedTargetFiles) + if err != nil { + return nil, err + } + + canonicalTargets, err := enforceCanonicalJSON(targetsRaw) + if err != nil { + return nil, err + } + + directorTargets, err := c.uptane.Targets() + if err != nil { + return nil, err + } + + productsMap := make(map[string]struct{}) + for _, product := range products { + productsMap[product] = struct{}{} + } + configs := make([]string, 0) + expiredConfigs := make([]string, 0) + for path, meta := range directorTargets { + pathMeta, err := rdata.ParseConfigPath(path) + if err != nil { + return nil, err + } + if _, productRequested := productsMap[pathMeta.Product]; !productRequested { + continue + } + configMetadata, err := parseFileMetaCustom(meta.Custom) + if err != nil { + return nil, err + } + if configExpired(configMetadata.Expires) { + expiredConfigs = append(expiredConfigs, path) + continue + } + + configs = append(configs, path) + } + + fileMap := make(map[string][]byte, len(targetFiles)) + for _, f := range targetFiles { + fileMap[f.Path] = f.Raw + } + + span.SetTag("configs.returned", configs) + span.SetTag("configs.expired", expiredConfigs) + return &state.Update{ + TUFRoots: roots, + TUFTargets: canonicalTargets, + TargetFiles: fileMap, + ClientConfigs: configs, + }, nil +} diff --git a/pkg/config/remote/service/service_test.go b/pkg/config/remote/service/service_test.go index 683713d49a747..860b6546fcbe0 100644 --- a/pkg/config/remote/service/service_test.go +++ b/pkg/config/remote/service/service_test.go @@ -42,6 +42,11 @@ const ( testEnv = "test-env" ) +const ( + site = "test-site" + k = 
"test-api-key" +) + // Setup overrides for tests func init() { uuid.GetUUID = func() string { @@ -69,11 +74,24 @@ type mockUptane struct { mock.Mock } -func (m *mockUptane) Update(response *pbgo.LatestConfigsResponse) error { +type mockCoreAgentUptane struct { + mockUptane +} + +type mockCDNUptane struct { + mockUptane +} + +func (m *mockCoreAgentUptane) Update(response *pbgo.LatestConfigsResponse) error { args := m.Called(response) return args.Error(0) } +func (m *mockCDNUptane) Update(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + func (m *mockUptane) State() (uptane.State, error) { args := m.Called() return args.Get(0).(uptane.State), args.Error(1) @@ -139,7 +157,7 @@ var testRCKey = msgpgo.RemoteConfigKey{ Datacenter: "dd.com", } -func newTestService(t *testing.T, api *mockAPI, uptane *mockUptane, clock clock.Clock) *Service { +func newTestService(t *testing.T, api *mockAPI, uptane *mockCoreAgentUptane, clock clock.Clock) *CoreAgentService { cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.SetWithoutSource("hostname", "test-hostname") @@ -167,7 +185,7 @@ func newTestService(t *testing.T, api *mockAPI, uptane *mockUptane, clock clock. func TestServiceBackoffFailure(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -251,7 +269,7 @@ func TestServiceBackoffFailure(t *testing.T) { func TestServiceBackoffFailureRecovery(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -319,7 +337,7 @@ func customMeta(tracerPredicates []*pbgo.TracerPredicateV1, expiration int64) *j // gRPC's InvalidArgument status code. 
func TestClientGetConfigsRequestMissingFields(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -384,7 +402,7 @@ func TestClientGetConfigsRequestMissingFields(t *testing.T) { func TestService(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -415,7 +433,7 @@ func TestService(t *testing.T) { api.AssertExpectations(t) uptaneClient.AssertExpectations(t) - *uptaneClient = mockUptane{} + *uptaneClient = mockCoreAgentUptane{} *api = mockAPI{} root3 := []byte(`{"signatures": "testroot3", "signed": "signed"}`) @@ -530,7 +548,7 @@ func TestServiceClientPredicates(t *testing.T) { lastConfigResponse := &pbgo.LatestConfigsResponse{ TargetFiles: []*pbgo.File{{Path: "test"}}, } - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} api := &mockAPI{} service := newTestService(t, api, uptaneClient, clock) @@ -624,7 +642,7 @@ func TestServiceClientPredicates(t *testing.T) { func TestServiceGetRefreshIntervalNone(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -664,7 +682,7 @@ func TestServiceGetRefreshIntervalNone(t *testing.T) { func TestServiceGetRefreshIntervalValid(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -704,7 +722,7 @@ func TestServiceGetRefreshIntervalValid(t *testing.T) { func TestServiceGetRefreshIntervalTooSmall(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, 
clock) @@ -744,7 +762,7 @@ func TestServiceGetRefreshIntervalTooSmall(t *testing.T) { func TestServiceGetRefreshIntervalTooBig(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -784,7 +802,7 @@ func TestServiceGetRefreshIntervalTooBig(t *testing.T) { func TestServiceGetRefreshIntervalNoOverrideAllowed(t *testing.T) { api := &mockAPI{} - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} clock := clock.NewMock() service := newTestService(t, api, uptaneClient, clock) @@ -836,7 +854,7 @@ func TestConfigExpiration(t *testing.T) { lastConfigResponse := &pbgo.LatestConfigsResponse{ TargetFiles: []*pbgo.File{{Path: "test"}}, } - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} api := &mockAPI{} service := newTestService(t, api, uptaneClient, clock) @@ -914,7 +932,7 @@ func TestConfigExpiration(t *testing.T) { func TestOrgStatus(t *testing.T) { api := &mockAPI{} clock := clock.NewMock() - uptaneClient := &mockUptane{} + uptaneClient := &mockCoreAgentUptane{} service := newTestService(t, api, uptaneClient, clock) response := &pbgo.OrgStatusResponse{ @@ -1146,3 +1164,103 @@ func TestWithClientTTL(t *testing.T) { func getHostTags() []string { return []string{"dogo_state:hungry"} } + +func setupCDNClient(t *testing.T, uptaneClient *mockCDNUptane) *HTTPClient { + client, err := NewHTTPClient(t.TempDir(), site, k, "9.9.9") + require.NoError(t, err) + if uptaneClient != nil { + client.uptane = uptaneClient + } + return client +} + +// TestHTTPClientRecentUpdate tests that with a recent (<50s ago) last-update-time, +// the client will not fetch a new update and will return the cached state +func TestHTTPClientRecentUpdate(t *testing.T) { + uptaneClient := &mockCDNUptane{} + uptaneClient.On("TUFVersionState").Return(uptane.TUFVersions{ + DirectorRoot: 1, + DirectorTargets: 1, + ConfigRoot: 1, + 
ConfigSnapshot: 1, + }, nil) + uptaneClient.On("DirectorRoot", uint64(1)).Return([]byte(`{"signatures": "testroot1", "signed": "one"}`), nil) + uptaneClient.On("TargetsMeta").Return([]byte(`{"signatures": "testtargets", "signed": "stuff"}`), nil) + uptaneClient.On("Targets").Return( + data.TargetFiles{ + "datadog/2/TESTING1/id/1": {}, + "datadog/2/TESTING2/id/2": {}, + }, + nil, + ) + uptaneClient.On("TargetFile", "datadog/2/TESTING1/id/1").Return([]byte(`testing_1`), nil) + + client := setupCDNClient(t, uptaneClient) + defer client.Close() + client.lastUpdate = time.Now() + + u, err := client.GetCDNConfigUpdate(context.TODO(), []string{"TESTING1"}, 0, 0, []*pbgo.TargetFileMeta{}) + require.NoError(t, err) + uptaneClient.AssertExpectations(t) + require.NotNil(t, u) + require.Len(t, u.TargetFiles, 1) + require.Equal(t, []byte(`testing_1`), u.TargetFiles["datadog/2/TESTING1/id/1"]) + require.Len(t, u.ClientConfigs, 1) + require.Equal(t, "datadog/2/TESTING1/id/1", u.ClientConfigs[0]) + require.Len(t, u.TUFRoots, 1) + require.Equal(t, []byte(`{"signatures":"testroot1","signed":"one"}`), u.TUFRoots[0]) +} + +// TestHTTPClientUpdateSuccess tests that a stale state will trigger an update of the cached state +// before returning the cached state. In the event that the Update fails, the stale state will be returned. 
+func TestHTTPClientUpdateSuccess(t *testing.T) { + var tests = []struct { + updateSucceeds bool + }{ + {true}, + {false}, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("updateSucceeds=%t", tt.updateSucceeds), func(t *testing.T) { + uptaneClient := &mockCDNUptane{} + uptaneClient.On("TUFVersionState").Return(uptane.TUFVersions{ + DirectorRoot: 1, + DirectorTargets: 1, + ConfigRoot: 1, + ConfigSnapshot: 1, + }, nil) + uptaneClient.On("DirectorRoot", uint64(1)).Return([]byte(`{"signatures": "testroot1", "signed": "one"}`), nil) + uptaneClient.On("TargetsMeta").Return([]byte(`{"signatures": "testtargets", "signed": "stuff"}`), nil) + uptaneClient.On("Targets").Return( + data.TargetFiles{ + "datadog/2/TESTING1/id/1": {}, + "datadog/2/TESTING2/id/2": {}, + }, + nil, + ) + uptaneClient.On("TargetFile", "datadog/2/TESTING1/id/1").Return([]byte(`testing_1`), nil) + + updateErr := fmt.Errorf("uh oh") + if tt.updateSucceeds { + updateErr = nil + } + uptaneClient.On("Update", mock.Anything).Return(updateErr) + + client := setupCDNClient(t, uptaneClient) + defer client.Close() + client.lastUpdate = time.Now().Add(time.Second * -60) + + u, err := client.GetCDNConfigUpdate(context.TODO(), []string{"TESTING1"}, 0, 0, []*pbgo.TargetFileMeta{}) + require.NoError(t, err) + uptaneClient.AssertExpectations(t) + require.NotNil(t, u) + require.Len(t, u.TargetFiles, 1) + require.Equal(t, []byte(`testing_1`), u.TargetFiles["datadog/2/TESTING1/id/1"]) + require.Len(t, u.ClientConfigs, 1) + require.Equal(t, "datadog/2/TESTING1/id/1", u.ClientConfigs[0]) + require.Len(t, u.TUFRoots, 1) + require.Equal(t, []byte(`{"signatures":"testroot1","signed":"one"}`), u.TUFRoots[0]) + }) + } +} diff --git a/pkg/config/remote/uptane/client.go b/pkg/config/remote/uptane/client.go index c0ace9b0321e3..f56e86fba0b8f 100644 --- a/pkg/config/remote/uptane/client.go +++ b/pkg/config/remote/uptane/client.go @@ -9,14 +9,17 @@ package uptane import ( "bytes" + "context" "fmt" + "github.com/pkg/errors" + 
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "net/http" "strings" "sync" "time" "github.com/DataDog/go-tuf/client" "github.com/DataDog/go-tuf/data" - "github.com/pkg/errors" "go.etcd.io/bbolt" rdata "github.com/DataDog/datadog-agent/pkg/config/remote/data" @@ -31,24 +34,38 @@ type Client struct { orgID int64 orgUUIDProvider OrgUUIDProvider - configLocalStore *localStore - configRemoteStore *remoteStoreConfig - configTUFClient *client.Client + configLocalStore *localStore + configTUFClient *client.Client + configRootOverride string + directorLocalStore *localStore + directorTUFClient *client.Client - directorLocalStore *localStore - directorRemoteStore *remoteStoreDirector - directorTUFClient *client.Client directorRootOverride string - - targetStore *targetStore - orgStore *orgStore + targetStore *targetStore + orgStore *orgStore cachedVerify bool cachedVerifyTime time.Time // TUF transaction tracker transactionalStore *transactionalStore + + orgVerificationEnabled bool +} + +// CoreAgentClient is an uptane client that fetches the latest configs from the Core Agent +type CoreAgentClient struct { + *Client + configRemoteStore *remoteStoreConfig + directorRemoteStore *remoteStoreDirector +} + +// CDNClient is an uptane client that fetches the latest configs from the server over HTTP(s) +type CDNClient struct { + *Client + directorRemoteStore *cdnRemoteDirectorStore + configRemoteStore *cdnRemoteConfigStore } // ClientOption describes a function in charge of changing the uptane client @@ -80,23 +97,26 @@ func WithConfigRootOverride(site string, configRootOverride string) ClientOption // OrgUUIDProvider is a provider of the agent org UUID type OrgUUIDProvider func() (string, error) -// NewClient creates a new uptane client -func NewClient(cacheDB *bbolt.DB, orgUUIDProvider OrgUUIDProvider, options ...ClientOption) (c *Client, err error) { +// NewCoreAgentClient creates a new uptane client +func NewCoreAgentClient(cacheDB *bbolt.DB, orgUUIDProvider 
OrgUUIDProvider, options ...ClientOption) (c *CoreAgentClient, err error) { transactionalStore := newTransactionalStore(cacheDB) targetStore := newTargetStore(transactionalStore) orgStore := newOrgStore(transactionalStore) - c = &Client{ + c = &CoreAgentClient{ configRemoteStore: newRemoteStoreConfig(targetStore), directorRemoteStore: newRemoteStoreDirector(targetStore), - targetStore: targetStore, - orgStore: orgStore, - transactionalStore: transactionalStore, - orgUUIDProvider: orgUUIDProvider, + Client: &Client{ + orgStore: orgStore, + orgUUIDProvider: orgUUIDProvider, + targetStore: targetStore, + transactionalStore: transactionalStore, + orgVerificationEnabled: true, + }, } for _, o := range options { - o(c) + o(c.Client) } if c.configLocalStore, err = newLocalStoreConfig(transactionalStore, c.site, c.configRootOverride); err != nil { @@ -113,7 +133,7 @@ func NewClient(cacheDB *bbolt.DB, orgUUIDProvider OrgUUIDProvider, options ...Cl } // Update updates the uptane client and rollbacks in case of error -func (c *Client) Update(response *pbgo.LatestConfigsResponse) error { +func (c *CoreAgentClient) Update(response *pbgo.LatestConfigsResponse) error { c.Lock() defer c.Unlock() c.cachedVerify = false @@ -134,7 +154,7 @@ func (c *Client) Update(response *pbgo.LatestConfigsResponse) error { } // update updates the uptane client -func (c *Client) update(response *pbgo.LatestConfigsResponse) error { +func (c *CoreAgentClient) update(response *pbgo.LatestConfigsResponse) error { err := c.updateRepos(response) if err != nil { return err @@ -146,6 +166,121 @@ func (c *Client) update(response *pbgo.LatestConfigsResponse) error { return c.verify() } +func (c *CoreAgentClient) updateRepos(response *pbgo.LatestConfigsResponse) error { + err := c.targetStore.storeTargetFiles(response.TargetFiles) + if err != nil { + return err + } + c.directorRemoteStore.update(response) + c.configRemoteStore.update(response) + _, err = c.directorTUFClient.Update() + if err != nil { + return 
errors.Wrap(err, "failed updating director repository") + } + _, err = c.configTUFClient.Update() + if err != nil { + e := fmt.Sprintf("could not update config repository [%s]", configMetasUpdateSummary(response.ConfigMetas)) + return errors.Wrap(err, e) + } + return nil +} + +// NewCDNClient creates a new uptane client that will fetch the latest configs from the server over HTTP(s) +func NewCDNClient(cacheDB *bbolt.DB, site, apiKey string, options ...ClientOption) (c *CDNClient, err error) { + transactionalStore := newTransactionalStore(cacheDB) + targetStore := newTargetStore(transactionalStore) + orgStore := newOrgStore(transactionalStore) + + httpClient := &http.Client{} + + c = &CDNClient{ + configRemoteStore: newCDNRemoteConfigStore(httpClient, site, apiKey), + directorRemoteStore: newCDNRemoteDirectorStore(httpClient, site, apiKey), + Client: &Client{ + site: site, + targetStore: targetStore, + transactionalStore: transactionalStore, + orgStore: orgStore, + orgVerificationEnabled: false, + orgUUIDProvider: func() (string, error) { + return "", nil + }, + }, + } + for _, o := range options { + o(c.Client) + } + + if c.configLocalStore, err = newLocalStoreConfig(transactionalStore, site, c.configRootOverride); err != nil { + return nil, err + } + + if c.directorLocalStore, err = newLocalStoreDirector(transactionalStore, site, c.directorRootOverride); err != nil { + return nil, err + } + + c.configTUFClient = client.NewClient(c.configLocalStore, c.configRemoteStore) + c.directorTUFClient = client.NewClient(c.directorLocalStore, c.directorRemoteStore) + return c, nil +} + +// Update updates the uptane client and rollbacks in case of error +func (c *CDNClient) Update(ctx context.Context) error { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "CDNClient.Update") + defer span.Finish(tracer.WithError(err)) + c.Lock() + defer c.Unlock() + c.cachedVerify = false + + // in case the commit is successful it is a no-op. 
+ // the defer is present to be sure a transaction is never left behind. + defer c.transactionalStore.rollback() + + err = c.update(ctx) + if err != nil { + c.configTUFClient = client.NewClient(c.configLocalStore, c.configRemoteStore) + c.directorTUFClient = client.NewClient(c.directorLocalStore, c.directorRemoteStore) + return err + } + return c.transactionalStore.commit() +} + +// update updates the uptane client +func (c *CDNClient) update(ctx context.Context) error { + var err error + span, ctx := tracer.StartSpanFromContext(ctx, "CDNClient.update") + defer span.Finish(tracer.WithError(err)) + + err = c.updateRepos(ctx) + if err != nil { + return err + } + err = c.pruneTargetFiles() + if err != nil { + return err + } + return c.verify() +} + +func (c *CDNClient) updateRepos(ctx context.Context) error { + var err error + span, _ := tracer.StartSpanFromContext(ctx, "CDNClient.updateRepos") + defer span.Finish(tracer.WithError(err)) + + _, err = c.directorTUFClient.Update() + if err != nil { + err = errors.Wrap(err, "failed updating director repository") + return err + } + _, err = c.configTUFClient.Update() + if err != nil { + err = errors.Wrap(err, "could not update config repository") + return err + } + return nil +} + // TargetsCustom returns the current targets custom of this uptane client func (c *Client) TargetsCustom() ([]byte, error) { c.Lock() @@ -225,25 +360,6 @@ func (c *Client) TargetsMeta() ([]byte, error) { return targets, nil } -func (c *Client) updateRepos(response *pbgo.LatestConfigsResponse) error { - err := c.targetStore.storeTargetFiles(response.TargetFiles) - if err != nil { - return err - } - c.directorRemoteStore.update(response) - c.configRemoteStore.update(response) - _, err = c.directorTUFClient.Update() - if err != nil { - return errors.Wrap(err, "failed updating director repository") - } - _, err = c.configTUFClient.Update() - if err != nil { - e := fmt.Sprintf("could not update config repository [%s]", 
configMetasUpdateSummary(response.ConfigMetas)) - return errors.Wrap(err, e) - } - return nil -} - func (c *Client) pruneTargetFiles() error { targetFiles, err := c.directorTUFClient.Targets() if err != nil { @@ -302,6 +418,9 @@ func (c *Client) StoredOrgUUID() (string, error) { } func (c *Client) verifyOrg() error { + if !c.orgVerificationEnabled { + return nil + } rawCustom, err := c.configLocalStore.GetMetaCustom(metaSnapshot) if err != nil { return fmt.Errorf("could not obtain snapshot custom: %v", err) diff --git a/pkg/config/remote/uptane/client_test.go b/pkg/config/remote/uptane/client_test.go index 3f59d6c1f93f7..171eb605b985f 100644 --- a/pkg/config/remote/uptane/client_test.go +++ b/pkg/config/remote/uptane/client_test.go @@ -42,13 +42,13 @@ func newTestConfig(repo testRepositories) model.Config { return cfg } -func newTestClient(db *bbolt.DB, cfg model.Config) (*Client, error) { +func newTestClient(db *bbolt.DB, cfg model.Config) (*CoreAgentClient, error) { opts := []ClientOption{ WithOrgIDCheck(2), WithConfigRootOverride("datadoghq.com", cfg.GetString("remote_configuration.config_root")), WithDirectorRootOverride("datadoghq.com", cfg.GetString("remote_configuration.director_root")), } - return NewClient(db, getTestOrgUUIDProvider(2), opts...) + return NewCoreAgentClient(db, getTestOrgUUIDProvider(2), opts...) 
} func TestClientState(t *testing.T) { @@ -278,7 +278,7 @@ func TestClientVerifyOrgUUID(t *testing.T) { func TestOrgStore(t *testing.T) { db := getTestDB(t) - client, err := NewClient(db, getTestOrgUUIDProvider(2), WithOrgIDCheck(2)) + client, err := NewCoreAgentClient(db, getTestOrgUUIDProvider(2), WithOrgIDCheck(2)) assert.NoError(t, err) // Store key diff --git a/pkg/config/remote/uptane/remote_store.go b/pkg/config/remote/uptane/remote_store.go index 0286057ce1474..88c28666cb116 100644 --- a/pkg/config/remote/uptane/remote_store.go +++ b/pkg/config/remote/uptane/remote_store.go @@ -7,7 +7,11 @@ package uptane import ( "bytes" + "fmt" "io" + "net/http" + "path" + "strings" "github.com/DataDog/go-tuf/client" @@ -82,7 +86,7 @@ func (s *remoteStore) GetMeta(path string) (io.ReadCloser, int64, error) { return io.NopCloser(bytes.NewReader(requestedVersion)), int64(len(requestedVersion)), nil } -// GetMeta implements go-tuf's RemoteStore.GetTarget +// GetTarget implements go-tuf's RemoteStore.GetTarget // See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore func (s *remoteStore) GetTarget(targetPath string) (stream io.ReadCloser, size int64, err error) { target, found, err := s.targetStore.getTargetFile(targetPath) @@ -167,3 +171,142 @@ func (sc *remoteStoreConfig) update(update *pbgo.LatestConfigsResponse) { sc.metas[roleTargets][metas.TopTargets.Version] = metas.TopTargets.Raw } } + +// cdnRemoteStore implements go-tuf's RemoteStore +// It is an HTTP interface to an authenticated remote server that serves an uptane repository +// See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore +type cdnRemoteStore struct { + httpClient RequestDoer + host string + pathPrefix string + apiKey string + repositoryType string + + authnToken string +} + +// RequestDoer is an interface that abstracts the http.Client.Do method +type RequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +type cdnRemoteConfigStore struct { + 
cdnRemoteStore +} + +type cdnRemoteDirectorStore struct { + cdnRemoteStore +} + +// getCDNHostnameFromSite returns the staging or production CDN hostname for a given site. +// Site can be any of the (non-fed) documented DD sites per https://docs.datadoghq.com/getting_started/site/ +func getCDNHostnameFromSite(site string) string { + s := strings.TrimPrefix(site, "https://") + switch s { + // staging: + case "datad0g.com": + return "remote-config.datad0g.com" + // prod: + case "ap1.datadoghq.com": + return "remote-config.datadoghq.com" + case "us5.datadoghq.com": + return "remote-config.datadoghq.com" + case "us3.datadoghq.com": + return "remote-config.datadoghq.com" + case "app.datadoghq.eu": + return "remote-config.datadoghq.com" + case "app.datadoghq.com": + return "remote-config.datadoghq.com" + } + return "remote-config.datadoghq.com" +} + +// Trims any schemas or non-datacenter related subdomains from the site to get the path prefix for the CDN +// e.g. https://us3.datadoghq.com -> us3.datadoghq.com +// e.g. 
https://app.datadoghq.com -> datadoghq.com +func getCDNPathPrefixFromSite(site string) string { + s := strings.TrimPrefix(site, "https://app.") + s = strings.TrimPrefix(s, "https://") + return s +} + +func newCDNRemoteConfigStore(client *http.Client, site, apiKey string) *cdnRemoteConfigStore { + return &cdnRemoteConfigStore{ + cdnRemoteStore: cdnRemoteStore{ + httpClient: client, + host: getCDNHostnameFromSite(site), + pathPrefix: getCDNPathPrefixFromSite(site), + apiKey: apiKey, + repositoryType: "config", + }, + } +} + +func newCDNRemoteDirectorStore(client *http.Client, site, apiKey string) *cdnRemoteDirectorStore { + return &cdnRemoteDirectorStore{ + cdnRemoteStore: cdnRemoteStore{ + httpClient: client, + host: getCDNHostnameFromSite(site), + pathPrefix: getCDNPathPrefixFromSite(site), + apiKey: apiKey, + repositoryType: "director", + }, + } +} + +// GetMeta implements go-tuf's RemoteStore.GetMeta +// See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore +func (s *cdnRemoteStore) GetMeta(p string) (io.ReadCloser, int64, error) { + return s.getRCFile(path.Join(s.repositoryType, p)) +} + +// GetTarget implements go-tuf's RemoteStore.GetTarget +// See https://pkg.go.dev/github.com/DataDog/go-tuf/client#RemoteStore +func (s *cdnRemoteStore) GetTarget(path string) (io.ReadCloser, int64, error) { + return s.getRCFile(path) +} + +func (s *cdnRemoteStore) newAuthenticatedHTTPReq(method, p string) (*http.Request, error) { + req, err := http.NewRequest(method, s.host, nil) + if err != nil { + return nil, err + } + + req.Header.Add("X-Dd-Api-Key", s.apiKey) + if s.authnToken != "" { + req.Header.Add("Authorization", s.authnToken) + } + + req.URL.Scheme = "https" + req.URL.Host = s.host + req.URL.Path = "/" + path.Join(s.pathPrefix, p) + req.Host = s.host + + return req, err +} + +func (s *cdnRemoteStore) updateAuthnToken(resp *http.Response) { + authToken := resp.Header.Get("X-Dd-Refreshed-Authorization") + if authToken != "" { + s.authnToken = authToken + 
} +} + +func (s *cdnRemoteStore) getRCFile(path string) (io.ReadCloser, int64, error) { + req, err := s.newAuthenticatedHTTPReq("GET", path) + if err != nil { + return nil, 0, err + } + resp, err := s.httpClient.Do(req) + if err != nil { + return nil, 0, err + } + if resp.StatusCode == http.StatusNotFound { + return nil, 0, client.ErrNotFound{File: path} + } + if resp.StatusCode != http.StatusOK { + return nil, 0, fmt.Errorf("unexpected status code %d", resp.StatusCode) + } + s.updateAuthnToken(resp) + return resp.Body, resp.ContentLength, nil +} diff --git a/pkg/config/remote/uptane/remote_store_test.go b/pkg/config/remote/uptane/remote_store_test.go index 0d4ad9295ac13..9d9d09e8cd15d 100644 --- a/pkg/config/remote/uptane/remote_store_test.go +++ b/pkg/config/remote/uptane/remote_store_test.go @@ -7,7 +7,13 @@ package uptane import ( "fmt" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "io" + "net/http" + "path" + "strconv" + "strings" "testing" "github.com/DataDog/go-tuf/client" @@ -16,6 +22,11 @@ import ( pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" ) +const ( + host = "test-host" + k = "test" +) + func generateUpdate(baseVersion uint64) *pbgo.LatestConfigsResponse { baseVersion *= 10000 return &pbgo.LatestConfigsResponse{ @@ -239,3 +250,254 @@ func assertGetTarget(t *testing.T, store *remoteStore, path string, expectedCont assert.NoError(t, err) assert.Equal(t, expectedContent, content) } + +type mockHTTPClient struct { + mock.Mock +} + +func (m *mockHTTPClient) Do(req *http.Request) (*http.Response, error) { + args := m.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} + +func getRequestMatcher(storeType, p, apiKey, token string) interface{} { + return mock.MatchedBy(func(arg interface{}) bool { + req := arg.(*http.Request) + return req.Method == "GET" && + req.URL.Scheme == "https" && + req.URL.Host == host && + req.URL.Path == "/"+path.Join("test-site", storeType, p) && + req.Host == host && + 
req.Header.Get("X-Dd-Api-Key") == apiKey && + req.Header.Get("Authorization") == token + }) +} + +// TestCDNRemoteStore tests that a series of GetMeta and GetTarget invocations will make the +// correct HTTP requests and handle authz tokens correctly +func TestCDNRemoteStore(t *testing.T) { + storeType := "director" + root2 := "path/to/2.root.json" + body2 := "body2" + length := len(body2) + + // First GetMeta request should pass the api key but no token, since the remote store is freshly initialized + apiKeyMatcher := getRequestMatcher(storeType, root2, k, "") + httpClient := &mockHTTPClient{} + + // Response with no authz token in the response headers + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body2)), + ContentLength: int64(length), + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: storeType, + } + + readCloser, contentLength, err := cdnStore.GetMeta(root2) + require.NoError(t, err) + require.NotNil(t, readCloser) + require.Equal(t, int64(length), contentLength) + content := make([]byte, length) + n, err := readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body2, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) + + root3 := "path/to/3.root.json" + body3 := "body3" + length = len(body3) + // For the second GetMeta request, we still expect to only pass the api key, since the first request's response did not contain a token + apiKeyMatcher = getRequestMatcher(storeType, root3, k, "") + + // Second response will include an authz token in the headers + token := "Bearer test-token" + resp = &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body3)), + ContentLength: int64(length), + Header: http.Header{ + "X-Dd-Refreshed-Authorization": []string{token}, + 
}, + } + + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + readCloser, contentLength, err = cdnStore.GetMeta(root3) + require.NoError(t, err) + require.NotNil(t, readCloser) + require.Equal(t, int64(length), contentLength) + content = make([]byte, length) + n, err = readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body3, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) + + root4 := "path/to/4.root.json" + body4 := "body4" + resp = &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body4)), + ContentLength: int64(length), + } + + // For the third and final GetMeta request, we still expect to pass both the api key and the authz token that was returned in the second response + apiKeyAndAuthzMatcher := getRequestMatcher(storeType, root4, k, token) + httpClient.On("Do", apiKeyAndAuthzMatcher).Return(resp, nil) + + readCloser, contentLength, err = cdnStore.GetMeta(root4) + require.NoError(t, err) + require.NotNil(t, readCloser) + require.Equal(t, int64(length), contentLength) + content = make([]byte, length) + n, err = readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body4, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) + + // Lastly, perform a GetTarget request to ensure that the authz token is passed along correctly, and + // the path is correctly constructed (does not have the repository type prefix) + target := "path/to/target/abc" + body := "targetBody" + length = len(body) + targetMatcher := getRequestMatcher("", target, k, token) + resp = &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(body)), + ContentLength: int64(length), + } + httpClient.On("Do", targetMatcher).Return(resp, nil) + readCloser, contentLength, err = cdnStore.GetTarget(target) + require.NoError(t, err) + require.NotNil(t, readCloser) 
+ require.Equal(t, int64(length), contentLength) + content = make([]byte, length) + n, err = readCloser.Read(content) + require.NoError(t, err) + require.Equal(t, length, n) + require.Equal(t, body, string(content)) + httpClient.AssertExpectations(t) + require.NoError(t, readCloser.Close()) +} + +func TestGetMetaNotFound(t *testing.T) { + storeType := "director" + root2 := "path/to/2.root.json" + + apiKeyMatcher := getRequestMatcher(storeType, root2, k, "") + httpClient := &mockHTTPClient{} + + resp := &http.Response{ + StatusCode: http.StatusNotFound, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: storeType, + } + + readCloser, contentLength, err := cdnStore.GetMeta(root2) + require.Error(t, err) + require.ErrorIs(t, err, client.ErrNotFound{File: path.Join(storeType, root2)}) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} + +func TestGetMetaError(t *testing.T) { + storeType := "director" + root2 := "path/to/2.root.json" + + apiKeyMatcher := getRequestMatcher(storeType, root2, k, "") + httpClient := &mockHTTPClient{} + + resp := &http.Response{ + StatusCode: http.StatusInternalServerError, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: storeType, + } + + readCloser, contentLength, err := cdnStore.GetMeta(root2) + require.Error(t, err) + require.Equal(t, err.Error(), "unexpected status code "+strconv.Itoa(http.StatusInternalServerError)) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} + +func TestGetTargetNotFound(t *testing.T) { + targetFile := "path/to/target/abc" + + apiKeyMatcher := getRequestMatcher("", targetFile, k, "") + httpClient := &mockHTTPClient{} 
+ + resp := &http.Response{ + StatusCode: http.StatusNotFound, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: "director", + } + + readCloser, contentLength, err := cdnStore.GetTarget(targetFile) + require.Error(t, err) + require.ErrorIs(t, err, client.ErrNotFound{File: targetFile}) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} + +func TestGetTargetError(t *testing.T) { + targetFile := "path/to/target/abc" + + apiKeyMatcher := getRequestMatcher("", targetFile, k, "") + httpClient := &mockHTTPClient{} + + resp := &http.Response{ + StatusCode: http.StatusInternalServerError, + } + httpClient.On("Do", apiKeyMatcher).Return(resp, nil) + + cdnStore := &cdnRemoteStore{ + httpClient: httpClient, + host: host, + pathPrefix: "test-site", + apiKey: k, + repositoryType: "director", + } + + readCloser, contentLength, err := cdnStore.GetTarget(targetFile) + require.Error(t, err) + require.Equal(t, err.Error(), "unexpected status code "+strconv.Itoa(http.StatusInternalServerError)) + require.Nil(t, readCloser) + require.Equal(t, int64(0), contentLength) + httpClient.AssertExpectations(t) +} diff --git a/pkg/config/render_config.go b/pkg/config/render_config.go index 7d2412e8a9ac1..e56f0e86c578b 100644 --- a/pkg/config/render_config.go +++ b/pkg/config/render_config.go @@ -50,6 +50,7 @@ type context struct { SNMP bool SecurityModule bool SecurityAgent bool + SBOM bool // enables CSM Vulnerability Management NetworkModule bool // Sub-module of System Probe UniversalServiceMonitoringModule bool // Sub-module of System Probe DataStreamsModule bool // Sub-module of System Probe @@ -87,6 +88,7 @@ func mkContext(buildType string) context { Kubelet: true, KubeApiServer: true, // TODO: remove when phasing out from node-agent Compliance: true, + SBOM: true, SNMP: true, 
PrometheusScrape: true, OTLP: true, diff --git a/pkg/config/settings/runtime_profiling.go b/pkg/config/settings/runtime_profiling.go index 60d1c946bd7bc..108043204d203 100644 --- a/pkg/config/settings/runtime_profiling.go +++ b/pkg/config/settings/runtime_profiling.go @@ -11,7 +11,7 @@ import ( "github.com/fatih/color" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // ProfilingOpts defines the options used for profiling @@ -24,7 +24,7 @@ type ProfilingOpts struct { // ExecWithRuntimeProfilingSettings runs the callback func with the given runtime profiling settings func ExecWithRuntimeProfilingSettings(callback func(), opts ProfilingOpts, settingsClient Client) error { - if err := util.SetAuthToken(config.Datadog()); err != nil { + if err := util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { return fmt.Errorf("unable to set up authentication token: %v", err) } diff --git a/pkg/config/settings/runtime_setting_profiling.go b/pkg/config/settings/runtime_setting_profiling.go index 4517e6deeac3e..6086640275352 100644 --- a/pkg/config/settings/runtime_setting_profiling.go +++ b/pkg/config/settings/runtime_setting_profiling.go @@ -10,8 +10,8 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/profiling" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -72,7 +72,7 @@ func (l *ProfilingRuntimeSetting) Set(config config.Component, v interface{}, so if profile { // populate site - s := pkgconfig.DefaultSite + s := pkgconfigsetup.DefaultSite if config.IsSet(l.ConfigPrefix + "site") { s = config.GetString(l.ConfigPrefix + "site") } diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 
9ed374b26e64b..1cc1f63d2cc9e 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -975,6 +975,8 @@ func InitConfig(config pkgconfigmodel.Config) { config.BindEnvAndSetDefault("remote_policies", false) config.BindEnvAndSetDefault("installer.registry.url", "") config.BindEnvAndSetDefault("installer.registry.auth", "") + config.BindEnvAndSetDefault("installer.registry.username", "") + config.BindEnvAndSetDefault("installer.registry.password", "") config.BindEnv("fleet_policies_dir") config.SetDefault("fleet_layers", []string{}) diff --git a/pkg/config/config_change_checker.go b/pkg/config/setup/config_change_checker.go similarity index 99% rename from pkg/config/config_change_checker.go rename to pkg/config/setup/config_change_checker.go index 8ec0218cb70f9..674ad0d314eaf 100644 --- a/pkg/config/config_change_checker.go +++ b/pkg/config/setup/config_change_checker.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package config +package setup import ( "fmt" diff --git a/pkg/config/config_change_checker_test.go b/pkg/config/setup/config_change_checker_test.go similarity index 98% rename from pkg/config/config_change_checker_test.go rename to pkg/config/setup/config_change_checker_test.go index b36baca084e3a..8fd8f2cf70351 100644 --- a/pkg/config/config_change_checker_test.go +++ b/pkg/config/setup/config_change_checker_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package config +package setup import ( "testing" diff --git a/pkg/config/setup/constants/constants.go b/pkg/config/setup/constants/constants.go index d23620e464e22..7f68ba0973a77 100644 --- a/pkg/config/setup/constants/constants.go +++ b/pkg/config/setup/constants/constants.go @@ -9,4 +9,6 @@ package constants const ( // DefaultEBPFLessProbeAddr defines the default ebpfless probe address DefaultEBPFLessProbeAddr = "localhost:5678" + // ClusterIDCacheKey is the key name for the orchestrator cluster id in the agent in-mem cache + ClusterIDCacheKey = "orchestratorClusterID" ) diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 7ebde9fda3f96..eb15387624d4f 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -206,7 +206,7 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault(join(spNS, "enable_conntrack_all_namespaces"), true, "DD_SYSTEM_PROBE_ENABLE_CONNTRACK_ALL_NAMESPACES") cfg.BindEnvAndSetDefault(join(netNS, "enable_protocol_classification"), true, "DD_ENABLE_PROTOCOL_CLASSIFICATION") cfg.BindEnvAndSetDefault(join(netNS, "enable_ringbuffers"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_RINGBUFFERS") - cfg.BindEnvAndSetDefault(join(netNS, "enable_tcp_failed_connections"), false, "DD_SYSTEM_PROBE_NETWORK_ENABLE_FAILED_CONNS") + cfg.BindEnvAndSetDefault(join(netNS, "enable_tcp_failed_connections"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_FAILED_CONNS") cfg.BindEnvAndSetDefault(join(netNS, "ignore_conntrack_init_failure"), false, "DD_SYSTEM_PROBE_NETWORK_IGNORE_CONNTRACK_INIT_FAILURE") cfg.BindEnvAndSetDefault(join(netNS, "conntrack_init_timeout"), 10*time.Second) cfg.BindEnvAndSetDefault(join(netNS, "allow_netlink_conntracker_fallback"), true) diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go index 3497ffc143007..e904416d75d7e 100644 --- a/pkg/config/setup/system_probe_cws.go +++ b/pkg/config/setup/system_probe_cws.go @@ -86,7 +86,6 @@ 
func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.watch_dir", true) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.cache_size", 10) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.max_count", 400) - cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.remote_configuration.enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.security_profile.dns_match_max_depth", 3) // CWS - Auto suppression diff --git a/pkg/config/structure/go.mod b/pkg/config/structure/go.mod new file mode 100644 index 0000000000000..9dede435250a1 --- /dev/null +++ b/pkg/config/structure/go.mod @@ -0,0 +1,86 @@ +module github.com/DataDog/datadog-agent/pkg/config/structure + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def + github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder + github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types + github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry + github.com/DataDog/datadog-agent/comp/def => ../../../comp/def + github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults + github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env + github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock + github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable + github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil + 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate + github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber + github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system + github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket + github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../pkg/util/testutil + github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil +) + +require ( + github.com/DataDog/datadog-agent/pkg/config/mock v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/stretchr/testify v1.9.0 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/viper v1.13.5 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.1 // indirect + github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spf13/afero v1.1.2 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/jwalterweatherman v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/pkg/config/structure/go.sum 
b/pkg/config/structure/go.sum new file mode 100644 index 0000000000000..77ba213060c82 --- /dev/null +++ b/pkg/config/structure/go.sum @@ -0,0 +1,352 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= +github.com/DataDog/viper v1.13.5/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto 
v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod 
h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod 
h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify 
v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric 
v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod 
h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/config/structure/unmarshal.go b/pkg/config/structure/unmarshal.go index 97420043e2268..be426cb88fe81 100644 --- a/pkg/config/structure/unmarshal.go +++ b/pkg/config/structure/unmarshal.go @@ -9,6 +9,7 @@ package structure import ( "fmt" "reflect" + "slices" "strings" "unicode" "unicode/utf8" @@ -16,12 +17,44 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/model" ) +// features allowed for handling edge-cases +type featureSet struct { + allowSquash bool + convertEmptyStrNil bool +} + +// UnmarshalKeyOption is an option that affects the enabled features in UnmarshalKey +type UnmarshalKeyOption func(*featureSet) + +// EnableSquash allows UnmarshalKey to take advantage of `mapstructure`s `squash` feature +// a squashed field hoists its fields up a level in the marshalled representation and directly embeds them +var EnableSquash UnmarshalKeyOption = func(fs *featureSet) { + fs.allowSquash = true +} + +// ConvertEmptyStringToNil allows UnmarshalKey to implicitly convert empty strings into nil slices +var ConvertEmptyStringToNil UnmarshalKeyOption = func(fs *featureSet) { + fs.convertEmptyStrNil = true +} + +// error for when a key is not found +var errNotFound = fmt.Errorf("not found") + // UnmarshalKey retrieves data from the config at the given key and deserializes it // to be stored on the target struct. It is implemented entirely using reflection, and // does not depend upon details of the data model of the config. 
// Target struct can use of struct tag of "yaml", "json", or "mapstructure" to rename fields -func UnmarshalKey(cfg model.Reader, key string, target interface{}) error { - source, err := newNode(reflect.ValueOf(cfg.Get(key))) +func UnmarshalKey(cfg model.Reader, key string, target interface{}, opts ...UnmarshalKeyOption) error { + fs := &featureSet{} + for _, o := range opts { + o(fs) + } + rawval := cfg.Get(key) + // Don't create a reflect.Value out of nil, just return immediately + if rawval == nil { + return nil + } + source, err := newNode(reflect.ValueOf(rawval)) if err != nil { return err } @@ -31,21 +64,25 @@ func UnmarshalKey(cfg model.Reader, key string, target interface{}) error { } switch outValue.Kind() { case reflect.Map: - return copyMap(outValue, source) + return copyMap(outValue, source, fs) case reflect.Struct: - return copyStruct(outValue, source) + return copyStruct(outValue, source, fs) case reflect.Slice: if arr, ok := source.(arrayNode); ok { - return copyList(outValue, arr) + return copyList(outValue, arr, fs) } - return fmt.Errorf("can not UnmarshalKey to a slice from a non-list source") + if isEmptyString(source) { + if fs.convertEmptyStrNil { + return nil + } + return fmt.Errorf("treating empty string as a nil slice not allowed for UnmarshalKey without ConvertEmptyStrNil option") + } + return fmt.Errorf("can not UnmarshalKey to a slice from a non-list source: %T", source) default: return fmt.Errorf("can only UnmarshalKey to struct, map, or slice, got %v", outValue.Kind()) } } -var errNotFound = fmt.Errorf("not found") - // leafNode represents a leaf with a scalar value type leafNode interface { @@ -93,6 +130,9 @@ type innerNodeImpl struct { type innerMapNodeImpl struct { // val must be a map[string]interface{} val reflect.Value + // remapCase maps each lower-case key to the original case. 
This + // enables GetChild to retrieve values using case-insensitive keys + remapCase map[string]string } var _ node = (*innerNodeImpl)(nil) @@ -103,7 +143,7 @@ func newNode(v reflect.Value) (node, error) { if v.Kind() == reflect.Struct { return &innerNodeImpl{val: v}, nil } else if v.Kind() == reflect.Map { - return &innerMapNodeImpl{val: v}, nil + return &innerMapNodeImpl{val: v, remapCase: makeRemapCase(v)}, nil } else if v.Kind() == reflect.Slice { return &arrayNodeImpl{val: v}, nil } else if isScalarKind(v) { @@ -112,7 +152,7 @@ func newNode(v reflect.Value) (node, error) { return nil, fmt.Errorf("could not create node from: %v of type %T and kind %v", v, v, v.Kind()) } -// GetChild returns the child node at the given key, or an error if not found +// GetChild returns the child node at the given case-insensitive key, or an error if not found func (n *innerNodeImpl) GetChild(key string) (node, error) { findex := findFieldMatch(n.val, key) if findex == -1 { @@ -135,14 +175,16 @@ func (n *innerNodeImpl) ChildrenKeys() ([]string, error) { if unicode.IsLower(ch) { continue } - keys = append(keys, fieldNameToKey(f)) + fieldKey, _ := fieldNameToKey(f) + keys = append(keys, fieldKey) } return keys, nil } -// GetChild returns the child node at the given key, or an error if not found +// GetChild returns the child node at the given case-insensitive key, or an error if not found func (n *innerMapNodeImpl) GetChild(key string) (node, error) { - inner := n.val.MapIndex(reflect.ValueOf(key)) + mkey := n.remapCase[strings.ToLower(key)] + inner := n.val.MapIndex(reflect.ValueOf(mkey)) if !inner.IsValid() { return nil, errNotFound } @@ -163,6 +205,8 @@ func (n *innerMapNodeImpl) ChildrenKeys() ([]string, error) { return nil, fmt.Errorf("map node has invalid non-string key: %v", kv) } } + // map keys are iterated non-deterministically, sort them + slices.Sort(keys) return keys, nil } @@ -192,8 +236,8 @@ func (n *arrayNodeImpl) Index(k int) (node, error) { } // GetChild returns 
an error because a leaf has no children -func (n *leafNodeImpl) GetChild(string) (node, error) { - return nil, fmt.Errorf("can't GetChild of a leaf node") +func (n *leafNodeImpl) GetChild(key string) (node, error) { + return nil, fmt.Errorf("can't GetChild(%s) of a leaf node", key) } // ChildrenKeys returns an error because a leaf has no children @@ -256,23 +300,40 @@ func convertToBool(text string) (bool, error) { return false, newConversionError(reflect.ValueOf(text), "bool") } -func fieldNameToKey(field reflect.StructField) string { +type specifierSet map[string]struct{} + +// fieldNameToKey returns the lower-cased field name, for case insensitive comparisons, +// with struct tag rename applied, as well as the set of specifiers from struct tags +// struct tags are handled in order of yaml, then json, then mapstructure +func fieldNameToKey(field reflect.StructField) (string, specifierSet) { name := field.Name - if tagtext := field.Tag.Get("yaml"); tagtext != "" { - name = tagtext - } else if tagtext := field.Tag.Get("json"); tagtext != "" { - name = tagtext - } else if tagtext := field.Tag.Get("mapstructure"); tagtext != "" { - name = tagtext + + tagtext := "" + if val := field.Tag.Get("yaml"); val != "" { + tagtext = val + } else if val := field.Tag.Get("json"); val != "" { + tagtext = val + } else if val := field.Tag.Get("mapstructure"); val != "" { + tagtext = val } - // skip any additional specifiers such as ",omitempty" - if commaPos := strings.IndexRune(name, ','); commaPos != -1 { - name = name[:commaPos] + + // skip any additional specifiers such as ",omitempty" or ",squash" + // TODO: support multiple specifiers + var specifiers map[string]struct{} + if commaPos := strings.IndexRune(tagtext, ','); commaPos != -1 { + specifiers = make(map[string]struct{}) + val := tagtext[:commaPos] + specifiers[tagtext[commaPos+1:]] = struct{}{} + if val != "" { + name = val + } + } else if tagtext != "" { + name = tagtext } - return name + return strings.ToLower(name), 
specifiers } -func copyStruct(target reflect.Value, source node) error { +func copyStruct(target reflect.Value, source node, fs *featureSet) error { targetType := target.Type() for i := 0; i < targetType.NumField(); i++ { f := targetType.Field(i) @@ -280,13 +341,25 @@ func copyStruct(target reflect.Value, source node) error { if unicode.IsLower(ch) { continue } - child, err := source.GetChild(fieldNameToKey(f)) + fieldKey, specifiers := fieldNameToKey(f) + if _, ok := specifiers["squash"]; ok { + if !fs.allowSquash { + return fmt.Errorf("feature 'squash' not allowed for UnmarshalKey without EnableSquash option") + } + err := copyAny(target.FieldByName(f.Name), source, fs) + if err != nil { + return err + } + continue + } + child, err := source.GetChild(fieldKey) if err == errNotFound { continue - } else if err != nil { + } + if err != nil { return err } - err = copyAny(target.FieldByName(f.Name), child) + err = copyAny(target.FieldByName(f.Name), child, fs) if err != nil { return err } @@ -294,7 +367,7 @@ func copyStruct(target reflect.Value, source node) error { return nil } -func copyMap(target reflect.Value, source node) error { +func copyMap(target reflect.Value, source node, _ *featureSet) error { // TODO: Should handle maps with more complex types in a future PR ktype := reflect.TypeOf("") vtype := reflect.TypeOf("") @@ -325,7 +398,7 @@ func copyMap(target reflect.Value, source node) error { return nil } -func copyLeaf(target reflect.Value, source leafNode) error { +func copyLeaf(target reflect.Value, source leafNode, _ *featureSet) error { if source == nil { return fmt.Errorf("source value is not a scalar") } @@ -369,7 +442,7 @@ func copyLeaf(target reflect.Value, source leafNode) error { return fmt.Errorf("unsupported scalar type %v", target.Kind()) } -func copyList(target reflect.Value, source arrayNode) error { +func copyList(target reflect.Value, source arrayNode, fs *featureSet) error { if source == nil { return fmt.Errorf("source value is not a list") 
} @@ -384,7 +457,7 @@ func copyList(target reflect.Value, source arrayNode) error { } ptrOut := reflect.New(elemType) outTarget := ptrOut.Elem() - err = copyAny(outTarget, elemSource) + err = copyAny(outTarget, elemSource, fs) if err != nil { return err } @@ -394,7 +467,7 @@ func copyList(target reflect.Value, source arrayNode) error { return nil } -func copyAny(target reflect.Value, source node) error { +func copyAny(target reflect.Value, source node, fs *featureSet) error { if target.Kind() == reflect.Pointer { allocPtr := reflect.New(target.Type().Elem()) target.Set(allocPtr) @@ -402,16 +475,16 @@ func copyAny(target reflect.Value, source node) error { } if isScalarKind(target) { if leaf, ok := source.(leafNode); ok { - return copyLeaf(target, leaf) + return copyLeaf(target, leaf, fs) } return fmt.Errorf("can't copy into target: scalar required, but source is not a leaf") } else if target.Kind() == reflect.Map { - return copyMap(target, source) + return copyMap(target, source, fs) } else if target.Kind() == reflect.Struct { - return copyStruct(target, source) + return copyStruct(target, source, fs) } else if target.Kind() == reflect.Slice { if arr, ok := source.(arrayNode); ok { - return copyList(target, arr) + return copyList(target, arr, fs) } return fmt.Errorf("can't copy into target: []T required, but source is not an array") } else if target.Kind() == reflect.Invalid { @@ -420,15 +493,43 @@ func copyAny(target reflect.Value, source node) error { return fmt.Errorf("unknown value to copy: %v", target.Type()) } +func isEmptyString(source node) bool { + if leaf, ok := source.(leafNode); ok { + if str, err := leaf.GetString(); err == nil { + return str == "" + } + } + return false +} + func isScalarKind(v reflect.Value) bool { k := v.Kind() return (k >= reflect.Bool && k <= reflect.Float64) || k == reflect.String } +func makeRemapCase(v reflect.Value) map[string]string { + remap := make(map[string]string) + iter := v.MapRange() + for iter.Next() { + mkey := "" + 
switch k := iter.Key().Interface().(type) { + case string: + mkey = k + default: + mkey = fmt.Sprintf("%s", k) + } + remap[strings.ToLower(mkey)] = mkey + } + return remap +} + func findFieldMatch(val reflect.Value, key string) int { + // case-insensitive match for struct names + key = strings.ToLower(key) schema := val.Type() for i := 0; i < schema.NumField(); i++ { - if key == fieldNameToKey(schema.Field(i)) { + fieldKey, _ := fieldNameToKey(schema.Field(i)) + if key == fieldKey { return i } } diff --git a/pkg/config/structure/unmarshal_test.go b/pkg/config/structure/unmarshal_test.go index 3f8546020811f..e6d15fae0fe9e 100644 --- a/pkg/config/structure/unmarshal_test.go +++ b/pkg/config/structure/unmarshal_test.go @@ -83,6 +83,11 @@ network_devices: assert.Equal(t, trapsCfg.Namespace, "abc") } +type ServiceDescription struct { + Host string + Endpoint Endpoint `mapstructure:",squash"` +} + type Endpoint struct { Name string `yaml:"name"` APIKey string `yaml:"apikey"` @@ -114,6 +119,31 @@ endpoints: assert.Equal(t, endpoints[2].APIKey, "abc3") } +func TestUnmarshalKeyWithSquash(t *testing.T) { + confYaml := ` +service: + host: datad0g.com + name: intake + apikey: abc1 +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("service") + + var svc = ServiceDescription{} + // fails without EnableSquash being given + err := UnmarshalKey(mockConfig, "service", &svc) + assert.Error(t, err) + assert.Contains(t, err.Error(), "EnableSquash") + + // succeeds + err = UnmarshalKey(mockConfig, "service", &svc, EnableSquash) + assert.NoError(t, err) + + assert.Equal(t, svc.Host, "datad0g.com") + assert.Equal(t, svc.Endpoint.Name, "intake") + assert.Equal(t, svc.Endpoint.APIKey, "abc1") +} + type FeatureConfig struct { Enabled bool `yaml:"enabled"` } @@ -133,6 +163,46 @@ feature: assert.Equal(t, feature.Enabled, true) } +type FeatureConfigDiffCase struct { + ENaBLEd bool +} + +func TestUnmarshalKeyCaseInsensitive(t *testing.T) { + confYaml := ` +feature: + 
EnABLeD: "true" +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("feature") + + var feature = FeatureConfig{} + err := UnmarshalKey(mockConfig, "feature", &feature) + assert.NoError(t, err) + + assert.Equal(t, feature.Enabled, true) + + var diffcase = FeatureConfigDiffCase{} + err = UnmarshalKey(mockConfig, "feature", &diffcase) + assert.NoError(t, err) + + assert.Equal(t, diffcase.ENaBLEd, true) +} + +func TestUnmarshalKeyMissing(t *testing.T) { + confYaml := ` +feature: + enabled: "true" +` + mockConfig := mock.NewFromYAML(t, confYaml) + mockConfig.SetKnown("feature") + + // If the data from the config is missing, UnmarshalKey is a no-op, does + // nothing, and returns no error + var endpoints = []Endpoint{} + err := UnmarshalKey(mockConfig, "config_providers", &endpoints) + assert.NoError(t, err) +} + func TestMapGetChildNotFound(t *testing.T) { m := map[string]string{"a": "apple", "b": "banana"} n, err := newNode(reflect.ValueOf(m)) diff --git a/pkg/config/test_helpers.go b/pkg/config/test_helpers.go deleted file mode 100644 index 6642302554da5..0000000000000 --- a/pkg/config/test_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build test - -package config - -import ( - "github.com/DataDog/datadog-agent/pkg/config/env" -) - -var ( - // SetFeatures is alias from env - SetFeatures = env.SetFeatures - // SetFeaturesNoCleanup is alias from env - SetFeaturesNoCleanup = env.SetFeaturesNoCleanup -) diff --git a/pkg/databasemonitoring/config/config.go b/pkg/databasemonitoring/config/config.go index b118928a533f6..ef6915b945cfb 100644 --- a/pkg/databasemonitoring/config/config.go +++ b/pkg/databasemonitoring/config/config.go @@ -6,7 +6,7 @@ // Package config contains database-monitoring auto-discovery configuration package config -import coreconfig "github.com/DataDog/datadog-agent/pkg/config" +import pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" const ( autoDiscoveryAuroraConfigKey = "database_monitoring.autodiscovery.aurora" @@ -30,10 +30,10 @@ type AuroraConfig struct { func NewAuroraAutodiscoveryConfig() (AuroraConfig, error) { var discoveryConfigs AuroraConfig // defaults for all values are set in the config package - discoveryConfigs.Enabled = coreconfig.Datadog().GetBool(autoDiscoveryAuroraConfigKey + ".enabled") - discoveryConfigs.QueryTimeout = coreconfig.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".query_timeout") - discoveryConfigs.DiscoveryInterval = coreconfig.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".discovery_interval") - discoveryConfigs.Tags = coreconfig.Datadog().GetStringSlice(autoDiscoveryAuroraConfigKey + ".tags") - discoveryConfigs.Region = coreconfig.Datadog().GetString(autoDiscoveryAuroraConfigKey + ".region") + discoveryConfigs.Enabled = pkgconfigsetup.Datadog().GetBool(autoDiscoveryAuroraConfigKey + ".enabled") + discoveryConfigs.QueryTimeout = pkgconfigsetup.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".query_timeout") + discoveryConfigs.DiscoveryInterval = pkgconfigsetup.Datadog().GetInt(autoDiscoveryAuroraConfigKey + ".discovery_interval") + discoveryConfigs.Tags = 
pkgconfigsetup.Datadog().GetStringSlice(autoDiscoveryAuroraConfigKey + ".tags") + discoveryConfigs.Region = pkgconfigsetup.Datadog().GetString(autoDiscoveryAuroraConfigKey + ".region") return discoveryConfigs, nil } diff --git a/pkg/diagnose/check.go b/pkg/diagnose/check.go index 41cd48bf4718c..17f2e0d016b17 100644 --- a/pkg/diagnose/check.go +++ b/pkg/diagnose/check.go @@ -19,7 +19,7 @@ import ( integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" "github.com/DataDog/datadog-agent/pkg/collector/check" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -104,7 +104,7 @@ func diagnoseChecksInCLIProcess(_ diagnosis.Config, senderManager diagnosesender } } // Initializing the aggregator with a flush interval of 0 (to disable the flush goroutines) - common.LoadComponents(secretResolver, wmetaInstance, ac, pkgconfig.Datadog().GetString("confd_path")) + common.LoadComponents(secretResolver, wmetaInstance, ac, pkgconfigsetup.Datadog().GetString("confd_path")) ac.LoadAndRun(context.Background()) // Create the CheckScheduler, but do not attach it to diff --git a/pkg/diagnose/connectivity/core_endpoint.go b/pkg/diagnose/connectivity/core_endpoint.go index ffc1ecf9e52a5..81e1ea946c5d9 100644 --- a/pkg/diagnose/connectivity/core_endpoint.go +++ b/pkg/diagnose/connectivity/core_endpoint.go @@ -21,7 +21,7 @@ import ( forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" logsConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" @@ -29,7 +29,7 @@ import ( ) func getLogsHTTPEndpoints() (*logsConfig.Endpoints, error) { - datadogConfig := config.Datadog() + datadogConfig := pkgconfigsetup.Datadog() logsConfigKey := logsConfig.NewLogsConfigKeys("logs_config.", datadogConfig) return logsConfig.BuildHTTPEndpointsWithConfig(datadogConfig, logsConfigKey, "agent-http-intake.logs.", "logs", logsConfig.AgentJSONIntakeProtocol, logsConfig.DefaultIntakeOrigin) } @@ -38,7 +38,7 @@ func getLogsHTTPEndpoints() (*logsConfig.Endpoints, error) { func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { // Create domain resolvers - keysPerDomain, err := utils.GetMultipleEndpoints(config.Datadog()) + keysPerDomain, err := utils.GetMultipleEndpoints(pkgconfigsetup.Datadog()) if err != nil { return []diagnosis.Diagnosis{ { @@ -53,10 +53,10 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { var diagnoses []diagnosis.Diagnosis domainResolvers := resolver.NewSingleDomainResolvers(keysPerDomain) - client := forwarder.NewHTTPClient(config.Datadog()) + client := forwarder.NewHTTPClient(pkgconfigsetup.Datadog()) // Create diagnosis for logs - if config.Datadog().GetBool("logs_enabled") { + if pkgconfigsetup.Datadog().GetBool("logs_enabled") { endpoints, err := getLogsHTTPEndpoints() if err != nil { @@ -68,7 +68,7 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { RawError: err.Error(), }) } else { - url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, config.Datadog()) + url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfigsetup.Datadog()) name := fmt.Sprintf("Connectivity to %s", url) diag := createDiagnosis(name, url, "", err) @@ -78,7 +78,7 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { } - endpointsInfo := getEndpointsInfo(config.Datadog()) + endpointsInfo 
:= getEndpointsInfo(pkgconfigsetup.Datadog()) // Send requests to all endpoints for all domains for _, domainResolver := range domainResolvers { @@ -222,7 +222,7 @@ func verifyEndpointResponse(diagCfg diagnosis.Config, statusCode int, responseBo // the endpoint send an empty response. As the error 'EOF' is not very informative, it can // be interesting to 'wrap' this error to display more context. func noResponseHints(err error) string { - endpoint := utils.GetInfraEndpoint(config.Datadog()) + endpoint := utils.GetInfraEndpoint(pkgconfigsetup.Datadog()) parsedURL, parseErr := url.Parse(endpoint) if parseErr != nil { return fmt.Sprintf("Could not parse url '%v' : %v", scrubber.ScrubLine(endpoint), scrubber.ScrubLine(parseErr.Error())) diff --git a/pkg/diagnose/connectivity/core_endpoint_test.go b/pkg/diagnose/connectivity/core_endpoint_test.go index 0998f5db74992..028f833277388 100644 --- a/pkg/diagnose/connectivity/core_endpoint_test.go +++ b/pkg/diagnose/connectivity/core_endpoint_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/endpoints" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var ( @@ -46,7 +46,7 @@ func TestSendHTTPRequestToEndpoint(t *testing.T) { })) defer ts1.Close() - client := defaultforwarder.NewHTTPClient(config.Datadog()) + client := defaultforwarder.NewHTTPClient(pkgconfigsetup.Datadog()) // With the correct API Key, it should be a 200 statusCodeWithKey, responseBodyWithKey, _, errWithKey := sendHTTPRequestToEndpoint(context.Background(), client, ts1.URL, endpointInfoTest, apiKey1) diff --git a/pkg/diagnose/connectivity/endpoint_info.go b/pkg/diagnose/connectivity/endpoint_info.go index c90294d30cd19..5c6bf3d245bc9 100644 --- a/pkg/diagnose/connectivity/endpoint_info.go +++ b/pkg/diagnose/connectivity/endpoint_info.go @@ -13,7 +13,7 @@ import ( 
"github.com/DataDog/datadog-agent/comp/core/flare/helpers" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/endpoints" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // endpointInfo is a value object that contains all the information we need to @@ -31,7 +31,7 @@ type endpointInfo struct { Payload []byte } -func getEndpointsInfo(cfg config.Reader) []endpointInfo { +func getEndpointsInfo(cfg model.Reader) []endpointInfo { emptyPayload := []byte("{}") checkRunPayload := []byte("{\"check\": \"test\", \"status\": 0}") diff --git a/pkg/diagnose/ports/ports.go b/pkg/diagnose/ports/ports.go index 7a3321bdb1f81..f06a178e9bf41 100644 --- a/pkg/diagnose/ports/ports.go +++ b/pkg/diagnose/ports/ports.go @@ -11,7 +11,7 @@ import ( "path" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/port" ) @@ -39,14 +39,14 @@ func DiagnosePortSuite() []diagnosis.Diagnosis { } var diagnoses []diagnosis.Diagnosis - for _, key := range config.Datadog().AllKeysLowercased() { + for _, key := range pkgconfigsetup.Datadog().AllKeysLowercased() { splitKey := strings.Split(key, ".") keyName := splitKey[len(splitKey)-1] if keyName != "port" && !strings.HasPrefix(keyName, "port_") && !strings.HasSuffix(keyName, "_port") { continue } - value := config.Datadog().GetInt(key) + value := pkgconfigsetup.Datadog().GetInt(key) if value <= 0 { continue } diff --git a/pkg/diagnose/runner.go b/pkg/diagnose/runner.go index cc2e582fd3a8d..160a667b65d80 100644 --- a/pkg/diagnose/runner.go +++ b/pkg/diagnose/runner.go @@ -26,7 +26,7 @@ import ( "github.com/fatih/color" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/connectivity" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/diagnose/ports" @@ -271,19 +271,19 @@ func getDiagnosesFromCurrentProcess(diagCfg diagnosis.Config, suites []diagnosis func requestDiagnosesFromAgentProcess(diagCfg diagnosis.Config) (*diagnosis.DiagnoseResult, error) { // Get client to Agent's RPC call c := util.GetClient(false) - ipcAddress, err := pkgconfig.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("error getting IPC address for the agent: %w", err) } // Make sure we have a session token (for privileged information) - if err = util.SetAuthToken(pkgconfig.Datadog()); err != nil { + if err = util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { return nil, fmt.Errorf("auth error: %w", err) } // Form call end-point //nolint:revive // TODO(CINT) Fix revive linter - diagnoseURL := fmt.Sprintf("https://%v:%v/agent/diagnose", ipcAddress, pkgconfig.Datadog().GetInt("cmd_port")) + diagnoseURL := fmt.Sprintf("https://%v:%v/agent/diagnose", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) // Serialized diag config to pass it to Agent execution context var cfgSer []byte diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go index 4722aa6505202..3a59dde426759 100644 --- a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go +++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go @@ -220,6 +220,7 @@ func correctStructLocations(structParam *ditypes.Parameter, fieldLocations map[b offset, ok := fieldLocations[fieldID] if !ok { log.Infof("no field location available for %s.%s\n", fieldID.StructName, fieldID.FieldName) + structParam.ParameterPieces[i].NotCaptureReason = ditypes.NoFieldLocation continue } diff --git 
a/pkg/dynamicinstrumentation/diconfig/dwarf.go b/pkg/dynamicinstrumentation/diconfig/dwarf.go index 03bc95335d409..96e6d0e385660 100644 --- a/pkg/dynamicinstrumentation/diconfig/dwarf.go +++ b/pkg/dynamicinstrumentation/diconfig/dwarf.go @@ -161,10 +161,11 @@ entryLoop: } } - typeFields.Name = name - - // We've collected information about this ditypes.Parameter, append it to the slice of ditypes.Parameters for this function - result.Functions[funcName] = append(result.Functions[funcName], *typeFields) + if typeFields != nil { + // We've collected information about this ditypes.Parameter, append it to the slice of ditypes.Parameters for this function + typeFields.Name = name + result.Functions[funcName] = append(result.Functions[funcName], *typeFields) + } seenTypes = make(map[string]*seenTypeCounter) // reset seen types map for next parameter } diff --git a/pkg/dynamicinstrumentation/ditypes/analysis.go b/pkg/dynamicinstrumentation/ditypes/analysis.go index e10ab9657c53e..0aa4a698e5782 100644 --- a/pkg/dynamicinstrumentation/ditypes/analysis.go +++ b/pkg/dynamicinstrumentation/ditypes/analysis.go @@ -53,6 +53,7 @@ type NotCaptureReason uint8 const ( Unsupported NotCaptureReason = iota + 1 // Unsupported means the data type of the parameter is unsupported + NoFieldLocation // NoFieldLocation means the parameter wasn't captured because location information is missing from analysis FieldLimitReached // FieldLimitReached means the parameter wasn't captured because the data type has too many fields CaptureDepthReached // CaptureDepthReached means the parameter wasn't captures because the data type has too many levels ) diff --git a/pkg/ebpf/cgo/genpost.go b/pkg/ebpf/cgo/genpost.go index 512d0542c62d9..97035a29bea46 100644 --- a/pkg/ebpf/cgo/genpost.go +++ b/pkg/ebpf/cgo/genpost.go @@ -44,9 +44,9 @@ func main() { convertInt8ArrayToByteArrayRegex := regexp.MustCompile(`(` + strings.Join(int8variableNames, "|") + `)(\s+)\[(\d+)\]u?int8`) b = 
convertInt8ArrayToByteArrayRegex.ReplaceAll(b, []byte("$1$2[$3]byte")) - // Convert generated pointers to CGo structs to uintptr + // Convert generated pointers to CGo structs to uint64 convertPointerToUint64Regex := regexp.MustCompile(`\*_Ctype_struct_(\w+)`) - b = convertPointerToUint64Regex.ReplaceAll(b, []byte("uintptr")) + b = convertPointerToUint64Regex.ReplaceAll(b, []byte("uint64")) b, err = format.Source(b) if err != nil { diff --git a/pkg/ebpf/config.go b/pkg/ebpf/config.go index c400e69da049e..3bcea9ba21de2 100644 --- a/pkg/ebpf/config.go +++ b/pkg/ebpf/config.go @@ -9,7 +9,7 @@ import ( "strings" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - aconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -75,6 +75,10 @@ type Config struct { // AttachKprobesWithKprobeEventsABI uses the kprobe_events ABI to attach kprobes rather than the newer perf ABI. AttachKprobesWithKprobeEventsABI bool + + // BypassEnabled is used in tests only. + // It enables a ebpf-manager feature to bypass programs on-demand for controlled visibility. + BypassEnabled bool } func key(pieces ...string) string { @@ -83,7 +87,7 @@ func key(pieces ...string) string { // NewConfig creates a config with ebpf-related settings func NewConfig() *Config { - cfg := aconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() sysconfig.Adjust(cfg) c := &Config{ diff --git a/pkg/ebpf/map_cleaner.go b/pkg/ebpf/map_cleaner.go index 311d294929c46..6c0493731b4f9 100644 --- a/pkg/ebpf/map_cleaner.go +++ b/pkg/ebpf/map_cleaner.go @@ -71,7 +71,7 @@ func (mc *MapCleaner[K, V]) Clean(interval time.Duration, preClean func() bool, // of a version comparison because some distros have backported this API), and fallback to // the old method otherwise. The new API is also more efficient because it minimizes the number of allocations. 
cleaner := mc.cleanWithoutBatches - if maps.BatchAPISupported() { + if mc.emap.CanUseBatchAPI() { cleaner = mc.cleanWithBatches } @@ -135,6 +135,10 @@ func (mc *MapCleaner[K, V]) cleanWithBatches(nowTS int64, shouldClean func(nowTS keysToDelete = append(keysToDelete, key) } + if err := it.Err(); err != nil { + log.Errorf("error iterating map=%s: %s", mc.emap, err) + } + var deletionError error if len(keysToDelete) > 0 { deletedCount, deletionError = mc.emap.BatchDelete(keysToDelete) @@ -179,6 +183,10 @@ func (mc *MapCleaner[K, V]) cleanWithoutBatches(nowTS int64, shouldClean func(no keysToDelete = append(keysToDelete, key) } + if err := entries.Err(); err != nil { + log.Errorf("error iterating map=%s: %s", mc.emap, err) + } + for _, k := range keysToDelete { err := mc.emap.Delete(&k) if err == nil { diff --git a/pkg/ebpf/maps/generic_map.go b/pkg/ebpf/maps/generic_map.go index 0e762c48cf00c..a8fbdb4c06665 100644 --- a/pkg/ebpf/maps/generic_map.go +++ b/pkg/ebpf/maps/generic_map.go @@ -9,6 +9,8 @@ package maps import ( + "bytes" + "encoding/binary" "errors" "fmt" "reflect" @@ -23,6 +25,8 @@ import ( const defaultBatchSize = 100 +var ErrBatchAPINotSupported = errors.New("batch API not supported for this map: check whether key is fixed-size, kernel supports batch API and if this map is not per-cpu") + // BatchAPISupported returns true if the kernel supports the batch API for maps var BatchAPISupported = funcs.MemoizeNoError(func() bool { // Do feature detection directly instead of based on kernel versions for more accuracy @@ -53,7 +57,18 @@ var BatchAPISupported = funcs.MemoizeNoError(func() bool { // GenericMap is a wrapper around ebpf.Map that allows to use generic types. 
// Also includes support for batch iterations type GenericMap[K any, V any] struct { - m *ebpf.Map + m *ebpf.Map + keySupportsBatchAPI bool +} + +func canBinaryReadKey[K any]() bool { + kval := new(K) + buffer := make([]byte, unsafe.Sizeof(*kval)) + reader := bytes.NewReader(buffer) + + err := binary.Read(reader, binary.LittleEndian, kval) + + return err == nil } // NewGenericMap creates a new GenericMap with the given spec. Key and Value sizes are automatically @@ -78,13 +93,21 @@ func NewGenericMap[K any, V any](spec *ebpf.MapSpec) (*GenericMap[K, V], error) spec.ValueSize = uint32(unsafe.Sizeof(vval)) } + // See if we can perform binary.Read on the key type. If we can't we can't use the batch API + // for this map + keySupportsBatchAPI := canBinaryReadKey[K]() + if !keySupportsBatchAPI { + log.Warnf("Key type %T does not support binary.Read, batch API will not be used for this map", kval) + } + m, err := ebpf.NewMap(spec) if err != nil { return nil, err } return &GenericMap[K, V]{ - m: m, + m: m, + keySupportsBatchAPI: keySupportsBatchAPI, }, nil } @@ -169,11 +192,19 @@ func (g *GenericMap[K, V]) Delete(key *K) error { // BatchDelete deletes a batch of keys from the map. Returns the number of deleted items func (g *GenericMap[K, V]) BatchDelete(keys []K) (int, error) { + if !g.CanUseBatchAPI() { + return 0, ErrBatchAPINotSupported + } + return g.m.BatchDelete(keys, nil) } // BatchUpdate updates a batch of keys in the map func (g *GenericMap[K, V]) BatchUpdate(keys []K, values []V, opts *ebpf.BatchOptions) (int, error) { + if !g.CanUseBatchAPI() { + return 0, ErrBatchAPINotSupported + } + return g.m.BatchUpdate(keys, values, opts) } @@ -186,6 +217,12 @@ type GenericMapIterator[K any, V any] interface { Err() error } +// CanUseBatchAPI returns whether this map can use the batch API. Takes into account map type, batch API support +// in the kernel and the key type (keys with pointers). Returns false when any of these prevent using the batch API. 
+func (g *GenericMap[K, V]) CanUseBatchAPI() bool { + return g.keySupportsBatchAPI && BatchAPISupported() && !g.isPerCPU() +} + func isPerCPU(t ebpf.MapType) bool { switch t { case ebpf.PerCPUHash, ebpf.PerCPUArray, ebpf.LRUCPUHash: @@ -221,7 +258,7 @@ func (g *GenericMap[K, V]) IterateWithBatchSize(batchSize int) GenericMapIterato batchSize = int(g.m.MaxEntries()) } - if BatchAPISupported() && !g.isPerCPU() && batchSize > 1 { + if batchSize > 1 && g.CanUseBatchAPI() { it := &genericMapBatchIterator[K, V]{ m: g.m, batchSize: batchSize, diff --git a/pkg/ebpf/maps/generic_map_test.go b/pkg/ebpf/maps/generic_map_test.go index 261571b440a73..3bc59f494f8b6 100644 --- a/pkg/ebpf/maps/generic_map_test.go +++ b/pkg/ebpf/maps/generic_map_test.go @@ -531,3 +531,44 @@ func TestBatchUpdate(t *testing.T) { require.True(t, foundElements[i]) } } + +type keyWithPointer struct { + Pointer *uint32 + Value uint32 +} + +func TestIterateWithPointerKey(t *testing.T) { + require.NoError(t, rlimit.RemoveMemlock()) + + m, err := NewGenericMap[keyWithPointer, uint32](&ebpf.MapSpec{ + Type: ebpf.Hash, + MaxEntries: 100, + }) + require.NoError(t, err) + + numsToPut := uint32(50) + theNumber := uint32(42) + expectedNumbers := make([]uint32, numsToPut) + for i := uint32(0); i < numsToPut; i++ { + require.NoError(t, m.Put(&keyWithPointer{Pointer: &theNumber, Value: i}, &i)) + expectedNumbers[i] = i + } + + var k keyWithPointer + var v uint32 + actualNumbers := make([]uint32, numsToPut) + + // Should automatically revert to the single item iterator, as we cannot use pointers + // in batch iterators + it := m.IterateWithBatchSize(10) + require.NotNil(t, it) + for it.Next(&k, &v) { + actualNumbers[k.Value] = v + require.Equal(t, theNumber, *k.Pointer) + require.Equal(t, &theNumber, k.Pointer) + require.Equal(t, k.Value, v) + } + + require.NoError(t, it.Err()) + require.Equal(t, expectedNumbers, actualNumbers) +} diff --git a/pkg/ebpf/telemetry/errors_telemetry_test.go 
b/pkg/ebpf/telemetry/errors_telemetry_test.go index ceb412e3e1fc6..c03290b80a543 100644 --- a/pkg/ebpf/telemetry/errors_telemetry_test.go +++ b/pkg/ebpf/telemetry/errors_telemetry_test.go @@ -15,7 +15,7 @@ import ( "golang.org/x/sys/unix" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - aconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" manager "github.com/DataDog/ebpf-manager" @@ -48,7 +48,7 @@ type config struct { } func testConfig() *config { - cfg := aconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() sysconfig.Adjust(cfg) return &config{ diff --git a/pkg/ebpf/testdata/c/uprobe_attacher-test.c b/pkg/ebpf/testdata/c/uprobe_attacher-test.c new file mode 100644 index 0000000000000..bcc755a245786 --- /dev/null +++ b/pkg/ebpf/testdata/c/uprobe_attacher-test.c @@ -0,0 +1,20 @@ +// This program is used to test the UprobeAttacher object, it defines two simple probes that attach +// to userspace functions. +#include "kconfig.h" +#include "ktypes.h" +#include "bpf_metadata.h" +#include +#include "bpf_tracing.h" +#include "bpf_helpers.h" +#include "bpf_helpers_custom.h" +#include + +SEC("uprobe/SSL_connect") +int uprobe__SSL_connect(struct pt_regs *ctx) { + return 0; +} + +SEC("uprobe/main") +int uprobe__main(struct pt_regs *ctx) { + return 0; +} diff --git a/pkg/ebpf/uprobes/attacher.go b/pkg/ebpf/uprobes/attacher.go new file mode 100644 index 0000000000000..ae7c254f58dc7 --- /dev/null +++ b/pkg/ebpf/uprobes/attacher.go @@ -0,0 +1,945 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux_bpf + +package uprobes + +import ( + "bufio" + "errors" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + manager "github.com/DataDog/ebpf-manager" + "github.com/hashicorp/go-multierror" + "golang.org/x/exp/maps" + + "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" + "github.com/DataDog/datadog-agent/pkg/process/monitor" + "github.com/DataDog/datadog-agent/pkg/util/kernel" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// ExcludeMode defines the different options to exclude processes from attachment +type ExcludeMode uint8 + +const ( + // ExcludeSelf excludes the agent's own PID + ExcludeSelf ExcludeMode = 1 << iota + // ExcludeInternal excludes internal DataDog processes + ExcludeInternal + // ExcludeBuildkit excludes buildkitd processes + ExcludeBuildkit + // ExcludeContainerdTmp excludes containerd tmp mounts + ExcludeContainerdTmp +) + +var ( + // ErrSelfExcluded is returned when the PID is the same as the agent's PID. + ErrSelfExcluded = errors.New("self-excluded") + // ErrInternalDDogProcessRejected is returned when the PID is an internal datadog process. + ErrInternalDDogProcessRejected = errors.New("internal datadog process rejected") + // ErrNoMatchingRule is returned when no rule matches the shared library path. 
+ ErrNoMatchingRule = errors.New("no matching rule") + // regex that defines internal DataDog processes + internalProcessRegex = regexp.MustCompile("datadog-agent/.*/((process|security|trace)-agent|system-probe|agent)") +) + +// AttachTarget defines the target to which we should attach the probes, libraries or executables +type AttachTarget uint8 + +const ( + // AttachToExecutable attaches to the main executable + AttachToExecutable AttachTarget = 1 << iota + // AttachToSharedLibraries attaches to shared libraries + AttachToSharedLibraries +) + +// ProbeOptions is a structure that holds the options for a probe attachment. By default +// these values will be inferred from the probe name, but the user can override them if needed. +type ProbeOptions struct { + // IsManualReturn indicates that the probe is a manual return probe, which means that the inspector + // will find the return locations of the function and attach to them instead of using uretprobes. + IsManualReturn bool + + // Symbol is the symbol name to attach the probe to. This is useful when the symbol name is not a valid + // C identifier (e.g. Go functions) + Symbol string +} + +// AttachRule defines how to attach a certain set of probes. Uprobes can be attached +// to shared libraries or executables, this structure tells the attacher which ones to +// select and to which targets to do it. 
+type AttachRule struct { + // LibraryNameRegex defines which libraries should be matched by this rule + LibraryNameRegex *regexp.Regexp + // ExecutableFilter is a function that receives the path of the executable and returns true if it should be matched + ExecutableFilter func(string, *ProcInfo) bool + // Targets defines the targets to which we should attach the probes, shared libraries and/or executables + Targets AttachTarget + // ProbesSelectors defines which probes should be attached and how should we validate + // the attachment (e.g., whether we need all probes active or just one of them, or in a best-effort basis) + ProbesSelector []manager.ProbesSelector + // ProbeOptionsOverride allows the user to override the options for a probe that are inferred from the name + // of the probe. This way the user can set options such as manual return detection or symbol names for probes + // whose names aren't valid C identifiers. + ProbeOptionsOverride map[string]ProbeOptions +} + +// canTarget returns true if the rule matches the given AttachTarget +func (r *AttachRule) canTarget(target AttachTarget) bool { + return r.Targets&target != 0 +} + +func (r *AttachRule) matchesLibrary(path string) bool { + return r.canTarget(AttachToSharedLibraries) && r.LibraryNameRegex != nil && r.LibraryNameRegex.MatchString(path) +} + +func (r *AttachRule) matchesExecutable(path string, procInfo *ProcInfo) bool { + return r.canTarget(AttachToExecutable) && (r.ExecutableFilter == nil || r.ExecutableFilter(path, procInfo)) +} + +// getProbeOptions returns the options for a given probe, checking if we have specific overrides +// in this rule and, if not, using the options inferred from the probe name. 
+func (r *AttachRule) getProbeOptions(probeID manager.ProbeIdentificationPair) (ProbeOptions, error) { + if r.ProbeOptionsOverride != nil { + if options, ok := r.ProbeOptionsOverride[probeID.EBPFFuncName]; ok { + return options, nil + } + } + + symbol, isManualReturn, err := parseSymbolFromEBPFProbeName(probeID.EBPFFuncName) + if err != nil { + return ProbeOptions{}, err + } + + return ProbeOptions{ + Symbol: symbol, + IsManualReturn: isManualReturn, + }, nil +} + +// Validate checks whether the rule is valid, returns nil if it is, an error message otherwise +func (r *AttachRule) Validate() error { + var result error + + if r.Targets == 0 { + result = multierror.Append(result, errors.New("no targets specified")) + } + + if r.canTarget(AttachToSharedLibraries) && r.LibraryNameRegex == nil { + result = multierror.Append(result, errors.New("no library name regex specified")) + } + + for _, selector := range r.ProbesSelector { + for _, probeID := range selector.GetProbesIdentificationPairList() { + _, err := r.getProbeOptions(probeID) + if err != nil { + result = multierror.Append(result, fmt.Errorf("cannot get options for probe %s: %w", probeID.EBPFFuncName, err)) + } + } + } + + return result +} + +// AttacherConfig defines the configuration for the attacher +type AttacherConfig struct { + // Rules defines a series of rules that tell the attacher how to attach the probes + Rules []*AttachRule + + // ScanProcessesInterval defines the interval at which we scan for terminated processes and new processes we haven't seen + ScanProcessesInterval time.Duration + + // EnablePeriodicScanNewProcesses defines whether the attacher should scan for new processes periodically (with ScanProcessesInterval) + EnablePeriodicScanNewProcesses bool + + // ProcRoot is the root directory of the proc filesystem + ProcRoot string + + // ExcludeTargets defines the targets that should be excluded from the attacher + ExcludeTargets ExcludeMode + + // EbpfConfig is the configuration for the eBPF 
program + EbpfConfig *ebpf.Config + + // PerformInitialScan defines if the attacher should perform an initial scan of the processes before starting the monitor + PerformInitialScan bool + + // ProcessMonitorEventStream defines whether the process monitor is using the event stream + ProcessMonitorEventStream bool + + // EnableDetailedLogging makes the attacher log why it's attaching or not attaching to a process + // This is useful for debugging purposes, do not enable in production. + EnableDetailedLogging bool +} + +// SetDefaults configures the AttacherConfig with default values for those fields for which the compiler +// defaults are not enough +func (ac *AttacherConfig) SetDefaults() { + if ac.ScanProcessesInterval == 0 { + ac.ScanProcessesInterval = 30 * time.Second + } + + if ac.ProcRoot == "" { + ac.ProcRoot = kernel.HostProc() + } + + if ac.EbpfConfig == nil { + ac.EbpfConfig = ebpf.NewConfig() + } +} + +// Validate checks whether the configuration is valid, returns nil if it is, an error message otherwise +func (ac *AttacherConfig) Validate() error { + var errs []string + + if ac.EbpfConfig == nil { + errs = append(errs, "missing ebpf config") + } + + if ac.ProcRoot == "" { + errs = append(errs, "missing proc root") + } + + for _, rule := range ac.Rules { + err := rule.Validate() + if err != nil { + errs = append(errs, err.Error()) + } + } + + if len(errs) == 0 { + return nil + } + + return errors.New("invalid attacher configuration: " + strings.Join(errs, ", ")) +} + +// ProbeManager is an interface that defines the methods that a Manager implements, +// so that we can replace it in tests for a mock object +type ProbeManager interface { + // AddHook adds a hook to the manager with the given UID and probe + AddHook(UID string, probe *manager.Probe) error + + // DetachHook detaches the hook with the ID pair + DetachHook(manager.ProbeIdentificationPair) error + + // GetProbe returns the probe with the given ID pair, and a boolean indicating if it was found + 
GetProbe(manager.ProbeIdentificationPair) (*manager.Probe, bool) +} + +// FileRegistry is an interface that defines the methods that a FileRegistry implements, so that we can replace it in tests for a mock object +type FileRegistry interface { + // Register registers a file path to be tracked by the attacher for the given PID. The registry will call the activationCB when the file is opened + // the first time, and the deactivationCB when the file is closed. If the file is already registered, the alreadyRegistered callback + // will be called instead of the activationCB. + Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered utils.Callback) error + + // Unregister unregisters a file path from the attacher. The deactivation callback will be called for all + // files that were registered with the given PID and aren't used anymore. + Unregister(uint32) error + + // Clear clears the registry, removing all registered files + Clear() + + // GetRegisteredProcesses returns a map of all the processes that are currently registered in the registry + GetRegisteredProcesses() map[uint32]struct{} +} + +// AttachCallback is a callback that is called whenever a probe is attached successfully +type AttachCallback func(*manager.Probe, *utils.FilePath) + +// UprobeAttacher is a struct that handles the attachment of uprobes to processes and libraries +type UprobeAttacher struct { + // name contains the name of this attacher for identification + name string + + // done is a channel to signal the attacher to stop + done chan struct{} + + // wg is a wait group to wait for the attacher to stop + wg sync.WaitGroup + + // config holds the configuration of the attacher. 
Not a pointer as we want + // a copy of the configuration so that the user cannot change it, as we have + // certain cached values that we have no way to invalidate if the config + // changes after the attacher is created + config AttacherConfig + + // fileRegistry is used to keep track of the files we are attached to, and attach only once to each file + fileRegistry FileRegistry + + // manager is used to manage the eBPF probes (attach/detach to processes) + manager ProbeManager + + // inspector is used extract the metadata from the binaries + inspector BinaryInspector + + // pathToAttachedProbes maps a filesystem path to the probes attached to it. + // Used to detach them once the path is no longer used. + pathToAttachedProbes map[string][]manager.ProbeIdentificationPair + + // onAttachCallback is a callback that is called whenever a probe is attached + onAttachCallback AttachCallback + + // soWatcher is the program that launches events whenever shared libraries are + // opened + soWatcher *sharedlibraries.EbpfProgram + + // handlesLibrariesCached is a cache for the handlesLibraries function, avoiding + // recomputation every time + handlesLibrariesCached *bool + + // handlesExecutablesCached is a cache for the handlesExecutables function, avoiding + // recomputation every time + handlesExecutablesCached *bool +} + +// NewUprobeAttacher creates a new UprobeAttacher. Receives as arguments the +// name of the attacher, the configuration, the probe manager (ebpf.Manager +// usually), a callback to be called whenever a probe is attached (optional, can +// be nil), and the binary inspector to be used (e.g., while we usually want +// NativeBinaryInspector here, we might want the GoBinaryInspector to attach to +// Go functions in a different way). +// Note that the config is copied, not referenced. The attacher caches some values +// that depend on the configuration, so any changes to the configuration after the +// attacher would make those caches incoherent. 
This way we ensure that the attacher +// is always consistent with the configuration it was created with. +func NewUprobeAttacher(name string, config AttacherConfig, mgr ProbeManager, onAttachCallback AttachCallback, inspector BinaryInspector) (*UprobeAttacher, error) { + config.SetDefaults() + + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("invalid attacher configuration: %w", err) + } + + ua := &UprobeAttacher{ + name: name, + config: config, + fileRegistry: utils.NewFileRegistry(name), + manager: mgr, + onAttachCallback: onAttachCallback, + pathToAttachedProbes: make(map[string][]manager.ProbeIdentificationPair), + done: make(chan struct{}), + inspector: inspector, + } + + utils.AddAttacher(name, ua) + + return ua, nil +} + +// handlesLibraries returns whether the attacher has rules configured to attach to shared libraries. +// It caches the result to avoid recalculating it every time we are attaching to a PID. +func (ua *UprobeAttacher) handlesLibraries() bool { + if ua.handlesLibrariesCached != nil { + return *ua.handlesLibrariesCached + } + + result := false + for _, rule := range ua.config.Rules { + if rule.canTarget(AttachToSharedLibraries) { + result = true + break + } + } + ua.handlesLibrariesCached = &result + return result +} + +// handlesExecutables returns whether the attacher has rules configured to attach to executables directly +// It caches the result to avoid recalculating it every time we are attaching to a PID. 
+func (ua *UprobeAttacher) handlesExecutables() bool { + if ua.handlesExecutablesCached != nil { + return *ua.handlesExecutablesCached + } + + result := false + for _, rule := range ua.config.Rules { + if rule.canTarget(AttachToExecutable) { + result = true + break + } + } + ua.handlesExecutablesCached = &result + return result +} + +// Start starts the attacher, attaching to the processes and libraries as needed +func (ua *UprobeAttacher) Start() error { + var cleanupExec, cleanupExit func() + procMonitor := monitor.GetProcessMonitor() + err := procMonitor.Initialize(ua.config.ProcessMonitorEventStream) + if err != nil { + return fmt.Errorf("error initializing process monitor: %w", err) + } + + if ua.handlesExecutables() { + cleanupExec = procMonitor.SubscribeExec(ua.handleProcessStart) + } + // We always want to track process deletions, to avoid memory leaks + cleanupExit = procMonitor.SubscribeExit(ua.handleProcessExit) + + if ua.handlesLibraries() { + if !sharedlibraries.IsSupported(ua.config.EbpfConfig) { + return errors.New("shared libraries tracing not supported for this platform") + } + + ua.soWatcher = sharedlibraries.NewEBPFProgram(ua.config.EbpfConfig) + + err := ua.soWatcher.Init() + if err != nil { + return fmt.Errorf("error initializing shared library program: %w", err) + } + err = ua.soWatcher.Start() + if err != nil { + return fmt.Errorf("error starting shared library program: %w", err) + } + } + + if ua.config.PerformInitialScan { + // Initial scan only looks at existing processes, and as it's the first scan + // we don't have to track deletions + err := ua.Sync(true, false) + if err != nil { + return fmt.Errorf("error during initial scan: %w", err) + } + } + + ua.wg.Add(1) + go func() { + processSync := time.NewTicker(ua.config.ScanProcessesInterval) + + defer func() { + processSync.Stop() + if cleanupExec != nil { + cleanupExec() + } + cleanupExit() + procMonitor.Stop() + ua.fileRegistry.Clear() + if ua.soWatcher != nil { + ua.soWatcher.Stop() + 
} + ua.wg.Done() + log.Infof("uprobe attacher %s stopped", ua.name) + }() + + var sharedLibDataChan <-chan ebpf.DataEvent + var sharedLibLostChan <-chan uint64 + + if ua.soWatcher != nil { + sharedLibDataChan = ua.soWatcher.GetPerfHandler().DataChannel() + sharedLibLostChan = ua.soWatcher.GetPerfHandler().LostChannel() + } + + for { + select { + case <-ua.done: + return + case <-processSync.C: + // We always track process deletions in the scan, to avoid memory leaks. + _ = ua.Sync(ua.config.EnablePeriodicScanNewProcesses, true) + case event, ok := <-sharedLibDataChan: + if !ok { + return + } + _ = ua.handleLibraryOpen(&event) + case <-sharedLibLostChan: + // Nothing to do in this case + break + } + } + }() + log.Infof("uprobe attacher %s started", ua.name) + + return nil +} + +// Sync scans the proc filesystem for new processes and detaches from terminated ones +func (ua *UprobeAttacher) Sync(trackCreations, trackDeletions bool) error { + if !trackDeletions && !trackCreations { + return nil // Nothing to do + } + + var deletionCandidates map[uint32]struct{} + if trackDeletions { + deletionCandidates = ua.fileRegistry.GetRegisteredProcesses() + } + thisPID, err := kernel.RootNSPID() + if err != nil { + return err + } + + _ = kernel.WithAllProcs(ua.config.ProcRoot, func(pid int) error { + if pid == thisPID { // don't scan ourselves + return nil + } + + if trackDeletions { + if _, ok := deletionCandidates[uint32(pid)]; ok { + // We have previously hooked into this process and it remains active, + // so we remove it from the deletionCandidates list, and move on to the next PID + delete(deletionCandidates, uint32(pid)) + return nil + } + } + + if trackCreations { + // This is a new PID so we attempt to attach SSL probes to it + _ = ua.AttachPID(uint32(pid)) + } + return nil + }) + + if trackDeletions { + // At this point all entries from deletionCandidates are no longer alive, so + // we should detach our SSL probes from them + for pid := range deletionCandidates { + 
ua.handleProcessExit(pid) + } + } + + return nil +} + +// Stop stops the attacher +func (ua *UprobeAttacher) Stop() { + close(ua.done) + ua.wg.Wait() +} + +// handleProcessStart is called when a new process is started, wraps AttachPIDWithOptions but ignoring the error +// for API compatibility with processMonitor +func (ua *UprobeAttacher) handleProcessStart(pid uint32) { + _ = ua.AttachPIDWithOptions(pid, false) // Do not try to attach to libraries on process start, it hasn't loaded them yet +} + +// handleProcessExit is called when a process finishes, wraps DetachPID but ignoring the error +// for API compatibility with processMonitor +func (ua *UprobeAttacher) handleProcessExit(pid uint32) { + _ = ua.DetachPID(pid) +} + +func (ua *UprobeAttacher) handleLibraryOpen(event *ebpf.DataEvent) error { + defer event.Done() + + libpath := sharedlibraries.ToLibPath(event.Data) + path := sharedlibraries.ToBytes(&libpath) + + return ua.AttachLibrary(string(path), libpath.Pid) +} + +func (ua *UprobeAttacher) buildRegisterCallbacks(matchingRules []*AttachRule, procInfo *ProcInfo) (func(utils.FilePath) error, func(utils.FilePath) error) { + registerCB := func(p utils.FilePath) error { + err := ua.attachToBinary(p, matchingRules, procInfo) + if ua.config.EnableDetailedLogging { + log.Debugf("uprobes: attaching to %s (PID %d): err=%v", p.HostPath, procInfo.PID, err) + } + return err + } + unregisterCB := func(p utils.FilePath) error { + err := ua.detachFromBinary(p) + if ua.config.EnableDetailedLogging { + log.Debugf("uprobes: detaching from %s (PID %d): err=%v", p.HostPath, p.PID, err) + } + return err + } + + return registerCB, unregisterCB +} + +// AttachLibrary attaches the probes to the given library, opened by a given PID +func (ua *UprobeAttacher) AttachLibrary(path string, pid uint32) error { + if (ua.config.ExcludeTargets&ExcludeSelf) != 0 && int(pid) == os.Getpid() { + return ErrSelfExcluded + } + + matchingRules := ua.getRulesForLibrary(path) + if len(matchingRules) 
== 0 { + return ErrNoMatchingRule + } + + registerCB, unregisterCB := ua.buildRegisterCallbacks(matchingRules, NewProcInfo(ua.config.ProcRoot, pid)) + + return ua.fileRegistry.Register(path, pid, registerCB, unregisterCB, utils.IgnoreCB) +} + +// getRulesForLibrary returns the rules that match the given library path +func (ua *UprobeAttacher) getRulesForLibrary(path string) []*AttachRule { + var matchedRules []*AttachRule + + for _, rule := range ua.config.Rules { + if rule.matchesLibrary(path) { + matchedRules = append(matchedRules, rule) + } + } + return matchedRules +} + +// getRulesForExecutable returns the rules that match the given executable +func (ua *UprobeAttacher) getRulesForExecutable(path string, procInfo *ProcInfo) []*AttachRule { + var matchedRules []*AttachRule + + for _, rule := range ua.config.Rules { + if rule.matchesExecutable(path, procInfo) { + matchedRules = append(matchedRules, rule) + } + } + return matchedRules +} + +var errIterationStart = errors.New("iteration start") + +// getExecutablePath resolves the executable of the given PID looking in procfs. Automatically +// handles delays in procfs updates. 
Will return an error if the path cannot be resolved +func (ua *UprobeAttacher) getExecutablePath(pid uint32) (string, error) { + pidAsStr := strconv.FormatUint(uint64(pid), 10) + exePath := filepath.Join(ua.config.ProcRoot, pidAsStr, "exe") + + var binPath string + err := errIterationStart + end := time.Now().Add(procFSUpdateTimeout) + + for err != nil && end.After(time.Now()) { + binPath, err = os.Readlink(exePath) + if err != nil { + time.Sleep(time.Millisecond) + } + } + + if err != nil { + return "", err + } + + return binPath, nil +} + +const optionAttachToLibs = true + +// AttachPID attaches the corresponding probes to a given pid +func (ua *UprobeAttacher) AttachPID(pid uint32) error { + return ua.AttachPIDWithOptions(pid, optionAttachToLibs) +} + +// AttachPIDWithOptions attaches the corresponding probes to a given pid +func (ua *UprobeAttacher) AttachPIDWithOptions(pid uint32, attachToLibs bool) error { + if (ua.config.ExcludeTargets&ExcludeSelf) != 0 && int(pid) == os.Getpid() { + return ErrSelfExcluded + } + + procInfo := NewProcInfo(ua.config.ProcRoot, pid) + + // Only compute the binary path if we are going to need it. 
 It's better to do these two checks
+	// (which are cheap, the handlesExecutables function is cached) than to do the syscall
+	// every time
+	var binPath string
+	var err error
+	if ua.handlesExecutables() || (ua.config.ExcludeTargets&ExcludeInternal) != 0 {
+		binPath, err = procInfo.Exe()
+		if err != nil {
+			return err
+		}
+	}
+
+	if (ua.config.ExcludeTargets&ExcludeInternal) != 0 && internalProcessRegex.MatchString(binPath) {
+		return ErrInternalDDogProcessRejected
+	}
+
+	if ua.handlesExecutables() {
+		matchingRules := ua.getRulesForExecutable(binPath, procInfo)
+
+		if len(matchingRules) != 0 {
+			registerCB, unregisterCB := ua.buildRegisterCallbacks(matchingRules, procInfo)
+			err = ua.fileRegistry.Register(binPath, pid, registerCB, unregisterCB, utils.IgnoreCB)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if attachToLibs && ua.handlesLibraries() {
+		return ua.attachToLibrariesOfPID(pid)
+	}
+
+	return nil
+}
+
+// DetachPID detaches the uprobes attached to a PID
+func (ua *UprobeAttacher) DetachPID(pid uint32) error {
+	return ua.fileRegistry.Unregister(pid)
+}
+
+const buildKitProcessName = "buildkitd"
+
+func isBuildKit(procInfo *ProcInfo) bool {
+	comm, err := procInfo.Comm()
+	if err != nil {
+		return false
+	}
+	return strings.HasPrefix(comm, buildKitProcessName)
+}
+
+func isContainerdTmpMount(path string) bool {
+	return strings.Contains(path, "tmpmounts/containerd-mount")
+}
+
+// getUID() returns a key of length 5 as the kernel uprobe registration path is limited to a length of 64
+// ebpf-manager/utils.go:GenerateEventName() MaxEventNameLen = 64
+// MAX_EVENT_NAME_LEN (linux/kernel/trace/trace.h)
+//
+// Length 5 is an arbitrary value as the full string of the eventName format is
+//
+//	fmt.Sprintf("%s_%.*s_%s_%s", probeType, maxFuncNameLen, functionName, UID, attachPIDstr)
+//
+// functionName is variable but with a minimum guarantee of 10 chars
+func getUID(lib utils.PathIdentifier) string {
+	return lib.Key()[:5]
+}
+
+func 
parseSymbolFromEBPFProbeName(probeName string) (symbol string, isManualReturn bool, err error) { + parts := strings.Split(probeName, "__") + if len(parts) < 2 { + err = fmt.Errorf("invalid probe name %s, no double underscore (__) separating probe type and function name", probeName) + return + } + + symbol = parts[1] + if len(parts) > 2 { + if parts[2] == "return" { + isManualReturn = true + } else { + err = fmt.Errorf("invalid probe name %s, unexpected third part %s. Format should be probeType__funcName[__return]", probeName, parts[2]) + return + } + } + + return +} + +// attachToBinary attaches the probes to the given binary. Important: it does not perform any cleanup on failure. +// This is to match the behavior of the FileRegistry, which will call the deactivation callback on failure of the registration +// callback. +func (ua *UprobeAttacher) attachToBinary(fpath utils.FilePath, matchingRules []*AttachRule, procInfo *ProcInfo) error { + if ua.config.ExcludeTargets&ExcludeBuildkit != 0 && isBuildKit(procInfo) { + return fmt.Errorf("process %d is buildkitd, skipping", fpath.PID) + } else if ua.config.ExcludeTargets&ExcludeContainerdTmp != 0 && isContainerdTmpMount(fpath.HostPath) { + return fmt.Errorf("path %s from process %d is tempmount of containerd, skipping", fpath.HostPath, fpath.PID) + } + + symbolsToRequest, err := ua.computeSymbolsToRequest(matchingRules) + if err != nil { + return fmt.Errorf("error computing symbols to request for rules %+v: %w", matchingRules, err) + } + + inspectResult, isAttachable, err := ua.inspector.Inspect(fpath, symbolsToRequest) + if err != nil { + return fmt.Errorf("error inspecting %s: %w", fpath.HostPath, err) + } + if !isAttachable { + return fmt.Errorf("incompatible binary %s", fpath.HostPath) + } + + uid := getUID(fpath.ID) + + for _, rule := range matchingRules { + for _, selector := range rule.ProbesSelector { + err = ua.attachProbeSelector(selector, fpath, uid, rule, inspectResult) + if err != nil { + return err + } + 
} + } + + return nil +} + +func (ua *UprobeAttacher) attachProbeSelector(selector manager.ProbesSelector, fpath utils.FilePath, fpathUID string, rule *AttachRule, inspectResult map[string]bininspect.FunctionMetadata) error { + _, isBestEffort := selector.(*manager.BestEffort) + + for _, probeID := range selector.GetProbesIdentificationPairList() { + probeOpts, err := rule.getProbeOptions(probeID) + if err != nil { + return fmt.Errorf("error parsing probe name %s: %w", probeID.EBPFFuncName, err) + } + + data, found := inspectResult[probeOpts.Symbol] + if !found { + if isBestEffort { + return nil + } + // This should not happen, as Inspect should have already + // returned an error if mandatory symbols weren't found. + // However and for safety, we'll check again and return an + // error if the symbol is not found. + return fmt.Errorf("symbol %s not found in %s", probeOpts.Symbol, fpath.HostPath) + } + + var locationsToAttach []uint64 + var probeTypeCode string // to make unique UIDs between return/non-return probes + if probeOpts.IsManualReturn { + locationsToAttach = data.ReturnLocations + probeTypeCode = "r" + } else { + locationsToAttach = []uint64{data.EntryLocation} + probeTypeCode = "d" + } + + for i, location := range locationsToAttach { + newProbeID := manager.ProbeIdentificationPair{ + EBPFFuncName: probeID.EBPFFuncName, + UID: fmt.Sprintf("%s%s%d", fpathUID, probeTypeCode, i), // Make UID unique even if we have multiple locations + } + + probe, found := ua.manager.GetProbe(newProbeID) + if found { + // We have already probed this process, just ensure it's running and skip it + if !probe.IsRunning() { + err := probe.Attach() + if err != nil { + return fmt.Errorf("cannot attach running probe %v: %w", newProbeID, err) + } + } + if ua.config.EnableDetailedLogging { + log.Debugf("Probe %v already attached to %s", newProbeID, fpath.HostPath) + } + continue + } + + newProbe := &manager.Probe{ + ProbeIdentificationPair: newProbeID, + BinaryPath: fpath.HostPath, + 
UprobeOffset: location, + HookFuncName: probeOpts.Symbol, + } + err = ua.manager.AddHook("", newProbe) + if err != nil { + return fmt.Errorf("error attaching probe %+v: %w", newProbe, err) + } + + ebpf.AddProgramNameMapping(newProbe.ID(), newProbe.EBPFFuncName, ua.name) + ua.pathToAttachedProbes[fpath.HostPath] = append(ua.pathToAttachedProbes[fpath.HostPath], newProbeID) + + if ua.onAttachCallback != nil { + ua.onAttachCallback(newProbe, &fpath) + } + + // Update the probe IDs with the new UID, so that the validator can find them + // correctly (we're changing UIDs every time) + selector.EditProbeIdentificationPair(probeID, newProbeID) + + if ua.config.EnableDetailedLogging { + log.Debugf("Attached probe %v to %s (PID %d)", newProbeID, fpath.HostPath, fpath.PID) + } + } + } + + manager, ok := ua.manager.(*manager.Manager) + if ok { + if err := selector.RunValidator(manager); err != nil { + return fmt.Errorf("error validating probes: %w", err) + } + } + + return nil +} + +func (ua *UprobeAttacher) computeSymbolsToRequest(rules []*AttachRule) ([]SymbolRequest, error) { + var requests []SymbolRequest + for _, rule := range rules { + for _, selector := range rule.ProbesSelector { + _, isBestEffort := selector.(*manager.BestEffort) + for _, selector := range selector.GetProbesIdentificationPairList() { + opts, err := rule.getProbeOptions(selector) + if err != nil { + return nil, fmt.Errorf("error parsing probe name %s: %w", selector.EBPFFuncName, err) + } + + requests = append(requests, SymbolRequest{ + Name: opts.Symbol, + IncludeReturnLocations: opts.IsManualReturn, + BestEffort: isBestEffort, + }) + } + } + } + + return requests, nil +} + +func (ua *UprobeAttacher) detachFromBinary(fpath utils.FilePath) error { + for _, probeID := range ua.pathToAttachedProbes[fpath.HostPath] { + err := ua.manager.DetachHook(probeID) + if err != nil { + return fmt.Errorf("error detaching probe %+v: %w", probeID, err) + } + } + + ua.inspector.Cleanup(fpath) + + return nil +} + +func 
(ua *UprobeAttacher) getLibrariesFromMapsFile(pid int) ([]string, error) { + mapsPath := filepath.Join(ua.config.ProcRoot, strconv.Itoa(pid), "maps") + mapsFile, err := os.Open(mapsPath) + if err != nil { + return nil, fmt.Errorf("cannot open maps file at %s: %w", mapsPath, err) + } + defer mapsFile.Close() + + scanner := bufio.NewScanner(bufio.NewReader(mapsFile)) + libs := make(map[string]struct{}) + for scanner.Scan() { + line := scanner.Text() + cols := strings.Fields(line) + // ensuring we have exactly 6 elements (skip '(deleted)' entries) in the line, and the 4th element (inode) is + // not zero (indicates it is a path, and not an anonymous path). + if len(cols) == 6 && cols[4] != "0" { + libs[cols[5]] = struct{}{} + } + } + + return maps.Keys(libs), nil +} + +func (ua *UprobeAttacher) attachToLibrariesOfPID(pid uint32) error { + registerErrors := make([]error, 0) + successfulMatches := make([]string, 0) + libs, err := ua.getLibrariesFromMapsFile(int(pid)) + if err != nil { + return err + } + for _, libpath := range libs { + err := ua.AttachLibrary(libpath, pid) + + if err == nil { + successfulMatches = append(successfulMatches, libpath) + } else if !errors.Is(err, ErrNoMatchingRule) { + registerErrors = append(registerErrors, err) + } + } + + if len(successfulMatches) == 0 { + if len(registerErrors) == 0 { + return nil // No libraries found to attach + } + return fmt.Errorf("no rules matched for pid %d, errors: %v", pid, registerErrors) + } + if len(registerErrors) > 0 { + return fmt.Errorf("partially hooked (%v), errors while attaching pid %d: %v", successfulMatches, pid, registerErrors) + } + return nil +} diff --git a/pkg/ebpf/uprobes/attacher_test.go b/pkg/ebpf/uprobes/attacher_test.go new file mode 100644 index 0000000000000..e71a62d771306 --- /dev/null +++ b/pkg/ebpf/uprobes/attacher_test.go @@ -0,0 +1,935 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package uprobes + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + manager "github.com/DataDog/ebpf-manager" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" + eventmonitortestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/testutil" + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" + fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" + "github.com/DataDog/datadog-agent/pkg/process/monitor" + procmontestutil "github.com/DataDog/datadog-agent/pkg/process/monitor/testutil" + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +// === Tests + +func TestCanCreateAttacher(t *testing.T) { + ua, err := NewUprobeAttacher("mock", AttacherConfig{}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) +} + +func TestAttachPidExcludesInternal(t *testing.T) { + exe := "datadog-agent/bin/system-probe" + procRoot := CreateFakeProcFS(t, []FakeProcFSEntry{{Pid: 1, Cmdline: exe, Command: exe, Exe: exe}}) + config := AttacherConfig{ + ExcludeTargets: ExcludeInternal, + ProcRoot: procRoot, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + err = ua.AttachPIDWithOptions(1, false) + require.ErrorIs(t, err, ErrInternalDDogProcessRejected) +} + +func TestAttachPidExcludesSelf(t *testing.T) { + config := AttacherConfig{ + ExcludeTargets: ExcludeSelf, + } + ua, err := 
NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + err = ua.AttachPIDWithOptions(uint32(os.Getpid()), false) + require.ErrorIs(t, err, ErrSelfExcluded) +} + +func TestGetExecutablePath(t *testing.T) { + exe := "/bin/bash" + procRoot := CreateFakeProcFS(t, []FakeProcFSEntry{{Pid: 1, Cmdline: "", Command: exe, Exe: exe}}) + config := AttacherConfig{ + ProcRoot: procRoot, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + path, err := ua.getExecutablePath(1) + require.NoError(t, err, "failed to get executable path for existing PID") + require.Equal(t, path, exe) + + path, err = ua.getExecutablePath(404) + require.Error(t, err, "should fail to get executable path for non-existing PID") + require.Empty(t, path, "should return empty path for non-existing PID") +} + +const mapsFileSample = ` +08048000-08049000 r-xp 00000000 03:00 8312 /opt/test +08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test +0804a000-0806b000 rw-p 00000000 00:00 0 [heap] +a7cb1000-a7cb2000 ---p 00000000 00:00 0 +a7cb2000-a7eb2000 rw-p 00000000 00:00 0 +a7eb2000-a7eb3000 ---p 00000000 00:00 0 +a7eb3000-a7ed5000 rw-p 00000000 00:00 0 +a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 +a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6 +a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6 +a800b000-a800e000 rw-p 00000000 00:00 0 +a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0 +a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0 +a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0 +a8024000-a8027000 rw-p 00000000 00:00 0 +a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2 +a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2 +a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2 +aff35000-aff4a000 rw-p 00000000 00:00 0 [stack] +ffffe000-fffff000 r-xp 00000000 00:00 0 
[vdso] +01c00000-02000000 rw-p 00000000 00:0d 6123886 /anon_hugepage (deleted) +` + +func TestGetLibrariesFromMapsFile(t *testing.T) { + pid := 1 + procRoot := CreateFakeProcFS(t, []FakeProcFSEntry{{Pid: uint32(pid), Maps: mapsFileSample}}) + config := AttacherConfig{ + ProcRoot: procRoot, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + libs, err := ua.getLibrariesFromMapsFile(pid) + require.NoError(t, err, "failed to get libraries from maps file") + require.NotEmpty(t, libs, "should return libraries from maps file") + expectedLibs := []string{"/opt/test", "/lib/libc.so.6", "/lib/libpthread.so.0", "/lib/ld-linux.so.2"} + require.ElementsMatch(t, expectedLibs, libs) +} + +func TestComputeRequestedSymbols(t *testing.T) { + ua, err := NewUprobeAttacher("mock", AttacherConfig{}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + selectorsOnlyAllOf := []manager.ProbesSelector{ + &manager.AllOf{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + }, + }, + } + + t.Run("OnlyMandatory", func(tt *testing.T) { + rules := []*AttachRule{{ProbesSelector: selectorsOnlyAllOf}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect"}}, requested) + }) + + selectorsBestEffortAndMandatory := []manager.ProbesSelector{ + &manager.AllOf{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + }, + }, + &manager.BestEffort{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__ThisFunctionDoesNotExistEver"}}, + }, + }, + } + + t.Run("MandatoryAndBestEffort", func(tt *testing.T) { + rules 
:= []*AttachRule{{ProbesSelector: selectorsBestEffortAndMandatory}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect"}, {Name: "ThisFunctionDoesNotExistEver", BestEffort: true}}, requested) + }) + + selectorsBestEffort := []manager.ProbesSelector{ + &manager.BestEffort{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__ThisFunctionDoesNotExistEver"}}, + }, + }, + } + + t.Run("OnlyBestEffort", func(tt *testing.T) { + rules := []*AttachRule{{ProbesSelector: selectorsBestEffort}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect", BestEffort: true}, {Name: "ThisFunctionDoesNotExistEver", BestEffort: true}}, requested) + }) + + selectorsWithReturnFunctions := []manager.ProbesSelector{ + &manager.AllOf{ + Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect__return"}}, + }, + }, + } + + t.Run("SelectorsWithReturnFunctions", func(tt *testing.T) { + rules := []*AttachRule{{ProbesSelector: selectorsWithReturnFunctions}} + requested, err := ua.computeSymbolsToRequest(rules) + require.NoError(tt, err) + require.ElementsMatch(tt, []SymbolRequest{{Name: "SSL_connect", IncludeReturnLocations: true}}, requested) + }) +} + +func TestStartAndStopWithoutLibraryWatcher(t *testing.T) { + ua, err := NewUprobeAttacher("mock", AttacherConfig{}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + err = ua.Start() + require.NoError(t, err) + + ua.Stop() +} + +func TestStartAndStopWithLibraryWatcher(t *testing.T) { + ebpfCfg := ddebpf.NewConfig() + 
require.NotNil(t, ebpfCfg) + if !sharedlibraries.IsSupported(ebpfCfg) { + t.Skip("Kernel version does not support shared libraries") + return + } + + rules := []*AttachRule{{LibraryNameRegex: regexp.MustCompile(`libssl.so`), Targets: AttachToSharedLibraries}} + ua, err := NewUprobeAttacher("mock", AttacherConfig{Rules: rules, EbpfConfig: ebpfCfg}, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + require.True(t, ua.handlesLibraries()) + + err = ua.Start() + require.NoError(t, err) + require.NotNil(t, ua.soWatcher) + + ua.Stop() +} + +func TestRuleMatches(t *testing.T) { + t.Run("Library", func(tt *testing.T) { + rule := AttachRule{ + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToSharedLibraries, + } + require.True(tt, rule.matchesLibrary("pkg/network/usm/testdata/site-packages/dd-trace/libssl.so.arm64")) + require.False(tt, rule.matchesExecutable("pkg/network/usm/testdata/site-packages/dd-trace/libssl.so.arm64", nil)) + }) + + t.Run("Executable", func(tt *testing.T) { + rule := AttachRule{ + Targets: AttachToExecutable, + } + require.False(tt, rule.matchesLibrary("/bin/bash")) + require.True(tt, rule.matchesExecutable("/bin/bash", nil)) + }) + + t.Run("ExecutableWithFuncFilter", func(tt *testing.T) { + rule := AttachRule{ + Targets: AttachToExecutable, + ExecutableFilter: func(path string, _ *ProcInfo) bool { + return strings.Contains(path, "bash") + }, + } + require.False(tt, rule.matchesLibrary("/bin/bash")) + require.True(tt, rule.matchesExecutable("/bin/bash", nil)) + require.False(tt, rule.matchesExecutable("/bin/thing", nil)) + }) +} + +func TestMonitor(t *testing.T) { + ebpfCfg := ddebpf.NewConfig() + require.NotNil(t, ebpfCfg) + if !sharedlibraries.IsSupported(ebpfCfg) { + t.Skip("Kernel version does not support shared libraries") + return + } + + config := AttacherConfig{ + Rules: []*AttachRule{{ + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToExecutable | AttachToSharedLibraries, 
+ }}, + ProcessMonitorEventStream: false, + EbpfConfig: ebpfCfg, + } + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(t, err) + require.NotNil(t, ua) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry to return on any calls, we will check the values later + mockRegistry.On("Clear").Return() + mockRegistry.On("Unregister", mock.Anything).Return(nil) + mockRegistry.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + lib := getLibSSLPath(t) + + require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + cmd, err := fileopener.OpenFromAnotherProcess(t, lib) + require.NoError(t, err) + require.Eventually(t, func() bool { + return methodHasBeenCalledAtLeastTimes(mockRegistry, "Register", 2) + }, 1500*time.Millisecond, 10*time.Millisecond, "received calls %v", mockRegistry.Calls) + + mockRegistry.AssertCalled(t, "Register", lib, uint32(cmd.Process.Pid), mock.Anything, mock.Anything, mock.Anything) + mockRegistry.AssertCalled(t, "Register", cmd.Path, uint32(cmd.Process.Pid), mock.Anything, mock.Anything, mock.Anything) +} + +func TestSync(t *testing.T) { + selfPID, err := kernel.RootNSPID() + require.NoError(t, err) + rules := []*AttachRule{{ + Targets: AttachToExecutable | AttachToSharedLibraries, + LibraryNameRegex: regexp.MustCompile(`.*`), + ExecutableFilter: func(path string, _ *ProcInfo) bool { return !strings.Contains(path, "donttrack") }, + }} + + t.Run("DetectsExistingProcesses", func(tt *testing.T) { + procs := []FakeProcFSEntry{ + {Pid: 1, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 2, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 3, Cmdline: "/bin/donttrack", Command: "/bin/donttrack", Exe: "/bin/donttrack"}, + {Pid: uint32(selfPID), Cmdline: "datadog-agent/bin/system-probe", Command: "sysprobe", Exe: "sysprobe"}, + } + procFS := CreateFakeProcFS(t, procs) + + config := 
AttacherConfig{ + ProcRoot: procFS, + Rules: rules, + EnablePeriodicScanNewProcesses: true, + } + + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(tt, err) + require.NotNil(tt, ua) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry which two processes to expect + mockRegistry.On("Register", "/bin/bash", uint32(1), mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockRegistry.On("Register", "/bin/bash", uint32(2), mock.Anything, mock.Anything, mock.Anything).Return(nil) + + err = ua.Sync(true, false) + require.NoError(tt, err) + + mockRegistry.AssertExpectations(tt) + }) + + t.Run("RemovesDeletedProcesses", func(tt *testing.T) { + procs := []FakeProcFSEntry{ + {Pid: 1, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 2, Cmdline: "/bin/bash", Command: "/bin/bash", Exe: "/bin/bash"}, + {Pid: 3, Cmdline: "/bin/donttrack", Command: "/bin/donttrack", Exe: "/bin/donttrack"}, + {Pid: uint32(selfPID), Cmdline: "datadog-agent/bin/system-probe", Command: "sysprobe", Exe: "sysprobe"}, + } + procFS := CreateFakeProcFS(t, procs) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: rules, + EnablePeriodicScanNewProcesses: true, + } + + ua, err := NewUprobeAttacher("mock", config, &MockManager{}, nil, nil) + require.NoError(tt, err) + require.NotNil(tt, ua) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry which two processes to expect + mockRegistry.On("Register", "/bin/bash", uint32(1), mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockRegistry.On("Register", "/bin/bash", uint32(2), mock.Anything, mock.Anything, mock.Anything).Return(nil) + mockRegistry.On("GetRegisteredProcesses").Return(map[uint32]struct{}{}) + + err = ua.Sync(true, true) + require.NoError(tt, err) + mockRegistry.AssertExpectations(tt) + + // Now remove one process + require.NoError(t, os.RemoveAll(filepath.Join(procFS, 
"2"))) + mockRegistry.ExpectedCalls = nil // Clear expected calls + mockRegistry.On("GetRegisteredProcesses").Return(map[uint32]struct{}{1: {}, 2: {}}) + mockRegistry.On("Unregister", uint32(2)).Return(nil) + + require.NoError(t, ua.Sync(true, true)) + mockRegistry.AssertExpectations(tt) + }) +} + +func TestParseSymbolFromEBPFProbeName(t *testing.T) { + t.Run("ValidName", func(tt *testing.T) { + name := "uprobe__SSL_connect" + symbol, manualReturn, err := parseSymbolFromEBPFProbeName(name) + require.NoError(tt, err) + require.False(tt, manualReturn) + require.Equal(tt, "SSL_connect", symbol) + }) + t.Run("ValidNameWithReturnMarker", func(tt *testing.T) { + name := "uprobe__SSL_connect__return" + symbol, manualReturn, err := parseSymbolFromEBPFProbeName(name) + require.NoError(tt, err) + require.True(tt, manualReturn) + require.Equal(tt, "SSL_connect", symbol) + }) + t.Run("InvalidNameWithUnrecognizedThirdPart", func(tt *testing.T) { + name := "uprobe__SSL_connect__something" + _, _, err := parseSymbolFromEBPFProbeName(name) + require.Error(tt, err) + }) + t.Run("InvalidNameNoSymbol", func(tt *testing.T) { + name := "nothing" + _, _, err := parseSymbolFromEBPFProbeName(name) + require.Error(tt, err) + }) +} + +func TestAttachToBinaryAndDetach(t *testing.T) { + proc := FakeProcFSEntry{ + Pid: 1, + Cmdline: "/bin/bash", + Exe: "/bin/bash", + } + procFS := CreateFakeProcFS(t, []FakeProcFSEntry{proc}) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: []*AttachRule{ + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"}}, + }, + }, + }, + } + + mockMan := &MockManager{} + inspector := &MockBinaryInspector{} + ua, err := NewUprobeAttacher("mock", config, mockMan, nil, inspector) + require.NoError(t, err) + require.NotNil(t, ua) + + target := utils.FilePath{ + HostPath: proc.Exe, + PID: proc.Pid, + } + + // Tell the 
inspector to return a simple symbol + symbolToAttach := bininspect.FunctionMetadata{EntryLocation: 0x1234} + inspector.On("Inspect", target, mock.Anything).Return(map[string]bininspect.FunctionMetadata{"SSL_connect": symbolToAttach}, true, nil) + inspector.On("Cleanup", mock.Anything).Return(nil) + + // Tell the manager to return no probe when finding an existing one + var nilProbe *manager.Probe // we can't just pass nil directly, if we do that the mock cannot convert it to *manager.Probe + mockMan.On("GetProbe", mock.Anything).Return(nilProbe, false) + + // Tell the manager to accept the probe + uid := "1hipfd0" // this is the UID that the manager will generate, from a path identifier with 0/0 as device/inode + expectedProbe := &manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect", UID: uid}, + BinaryPath: target.HostPath, + UprobeOffset: symbolToAttach.EntryLocation, + HookFuncName: "SSL_connect", + } + mockMan.On("AddHook", mock.Anything, expectedProbe).Return(nil) + + err = ua.attachToBinary(target, config.Rules, NewProcInfo(procFS, proc.Pid)) + require.NoError(t, err) + mockMan.AssertExpectations(t) + + mockMan.On("DetachHook", expectedProbe.ProbeIdentificationPair).Return(nil) + err = ua.detachFromBinary(target) + require.NoError(t, err) + inspector.AssertExpectations(t) + mockMan.AssertExpectations(t) +} + +func TestAttachToBinaryAtReturnLocation(t *testing.T) { + proc := FakeProcFSEntry{ + Pid: 1, + Cmdline: "/bin/bash", + Exe: "/bin/bash", + } + procFS := CreateFakeProcFS(t, []FakeProcFSEntry{proc}) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: []*AttachRule{ + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect__return"}}, + }, + }, + }, + } + + mockMan := &MockManager{} + inspector := &MockBinaryInspector{} + ua, err := NewUprobeAttacher("mock", 
config, mockMan, nil, inspector) + require.NoError(t, err) + require.NotNil(t, ua) + + target := utils.FilePath{ + HostPath: proc.Exe, + PID: proc.Pid, + } + + // Tell the inspector to return a simple symbol + symbolToAttach := bininspect.FunctionMetadata{EntryLocation: 0x1234, ReturnLocations: []uint64{0x0, 0x1}} + inspector.On("Inspect", target, mock.Anything).Return(map[string]bininspect.FunctionMetadata{"SSL_connect": symbolToAttach}, true, nil) + + // Tell the manager to return no probe when finding an existing one + var nilProbe *manager.Probe // we can't just pass nil directly, if we do that the mock cannot convert it to *manager.Probe + mockMan.On("GetProbe", mock.Anything).Return(nilProbe, false) + + // Tell the manager to accept the probe + uidBase := "1hipf" // this is the UID that the manager will generate, from a path identifier with 0/0 as device/inode + for n := 0; n < len(symbolToAttach.ReturnLocations); n++ { + expectedProbe := &manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "uprobe__SSL_connect__return", + UID: fmt.Sprintf("%sr%d", uidBase, n)}, + BinaryPath: target.HostPath, + UprobeOffset: symbolToAttach.ReturnLocations[n], + HookFuncName: "SSL_connect", + } + mockMan.On("AddHook", mock.Anything, expectedProbe).Return(nil) + } + + err = ua.attachToBinary(target, config.Rules, NewProcInfo(procFS, proc.Pid)) + require.NoError(t, err) + inspector.AssertExpectations(t) + mockMan.AssertExpectations(t) +} + +const mapsFileWithSSL = ` +08048000-08049000 r-xp 00000000 03:00 8312 /usr/lib/libssl.so +` + +func TestAttachToLibrariesOfPid(t *testing.T) { + proc := FakeProcFSEntry{ + Pid: 1, + Cmdline: "/bin/bash", + Exe: "/bin/bash", + Maps: mapsFileWithSSL, + } + procFS := CreateFakeProcFS(t, []FakeProcFSEntry{proc}) + + config := AttacherConfig{ + ProcRoot: procFS, + Rules: []*AttachRule{ + { + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + ProbesSelector: []manager.ProbesSelector{ + 
&manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "uprobe__SSL_connect", + }, + }, + }, + Targets: AttachToSharedLibraries, + }, + { + LibraryNameRegex: regexp.MustCompile(`libtls.so`), + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "uprobe__TLS_connect", + }, + }, + }, + Targets: AttachToSharedLibraries, + }, + }, + } + + mockMan := &MockManager{} + inspector := &MockBinaryInspector{} + registry := &MockFileRegistry{} + ua, err := NewUprobeAttacher("mock", config, mockMan, nil, inspector) + require.NoError(t, err) + require.NotNil(t, ua) + ua.fileRegistry = registry + + target := utils.FilePath{ + HostPath: "/usr/lib/libssl.so", + PID: proc.Pid, + } + + // Tell the inspector to return a simple symbol + symbolToAttach := bininspect.FunctionMetadata{EntryLocation: 0x1234} + inspector.On("Inspect", target, mock.Anything).Return(map[string]bininspect.FunctionMetadata{"SSL_connect": symbolToAttach}, true, nil) + + // Tell the manager to return no probe when finding an existing one + var nilProbe *manager.Probe // we can't just pass nil directly, if we do that the mock cannot convert it to *manager.Probe + mockMan.On("GetProbe", mock.Anything).Return(nilProbe, false) + + // Tell the manager to accept the probe + uid := "1hipfd0" // this is the UID that the manager will generate, from a path identifier with 0/0 as device/inode + expectedProbe := &manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect", UID: uid}, + BinaryPath: target.HostPath, + UprobeOffset: symbolToAttach.EntryLocation, + HookFuncName: "SSL_connect", + } + mockMan.On("AddHook", mock.Anything, expectedProbe).Return(nil) + + // Tell the registry to expect the process + registry.On("Register", target.HostPath, uint32(proc.Pid), mock.Anything, mock.Anything, mock.Anything).Return(nil) + + // if this 
function calls the manager adding a probe with a different name than the one we requested, the test + // will fail + err = ua.attachToLibrariesOfPID(proc.Pid) + require.NoError(t, err) + + // We need to retrieve the calls from the registry and manually call the callback + // to simulate the process being registered + registry.AssertExpectations(t) + cb := registry.Calls[0].Arguments[2].(utils.Callback) + require.NoError(t, cb(target)) + + inspector.AssertExpectations(t) + mockMan.AssertExpectations(t) +} + +type attachedProbe struct { + probe *manager.Probe + fpath *utils.FilePath +} + +func (ap *attachedProbe) String() string { + return fmt.Sprintf("attachedProbe{probe: %s, PID: %d, path: %s}", ap.probe.EBPFFuncName, ap.fpath.PID, ap.fpath.HostPath) +} + +func stringifyAttachedProbes(probes []attachedProbe) []string { + var result []string + for _, ap := range probes { + result = append(result, ap.String()) + } + return result +} + +func TestUprobeAttacher(t *testing.T) { + lib := getLibSSLPath(t) + ebpfCfg := ddebpf.NewConfig() + require.NotNil(t, ebpfCfg) + + if !sharedlibraries.IsSupported(ebpfCfg) { + t.Skip("Kernel version does not support shared libraries") + return + } + + buf, err := bytecode.GetReader(ebpfCfg.BPFDir, "uprobe_attacher-test.o") + require.NoError(t, err) + t.Cleanup(func() { buf.Close() }) + + connectProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"} + mainProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__main"} + + mgr := manager.Manager{} + + attacherCfg := AttacherConfig{ + Rules: []*AttachRule{ + { + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToSharedLibraries, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: connectProbeID}, + }, + }, + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: mainProbeID}, + }, + ProbeOptionsOverride: 
map[string]ProbeOptions{ + mainProbeID.EBPFFuncName: { + IsManualReturn: false, + Symbol: "main.main", + }, + }, + }, + }, + ExcludeTargets: ExcludeInternal | ExcludeSelf, + EbpfConfig: ebpfCfg, + EnableDetailedLogging: true, + } + + var attachedProbes []attachedProbe + + callback := func(probe *manager.Probe, fpath *utils.FilePath) { + attachedProbes = append(attachedProbes, attachedProbe{probe: probe, fpath: fpath}) + } + + ua, err := NewUprobeAttacher("test", attacherCfg, &mgr, callback, &NativeBinaryInspector{}) + require.NoError(t, err) + require.NotNil(t, ua) + + require.NoError(t, mgr.InitWithOptions(buf, manager.Options{})) + require.NoError(t, mgr.Start()) + t.Cleanup(func() { mgr.Stop(manager.CleanAll) }) + require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + cmd, err := fileopener.OpenFromAnotherProcess(t, lib) + require.NoError(t, err) + + var connectProbe, mainProbe *attachedProbe + require.Eventually(t, func() bool { + // Find the probes we want to attach. + // Note that we might attach to other processes, so filter by ours only + for _, ap := range attachedProbes { + if ap.probe.EBPFFuncName == "uprobe__SSL_connect" && ap.fpath.PID == uint32(cmd.Process.Pid) { + connectProbe = &ap + } else if ap.probe.EBPFFuncName == "uprobe__main" && ap.fpath.PID == uint32(cmd.Process.Pid) { + mainProbe = &ap + } + } + + return connectProbe != nil && mainProbe != nil + }, 5*time.Second, 50*time.Millisecond, "expected to attach 2 probes, got %d: %v (%v)", len(attachedProbes), attachedProbes, stringifyAttachedProbes(attachedProbes)) + + require.NotNil(t, connectProbe) + // Allow suffix, as sometimes the path reported is /proc//root/ + require.True(t, strings.HasSuffix(connectProbe.fpath.HostPath, lib), "expected to attach to %s, got %s", lib, connectProbe.fpath.HostPath) + require.Equal(t, uint32(cmd.Process.Pid), connectProbe.fpath.PID) + + require.NotNil(t, mainProbe) + require.Equal(t, uint32(cmd.Process.Pid), mainProbe.fpath.PID) +} + +func 
launchProcessMonitor(t *testing.T, useEventStream bool) { + pm := monitor.GetProcessMonitor() + t.Cleanup(pm.Stop) + require.NoError(t, pm.Initialize(useEventStream)) + if useEventStream { + eventmonitortestutil.StartEventMonitor(t, procmontestutil.RegisterProcessMonitorEventConsumer) + } +} + +func createTempTestFile(t *testing.T, name string) (string, utils.PathIdentifier) { + fullPath := filepath.Join(t.TempDir(), name) + + f, err := os.Create(fullPath) + f.WriteString("foobar") + require.NoError(t, err) + f.Close() + t.Cleanup(func() { + os.RemoveAll(fullPath) + }) + + pathID, err := utils.NewPathIdentifier(fullPath) + require.NoError(t, err) + + return fullPath, pathID +} + +type SharedLibrarySuite struct { + suite.Suite +} + +func TestAttacherSharedLibrary(t *testing.T) { + ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(tt *testing.T) { + if !sharedlibraries.IsSupported(ddebpf.NewConfig()) { + tt.Skip("shared library tracing not supported for this platform") + } + + tt.Run("netlink", func(ttt *testing.T) { + launchProcessMonitor(ttt, false) + suite.Run(ttt, new(SharedLibrarySuite)) + }) + + tt.Run("event stream", func(ttt *testing.T) { + launchProcessMonitor(ttt, true) + suite.Run(ttt, new(SharedLibrarySuite)) + }) + }) +} + +func (s *SharedLibrarySuite) TestSingleFile() { + t := s.T() + ebpfCfg := ddebpf.NewConfig() + + fooPath1, _ := createTempTestFile(t, "foo-libssl.so") + + attachCfg := AttacherConfig{ + Rules: []*AttachRule{{ + LibraryNameRegex: regexp.MustCompile(`foo-libssl.so`), + Targets: AttachToSharedLibraries, + }}, + EbpfConfig: ebpfCfg, + } + + ua, err := NewUprobeAttacher("test", attachCfg, &MockManager{}, nil, nil) + require.NoError(t, err) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry to return on any calls, we will check the values later + mockRegistry.On("Clear").Return() + mockRegistry.On("Unregister", 
mock.Anything).Return(nil) + mockRegistry.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + // open files + cmd, err := fileopener.OpenFromAnotherProcess(t, fooPath1) + require.NoError(t, err) + require.Eventually(t, func() bool { + return methodHasBeenCalledTimes(mockRegistry, "Register", 1) + }, 1500*time.Millisecond, 10*time.Millisecond, "received calls %v", mockRegistry.Calls) + + mockRegistry.AssertCalled(t, "Register", fooPath1, uint32(cmd.Process.Pid), mock.Anything, mock.Anything, mock.Anything) + + mockRegistry.Calls = nil + require.NoError(t, cmd.Process.Kill()) + + require.Eventually(t, func() bool { + // Other processes might have finished and forced the Unregister call to the registry + return methodHasBeenCalledWithPredicate(mockRegistry, "Unregister", func(call mock.Call) bool { + return call.Arguments[0].(uint32) == uint32(cmd.Process.Pid) + }) + }, time.Second*10, 200*time.Millisecond, "received calls %v", mockRegistry.Calls) + + mockRegistry.AssertCalled(t, "Unregister", uint32(cmd.Process.Pid)) +} + +func (s *SharedLibrarySuite) TestDetectionWithPIDAndRootNamespace() { + t := s.T() + ebpfCfg := ddebpf.NewConfig() + + _, err := os.Stat("/usr/bin/busybox") + if err != nil { + t.Skip("skip for the moment as some distro are not friendly with busybox package") + } + + tempDir := t.TempDir() + root := filepath.Join(tempDir, "root") + err = os.MkdirAll(root, 0755) + require.NoError(t, err) + + libpath := "/fooroot-crypto.so" + + err = exec.Command("cp", "/usr/bin/busybox", root+"/ash").Run() + require.NoError(t, err) + err = exec.Command("cp", "/usr/bin/busybox", root+"/sleep").Run() + require.NoError(t, err) + + attachCfg := AttacherConfig{ + Rules: []*AttachRule{{ + LibraryNameRegex: regexp.MustCompile(`fooroot-crypto.so`), + Targets: AttachToSharedLibraries, + }}, + EbpfConfig: ebpfCfg, + } + + ua, err := NewUprobeAttacher("test", 
attachCfg, &MockManager{}, nil, nil) + require.NoError(t, err) + + mockRegistry := &MockFileRegistry{} + ua.fileRegistry = mockRegistry + + // Tell mockRegistry to return on any calls, we will check the values later + mockRegistry.On("Clear").Return() + mockRegistry.On("Unregister", mock.Anything).Return(nil) + mockRegistry.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + + require.NoError(t, ua.Start()) + t.Cleanup(ua.Stop) + + time.Sleep(10 * time.Millisecond) + // simulate a slow (1 second) : open, write, close of the file + // in a new pid and mount namespaces + o, err := exec.Command("unshare", "--fork", "--pid", "-R", root, "/ash", "-c", fmt.Sprintf("sleep 1 > %s", libpath)).CombinedOutput() + if err != nil { + t.Log(err, string(o)) + } + require.NoError(t, err) + + time.Sleep(10 * time.Millisecond) + + require.Eventually(t, func() bool { + return methodHasBeenCalledTimes(mockRegistry, "Register", 1) + }, time.Second*10, 100*time.Millisecond, "received calls %v", mockRegistry.Calls) + + // assert that soWatcher detected foo-crypto.so being opened and triggered the callback + foundCall := false + for _, call := range mockRegistry.Calls { + if call.Method == "Register" { + args := call.Arguments + require.True(t, strings.HasSuffix(args[0].(string), libpath)) + foundCall = true + } + } + require.True(t, foundCall) + + // must fail on the host + _, err = os.Stat(libpath) + require.Error(t, err) +} + +func methodHasBeenCalledTimes(registry *MockFileRegistry, methodName string, times int) bool { + calls := 0 + for _, call := range registry.Calls { + if call.Method == methodName { + calls++ + } + } + return calls == times +} + +func methodHasBeenCalledAtLeastTimes(registry *MockFileRegistry, methodName string, times int) bool { + calls := 0 + for _, call := range registry.Calls { + if call.Method == methodName { + calls++ + } + } + return calls >= times +} + +func methodHasBeenCalledWithPredicate(registry 
*MockFileRegistry, methodName string, predicate func(mock.Call) bool) bool { + for _, call := range registry.Calls { + if call.Method == methodName && predicate(call) { + return true + } + } + return false +} diff --git a/pkg/ebpf/uprobes/doc.go b/pkg/ebpf/uprobes/doc.go new file mode 100644 index 0000000000000..887d653ae8d82 --- /dev/null +++ b/pkg/ebpf/uprobes/doc.go @@ -0,0 +1,60 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +/* +Package uprobes contains methods to help handling the attachment of uprobes to +userspace programs + +The main type for this package is the UprobeAttacher type, created with +NewUprobeAttacher. The main configuration it requires is a list of rules that +define how to match the possible targets (shared libraries and/or executables) +and which probes to attach to them. Example usage: + + connectProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"} + mainProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__main"} + + mgr := manager.Manager{} + + attacherCfg := AttacherConfig{ + Rules: []*AttachRule{ + { + LibraryNameRegex: regexp.MustCompile(`libssl.so`), + Targets: AttachToSharedLibraries, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: connectProbeID}, + }, + }, + { + Targets: AttachToExecutable, + ProbesSelector: []manager.ProbesSelector{ + &manager.ProbeSelector{ProbeIdentificationPair: mainProbeID}, + }, + }, + }, + ExcludeTargets: ExcludeInternal | ExcludeSelf, + EbpfConfig: ebpfCfg, + } + + ua, err := NewUprobeAttacher("test", attacherCfg, &mgr, callback, &NativeBinaryInspector{}) + ua.Start() + +Once started, the attacher monitors new processes and `open` calls for new +shared libraries. 
For the first task it uses pkg/process/monitor/ProcessMonitor, +and for the second it uses the shared-libraries program in +pkg/network/usm/sharedlibraries. + +# Notes and things to take into account + + - When adding new probes, be sure to add the corresponding code to + match the libraries in + pkg/network/ebpf/c/shared-libraries/probes.h:do_sys_open_helper_exit, as an + initial filtering is performed there. + + - If multiple rules match a binary file, and we fail to attach the required probes for one of them, + the whole attach operation will be considered as failed, and the probes will be detached. If you want + to control which probes are optional and which are mandatory, you can use the manager.AllOf/manager.BestEffort + selectors in a single rule. +*/ +package uprobes diff --git a/pkg/ebpf/uprobes/inspector.go b/pkg/ebpf/uprobes/inspector.go new file mode 100644 index 0000000000000..16df19346b5e5 --- /dev/null +++ b/pkg/ebpf/uprobes/inspector.go @@ -0,0 +1,135 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package uprobes + +import ( + "debug/elf" + "errors" + "fmt" + "runtime" + + manager "github.com/DataDog/ebpf-manager" + + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" + "github.com/DataDog/datadog-agent/pkg/util/common" +) + +// BinaryInspector implementors are responsible for extracting the metadata required to attach from a binary. +type BinaryInspector interface { + // Inspect returns the metadata required to attach to a binary. The first + // return is a map of symbol names to their corresponding metadata, the + // second return is a boolean indicating whether this binary is compatible + // and can be attached or not. 
It is encouraged to return early if the + // binary is not compatible, to avoid unnecessary work. In the future, the + // first and second return values should be merged into a single struct, but + // for now this allows us to keep the API compatible with the existing + // implementation. + Inspect(fpath utils.FilePath, requests []SymbolRequest) (map[string]bininspect.FunctionMetadata, bool, error) + + // Cleanup is called when a certain file path is not needed anymore, the implementation can clean up + // any resources associated with the file path. + Cleanup(fpath utils.FilePath) +} + +// SymbolRequest represents a request for symbols and associated data from a binary +type SymbolRequest struct { + // Name of the symbol to request + Name string + // BestEffort indicates that the symbol is not mandatory, and the inspector should not return an error if it is not found + BestEffort bool + // IncludeReturnLocations indicates that the inspector should also include the return locations of the function, for manual + // attachment into those return points instead of using uretprobes. + IncludeReturnLocations bool +} + +// NativeBinaryInspector is a BinaryInspector that uses the ELF format to extract the metadata directly from native functions. +type NativeBinaryInspector struct { +} + +// Ensure NativeBinaryInspector implements BinaryInspector +var _ BinaryInspector = &NativeBinaryInspector{} + +// Inspect extracts the metadata required to attach to a binary from the ELF file at the given path. 
+func (p *NativeBinaryInspector) Inspect(fpath utils.FilePath, requests []SymbolRequest) (map[string]bininspect.FunctionMetadata, bool, error) { + path := fpath.HostPath + elfFile, err := elf.Open(path) + if err != nil { + return nil, false, err + } + defer elfFile.Close() + + // This only allows amd64 and arm64 and not the 32-bit variants, but that + // is fine since we don't monitor 32-bit applications at all in the shared + // library watcher since compat syscalls aren't supported by the syscall + // trace points. We do actually monitor 32-bit applications for istio and + // nodejs monitoring, but our uprobe hooks only properly support 64-bit + // applications, so there's no harm in rejecting 32-bit applications here. + arch, err := bininspect.GetArchitecture(elfFile) + if err != nil { + return nil, false, fmt.Errorf("cannot get architecture of %s: %w", path, err) + } + + // Ignore foreign architectures. This can happen when running stuff under + // qemu-user, for example, and installing a uprobe will lead to segfaults + // since the foreign instructions will be patched with the native break + // instruction. 
+ if string(arch) != runtime.GOARCH { + return nil, false, nil + } + + mandatorySymbols := make(common.StringSet, len(requests)) + bestEffortSymbols := make(common.StringSet, len(requests)) + + for _, req := range requests { + if req.BestEffort { + bestEffortSymbols.Add(req.Name) + } else { + mandatorySymbols.Add(req.Name) + } + + if req.IncludeReturnLocations { + return nil, false, errors.New("return locations are not supported by the native binary inspector") + } + } + + symbolMap, err := bininspect.GetAllSymbolsInSetByName(elfFile, mandatorySymbols) + if err != nil { + return nil, false, err + } + /* Best effort to resolve symbols, so we don't care about the error */ + symbolMapBestEffort, _ := bininspect.GetAllSymbolsInSetByName(elfFile, bestEffortSymbols) + + funcMap := make(map[string]bininspect.FunctionMetadata, len(symbolMap)+len(symbolMapBestEffort)) + for _, symMap := range []map[string]elf.Symbol{symbolMap, symbolMapBestEffort} { + for symbolName, symbol := range symMap { + m, err := p.symbolToFuncMetadata(elfFile, symbol) + if err != nil { + return nil, false, fmt.Errorf("failed to convert symbol %s to function metadata: %w", symbolName, err) + } + funcMap[symbolName] = *m + } + } + + return funcMap, true, nil +} + +func (*NativeBinaryInspector) symbolToFuncMetadata(elfFile *elf.File, sym elf.Symbol) (*bininspect.FunctionMetadata, error) { + manager.SanitizeUprobeAddresses(elfFile, []elf.Symbol{sym}) + offset, err := bininspect.SymbolToOffset(elfFile, sym) + if err != nil { + return nil, err + } + + return &bininspect.FunctionMetadata{EntryLocation: uint64(offset)}, nil +} + +// Cleanup is a no-op for the native inspector +func (*NativeBinaryInspector) Cleanup(_ utils.FilePath) { + // Nothing to do here for the native inspector +} diff --git a/pkg/ebpf/uprobes/inspector_test.go b/pkg/ebpf/uprobes/inspector_test.go new file mode 100644 index 0000000000000..56624eaff8673 --- /dev/null +++ b/pkg/ebpf/uprobes/inspector_test.go @@ -0,0 +1,63 @@ +// Unless 
explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package uprobes + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" +) + +func TestNativeBinarySymbolRetrieval(t *testing.T) { + curDir, err := testutil.CurDir() + require.NoError(t, err) + + libmmap := filepath.Join(curDir, "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") + lib := filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) + fpath := utils.FilePath{HostPath: lib} + + allMandatoryExisting := []SymbolRequest{{Name: "SSL_connect"}} + allBestEffortExisting := []SymbolRequest{{Name: "SSL_connect", BestEffort: true}} + mandatoryExistBestEffortDont := []SymbolRequest{{Name: "SSL_connect"}, {Name: "ThisFunctionDoesNotExistEver", BestEffort: true}} + mandatoryNonExisting := []SymbolRequest{{Name: "ThisFunctionDoesNotExistEver"}} + + inspector := &NativeBinaryInspector{} + + t.Run("MandatoryAllExist", func(tt *testing.T) { + result, compat, err := inspector.Inspect(fpath, allMandatoryExisting) + require.NoError(tt, err) + require.True(tt, compat) + require.ElementsMatch(tt, []string{"SSL_connect"}, maps.Keys(result)) + }) + + t.Run("BestEffortAllExist", func(tt *testing.T) { + result, compat, err := inspector.Inspect(fpath, allBestEffortExisting) + require.NoError(tt, err) + require.True(tt, compat) + require.ElementsMatch(tt, []string{"SSL_connect"}, maps.Keys(result)) + }) + + t.Run("BestEffortDontExist", func(tt *testing.T) { + result, compat, err := inspector.Inspect(fpath, mandatoryExistBestEffortDont) + require.NoError(tt, err) + require.True(tt, compat) 
+ require.ElementsMatch(tt, []string{"SSL_connect"}, maps.Keys(result)) + }) + + t.Run("SomeMandatoryDontExist", func(tt *testing.T) { + _, _, err := inspector.Inspect(fpath, mandatoryNonExisting) + require.Error(tt, err, "should have failed to find mandatory symbols") + }) +} diff --git a/pkg/ebpf/uprobes/procfs.go b/pkg/ebpf/uprobes/procfs.go new file mode 100644 index 0000000000000..ff355e2fe9e1b --- /dev/null +++ b/pkg/ebpf/uprobes/procfs.go @@ -0,0 +1,119 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package uprobes + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strconv" + "sync" + "time" +) + +const procFSUpdateTimeout = 100 * time.Millisecond + +// ProcInfo holds the information extracted from procfs, to avoid repeat calls to the filesystem. +type ProcInfo struct { + procRoot string + PID uint32 + exe string + comm string +} + +// NewProcInfo creates a new ProcInfo object. +func NewProcInfo(procRoot string, pid uint32) *ProcInfo { + return &ProcInfo{ + procRoot: procRoot, + PID: pid, + } +} + +// Avoid allocations, reuse the error to mark "iteration start" in the loop +var errIterStart = errors.New("iteration start") + +func waitUntilSucceeds[T any](p *ProcInfo, procFile string, readFunc func(string) (T, error)) (T, error) { + // Read the exe link + pidAsStr := strconv.FormatUint(uint64(p.PID), 10) + filePath := filepath.Join(p.procRoot, pidAsStr, procFile) + + var result T + err := errIterStart + end := time.Now().Add(procFSUpdateTimeout) + + for err != nil && end.After(time.Now()) { + result, err = readFunc(filePath) + if err != nil { + time.Sleep(10 * time.Millisecond) + } + } + + return result, err +} + +// Exe returns the path to the executable of the process. 
+func (p *ProcInfo) Exe() (string, error) { + var err error + if p.exe == "" { + p.exe, err = waitUntilSucceeds(p, "exe", os.Readlink) + if err != nil { + return "", err + } + } + + if p.exe == "" { + return "", errors.New("exe link is empty") + } + + return p.exe, nil +} + +const ( + // Defined in https://man7.org/linux/man-pages/man5/proc.5.html. + taskCommLen = 16 +) + +var ( + taskCommLenBufferPool = sync.Pool{ + New: func() any { + buf := make([]byte, taskCommLen) + return &buf + }, + } +) + +func (p *ProcInfo) readComm(commFile string) (string, error) { + file, err := os.Open(commFile) + if err != nil { + return "", err + } + defer file.Close() + + buf := taskCommLenBufferPool.Get().(*[]byte) + defer taskCommLenBufferPool.Put(buf) + n, err := file.Read(*buf) + if err != nil { + // short living process can hit here, or slow start of another process. + return "", nil + } + return string(bytes.TrimSpace((*buf)[:n])), nil +} + +// Comm returns the command name of the process. +func (p *ProcInfo) Comm() (string, error) { + var err error + if p.comm == "" { + p.comm, err = waitUntilSucceeds(p, "comm", p.readComm) + if err != nil { + return "", err + } + } + + return p.comm, nil +} diff --git a/pkg/ebpf/uprobes/testutil.go b/pkg/ebpf/uprobes/testutil.go new file mode 100644 index 0000000000000..f61c45a8adb02 --- /dev/null +++ b/pkg/ebpf/uprobes/testutil.go @@ -0,0 +1,155 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux_bpf + +package uprobes + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "testing" + + manager "github.com/DataDog/ebpf-manager" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" + "github.com/DataDog/datadog-agent/pkg/network/usm/utils" +) + +// === Mocks + +// MockManager is a mock implementation of the manager.Manager interface. +type MockManager struct { + mock.Mock +} + +// AddHook is a mock implementation of the manager.Manager.AddHook method. +func (m *MockManager) AddHook(name string, probe *manager.Probe) error { + args := m.Called(name, probe) + return args.Error(0) +} + +// DetachHook is a mock implementation of the manager.Manager.DetachHook method. +func (m *MockManager) DetachHook(probeID manager.ProbeIdentificationPair) error { + args := m.Called(probeID) + return args.Error(0) +} + +// GetProbe is a mock implementation of the manager.Manager.GetProbe method. +func (m *MockManager) GetProbe(probeID manager.ProbeIdentificationPair) (*manager.Probe, bool) { + args := m.Called(probeID) + return args.Get(0).(*manager.Probe), args.Bool(1) +} + +// MockFileRegistry is a mock implementation of the FileRegistry interface. +type MockFileRegistry struct { + mock.Mock +} + +// Register is a mock implementation of the FileRegistry.Register method. +func (m *MockFileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered utils.Callback) error { + args := m.Called(namespacedPath, pid, activationCB, deactivationCB) + return args.Error(0) +} + +// Unregister is a mock implementation of the FileRegistry.Unregister method. +func (m *MockFileRegistry) Unregister(pid uint32) error { + args := m.Called(pid) + return args.Error(0) +} + +// Clear is a mock implementation of the FileRegistry.Clear method. 
+func (m *MockFileRegistry) Clear() { + m.Called() +} + +// GetRegisteredProcesses is a mock implementation of the FileRegistry.GetRegisteredProcesses method. +func (m *MockFileRegistry) GetRegisteredProcesses() map[uint32]struct{} { + args := m.Called() + return args.Get(0).(map[uint32]struct{}) +} + +// MockBinaryInspector is a mock implementation of the BinaryInspector interface. +type MockBinaryInspector struct { + mock.Mock +} + +// Inspect is a mock implementation of the BinaryInspector.Inspect method. +func (m *MockBinaryInspector) Inspect(fpath utils.FilePath, requests []SymbolRequest) (map[string]bininspect.FunctionMetadata, bool, error) { + args := m.Called(fpath, requests) + return args.Get(0).(map[string]bininspect.FunctionMetadata), args.Bool(1), args.Error(2) +} + +// Cleanup is a mock implementation of the BinaryInspector.Cleanup method. +func (m *MockBinaryInspector) Cleanup(fpath utils.FilePath) { + _ = m.Called(fpath) +} + +// === Test utils + +// FakeProcFSEntry represents a fake /proc filesystem entry for testing purposes. +type FakeProcFSEntry struct { + Pid uint32 + Cmdline string + Command string + Exe string + Maps string +} + +// CreateFakeProcFS creates a fake /proc filesystem with the given entries, useful for testing attachment to processes. 
+func CreateFakeProcFS(t *testing.T, entries []FakeProcFSEntry) string { + procRoot := t.TempDir() + + for _, entry := range entries { + baseDir := filepath.Join(procRoot, strconv.Itoa(int(entry.Pid))) + + createFile(t, filepath.Join(baseDir, "cmdline"), entry.Cmdline) + createFile(t, filepath.Join(baseDir, "comm"), entry.Command) + createFile(t, filepath.Join(baseDir, "maps"), entry.Maps) + createSymlink(t, entry.Exe, filepath.Join(baseDir, "exe")) + } + + return procRoot +} + +func createFile(t *testing.T, path, data string) { + if data == "" { + return + } + + dir := filepath.Dir(path) + require.NoError(t, os.MkdirAll(dir, 0775)) + require.NoError(t, os.WriteFile(path, []byte(data), 0775)) +} + +func createSymlink(t *testing.T, target, link string) { + if target == "" { + return + } + + dir := filepath.Dir(link) + require.NoError(t, os.MkdirAll(dir, 0775)) + require.NoError(t, os.Symlink(target, link)) +} + +func getLibSSLPath(t *testing.T) string { + curDir, err := testutil.CurDir() + require.NoError(t, err) + + libmmap := filepath.Join(curDir, "..", "..", "network", "usm", "testdata", "site-packages", "ddtrace") + return filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", runtime.GOARCH)) +} + +// SetRegistry allows changing the file registry used by the attacher. 
This is useful for testing purposes, to +// replace the registry with a mock object +func (ua *UprobeAttacher) SetRegistry(registry FileRegistry) { + ua.fileRegistry = registry +} diff --git a/pkg/eventmonitor/config/config.go b/pkg/eventmonitor/config/config.go index 65bffc9f13550..785b1ffd968ef 100644 --- a/pkg/eventmonitor/config/config.go +++ b/pkg/eventmonitor/config/config.go @@ -9,7 +9,7 @@ package config import ( "strings" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -34,8 +34,8 @@ type Config struct { func NewConfig() *Config { return &Config{ // event server - SocketPath: coreconfig.SystemProbe().GetString(join(evNS, "socket")), - EventServerBurst: coreconfig.SystemProbe().GetInt(join(evNS, "event_server.burst")), + SocketPath: pkgconfigsetup.SystemProbe().GetString(join(evNS, "socket")), + EventServerBurst: pkgconfigsetup.SystemProbe().GetInt(join(evNS, "event_server.burst")), // consumers ProcessConsumerEnabled: getBool("process.enabled"), @@ -54,9 +54,9 @@ func getAllKeys(key string) (string, string) { func getBool(key string) bool { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetBool(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetBool(deprecatedKey) } - return coreconfig.SystemProbe().GetBool(newKey) + return pkgconfigsetup.SystemProbe().GetBool(newKey) } diff --git a/pkg/eventmonitor/eventmonitor.go b/pkg/eventmonitor/eventmonitor.go index 1b57d1a9560b0..a0b4a84e2cc9f 100644 --- a/pkg/eventmonitor/eventmonitor.go +++ b/pkg/eventmonitor/eventmonitor.go @@ -21,7 +21,6 @@ import ( "github.com/DataDog/datadog-agent/cmd/system-probe/api/module" 
"github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" procstatsd "github.com/DataDog/datadog-agent/pkg/process/statsd" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -229,7 +228,7 @@ func (m *EventMonitor) GetStats() map[string]interface{} { } // NewEventMonitor instantiates an event monitoring system-probe module -func NewEventMonitor(config *config.Config, secconfig *secconfig.Config, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*EventMonitor, error) { +func NewEventMonitor(config *config.Config, secconfig *secconfig.Config, opts Opts, telemetry telemetry.Component) (*EventMonitor, error) { if opts.StatsdClient == nil { opts.StatsdClient = procstatsd.Client } @@ -238,7 +237,7 @@ func NewEventMonitor(config *config.Config, secconfig *secconfig.Config, opts Op opts.ProbeOpts.StatsdClient = opts.StatsdClient } - probe, err := probe.NewProbe(secconfig, opts.ProbeOpts, wmeta, telemetry) + probe, err := probe.NewProbe(secconfig, opts.ProbeOpts, telemetry) if err != nil { return nil, err } diff --git a/pkg/eventmonitor/testutil/testutil.go b/pkg/eventmonitor/testutil/testutil.go index 74bded51f3498..83e1083ad2659 100644 --- a/pkg/eventmonitor/testutil/testutil.go +++ b/pkg/eventmonitor/testutil/testutil.go @@ -15,11 +15,8 @@ import ( "github.com/stretchr/testify/require" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/eventmonitor" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" secconfig 
"github.com/DataDog/datadog-agent/pkg/security/config" @@ -43,11 +40,7 @@ func StartEventMonitor(t *testing.T, callback PreStartCallback) { opts := eventmonitor.Opts{} telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) - wmeta := fxutil.Test[workloadmeta.Component](t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, wmeta, telemetry) + evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, telemetry) require.NoError(t, err) require.NoError(t, evm.Init()) callback(t, evm) diff --git a/pkg/flare/archive.go b/pkg/flare/archive.go index 59024af02be5e..393fa43497798 100644 --- a/pkg/flare/archive.go +++ b/pkg/flare/archive.go @@ -27,7 +27,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/api/security" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -45,6 +45,11 @@ var cnfFileExtRx = regexp.MustCompile(`(?i)\.ya?ml`) // searchPaths is a list of path where to look for checks configurations type searchPaths map[string]string +// getProcessAPIAddress is an Alias to GetProcessAPIAddressPort using Datadog config +func getProcessAPIAddressPort() (string, error) { + return pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) +} + // CompleteFlare packages up the files with an already created builder. This is aimed to be used by the flare // component while we migrate to a component architecture. 
func CompleteFlare(fb flaretypes.FlareBuilder, diagnoseDeps diagnose.SuitesDeps) error { @@ -62,51 +67,51 @@ func CompleteFlare(fb flaretypes.FlareBuilder, diagnoseDeps diagnose.SuitesDeps) fb.AddFileFromFunc("tagger-list.json", getAgentTaggerList) //nolint:errcheck fb.AddFileFromFunc("workload-list.log", getAgentWorkloadList) //nolint:errcheck fb.AddFileFromFunc("process-agent_tagger-list.json", getProcessAgentTaggerList) //nolint:errcheck - if !config.Datadog().GetBool("process_config.run_in_core_agent.enabled") { - getChecksFromProcessAgent(fb, config.GetProcessAPIAddressPort) + if !pkgconfigsetup.Datadog().GetBool("process_config.run_in_core_agent.enabled") { + getChecksFromProcessAgent(fb, getProcessAPIAddressPort) } } - fb.RegisterFilePerm(security.GetAuthTokenFilepath(config.Datadog())) + fb.RegisterFilePerm(security.GetAuthTokenFilepath(pkgconfigsetup.Datadog())) - systemProbeConfigBPFDir := config.SystemProbe().GetString("system_probe_config.bpf_dir") + systemProbeConfigBPFDir := pkgconfigsetup.SystemProbe().GetString("system_probe_config.bpf_dir") if systemProbeConfigBPFDir != "" { fb.RegisterDirPerm(systemProbeConfigBPFDir) } addSystemProbePlatformSpecificEntries(fb) - if config.SystemProbe().GetBool("system_probe_config.enabled") { + if pkgconfigsetup.SystemProbe().GetBool("system_probe_config.enabled") { fb.AddFileFromFunc(filepath.Join("expvar", "system-probe"), getSystemProbeStats) //nolint:errcheck } pprofURL := fmt.Sprintf("http://127.0.0.1:%s/debug/pprof/goroutine?debug=2", - config.Datadog().GetString("expvar_port")) - - fb.AddFileFromFunc("process_agent_runtime_config_dump.yaml", getProcessAgentFullConfig) //nolint:errcheck - fb.AddFileFromFunc("runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(config.Datadog().AllSettings()) }) //nolint:errcheck - fb.AddFileFromFunc("system_probe_runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(config.SystemProbe().AllSettings()) }) //nolint:errcheck - 
fb.AddFileFromFunc("diagnose.log", getDiagnoses(fb.IsLocal(), diagnoseDeps)) //nolint:errcheck - fb.AddFileFromFunc("envvars.log", getEnvVars) //nolint:errcheck - fb.AddFileFromFunc("health.yaml", getHealth) //nolint:errcheck - fb.AddFileFromFunc("go-routine-dump.log", func() ([]byte, error) { return getHTTPCallContent(pprofURL) }) //nolint:errcheck - fb.AddFileFromFunc("docker_inspect.log", func() ([]byte, error) { return getDockerSelfInspect(diagnoseDeps.GetWMeta()) }) //nolint:errcheck - fb.AddFileFromFunc("docker_ps.log", getDockerPs) //nolint:errcheck - fb.AddFileFromFunc("k8s/kubelet_config.yaml", getKubeletConfig) //nolint:errcheck - fb.AddFileFromFunc("k8s/kubelet_pods.yaml", getKubeletPods) //nolint:errcheck - fb.AddFileFromFunc("ecs_metadata.json", getECSMeta) //nolint:errcheck + pkgconfigsetup.Datadog().GetString("expvar_port")) + + fb.AddFileFromFunc("process_agent_runtime_config_dump.yaml", getProcessAgentFullConfig) //nolint:errcheck + fb.AddFileFromFunc("runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(pkgconfigsetup.Datadog().AllSettings()) }) //nolint:errcheck + fb.AddFileFromFunc("system_probe_runtime_config_dump.yaml", func() ([]byte, error) { return yaml.Marshal(pkgconfigsetup.SystemProbe().AllSettings()) }) //nolint:errcheck + fb.AddFileFromFunc("diagnose.log", getDiagnoses(fb.IsLocal(), diagnoseDeps)) //nolint:errcheck + fb.AddFileFromFunc("envvars.log", getEnvVars) //nolint:errcheck + fb.AddFileFromFunc("health.yaml", getHealth) //nolint:errcheck + fb.AddFileFromFunc("go-routine-dump.log", func() ([]byte, error) { return getHTTPCallContent(pprofURL) }) //nolint:errcheck + fb.AddFileFromFunc("docker_inspect.log", func() ([]byte, error) { return getDockerSelfInspect(diagnoseDeps.GetWMeta()) }) //nolint:errcheck + fb.AddFileFromFunc("docker_ps.log", getDockerPs) //nolint:errcheck + fb.AddFileFromFunc("k8s/kubelet_config.yaml", getKubeletConfig) //nolint:errcheck + fb.AddFileFromFunc("k8s/kubelet_pods.yaml", getKubeletPods) 
//nolint:errcheck + fb.AddFileFromFunc("ecs_metadata.json", getECSMeta) //nolint:errcheck getRegistryJSON(fb) getVersionHistory(fb) - fb.CopyFile(installinfo.GetFilePath(config.Datadog())) //nolint:errcheck + fb.CopyFile(installinfo.GetFilePath(pkgconfigsetup.Datadog())) //nolint:errcheck getExpVar(fb) //nolint:errcheck getWindowsData(fb) - telemetryURL := fmt.Sprintf("http://127.0.0.1:%s/telemetry", config.Datadog().GetString("expvar_port")) + telemetryURL := fmt.Sprintf("http://127.0.0.1:%s/telemetry", pkgconfigsetup.Datadog().GetString("expvar_port")) fb.AddFileFromFunc("telemetry.log", func() ([]byte, error) { return getHTTPCallContent(telemetryURL) }) //nolint:errcheck - if config.IsRemoteConfigEnabled(config.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { if err := exportRemoteConfig(fb); err != nil { log.Errorf("Could not export remote-config state: %s", err) } @@ -115,11 +120,11 @@ func CompleteFlare(fb flaretypes.FlareBuilder, diagnoseDeps diagnose.SuitesDeps) } func getVersionHistory(fb flaretypes.FlareBuilder) { - fb.CopyFile(filepath.Join(config.Datadog().GetString("run_path"), "version-history.json")) //nolint:errcheck + fb.CopyFile(filepath.Join(pkgconfigsetup.Datadog().GetString("run_path"), "version-history.json")) //nolint:errcheck } func getRegistryJSON(fb flaretypes.FlareBuilder) { - fb.CopyFile(filepath.Join(config.Datadog().GetString("logs_config.run_path"), "registry.json")) //nolint:errcheck + fb.CopyFile(filepath.Join(pkgconfigsetup.Datadog().GetString("logs_config.run_path"), "registry.json")) //nolint:errcheck } func getLogFiles(fb flaretypes.FlareBuilder, logFileDir string) { @@ -156,7 +161,7 @@ func getExpVar(fb flaretypes.FlareBuilder) error { } } - apmDebugPort := config.Datadog().GetInt("apm_config.debug.port") + apmDebugPort := pkgconfigsetup.Datadog().GetInt("apm_config.debug.port") f := filepath.Join("expvar", "trace-agent") resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/debug/vars", 
apmDebugPort)) if err != nil { @@ -184,7 +189,7 @@ func getExpVar(fb flaretypes.FlareBuilder) error { func getSystemProbeStats() ([]byte, error) { // TODO: (components) - Temporary until we can use the status component to extract the system probe status from it. stats := map[string]interface{}{} - systemprobeStatus.GetStatus(stats, config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + systemprobeStatus.GetStatus(stats, pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) sysProbeBuf, err := yaml.Marshal(stats["systemProbeStats"]) if err != nil { return nil, err @@ -195,7 +200,7 @@ func getSystemProbeStats() ([]byte, error) { // getProcessAgentFullConfig fetches process-agent runtime config as YAML and returns it to be added to process_agent_runtime_config_dump.yaml func getProcessAgentFullConfig() ([]byte, error) { - addressPort, err := config.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("wrong configuration to connect to process-agent") } @@ -226,8 +231,8 @@ func getConfigFiles(fb flaretypes.FlareBuilder, confSearchPaths map[string]strin }) } - if config.Datadog().ConfigFileUsed() != "" { - mainConfpath := config.Datadog().ConfigFileUsed() + if pkgconfigsetup.Datadog().ConfigFileUsed() != "" { + mainConfpath := pkgconfigsetup.Datadog().ConfigFileUsed() confDir := filepath.Dir(mainConfpath) // zip up the config file that was actually used, if one exists @@ -253,7 +258,7 @@ func getChecksFromProcessAgent(fb flaretypes.FlareBuilder, getAddressPort func() getCheck := func(checkName, setting string) { filename := fmt.Sprintf("%s_check_output.json", checkName) - if !config.Datadog().GetBool(setting) { + if !pkgconfigsetup.Datadog().GetBool(setting) { fb.AddFile(filename, []byte(fmt.Sprintf("'%s' is disabled", setting))) //nolint:errcheck return } @@ -313,18 +318,18 @@ func getDiagnoses(isFlareLocal bool, deps 
diagnose.SuitesDeps) func() ([]byte, e } func getAgentTaggerList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - taggerListURL := fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, config.Datadog().GetInt("cmd_port")) + taggerListURL := fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) return getTaggerList(taggerListURL) } func getProcessAgentTaggerList() ([]byte, error) { - addressPort, err := config.GetProcessAPIAddressPort() + addressPort, err := pkgconfigsetup.GetProcessAPIAddressPort(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("wrong configuration to connect to process-agent") } @@ -354,12 +359,12 @@ func getTaggerList(remoteURL string) ([]byte, error) { } func getAgentWorkloadList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - return getWorkloadList(fmt.Sprintf("https://%v:%v/agent/workload-list?verbose=true", ipcAddress, config.Datadog().GetInt("cmd_port"))) + return getWorkloadList(fmt.Sprintf("https://%v:%v/agent/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port"))) } func getWorkloadList(url string) ([]byte, error) { diff --git a/pkg/flare/archive_dca.go b/pkg/flare/archive_dca.go index 6cf4a37a32d4a..f031efb4a5ac7 100644 --- a/pkg/flare/archive_dca.go +++ b/pkg/flare/archive_dca.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/render" 
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -36,7 +36,7 @@ func CreateDCAArchive(local bool, distPath, logFilePath string, pdata ProfileDat } confSearchPaths := map[string]string{ - "": config.Datadog().GetString("confd_path"), + "": pkgconfigsetup.Datadog().GetString("confd_path"), "dist": filepath.Join(distPath, "conf.d"), } @@ -75,14 +75,14 @@ func createDCAArchive(fb flaretypes.FlareBuilder, confSearchPaths map[string]str fb.AddFileFromFunc("workload-list.log", getDCAWorkloadList) //nolint:errcheck getPerformanceProfileDCA(fb, pdata) - if config.Datadog().GetBool("external_metrics_provider.enabled") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enabled") { getHPAStatus(fb) //nolint:errcheck } } // QueryDCAMetrics gets the metrics payload exposed by the cluster agent func QueryDCAMetrics() ([]byte, error) { - r, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", config.Datadog().GetInt("metrics_port"))) + r, err := http.Get(fmt.Sprintf("http://localhost:%d/metrics", pkgconfigsetup.Datadog().GetInt("metrics_port"))) if err != nil { return nil, err } @@ -168,23 +168,23 @@ func getClusterAgentDiagnose(fb flaretypes.FlareBuilder) error { } func getDCATaggerList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - taggerListURL := fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, config.Datadog().GetInt("cluster_agent.cmd_port")) + taggerListURL := fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) return getTaggerList(taggerListURL) } func getDCAWorkloadList() ([]byte, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } - return 
getWorkloadList(fmt.Sprintf("https://%v:%v/workload-list?verbose=true", ipcAddress, config.Datadog().GetInt("cluster_agent.cmd_port"))) + return getWorkloadList(fmt.Sprintf("https://%v:%v/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"))) } func getPerformanceProfileDCA(fb flaretypes.FlareBuilder, pdata ProfileData) { diff --git a/pkg/flare/archive_linux.go b/pkg/flare/archive_linux.go index 0374d1688bf69..ac03dd5d4ad3d 100644 --- a/pkg/flare/archive_linux.go +++ b/pkg/flare/archive_linux.go @@ -18,11 +18,11 @@ import ( "github.com/DataDog/ebpf-manager/tracefs" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func addSystemProbePlatformSpecificEntries(fb flaretypes.FlareBuilder) { - sysprobeSocketLocation := config.SystemProbe().GetString("system_probe_config.sysprobe_socket") + sysprobeSocketLocation := pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket") if sysprobeSocketLocation != "" { fb.RegisterDirPerm(filepath.Dir(sysprobeSocketLocation)) } diff --git a/pkg/flare/archive_security.go b/pkg/flare/archive_security.go index bcdb281c20ea8..85545b8c0b139 100644 --- a/pkg/flare/archive_security.go +++ b/pkg/flare/archive_security.go @@ -11,7 +11,7 @@ import ( flarehelpers "github.com/DataDog/datadog-agent/comp/core/flare/helpers" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -61,7 +61,7 @@ func createSecurityAgentArchive(fb flaretypes.FlareBuilder, logFilePath string, } func getComplianceFiles(fb flaretypes.FlareBuilder) error { - compDir := config.Datadog().GetString("compliance_config.dir") + compDir := 
pkgconfigsetup.Datadog().GetString("compliance_config.dir") return fb.CopyDirTo(compDir, "compliance.d", func(path string) bool { f, err := os.Lstat(path) @@ -73,7 +73,7 @@ func getComplianceFiles(fb flaretypes.FlareBuilder) error { } func getRuntimeFiles(fb flaretypes.FlareBuilder) error { - runtimeDir := config.SystemProbe().GetString("runtime_security_config.policies.dir") + runtimeDir := pkgconfigsetup.SystemProbe().GetString("runtime_security_config.policies.dir") return fb.CopyDirTo(runtimeDir, "runtime-security.d", func(path string) bool { f, err := os.Lstat(path) diff --git a/pkg/flare/archive_test.go b/pkg/flare/archive_test.go index d58d877579119..6e6036d2ddee6 100644 --- a/pkg/flare/archive_test.go +++ b/pkg/flare/archive_test.go @@ -26,7 +26,6 @@ import ( flarehelpers "github.com/DataDog/datadog-agent/comp/core/flare/helpers" "github.com/DataDog/datadog-agent/comp/core/tagger/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" model "github.com/DataDog/datadog-agent/pkg/config/model" ) @@ -302,7 +301,7 @@ func TestProcessAgentChecks(t *testing.T) { setupIPCAddress(t, configmock.New(t), srv.URL) mock := flarehelpers.NewFlareBuilderMock(t, false) - getChecksFromProcessAgent(mock.Fb, config.GetProcessAPIAddressPort) + getChecksFromProcessAgent(mock.Fb, getProcessAPIAddressPort) mock.AssertFileContent(string(expectedProcessesJSON), "process_check_output.json") mock.AssertFileContent(string(expectedContainersJSON), "container_check_output.json") diff --git a/pkg/flare/cluster_checks.go b/pkg/flare/cluster_checks.go index 68a004f465389..47108857ab8e2 100644 --- a/pkg/flare/cluster_checks.go +++ b/pkg/flare/cluster_checks.go @@ -17,18 +17,18 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" "github.com/DataDog/datadog-agent/pkg/api/util" 
"github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // GetClusterChecks dumps the clustercheck dispatching state to the writer func GetClusterChecks(w io.Writer, checkName string) error { - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks", config.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := fmt.Sprintf("https://localhost:%v/api/v1/clusterchecks", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) if w != color.Output { color.NoColor = true } - if !config.Datadog().GetBool("cluster_checks.enabled") { + if !pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") { fmt.Fprintln(w, "Cluster-checks are not enabled") return nil } @@ -36,7 +36,7 @@ func GetClusterChecks(w io.Writer, checkName string) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken(config.Datadog()) + err := util.SetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } @@ -114,7 +114,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { return nil } - urlstr := fmt.Sprintf("https://localhost:%v/api/v1/endpointschecks/configs", config.Datadog().GetInt("cluster_agent.cmd_port")) + urlstr := fmt.Sprintf("https://localhost:%v/api/v1/endpointschecks/configs", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) if w != color.Output { color.NoColor = true @@ -123,7 +123,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - if err := util.SetAuthToken(config.Datadog()); err != nil { + if err := util.SetAuthToken(pkgconfigsetup.Datadog()); err != nil { return err } @@ -153,7 +153,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { } func endpointschecksEnabled() bool { - for _, provider := range 
config.Datadog().GetStringSlice("extra_config_providers") { + for _, provider := range pkgconfigsetup.Datadog().GetStringSlice("extra_config_providers") { if provider == names.KubeEndpointsRegisterName { return true } diff --git a/pkg/flare/config_check.go b/pkg/flare/config_check.go index a575043366852..6d6bfca1970b8 100644 --- a/pkg/flare/config_check.go +++ b/pkg/flare/config_check.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/api/util" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // GetClusterAgentConfigCheck gets config check from the server for cluster agent @@ -24,14 +24,14 @@ func GetClusterAgentConfigCheck(w io.Writer, withDebug bool) error { c := util.GetClient(false) // FIX: get certificates right then make this true // Set session token - err := util.SetAuthToken(config.Datadog()) + err := util.SetAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } targetURL := url.URL{ Scheme: "https", - Host: fmt.Sprintf("localhost:%v", config.Datadog().GetInt("cluster_agent.cmd_port")), + Host: fmt.Sprintf("localhost:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")), Path: "config-check", } diff --git a/pkg/flare/envvars.go b/pkg/flare/envvars.go index 183e2e2861429..7f53fcbc2ceef 100644 --- a/pkg/flare/envvars.go +++ b/pkg/flare/envvars.go @@ -11,7 +11,7 @@ import ( "os" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) var allowedEnvvarNames = []string{ @@ -138,7 +138,7 @@ var allowedEnvvarNames = []string{ func getAllowedEnvvars() []string { allowed := allowedEnvvarNames - allowed = append(allowed, config.Datadog().GetEnvVars()...) + allowed = append(allowed, pkgconfigsetup.Datadog().GetEnvVars()...) 
var found []string for _, envvar := range os.Environ() { parts := strings.SplitN(envvar, "=", 2) diff --git a/pkg/flare/flare.go b/pkg/flare/flare.go index 0f384f0d9f895..4fa84cdada489 100644 --- a/pkg/flare/flare.go +++ b/pkg/flare/flare.go @@ -7,13 +7,13 @@ package flare import ( "github.com/DataDog/datadog-agent/comp/core/flare/helpers" - "github.com/DataDog/datadog-agent/pkg/config" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ) // SendFlare sends a flare and returns the message returned by the backend. This entry point is deprecated in favor of // the 'Send' method of the flare component. func SendFlare(cfg pkgconfigmodel.Reader, archivePath string, caseID string, email string, source helpers.FlareSource) (string, error) { - return helpers.SendTo(cfg, archivePath, caseID, email, config.Datadog().GetString("api_key"), utils.GetInfraEndpoint(config.Datadog()), source) + return helpers.SendTo(cfg, archivePath, caseID, email, pkgconfigsetup.Datadog().GetString("api_key"), utils.GetInfraEndpoint(pkgconfigsetup.Datadog()), source) } diff --git a/pkg/flare/remote_config.go b/pkg/flare/remote_config.go index 17350cbf0ae3e..12094ea947fb9 100644 --- a/pkg/flare/remote_config.go +++ b/pkg/flare/remote_config.go @@ -23,7 +23,7 @@ import ( flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util" agentgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -36,7 +36,7 @@ func exportRemoteConfig(fb flaretypes.FlareBuilder) error { } // Dump the state - token, err := security.FetchAuthToken(config.Datadog()) + token, err := 
security.FetchAuthToken(pkgconfigsetup.Datadog()) if err != nil { return fmt.Errorf("couldn't get auth token: %v", err) } @@ -47,12 +47,12 @@ func exportRemoteConfig(fb flaretypes.FlareBuilder) error { } ctx = metadata.NewOutgoingContext(ctx, md) - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, config.GetIPCPort()) + cli, err := agentgrpc.GetDDAgentSecureClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } @@ -64,7 +64,7 @@ func exportRemoteConfig(fb flaretypes.FlareBuilder) error { } var haState *pbgo.GetStateConfigResponse - if config.Datadog().GetBool("multi_region_failover.enabled") { + if pkgconfigsetup.Datadog().GetBool("multi_region_failover.enabled") { if haState, err = cli.GetConfigStateHA(ctx, in); err != nil { return fmt.Errorf("couldn't get the MRF repositories state: %v", err) } @@ -97,7 +97,7 @@ func hashRCTargets(raw []byte) []byte { func getRemoteConfigDB(fb flaretypes.FlareBuilder) error { dstPath, _ := fb.PrepareFilePath("remote-config.db") tempPath, _ := fb.PrepareFilePath("remote-config.temp.db") - srcPath := filepath.Join(config.Datadog().GetString("run_path"), "remote-config.db") + srcPath := filepath.Join(pkgconfigsetup.Datadog().GetString("run_path"), "remote-config.db") // Copies the db so it avoids bbolt from being locked // Also avoid concurrent modifications diff --git a/pkg/fleet/bootstraper/bootstraper.go b/pkg/fleet/bootstraper/bootstraper.go index 453b00afd6821..932469b1153d8 100644 --- a/pkg/fleet/bootstraper/bootstraper.go +++ b/pkg/fleet/bootstraper/bootstraper.go @@ -9,6 +9,7 @@ package bootstraper import ( "context" "fmt" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" "os" "github.com/DataDog/datadog-agent/pkg/fleet/env" @@ -17,23 +18,23 @@ import ( "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" ) -const ( - 
installerPackage = "datadog-installer" -) - // Bootstrap bootstraps the installer and uses it to install the default packages. func Bootstrap(ctx context.Context, env *env.Env) error { version := "latest" - if env.DefaultPackagesVersionOverride[installerPackage] != "" { - version = env.DefaultPackagesVersionOverride[installerPackage] + if env.DefaultPackagesVersionOverride[bootstrap.InstallerPackage] != "" { + version = env.DefaultPackagesVersionOverride[bootstrap.InstallerPackage] } - installerURL := oci.PackageURL(env, installerPackage, version) + installerURL := oci.PackageURL(env, bootstrap.InstallerPackage, version) err := bootstrap.Install(ctx, env, installerURL) if err != nil { return fmt.Errorf("failed to bootstrap the installer: %w", err) } + return InstallDefaultPackages(ctx, env) +} - cmd := exec.NewInstallerExec(env, exec.StableInstallerPath) +// InstallDefaultPackages installs the default packages. +func InstallDefaultPackages(ctx context.Context, env *env.Env) error { + cmd := exec.NewInstallerExec(env, paths.StableInstallerPath) defaultPackages, err := cmd.DefaultPackages(ctx) if err != nil { return fmt.Errorf("failed to get default packages: %w", err) diff --git a/pkg/fleet/daemon/daemon.go b/pkg/fleet/daemon/daemon.go index beeb8aafdfa60..cff2045dc36f0 100644 --- a/pkg/fleet/daemon/daemon.go +++ b/pkg/fleet/daemon/daemon.go @@ -94,15 +94,19 @@ func NewDaemon(rcFetcher client.ConfigFetcher, config config.Reader) (Daemon, er } env := env.FromConfig(config) installer := newInstaller(env, installerBin) - return newDaemon(rc, installer, env), nil + cdn, err := cdn.New(env, "opt/datadog-packages/run/rc/daemon") + if err != nil { + return nil, err + } + return newDaemon(rc, installer, env, cdn), nil } -func newDaemon(rc *remoteConfig, installer installer.Installer, env *env.Env) *daemonImpl { +func newDaemon(rc *remoteConfig, installer installer.Installer, env *env.Env, cdn *cdn.CDN) *daemonImpl { i := &daemonImpl{ env: env, rc: rc, installer: installer, 
- cdn: cdn.New(env), + cdn: cdn, requests: make(chan remoteAPIRequest, 32), catalog: catalog{}, stopChan: make(chan struct{}), @@ -233,6 +237,7 @@ func (d *daemonImpl) Stop(_ context.Context) error { defer d.m.Unlock() d.rc.Close() close(d.stopChan) + d.cdn.Close() d.requestsWG.Wait() return nil } @@ -371,8 +376,18 @@ func (d *daemonImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error if err != nil { return fmt.Errorf("could not get installer state: %w", err) } + + c, err := d.installer.ConfigState(request.Package) + if err != nil { + return fmt.Errorf("could not get installer config state: %w", err) + } + versionEqual := request.ExpectedState.InstallerVersion == "" || version.AgentVersion == request.ExpectedState.InstallerVersion - if versionEqual && s.Stable != request.ExpectedState.Stable || s.Experiment != request.ExpectedState.Experiment { + if versionEqual && + (s.Stable != request.ExpectedState.Stable || + s.Experiment != request.ExpectedState.Experiment || + c.Stable != request.ExpectedState.StableConfig || + c.Experiment != request.ExpectedState.ExperimentConfig) { log.Infof("remote request %s not executed as state does not match: expected %v, got %v", request.ID, request.ExpectedState, s) setRequestInvalid(ctx) d.refreshState(ctx) diff --git a/pkg/fleet/daemon/daemon_test.go b/pkg/fleet/daemon/daemon_test.go index 1aa78c95d4b4e..1301c58c598f6 100644 --- a/pkg/fleet/daemon/daemon_test.go +++ b/pkg/fleet/daemon/daemon_test.go @@ -22,6 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/fleet/env" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/version" @@ -100,6 +101,11 @@ func (m *testPackageManager) UninstrumentAPMInjector(ctx context.Context, method return args.Error(0) } +func (m *testPackageManager) 
Close() error { + args := m.Called() + return args.Error(0) +} + type testRemoteConfigClient struct { sync.Mutex t *testing.T @@ -186,8 +192,13 @@ func newTestInstaller(t *testing.T) *testInstaller { pm.On("ConfigStates").Return(map[string]repository.State{}, nil) rcc := newTestRemoteConfigClient(t) rc := &remoteConfig{client: rcc} + env := &env.Env{RemoteUpdates: true} + cdn, err := cdn.New(env, t.TempDir()) + require.NoError(t, err) + daemon := newDaemon(rc, pm, env, cdn) + require.NoError(t, err) i := &testInstaller{ - daemonImpl: newDaemon(rc, pm, &env.Env{RemoteUpdates: true}), + daemonImpl: daemon, rcc: rcc, pm: pm, } @@ -310,10 +321,11 @@ func TestRemoteRequest(t *testing.T) { ID: "test-request-1", Method: methodStartExperiment, Package: testExperimentPackage.Name, - ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version}, + ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, StableConfig: testStablePackage.Version}, Params: versionParamsJSON, } i.pm.On("State", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() + i.pm.On("ConfigState", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() i.pm.On("InstallExperiment", mock.Anything, testExperimentPackage.URL).Return(nil).Once() i.rcc.SubmitRequest(testRequest) i.requestsWG.Wait() @@ -322,9 +334,10 @@ func TestRemoteRequest(t *testing.T) { ID: "test-request-2", Method: methodStopExperiment, Package: testExperimentPackage.Name, - ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, + ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version, StableConfig: testStablePackage.Version}, } i.pm.On("State", 
testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, nil).Once() + i.pm.On("ConfigState", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() i.pm.On("RemoveExperiment", mock.Anything, testExperimentPackage.Name).Return(nil).Once() i.rcc.SubmitRequest(testRequest) i.requestsWG.Wait() @@ -333,9 +346,10 @@ func TestRemoteRequest(t *testing.T) { ID: "test-request-3", Method: methodPromoteExperiment, Package: testExperimentPackage.Name, - ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, + ExpectedState: expectedState{InstallerVersion: version.AgentVersion, Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version, StableConfig: testStablePackage.Version}, } i.pm.On("State", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version, Experiment: testExperimentPackage.Version}, nil).Once() + i.pm.On("ConfigState", testStablePackage.Name).Return(repository.State{Stable: testStablePackage.Version}, nil).Once() i.pm.On("PromoteExperiment", mock.Anything, testExperimentPackage.Name).Return(nil).Once() i.rcc.SubmitRequest(testRequest) i.requestsWG.Wait() diff --git a/pkg/fleet/daemon/remote_config.go b/pkg/fleet/daemon/remote_config.go index d68b8489ad62e..377ab00282620 100644 --- a/pkg/fleet/daemon/remote_config.go +++ b/pkg/fleet/daemon/remote_config.go @@ -183,6 +183,8 @@ type expectedState struct { InstallerVersion string `json:"installer_version"` Stable string `json:"stable"` Experiment string `json:"experiment"` + StableConfig string `json:"stable_config"` + ExperimentConfig string `json:"experiment_config"` } type taskWithVersionParams struct { diff --git a/pkg/fleet/env/env.go b/pkg/fleet/env/env.go index 780382c143c30..b11a38048e489 100644 --- a/pkg/fleet/env/env.go +++ b/pkg/fleet/env/env.go @@ -12,7 +12,7 @@ 
import ( "slices" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/config/utils" ) @@ -23,6 +23,8 @@ const ( envRemotePolicies = "DD_REMOTE_POLICIES" envRegistryURL = "DD_INSTALLER_REGISTRY_URL" envRegistryAuth = "DD_INSTALLER_REGISTRY_AUTH" + envRegistryUsername = "DD_INSTALLER_REGISTRY_USERNAME" + envRegistryPassword = "DD_INSTALLER_REGISTRY_PASSWORD" envDefaultPackageVersion = "DD_INSTALLER_DEFAULT_PKG_VERSION" envDefaultPackageInstall = "DD_INSTALLER_DEFAULT_PKG_INSTALL" envApmLibraries = "DD_APM_INSTRUMENTATION_LIBRARIES" @@ -38,8 +40,12 @@ var defaultEnv = Env{ RegistryOverride: "", RegistryAuthOverride: "", + RegistryUsername: "", + RegistryPassword: "", RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, @@ -64,8 +70,12 @@ type Env struct { RegistryOverride string RegistryAuthOverride string + RegistryUsername string + RegistryPassword string RegistryOverrideByImage map[string]string RegistryAuthOverrideByImage map[string]string + RegistryUsernameByImage map[string]string + RegistryPasswordByImage map[string]string DefaultPackagesInstallOverride map[string]bool DefaultPackagesVersionOverride map[string]string @@ -88,8 +98,12 @@ func FromEnv() *Env { RegistryOverride: getEnvOrDefault(envRegistryURL, defaultEnv.RegistryOverride), RegistryAuthOverride: getEnvOrDefault(envRegistryAuth, defaultEnv.RegistryAuthOverride), + RegistryUsername: getEnvOrDefault(envRegistryUsername, defaultEnv.RegistryUsername), + RegistryPassword: getEnvOrDefault(envRegistryPassword, defaultEnv.RegistryPassword), RegistryOverrideByImage: overridesByNameFromEnv(envRegistryURL, func(s string) string { return s }), RegistryAuthOverrideByImage: 
overridesByNameFromEnv(envRegistryAuth, func(s string) string { return s }), + RegistryUsernameByImage: overridesByNameFromEnv(envRegistryUsername, func(s string) string { return s }), + RegistryPasswordByImage: overridesByNameFromEnv(envRegistryPassword, func(s string) string { return s }), DefaultPackagesInstallOverride: overridesByNameFromEnv(envDefaultPackageInstall, func(s string) bool { return strings.ToLower(s) == "true" }), DefaultPackagesVersionOverride: overridesByNameFromEnv(envDefaultPackageVersion, func(s string) string { return s }), @@ -104,7 +118,7 @@ func FromEnv() *Env { } // FromConfig returns an Env struct with values from the configuration. -func FromConfig(config config.Reader) *Env { +func FromConfig(config model.Reader) *Env { return &Env{ APIKey: utils.SanitizeAPIKey(config.GetString("api_key")), Site: config.GetString("site"), @@ -112,6 +126,8 @@ func FromConfig(config config.Reader) *Env { RemotePolicies: config.GetBool("remote_policies"), RegistryOverride: config.GetString("installer.registry.url"), RegistryAuthOverride: config.GetString("installer.registry.auth"), + RegistryUsername: config.GetString("installer.registry.username"), + RegistryPassword: config.GetString("installer.registry.password"), } } @@ -136,6 +152,12 @@ func (e *Env) ToEnv() []string { if e.RegistryAuthOverride != "" { env = append(env, envRegistryAuth+"="+e.RegistryAuthOverride) } + if e.RegistryUsername != "" { + env = append(env, envRegistryUsername+"="+e.RegistryUsername) + } + if e.RegistryPassword != "" { + env = append(env, envRegistryPassword+"="+e.RegistryPassword) + } if len(e.ApmLibraries) > 0 { libraries := []string{} for l, v := range e.ApmLibraries { @@ -150,6 +172,8 @@ func (e *Env) ToEnv() []string { } env = append(env, overridesByNameToEnv(envRegistryURL, e.RegistryOverrideByImage)...) env = append(env, overridesByNameToEnv(envRegistryAuth, e.RegistryAuthOverrideByImage)...) 
+ env = append(env, overridesByNameToEnv(envRegistryUsername, e.RegistryUsernameByImage)...) + env = append(env, overridesByNameToEnv(envRegistryPassword, e.RegistryPasswordByImage)...) env = append(env, overridesByNameToEnv(envDefaultPackageInstall, e.DefaultPackagesInstallOverride)...) env = append(env, overridesByNameToEnv(envDefaultPackageVersion, e.DefaultPackagesVersionOverride)...) return env diff --git a/pkg/fleet/env/env_test.go b/pkg/fleet/env/env_test.go index b4e86c46e3f93..9c42a96f99983 100644 --- a/pkg/fleet/env/env_test.go +++ b/pkg/fleet/env/env_test.go @@ -25,8 +25,12 @@ func TestFromEnv(t *testing.T) { Site: "datadoghq.com", RegistryOverride: "", RegistryAuthOverride: "", + RegistryUsername: "", + RegistryPassword: "", RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, ApmLibraries: map[ApmLibLanguage]ApmLibVersion{}, @@ -44,10 +48,16 @@ func TestFromEnv(t *testing.T) { envRemotePolicies: "true", envRegistryURL: "registry.example.com", envRegistryAuth: "auth", + envRegistryUsername: "username", + envRegistryPassword: "password", envRegistryURL + "_IMAGE": "another.registry.example.com", envRegistryURL + "_ANOTHER_IMAGE": "yet.another.registry.example.com", envRegistryAuth + "_IMAGE": "another.auth", envRegistryAuth + "_ANOTHER_IMAGE": "yet.another.auth", + envRegistryUsername + "_IMAGE": "another.username", + envRegistryUsername + "_ANOTHER_IMAGE": "yet.another.username", + envRegistryPassword + "_IMAGE": "another.password", + envRegistryPassword + "_ANOTHER_IMAGE": "yet.another.password", envDefaultPackageInstall + "_PACKAGE": "true", envDefaultPackageInstall + "_ANOTHER_PACKAGE": "false", envDefaultPackageVersion + "_PACKAGE": "1.2.3", @@ -62,6 +72,8 @@ func TestFromEnv(t *testing.T) { RemotePolicies: 
true, RegistryOverride: "registry.example.com", RegistryAuthOverride: "auth", + RegistryUsername: "username", + RegistryPassword: "password", RegistryOverrideByImage: map[string]string{ "image": "another.registry.example.com", "another-image": "yet.another.registry.example.com", @@ -70,6 +82,14 @@ func TestFromEnv(t *testing.T) { "image": "another.auth", "another-image": "yet.another.auth", }, + RegistryUsernameByImage: map[string]string{ + "image": "another.username", + "another-image": "yet.another.username", + }, + RegistryPasswordByImage: map[string]string{ + "image": "another.password", + "another-image": "yet.another.password", + }, DefaultPackagesInstallOverride: map[string]bool{ "package": true, "another-package": false, @@ -100,6 +120,8 @@ func TestFromEnv(t *testing.T) { RegistryAuthOverride: "", RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, ApmLibraries: map[ApmLibLanguage]ApmLibVersion{ @@ -133,6 +155,8 @@ func TestFromEnv(t *testing.T) { }, RegistryOverrideByImage: map[string]string{}, RegistryAuthOverrideByImage: map[string]string{}, + RegistryUsernameByImage: map[string]string{}, + RegistryPasswordByImage: map[string]string{}, DefaultPackagesInstallOverride: map[string]bool{}, DefaultPackagesVersionOverride: map[string]string{}, }, @@ -171,6 +195,8 @@ func TestToEnv(t *testing.T) { RemotePolicies: true, RegistryOverride: "registry.example.com", RegistryAuthOverride: "auth", + RegistryUsername: "username", + RegistryPassword: "password", RegistryOverrideByImage: map[string]string{ "image": "another.registry.example.com", "another-image": "yet.another.registry.example.com", @@ -179,6 +205,14 @@ func TestToEnv(t *testing.T) { "image": "another.auth", "another-image": "yet.another.auth", }, + 
RegistryUsernameByImage: map[string]string{ + "image": "another.username", + "another-image": "yet.another.username", + }, + RegistryPasswordByImage: map[string]string{ + "image": "another.password", + "another-image": "yet.another.password", + }, DefaultPackagesInstallOverride: map[string]bool{ "package": true, "another-package": false, @@ -200,11 +234,17 @@ func TestToEnv(t *testing.T) { "DD_REMOTE_POLICIES=true", "DD_INSTALLER_REGISTRY_URL=registry.example.com", "DD_INSTALLER_REGISTRY_AUTH=auth", + "DD_INSTALLER_REGISTRY_USERNAME=username", + "DD_INSTALLER_REGISTRY_PASSWORD=password", "DD_APM_INSTRUMENTATION_LIBRARIES=dotnet:latest,java,ruby:1.2", "DD_INSTALLER_REGISTRY_URL_IMAGE=another.registry.example.com", "DD_INSTALLER_REGISTRY_URL_ANOTHER_IMAGE=yet.another.registry.example.com", "DD_INSTALLER_REGISTRY_AUTH_IMAGE=another.auth", "DD_INSTALLER_REGISTRY_AUTH_ANOTHER_IMAGE=yet.another.auth", + "DD_INSTALLER_REGISTRY_USERNAME_IMAGE=another.username", + "DD_INSTALLER_REGISTRY_USERNAME_ANOTHER_IMAGE=yet.another.username", + "DD_INSTALLER_REGISTRY_PASSWORD_IMAGE=another.password", + "DD_INSTALLER_REGISTRY_PASSWORD_ANOTHER_IMAGE=yet.another.password", "DD_INSTALLER_DEFAULT_PKG_INSTALL_PACKAGE=true", "DD_INSTALLER_DEFAULT_PKG_INSTALL_ANOTHER_PACKAGE=false", "DD_INSTALLER_DEFAULT_PKG_VERSION_PACKAGE=1.2.3", diff --git a/pkg/fleet/installer/default_packages.go b/pkg/fleet/installer/default_packages.go index dad23e5ce3241..27e767ad6758d 100644 --- a/pkg/fleet/installer/default_packages.go +++ b/pkg/fleet/installer/default_packages.go @@ -40,7 +40,7 @@ var apmPackageDefaultVersions = map[string]string{ "datadog-apm-library-java": "1", "datadog-apm-library-ruby": "2", "datadog-apm-library-js": "5", - "datadog-apm-library-dotnet": "2", + "datadog-apm-library-dotnet": "3", "datadog-apm-library-python": "2", "datadog-apm-library-php": "1", } diff --git a/pkg/fleet/installer/default_packages_test.go b/pkg/fleet/installer/default_packages_test.go index 
1707fff0a5609..c1d9734a68e4b 100644 --- a/pkg/fleet/installer/default_packages_test.go +++ b/pkg/fleet/installer/default_packages_test.go @@ -34,7 +34,7 @@ func TestDefaultPackagesAPMInjectEnabled(t *testing.T) { "oci://gcr.io/datadoghq/apm-library-java-package:1", "oci://gcr.io/datadoghq/apm-library-ruby-package:2", "oci://gcr.io/datadoghq/apm-library-js-package:5", - "oci://gcr.io/datadoghq/apm-library-dotnet-package:2", + "oci://gcr.io/datadoghq/apm-library-dotnet-package:3", "oci://gcr.io/datadoghq/apm-library-python-package:2", }, packages) } diff --git a/pkg/fleet/installer/installer.go b/pkg/fleet/installer/installer.go index 6e910d161f71f..27557b0bbd376 100644 --- a/pkg/fleet/installer/installer.go +++ b/pkg/fleet/installer/installer.go @@ -64,6 +64,8 @@ type Installer interface { InstrumentAPMInjector(ctx context.Context, method string) error UninstrumentAPMInjector(ctx context.Context, method string) error + + Close() error } // installerImpl is the implementation of the package manager. @@ -82,7 +84,7 @@ type installerImpl struct { } // NewInstaller returns a new Package Manager. 
-func NewInstaller(env *env.Env) (Installer, error) { +func NewInstaller(env *env.Env, configDBPath string) (Installer, error) { err := ensureRepositoriesExist() if err != nil { return nil, fmt.Errorf("could not ensure packages and config directory exists: %w", err) @@ -91,9 +93,13 @@ func NewInstaller(env *env.Env) (Installer, error) { if err != nil { return nil, fmt.Errorf("could not create packages db: %w", err) } + cdn, err := cdn.New(env, configDBPath) + if err != nil { + return nil, fmt.Errorf("could not create CDN client: %w", err) + } return &installerImpl{ env: env, - cdn: cdn.New(env), + cdn: cdn, db: db, downloader: oci.NewDownloader(env, http.DefaultClient), packages: repository.NewRepositories(paths.PackagesPath, paths.LocksPath), @@ -396,6 +402,11 @@ func (i *installerImpl) UninstrumentAPMInjector(ctx context.Context, method stri return nil } +// Close cleans up the Installer's dependencies +func (i *installerImpl) Close() error { + return i.cdn.Close() +} + func (i *installerImpl) startExperiment(ctx context.Context, pkg string) error { switch pkg { case packageDatadogAgent: diff --git a/pkg/fleet/installer/service/commands.go b/pkg/fleet/installer/service/commands.go deleted file mode 100644 index dc9472d61a368..0000000000000 --- a/pkg/fleet/installer/service/commands.go +++ /dev/null @@ -1,45 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build !windows - -// Package service provides a way to interact with os services -package service - -import ( - "bytes" - "context" - "fmt" - "os/exec" - "strings" -) - -type commandRunner interface { - runWithError() error -} - -type realCmd struct { - *exec.Cmd -} - -func (r *realCmd) runWithError() error { - var errBuf bytes.Buffer - r.Stderr = &errBuf - err := r.Cmd.Run() - if err == nil { - return nil - } - - if len(errBuf.Bytes()) == 0 { - return fmt.Errorf("command failed: %s", err.Error()) - } - - return fmt.Errorf("command failed: %s \n%s", strings.TrimSpace(errBuf.String()), err.Error()) -} - -func newCommandRunner(ctx context.Context, name string, args ...string) commandRunner { - cmd := exec.CommandContext(ctx, name, args...) - return &realCmd{Cmd: cmd} -} diff --git a/pkg/fleet/installer/service/datadog_agent.go b/pkg/fleet/installer/service/datadog_agent.go index 30a69271d59d9..9a7f067323c70 100644 --- a/pkg/fleet/installer/service/datadog_agent.go +++ b/pkg/fleet/installer/service/datadog_agent.go @@ -75,7 +75,7 @@ func SetupAgent(ctx context.Context, _ []string) (err error) { span, ctx := tracer.StartSpanFromContext(ctx, "setup_agent") defer func() { if err != nil { - log.Errorf("Failed to setup agent: %s, reverting", err) + log.Errorf("Failed to setup agent, reverting: %s", err) err = errors.Join(err, RemoveAgent(ctx)) } span.Finish(tracer.WithError(err)) @@ -195,11 +195,6 @@ func stopOldAgentUnits(ctx context.Context) error { defer span.Finish() for _, unit := range stableUnits { if err := stopUnit(ctx, unit); err != nil { - exitError, ok := err.(*exec.ExitError) - if ok && exitError.ExitCode() == 5 { - // exit code 5 means the unit is not loaded, we can continue - continue - } return fmt.Errorf("failed to stop %s: %v", unit, err) } if err := disableUnit(ctx, unit); err != nil { diff --git a/pkg/fleet/installer/service/datadog_agent_windows.go b/pkg/fleet/installer/service/datadog_agent_windows.go index 736b39b5eaef7..5b5b9097cb56f 100644 
--- a/pkg/fleet/installer/service/datadog_agent_windows.go +++ b/pkg/fleet/installer/service/datadog_agent_windows.go @@ -10,6 +10,7 @@ package service import ( "context" + "fmt" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" "github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -31,7 +32,7 @@ func SetupAgent(ctx context.Context, args []string) (err error) { }() // Make sure there are no Agent already installed _ = removeProduct("Datadog Agent") - err = msiexec("stable", datadogAgent, "/i", args) + err = installAgentPackage("stable", args) return err } @@ -44,8 +45,18 @@ func StartAgentExperiment(ctx context.Context) (err error) { } span.Finish(tracer.WithError(err)) }() - err = msiexec("experiment", datadogAgent, "/i", nil) - return err + + err = removeAgentIfInstalled(ctx) + if err != nil { + return err + } + + err = installAgentPackage("experiment", nil) + if err != nil { + // experiment failed, expect stop-experiment to restore the stable Agent + return err + } + return nil } // StopAgentExperiment stops the agent experiment, i.e. removes/uninstalls it. @@ -57,14 +68,19 @@ func StopAgentExperiment(ctx context.Context) (err error) { } span.Finish(tracer.WithError(err)) }() - err = msiexec("experiment", datadogAgent, "/x", nil) + + err = removeAgentIfInstalled(ctx) if err != nil { return err } - // TODO: Need args here to restore DDAGENTUSER - err = msiexec("stable", datadogAgent, "/i", nil) - return err + err = installAgentPackage("stable", nil) + if err != nil { + // if we cannot restore the stable Agent, the system is left without an Agent + return err + } + + return nil } // PromoteAgentExperiment promotes the agent experiment @@ -78,6 +94,8 @@ func RemoveAgent(ctx context.Context) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "remove_agent") defer func() { if err != nil { + // removal failed, this should rarely happen. 
+ // Rollback might have restored the Agent, but we can't be sure. log.Errorf("Failed to remove agent: %s", err) } span.Finish(tracer.WithError(err)) @@ -90,3 +108,24 @@ func RemoveAgent(ctx context.Context) (err error) { func ConfigureAgent(_ context.Context, _ *cdn.CDN, _ *repository.Repositories) error { return nil } + +func installAgentPackage(target string, args []string) error { + // TODO: Need args here to restore DDAGENTUSER + err := msiexec(target, datadogAgent, "/i", args) + if err != nil { + return fmt.Errorf("failed to install Agent %s: %w", target, err) + } + return nil +} + +func removeAgentIfInstalled(ctx context.Context) error { + if isProductInstalled("Datadog Agent") { + err := RemoveAgent(ctx) + if err != nil { + return err + } + } else { + log.Debugf("Agent not installed") + } + return nil +} diff --git a/pkg/fleet/installer/service/datadog_installer.go b/pkg/fleet/installer/service/datadog_installer.go index c410a2b2fd32e..17eff00d3c3f1 100644 --- a/pkg/fleet/installer/service/datadog_installer.go +++ b/pkg/fleet/installer/service/datadog_installer.go @@ -44,7 +44,7 @@ func addDDAgentGroup(ctx context.Context) error { func SetupInstaller(ctx context.Context) (err error) { defer func() { if err != nil { - log.Errorf("Failed to setup installer: %s, reverting", err) + log.Errorf("Failed to setup installer, reverting: %s", err) err = RemoveInstaller(ctx) } }() diff --git a/pkg/fleet/installer/service/msiexec.go b/pkg/fleet/installer/service/msiexec.go index 287ccdd6ace6c..4e5ed5897021c 100644 --- a/pkg/fleet/installer/service/msiexec.go +++ b/pkg/fleet/installer/service/msiexec.go @@ -10,6 +10,7 @@ package service import ( "fmt" "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" + "github.com/DataDog/datadog-agent/pkg/util/log" "golang.org/x/sys/windows/registry" "os/exec" "path/filepath" @@ -84,6 +85,7 @@ func processKey(rootPath, key, name string) (*Product, error) { // reflect the installed version, and using those installers can lead 
to undefined behavior (either failure to uninstall, // or weird bugs from uninstalling a product with an installer from a different version). func removeProduct(productName string) error { + log.Debugf("Removing product %s", productName) product, err := findProductCode(productName) if err != nil { return fmt.Errorf("error trying to find product %s: %w", productName, err) @@ -94,3 +96,11 @@ func removeProduct(productName string) error { } return fmt.Errorf("product %s not found", productName) } + +func isProductInstalled(productName string) bool { + product, err := findProductCode(productName) + if err != nil { + return false + } + return product != nil +} diff --git a/pkg/fleet/installer/service/systemd.go b/pkg/fleet/installer/service/systemd.go index 9242d465dd815..264394b6858f2 100644 --- a/pkg/fleet/installer/service/systemd.go +++ b/pkg/fleet/installer/service/systemd.go @@ -10,8 +10,10 @@ package service import ( "context" + "errors" "fmt" "os" + "os/exec" "path" "path/filepath" @@ -22,39 +24,78 @@ import ( const systemdPath = "/etc/systemd/system" -func stopUnit(ctx context.Context, unit string, args ...string) error { +func stopUnit(ctx context.Context, unit string, args ...string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "stop_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) args = append([]string{"stop", unit}, args...) 
- return newCommandRunner(ctx, "systemctl", args...).runWithError() + err = exec.CommandContext(ctx, "systemctl", args...).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + if exitErr.ExitCode() == 5 { + // exit code 5 means the unit is not loaded, we can continue + return nil + } + return errors.New(string(exitErr.Stderr)) } -func startUnit(ctx context.Context, unit string, args ...string) error { +func startUnit(ctx context.Context, unit string, args ...string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "start_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) args = append([]string{"start", unit}, args...) - return newCommandRunner(ctx, "systemctl", args...).runWithError() + err = exec.CommandContext(ctx, "systemctl", args...).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + return errors.New(string(exitErr.Stderr)) } -func enableUnit(ctx context.Context, unit string) error { +func enableUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "enable_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) - return newCommandRunner(ctx, "systemctl", "enable", unit).runWithError() + err = exec.CommandContext(ctx, "systemctl", "enable", unit).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + return errors.New(string(exitErr.Stderr)) } -func disableUnit(ctx context.Context, unit string) error { +func disableUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "disable_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) - return 
newCommandRunner(ctx, "systemctl", "disable", unit).runWithError() + + enabledErr := exec.CommandContext(ctx, "systemctl", "is-enabled", "--quiet", unit).Run() + if enabledErr != nil { + // unit is already disabled or doesn't exist, we can return fast + return nil + } + + err = exec.CommandContext(ctx, "systemctl", "disable", unit).Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + if exitErr.ExitCode() == 5 { + // exit code 5 means the unit is not loaded, we can continue + return nil + } + return errors.New(string(exitErr.Stderr)) } -func loadUnit(ctx context.Context, unit string) error { +func loadUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "load_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) content, err := embedded.FS.ReadFile(unit) if err != nil { @@ -64,17 +105,23 @@ func loadUnit(ctx context.Context, unit string) error { return os.WriteFile(unitPath, content, 0644) } -func removeUnit(ctx context.Context, unit string) error { +func removeUnit(ctx context.Context, unit string) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "remove_unit") - defer span.Finish() + defer func() { span.Finish(tracer.WithError(err)) }() span.SetTag("unit", unit) return os.Remove(path.Join(systemdPath, unit)) } -func systemdReload(ctx context.Context) error { +func systemdReload(ctx context.Context) (err error) { span, _ := tracer.StartSpanFromContext(ctx, "systemd_reload") - defer span.Finish() - return newCommandRunner(ctx, "systemctl", "daemon-reload").runWithError() + defer func() { span.Finish(tracer.WithError(err)) }() + err = exec.CommandContext(ctx, "systemctl", "daemon-reload").Run() + exitErr := &exec.ExitError{} + if !errors.As(err, &exitErr) { + return err + } + span.SetTag("exit_code", exitErr.ExitCode()) + return errors.New(string(exitErr.Stderr)) } // 
isSystemdRunning checks if systemd is running using the documented way diff --git a/pkg/fleet/internal/bootstrap/bootstrap.go b/pkg/fleet/internal/bootstrap/bootstrap.go index f34d43764d61a..e16f0cd9d603e 100644 --- a/pkg/fleet/internal/bootstrap/bootstrap.go +++ b/pkg/fleet/internal/bootstrap/bootstrap.go @@ -8,21 +8,13 @@ package bootstrap import ( "context" - "fmt" - "net/http" - "os" - "path/filepath" - "github.com/DataDog/datadog-agent/pkg/fleet/env" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" ) const ( - installerPackage = "datadog-installer" + // InstallerPackage is the name of the Datadog Installer OCI package + InstallerPackage = "datadog-installer" installerBinPath = "bin/installer/installer" - - rootTmpDir = "/opt/datadog-installer/tmp" ) // Install self-installs the installer package from the given URL. @@ -34,63 +26,3 @@ func Install(ctx context.Context, env *env.Env, url string) error { func InstallExperiment(ctx context.Context, env *env.Env, url string) error { return install(ctx, env, url, true) } - -func install(ctx context.Context, env *env.Env, url string, experiment bool) error { - err := os.MkdirAll(rootTmpDir, 0755) - if err != nil { - return fmt.Errorf("failed to create temporary directory: %w", err) - } - tmpDir, err := os.MkdirTemp(rootTmpDir, "") - if err != nil { - return fmt.Errorf("failed to create temporary directory: %w", err) - } - defer os.RemoveAll(tmpDir) - cmd, err := downloadInstaller(ctx, env, url, tmpDir) - if err != nil { - return fmt.Errorf("failed to download installer: %w", err) - } - if experiment { - return cmd.InstallExperiment(ctx, url) - } - return cmd.Install(ctx, url, nil) -} - -// downloadInstaller downloads the installer package from the registry and returns an installer executor. -// -// This process is made to have the least assumption possible as it is long lived and should always work in the future. -// 1. 
Download the installer package from the registry. -// 2. Export the installer image as an OCI layout on the disk. -// 3. Extract the installer image layers on the disk. -// 4. Create an installer executor from the extract layer. -func downloadInstaller(ctx context.Context, env *env.Env, url string, tmpDir string) (*exec.InstallerExec, error) { - // 1. Download the installer package from the registry. - downloader := oci.NewDownloader(env, http.DefaultClient) - downloadedPackage, err := downloader.Download(ctx, url) - if err != nil { - return nil, fmt.Errorf("failed to download installer package: %w", err) - } - if downloadedPackage.Name != installerPackage { - return nil, fmt.Errorf("unexpected package name: %s, expected %s", downloadedPackage.Name, installerPackage) - } - - // 2. Export the installer image as an OCI layout on the disk. - layoutTmpDir, err := os.MkdirTemp(rootTmpDir, "") - if err != nil { - return nil, fmt.Errorf("failed to create temporary directory: %w", err) - } - defer os.RemoveAll(layoutTmpDir) - err = downloadedPackage.WriteOCILayout(layoutTmpDir) - if err != nil { - return nil, fmt.Errorf("failed to write OCI layout: %w", err) - } - - // 3. Extract the installer image layers on the disk. - err = downloadedPackage.ExtractLayers(oci.DatadogPackageLayerMediaType, tmpDir) - if err != nil { - return nil, fmt.Errorf("failed to extract layers: %w", err) - } - - // 4. Create an installer executor from the extract layer. - installerBinPath := filepath.Join(tmpDir, installerBinPath) - return exec.NewInstallerExec(env, installerBinPath), nil -} diff --git a/pkg/fleet/internal/bootstrap/bootstrap_nix.go b/pkg/fleet/internal/bootstrap/bootstrap_nix.go new file mode 100644 index 0000000000000..83b0276814933 --- /dev/null +++ b/pkg/fleet/internal/bootstrap/bootstrap_nix.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +// Package bootstrap provides logic to self-bootstrap the installer. +package bootstrap + +import ( + "context" + "fmt" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" + "net/http" + "os" + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/fleet/env" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" +) + +func install(ctx context.Context, env *env.Env, url string, experiment bool) error { + err := os.MkdirAll(paths.RootTmpDir, 0755) + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + tmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + cmd, err := downloadInstaller(ctx, env, url, tmpDir) + if err != nil { + return fmt.Errorf("failed to download installer: %w", err) + } + if experiment { + return cmd.InstallExperiment(ctx, url) + } + return cmd.Install(ctx, url, nil) +} + +// downloadInstaller downloads the installer package from the registry and returns an installer executor. +// +// This process is made to have the least assumption possible as it is long lived and should always work in the future. +// 1. Download the installer package from the registry. +// 2. Export the installer image as an OCI layout on the disk. +// 3. Extract the installer image layers on the disk. +// 4. Create an installer executor from the extract layer. +func downloadInstaller(ctx context.Context, env *env.Env, url string, tmpDir string) (*exec.InstallerExec, error) { + // 1. Download the installer package from the registry. 
+ downloader := oci.NewDownloader(env, http.DefaultClient) + downloadedPackage, err := downloader.Download(ctx, url) + if err != nil { + return nil, fmt.Errorf("failed to download installer package: %w", err) + } + if downloadedPackage.Name != InstallerPackage { + return nil, fmt.Errorf("unexpected package name: %s, expected %s", downloadedPackage.Name, InstallerPackage) + } + + // 2. Export the installer image as an OCI layout on the disk. + layoutTmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(layoutTmpDir) + err = downloadedPackage.WriteOCILayout(layoutTmpDir) + if err != nil { + return nil, fmt.Errorf("failed to write OCI layout: %w", err) + } + + // 3. Extract the installer image layers on the disk. + err = downloadedPackage.ExtractLayers(oci.DatadogPackageLayerMediaType, tmpDir) + if err != nil { + return nil, fmt.Errorf("failed to extract layers: %w", err) + } + + // 4. Create an installer executor from the extract layer. + installerBinPath := filepath.Join(tmpDir, installerBinPath) + return exec.NewInstallerExec(env, installerBinPath), nil +} diff --git a/pkg/fleet/internal/bootstrap/bootstrap_windows.go b/pkg/fleet/internal/bootstrap/bootstrap_windows.go new file mode 100644 index 0000000000000..43e1f64b430ac --- /dev/null +++ b/pkg/fleet/internal/bootstrap/bootstrap_windows.go @@ -0,0 +1,85 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build windows + +// Package bootstrap provides logic to self-bootstrap the installer. 
+package bootstrap + +import ( + "context" + "fmt" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" + "net/http" + "os" + "os/exec" + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/fleet/env" + iexec "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" + "github.com/DataDog/datadog-agent/pkg/fleet/internal/oci" +) + +func install(ctx context.Context, env *env.Env, url string, experiment bool) error { + err := os.MkdirAll(paths.RootTmpDir, 0755) + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + tmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + cmd, err := downloadInstaller(ctx, env, url, tmpDir) + if err != nil { + return fmt.Errorf("failed to download installer: %w", err) + } + if experiment { + return cmd.InstallExperiment(ctx, url) + } + return cmd.Install(ctx, url, nil) +} + +// downloadInstaller downloads the installer package from the registry and returns the path to the executable. 
+func downloadInstaller(ctx context.Context, env *env.Env, url string, tmpDir string) (*iexec.InstallerExec, error) { + downloader := oci.NewDownloader(env, http.DefaultClient) + downloadedPackage, err := downloader.Download(ctx, url) + if err != nil { + return nil, fmt.Errorf("failed to download installer package: %w", err) + } + if downloadedPackage.Name != InstallerPackage { + return nil, fmt.Errorf("unexpected package name: %s, expected %s", downloadedPackage.Name, InstallerPackage) + } + + layoutTmpDir, err := os.MkdirTemp(paths.RootTmpDir, "") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(layoutTmpDir) + err = downloadedPackage.WriteOCILayout(layoutTmpDir) + if err != nil { + return nil, fmt.Errorf("failed to write OCI layout: %w", err) + } + + err = downloadedPackage.ExtractLayers(oci.DatadogPackageLayerMediaType, tmpDir) + if err != nil { + return nil, fmt.Errorf("failed to extract layers: %w", err) + } + + msis, err := filepath.Glob(filepath.Join(tmpDir, "datadog-installer-*-1-x86_64.msi")) + if err != nil { + return nil, err + } + if len(msis) > 1 { + return nil, fmt.Errorf("too many MSIs in package") + } else if len(msis) == 0 { + return nil, fmt.Errorf("no MSIs in package") + } + err = exec.Command("msiexec", "/i", msis[0], "/qn", "MSIFASTINSTALL=7").Run() + if err != nil { + return nil, fmt.Errorf("failed to install the Datadog Installer") + } + return iexec.NewInstallerExec(env, paths.StableInstallerPath), nil +} diff --git a/pkg/fleet/internal/cdn/cdn.go b/pkg/fleet/internal/cdn/cdn.go index 3689ee2e67f24..a37edc23a9375 100644 --- a/pkg/fleet/internal/cdn/cdn.go +++ b/pkg/fleet/internal/cdn/cdn.go @@ -10,22 +10,14 @@ import ( "context" "encoding/json" "fmt" - "regexp" - "time" - - "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" - detectenv 
"github.com/DataDog/datadog-agent/pkg/config/env" - "github.com/DataDog/datadog-agent/pkg/config/model" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/fleet/env" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - pkghostname "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/version" - "github.com/google/uuid" + "github.com/DataDog/go-tuf/data" "go.uber.org/multierr" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + "regexp" ) var datadogConfigIDRegexp = regexp.MustCompile(`^datadog/\d+/AGENT_CONFIG/([^/]+)/[^/]+$`) @@ -34,7 +26,8 @@ const configOrderID = "configuration_order" // CDN provides access to the Remote Config CDN. type CDN struct { - env *env.Env + client *remoteconfig.HTTPClient + currentRootsVersion uint64 } // Config represents the configuration from the CDN. @@ -50,15 +43,26 @@ type orderConfig struct { } // New creates a new CDN. -func New(env *env.Env) *CDN { - return &CDN{ - env: env, +func New(env *env.Env, configDBPath string) (*CDN, error) { + client, err := remoteconfig.NewHTTPClient( + configDBPath, + env.Site, + env.APIKey, + version.AgentVersion, + ) + if err != nil { + return nil, err } + + return &CDN{ + client: client, + currentRootsVersion: 1, + }, nil } // Get gets the configuration from the CDN. func (c *CDN) Get(ctx context.Context) (_ *Config, err error) { - span, ctx := tracer.StartSpanFromContext(ctx, "cdn.Get") + span, _ := tracer.StartSpanFromContext(ctx, "cdn.Get") defer func() { span.Finish(tracer.WithError(err)) }() configLayers, err := c.getOrderedLayers(ctx) if err != nil { @@ -67,70 +71,71 @@ func (c *CDN) Get(ctx context.Context) (_ *Config, err error) { return newConfig(configLayers...) 
} +// Close cleans up the CDN's resources +func (c *CDN) Close() error { + return c.client.Close() +} + // getOrderedLayers calls the Remote Config service to get the ordered layers. -// Today it doesn't use the CDN, but it should in the future func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { - // HACK(baptiste): Create a dedicated one-shot RC service just for the configuration - // We should use the CDN instead - config := pkgconfigsetup.Datadog() - detectenv.DetectFeatures(config) - hostname, err := pkghostname.Get(ctx) - if err != nil { - return nil, err - } - options := []remoteconfig.Option{ - remoteconfig.WithAPIKey(c.env.APIKey), - remoteconfig.WithConfigRootOverride(c.env.Site, ""), - remoteconfig.WithDirectorRootOverride(c.env.Site, ""), - remoteconfig.WithDatabaseFileName("remote-config-cdn-tmp"), - } - service, err := remoteconfig.NewService( - config, - "Datadog Installer", - fmt.Sprintf("https://config.%s", c.env.Site), - hostname, - getHostTags(ctx, config), - &rctelemetryreporterimpl.DdRcTelemetryReporter{}, // No telemetry for this client - version.AgentVersion, - options..., + agentConfigUpdate, err := c.client.GetCDNConfigUpdate( + ctx, + []string{"AGENT_CONFIG"}, + // Always send 0 since we are relying on the CDN cache state instead of our own tracer cache. This will fetch the latest configs from the cache/CDN everytime. + 0, + // Not using the roots; send the highest seen version of roots so don't received them all on every request + c.currentRootsVersion, + // Not using a client cache; fetch all the applicable target files every time. 
+ []*pbgo.TargetFileMeta{}, ) if err != nil { return nil, err } - service.Start() - defer func() { _ = service.Stop() }() - // Force a cache bypass - cfgs, err := service.ClientGetConfigs(ctx, &pbgo.ClientGetConfigsRequest{ - Client: &pbgo.Client{ - Id: uuid.New().String(), - Products: []string{"AGENT_CONFIG"}, - IsUpdater: true, - ClientUpdater: &pbgo.ClientUpdater{}, - State: &pbgo.ClientState{ - RootVersion: 1, - TargetsVersion: 1, - }, - }, - }) - if err != nil { - return nil, err + + orderedLayers := []*layer{} + if agentConfigUpdate == nil { + return orderedLayers, nil + } + + // Update CDN root versions + for _, root := range agentConfigUpdate.TUFRoots { + var signedRoot data.Signed + err = json.Unmarshal(root, &signedRoot) + if err != nil { + continue + } + var r data.Root + err = json.Unmarshal(signedRoot.Signed, &r) + if err != nil { + continue + } + if uint64(r.Version) > c.currentRootsVersion { + c.currentRootsVersion = uint64(r.Version) + } } // Unmarshal RC results configLayers := map[string]*layer{} var configOrder *orderConfig var layersErr error - for _, file := range cfgs.TargetFiles { - matched := datadogConfigIDRegexp.FindStringSubmatch(file.GetPath()) + paths := agentConfigUpdate.ClientConfigs + targetFiles := agentConfigUpdate.TargetFiles + for _, path := range paths { + matched := datadogConfigIDRegexp.FindStringSubmatch(path) if len(matched) != 2 { - layersErr = multierr.Append(layersErr, fmt.Errorf("invalid config path: %s", file.GetPath())) + layersErr = multierr.Append(layersErr, fmt.Errorf("invalid config path: %s", path)) continue } configName := matched[1] + file, ok := targetFiles[path] + if !ok { + layersErr = multierr.Append(layersErr, fmt.Errorf("missing expected target file in update response: %s", path)) + continue + } if configName != configOrderID { configLayer := &layer{} - err = json.Unmarshal(file.GetRaw(), configLayer) + err = json.Unmarshal(file, configLayer) if err != nil { // If a layer is wrong, fail later to parse the 
rest and check them all layersErr = multierr.Append(layersErr, err) @@ -139,7 +144,7 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { configLayers[configName] = configLayer } else { configOrder = &orderConfig{} - err = json.Unmarshal(file.GetRaw(), configOrder) + err = json.Unmarshal(file, configOrder) if err != nil { // Return first - we can't continue without the order return nil, err @@ -154,7 +159,6 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { if configOrder == nil { return nil, fmt.Errorf("no configuration_order found") } - orderedLayers := []*layer{} for _, configName := range configOrder.Order { if configLayer, ok := configLayers[configName]; ok { orderedLayers = append(orderedLayers, configLayer) @@ -163,17 +167,3 @@ func (c *CDN) getOrderedLayers(ctx context.Context) ([]*layer, error) { return orderedLayers, nil } - -func getHostTags(ctx context.Context, config model.Config) func() []string { - return func() []string { - // Host tags are cached on host, but we add a timeout to avoid blocking the RC request - // if the host tags are not available yet and need to be fetched. They will be fetched - // by the first agent metadata V5 payload. - ctx, cc := context.WithTimeout(ctx, time.Second) - defer cc() - hostTags := hosttags.Get(ctx, true, config) - tags := append(hostTags.System, hostTags.GoogleCloudPlatform...) - tags = append(tags, "installer:true") - return tags - } -} diff --git a/pkg/fleet/internal/exec/installer_exec.go b/pkg/fleet/internal/exec/installer_exec.go index 6ac122dfdfa1e..d7908028ba881 100644 --- a/pkg/fleet/internal/exec/installer_exec.go +++ b/pkg/fleet/internal/exec/installer_exec.go @@ -12,6 +12,7 @@ import ( "fmt" "os" "os/exec" + "runtime" "strings" "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" @@ -23,13 +24,6 @@ import ( "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" ) -const ( - // StableInstallerPath is the path to the stable installer binary. 
- StableInstallerPath = "/opt/datadog-packages/datadog-installer/stable/bin/installer/installer" - // ExperimentInstallerPath is the path to the experiment installer binary. - ExperimentInstallerPath = "/opt/datadog-packages/datadog-installer/experiment/bin/installer/installer" -) - // InstallerExec is an implementation of the Installer interface that uses the installer binary. type InstallerExec struct { env *env.Env @@ -56,8 +50,12 @@ func (i *InstallerExec) newInstallerCmd(ctx context.Context, command string, arg span.SetTag("args", args) cmd := exec.CommandContext(ctx, i.installerBinPath, append([]string{command}, args...)...) env = append(os.Environ(), env...) - cmd.Cancel = func() error { - return cmd.Process.Signal(os.Interrupt) + if runtime.GOOS != "windows" { + // os.Interrupt is not support on Windows + // It gives " run failed: exec: canceling Cmd: not supported by windows" + cmd.Cancel = func() error { + return cmd.Process.Signal(os.Interrupt) + } } env = append(env, telemetry.EnvFromSpanContext(span.Context())...) cmd.Env = env @@ -195,6 +193,11 @@ func (i *InstallerExec) ConfigStates() (map[string]repository.State, error) { return states, err } +// Close cleans up any resources. +func (i *InstallerExec) Close() error { + return nil +} + func (iCmd *installerCmd) Run() error { var errBuf bytes.Buffer iCmd.Stderr = &errBuf diff --git a/pkg/fleet/internal/oci/download.go b/pkg/fleet/internal/oci/download.go index 95593831dc901..6900c771dc942 100644 --- a/pkg/fleet/internal/oci/download.go +++ b/pkg/fleet/internal/oci/download.go @@ -41,6 +41,8 @@ const ( RegistryAuthGCR string = "gcr" // RegistryAuthECR is the Amazon Elastic Container Registry authentication method. RegistryAuthECR string = "ecr" + // RegistryAuthPassword is the password registry authentication method. 
+ RegistryAuthPassword string = "password" ) const ( @@ -132,12 +134,17 @@ func (d *Downloader) Download(ctx context.Context, packageURL string) (*Download }, nil } -func getKeychain(auth string) authn.Keychain { +func getKeychain(auth string, username string, password string) authn.Keychain { switch auth { case RegistryAuthGCR: return google.Keychain case RegistryAuthECR: return authn.NewKeychainFromHelper(ecr.NewECRHelper()) + case RegistryAuthPassword: + return usernamePasswordKeychain{ + username: username, + password: password, + } case RegistryAuthDefault, "": return authn.DefaultKeychain default: @@ -169,10 +176,10 @@ func getRefAndKeychain(env *env.Env, url string) urlWithKeychain { } ref = registryOverride + imageWithIdentifier } - keychain := getKeychain(env.RegistryAuthOverride) + keychain := getKeychain(env.RegistryAuthOverride, env.RegistryUsername, env.RegistryPassword) for image, override := range env.RegistryAuthOverrideByImage { if strings.HasPrefix(imageWithIdentifier, image+":") || strings.HasPrefix(imageWithIdentifier, image+"@") { - keychain = getKeychain(override) + keychain = getKeychain(override, env.RegistryUsername, env.RegistryPassword) break } } @@ -313,3 +320,15 @@ func isStreamResetError(err error) bool { } return false } + +type usernamePasswordKeychain struct { + username string + password string +} + +func (k usernamePasswordKeychain) Resolve(_ authn.Resource) (authn.Authenticator, error) { + return authn.FromConfig(authn.AuthConfig{ + Username: k.username, + Password: k.password, + }), nil +} diff --git a/pkg/fleet/internal/paths/installer_paths.go b/pkg/fleet/internal/paths/installer_paths.go index 24e53dc13dceb..394f1af1a5093 100644 --- a/pkg/fleet/internal/paths/installer_paths.go +++ b/pkg/fleet/internal/paths/installer_paths.go @@ -15,7 +15,12 @@ const ( ConfigsPath = "/etc/datadog-packages" // LocksPath is the path to the packages locks directory. 
LocksPath = "/opt/datadog-packages/run/locks" - + // RootTmpDir is the temporary path where the bootstrapper will be extracted to. + RootTmpDir = "/opt/datadog-installer/tmp" // DefaultUserConfigsDir is the default Agent configuration directory. DefaultUserConfigsDir = "/etc" + // StableInstallerPath is the path to the stable installer binary. + StableInstallerPath = "/opt/datadog-packages/datadog-installer/stable/bin/installer/installer" + // ExperimentInstallerPath is the path to the experiment installer binary. + ExperimentInstallerPath = "/opt/datadog-packages/datadog-installer/experiment/bin/installer/installer" ) diff --git a/pkg/fleet/internal/paths/installer_paths_windows.go b/pkg/fleet/internal/paths/installer_paths_windows.go index 456cb7e6af7c2..07aa3216ce9da 100644 --- a/pkg/fleet/internal/paths/installer_paths_windows.go +++ b/pkg/fleet/internal/paths/installer_paths_windows.go @@ -22,9 +22,12 @@ var ( ConfigsPath string // LocksPath is the path to the locks directory. LocksPath string - + // RootTmpDir is the temporary path where the bootstrapper will be extracted to. + RootTmpDir string // DefaultUserConfigsDir is the default Agent configuration directory DefaultUserConfigsDir string + // StableInstallerPath is the path to the stable installer binary. 
+ StableInstallerPath string ) func init() { @@ -32,5 +35,8 @@ func init() { PackagesPath = filepath.Join(datadogInstallerData, "packages") ConfigsPath = filepath.Join(datadogInstallerData, "configs") LocksPath = filepath.Join(datadogInstallerData, "locks") + RootTmpDir = filepath.Join(datadogInstallerData, "tmp") + datadogInstallerPath := "C:\\Program Files\\Datadog\\Datadog Installer" + StableInstallerPath = filepath.Join(datadogInstallerPath, "datadog-installer.exe") DefaultUserConfigsDir, _ = windows.KnownFolderPath(windows.FOLDERID_ProgramData, 0) } diff --git a/pkg/gohai/filesystem/filesystem_nix.go b/pkg/gohai/filesystem/filesystem_nix.go index f25f6adc97d3e..27f5094d1e282 100644 --- a/pkg/gohai/filesystem/filesystem_nix.go +++ b/pkg/gohai/filesystem/filesystem_nix.go @@ -74,9 +74,7 @@ func replaceDev(oldMount, newMount MountInfo) bool { } // getFileSystemInfoWithMounts is an internal method to help testing with test mounts and mocking syscalls -func getFileSystemInfoWithMounts(initialMounts []*mountinfo.Info, sizeKB, dev fsInfoGetter) ([]MountInfo, error) { - mounts := initialMounts - +func getFileSystemInfoWithMounts(mounts []*mountinfo.Info, sizeKB, dev fsInfoGetter) ([]MountInfo, error) { devMountInfos := map[uint64]MountInfo{} for _, mount := range mounts { // Skip mounts that seem to be missing data diff --git a/pkg/gohai/filesystem/filesystem_nix_test.go b/pkg/gohai/filesystem/filesystem_nix_test.go index c41b7c369110d..55a7c92563370 100644 --- a/pkg/gohai/filesystem/filesystem_nix_test.go +++ b/pkg/gohai/filesystem/filesystem_nix_test.go @@ -117,7 +117,6 @@ func TestNixFSTypeFiltering(t *testing.T) { mounts, err := getFileSystemInfoWithMounts(inputMounts, mockFSSizeKB, getMockFSDev()) require.NoError(t, err) - require.Equal(t, len(expectedMounts), len(mounts)) assert.ElementsMatch(t, mounts, expectedMounts) }) } @@ -260,8 +259,9 @@ func TestFilterDev(t *testing.T) { func newTestInputMountinfo(name string) *mountinfo.Info { return &mountinfo.Info{ + 
// add suffixes to the name to avoid having an ignored source / type / mountpoint Source: name + "Source", - FSType: name, + FSType: name + "Type", Mountpoint: name + "MountPoint", } } diff --git a/pkg/internaltelemetry/client.go b/pkg/internaltelemetry/client.go index bb8b0569c501b..f9675ab906780 100644 --- a/pkg/internaltelemetry/client.go +++ b/pkg/internaltelemetry/client.go @@ -20,11 +20,11 @@ import ( "go.uber.org/atomic" - metadatautils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" + "github.com/shirou/gopsutil/v3/host" ) const ( @@ -115,7 +115,11 @@ type httpClient interface { // NewClient creates a new telemetry client func NewClient(httpClient httpClient, endpoints []*config.Endpoint, service string, debug bool) Client { - info := metadatautils.GetInformation() + info, err := host.Info() + if err != nil { + log.Errorf("failed to retrieve host info: %v", err) + info = &host.InfoStat{} + } return &client{ client: httpClient, endpoints: endpoints, diff --git a/pkg/jmxfetch/jmxfetch.go b/pkg/jmxfetch/jmxfetch.go index 2f0fb0537beca..1a65b1acb5886 100644 --- a/pkg/jmxfetch/jmxfetch.go +++ b/pkg/jmxfetch/jmxfetch.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" api "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/status/health" jmxStatus "github.com/DataDog/datadog-agent/pkg/status/jmx" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -121,7 +121,7 @@ func NewJMXFetch(logger jmxlogger.Component) *JMXFetch { // Monitor monitors this JMXFetch instance, waiting 
for JMX to stop. Gracefully handles restarting the JMXFetch process. func (j *JMXFetch) Monitor() { - limiter := newRestartLimiter(config.Datadog().GetInt("jmx_max_restarts"), float64(config.Datadog().GetInt("jmx_restart_interval"))) + limiter := newRestartLimiter(pkgconfigsetup.Datadog().GetInt("jmx_max_restarts"), float64(pkgconfigsetup.Datadog().GetInt("jmx_restart_interval"))) ticker := time.NewTicker(500 * time.Millisecond) defer ticker.Stop() @@ -192,7 +192,7 @@ func (j *JMXFetch) Start(manage bool) error { classpath = fmt.Sprintf("%s%s%s", j.JavaToolsJarPath, string(os.PathListSeparator), classpath) } - globalCustomJars := config.Datadog().GetStringSlice("jmx_custom_jars") + globalCustomJars := pkgconfigsetup.Datadog().GetStringSlice("jmx_custom_jars") if len(globalCustomJars) > 0 { classpath = fmt.Sprintf("%s%s%s", strings.Join(globalCustomJars, string(os.PathListSeparator)), string(os.PathListSeparator), classpath) } @@ -209,13 +209,13 @@ func (j *JMXFetch) Start(manage bool) error { reporter = "json" default: if j.DSD != nil && j.DSD.UdsListenerRunning() { - reporter = fmt.Sprintf("statsd:unix://%s", config.Datadog().GetString("dogstatsd_socket")) + reporter = fmt.Sprintf("statsd:unix://%s", pkgconfigsetup.Datadog().GetString("dogstatsd_socket")) } else { - bindHost := config.GetBindHost() + bindHost := pkgconfigsetup.GetBindHost(pkgconfigsetup.Datadog()) if bindHost == "" || bindHost == "0.0.0.0" { bindHost = "localhost" } - reporter = fmt.Sprintf("statsd:%s:%s", bindHost, config.Datadog().GetString("dogstatsd_port")) + reporter = fmt.Sprintf("statsd:%s:%s", bindHost, pkgconfigsetup.Datadog().GetString("dogstatsd_port")) } } @@ -226,14 +226,14 @@ func (j *JMXFetch) Start(manage bool) error { // Specify a maximum memory allocation pool for the JVM javaOptions := j.JavaOptions - useContainerSupport := config.Datadog().GetBool("jmx_use_container_support") - useCgroupMemoryLimit := config.Datadog().GetBool("jmx_use_cgroup_memory_limit") + useContainerSupport 
:= pkgconfigsetup.Datadog().GetBool("jmx_use_container_support") + useCgroupMemoryLimit := pkgconfigsetup.Datadog().GetBool("jmx_use_cgroup_memory_limit") if useContainerSupport && useCgroupMemoryLimit { return fmt.Errorf("incompatible options %q and %q", jvmContainerSupport, jvmCgroupMemoryAwareness) } else if useContainerSupport { javaOptions += jvmContainerSupport - maxHeapSizeAsPercentRAM := config.Datadog().GetFloat64("jmx_max_ram_percentage") + maxHeapSizeAsPercentRAM := pkgconfigsetup.Datadog().GetFloat64("jmx_max_ram_percentage") passOption := true // These options overwrite the -XX:MaxRAMPercentage option, log a warning if they are found in the javaOptions if strings.Contains(javaOptions, "Xmx") || strings.Contains(javaOptions, "XX:MaxHeapSize") { @@ -278,11 +278,11 @@ func (j *JMXFetch) Start(manage bool) error { jmxLogLevel = "INFO" } - ipcHost, err := config.GetIPCAddress() + ipcHost, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - ipcPort := config.Datadog().GetInt("cmd_port") + ipcPort := pkgconfigsetup.Datadog().GetInt("cmd_port") if j.IPCHost != "" { ipcHost = j.IPCHost } @@ -296,37 +296,37 @@ func (j *JMXFetch) Start(manage bool) error { jmxMainClass, "--ipc_host", ipcHost, "--ipc_port", fmt.Sprintf("%v", ipcPort), - "--check_period", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_check_period")), // Period of the main loop of jmxfetch in ms - "--thread_pool_size", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_thread_pool_size")), // Size for the JMXFetch thread pool - "--collection_timeout", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_collection_timeout")), // Timeout for metric collection in seconds - "--reconnection_timeout", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_reconnection_timeout")), // Timeout for instance reconnection in seconds - "--reconnection_thread_pool_size", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_reconnection_thread_pool_size")), // Size for the JMXFetch reconnection 
thread pool + "--check_period", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_check_period")), // Period of the main loop of jmxfetch in ms + "--thread_pool_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_thread_pool_size")), // Size for the JMXFetch thread pool + "--collection_timeout", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_collection_timeout")), // Timeout for metric collection in seconds + "--reconnection_timeout", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_reconnection_timeout")), // Timeout for instance reconnection in seconds + "--reconnection_thread_pool_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_reconnection_thread_pool_size")), // Size for the JMXFetch reconnection thread pool "--log_level", jmxLogLevel, "--reporter", reporter, // Reporter to use - "--statsd_queue_size", fmt.Sprintf("%v", config.Datadog().GetInt("jmx_statsd_client_queue_size")), // Dogstatsd client queue size to use + "--statsd_queue_size", fmt.Sprintf("%v", pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_queue_size")), // Dogstatsd client queue size to use ) - if config.Datadog().GetBool("jmx_statsd_telemetry_enabled") { + if pkgconfigsetup.Datadog().GetBool("jmx_statsd_telemetry_enabled") { subprocessArgs = append(subprocessArgs, "--statsd_telemetry") } - if config.Datadog().GetBool("jmx_telemetry_enabled") { + if pkgconfigsetup.Datadog().GetBool("jmx_telemetry_enabled") { subprocessArgs = append(subprocessArgs, "--jmxfetch_telemetry") } - if config.Datadog().GetBool("jmx_statsd_client_use_non_blocking") { + if pkgconfigsetup.Datadog().GetBool("jmx_statsd_client_use_non_blocking") { subprocessArgs = append(subprocessArgs, "--statsd_nonblocking") } - if bufSize := config.Datadog().GetInt("jmx_statsd_client_buffer_size"); bufSize != 0 { + if bufSize := pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_buffer_size"); bufSize != 0 { subprocessArgs = append(subprocessArgs, "--statsd_buffer_size", fmt.Sprintf("%d", 
bufSize)) } - if socketTimeout := config.Datadog().GetInt("jmx_statsd_client_socket_timeout"); socketTimeout != 0 { + if socketTimeout := pkgconfigsetup.Datadog().GetInt("jmx_statsd_client_socket_timeout"); socketTimeout != 0 { subprocessArgs = append(subprocessArgs, "--statsd_socket_timeout", fmt.Sprintf("%d", socketTimeout)) } - if config.Datadog().GetBool("log_format_rfc3339") { + if pkgconfigsetup.Datadog().GetBool("log_format_rfc3339") { subprocessArgs = append(subprocessArgs, "--log_format_rfc3339") } diff --git a/pkg/jmxfetch/runner.go b/pkg/jmxfetch/runner.go index 7f53e9b571762..9717edfeb262e 100644 --- a/pkg/jmxfetch/runner.go +++ b/pkg/jmxfetch/runner.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/agent/jmxlogger" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" jmxStatus "github.com/DataDog/datadog-agent/pkg/status/jmx" ) @@ -24,7 +24,7 @@ type runner struct { func (r *runner) initRunner(server dogstatsdServer.Component, logger jmxlogger.Component) { r.jmxfetch = NewJMXFetch(logger) - r.jmxfetch.LogLevel = config.Datadog().GetString("log_level") + r.jmxfetch.LogLevel = pkgconfigsetup.Datadog().GetString("log_level") r.jmxfetch.DSD = server } diff --git a/pkg/languagedetection/detector.go b/pkg/languagedetection/detector.go index 2ba7e681a3113..92ab7fe4cc4c5 100644 --- a/pkg/languagedetection/detector.go +++ b/pkg/languagedetection/detector.go @@ -12,7 +12,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/languagedetection/internal/detectors" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/process/net" @@ -92,7 +92,7 @@ var ( ) // DetectLanguage 
uses a combination of commandline parsing and binary analysis to detect a process' language -func DetectLanguage(procs []languagemodels.Process, sysprobeConfig config.Reader) []*languagemodels.Language { +func DetectLanguage(procs []languagemodels.Process, sysprobeConfig model.Reader) []*languagemodels.Language { detectLanguageStart := time.Now() defer func() { detectLanguageRuntimeMs.Observe(float64(time.Since(detectLanguageStart).Milliseconds())) @@ -161,7 +161,7 @@ func DetectLanguage(procs []languagemodels.Process, sysprobeConfig config.Reader return langs } -func privilegedLanguageDetectionEnabled(sysProbeConfig config.Reader) bool { +func privilegedLanguageDetectionEnabled(sysProbeConfig model.Reader) bool { if sysProbeConfig == nil { return false } diff --git a/pkg/languagedetection/internal/detectors/dotnet_detector.go b/pkg/languagedetection/internal/detectors/dotnet_detector.go new file mode 100644 index 0000000000000..c3a8c7403d88c --- /dev/null +++ b/pkg/languagedetection/internal/detectors/dotnet_detector.go @@ -0,0 +1,84 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux + +package detectors + +import ( + "bufio" + "fmt" + "io" + "os" + "path" + "strconv" + "strings" + + dderrors "github.com/DataDog/datadog-agent/pkg/errors" + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" + "github.com/DataDog/datadog-agent/pkg/util/kernel" +) + +const runtimeDll = "/System.Runtime.dll" + +var errorDllNotFound = dderrors.NewNotFound(runtimeDll) + +// DotnetDetector detects .NET processes. +type DotnetDetector struct { + hostProc string +} + +// NewDotnetDetector creates a new instance of DotnetDetector. 
+func NewDotnetDetector() DotnetDetector { + return DotnetDetector{hostProc: kernel.ProcFSRoot()} +} + +// mapsHasDotnetDll checks if the maps file includes a path with the .NET +// runtime DLL. +func mapsHasDotnetDll(reader io.Reader) (bool, error) { + scanner := bufio.NewScanner(bufio.NewReader(reader)) + + for scanner.Scan() { + line := scanner.Text() + + if strings.HasSuffix(line, runtimeDll) { + return true, nil + } + } + + return false, scanner.Err() +} + +func (d DotnetDetector) getMapsPath(pid int32) string { + return path.Join(d.hostProc, strconv.FormatInt(int64(pid), 10), "maps") +} + +// DetectLanguage detects if a process is a .NET process. It does this by using +// /proc/PID/maps to check if the process has mapped a standard .NET dll. This +// works for non-single-file deployments (both self-contained and +// framework-dependent), and framework-dependent single-file deployments. +// +// It does not work for self-contained single-file deployments since these do +// not have any DLLs in their maps file. 
+func (d DotnetDetector) DetectLanguage(process languagemodels.Process) (languagemodels.Language, error) { + path := d.getMapsPath(process.GetPid()) + mapsFile, err := os.Open(path) + if err != nil { + return languagemodels.Language{}, fmt.Errorf("open: %v", err) + } + defer mapsFile.Close() + + hasDLL, err := mapsHasDotnetDll(mapsFile) + if err != nil { + return languagemodels.Language{}, err + } + if !hasDLL { + return languagemodels.Language{}, errorDllNotFound + } + + return languagemodels.Language{ + Name: languagemodels.Dotnet, + }, nil +} diff --git a/pkg/languagedetection/internal/detectors/dotnet_detector_test.go b/pkg/languagedetection/internal/detectors/dotnet_detector_test.go new file mode 100644 index 0000000000000..78b7bf146e619 --- /dev/null +++ b/pkg/languagedetection/internal/detectors/dotnet_detector_test.go @@ -0,0 +1,79 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux + +package detectors + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" + "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" + fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" + "github.com/DataDog/datadog-agent/pkg/proto/pbgo/languagedetection" +) + +func TestDotnetMapsParser(t *testing.T) { + data := []struct { + name string + maps string + result bool + }{ + { + name: "empty maps", + maps: "", + }, + { + name: "not in maps", + maps: ` +79f6cd47d000-79f6cd47f000 r--p 00000000 fc:04 793163 /usr/lib/python3.10/lib-dynload/_bz2.cpython-310-x86_64-linux-gnu.so +79f6cd479000-79f6cd47a000 r-xp 00001000 fc:06 5507018 /home/foo/.local/lib/python3.10/site-packages/ddtrace_fake/md.cpython-310-x86_64-linux-gnu.so + `, + result: false, + }, + { + name: "in maps", + maps: ` +7d97b4e57000-7d97b4e85000 r--s 00000000 fc:04 1332568 /usr/lib/dotnet/shared/Microsoft.NETCore.App/8.0.8/System.Con +sole.dll +7d97b4e85000-7d97b4e8e000 r--s 00000000 fc:04 1332665 /usr/lib/dotnet/shared/Microsoft.NETCore.App/8.0.8/System.Runtime.dll +7d97b4e8e000-7d97b4e99000 r--p 00000000 fc:04 1332718 /usr/lib/dotnet/shared/Microsoft.NETCore.App/8.0.8/libSystem.Native.so + `, + result: true, + }, + } + for _, d := range data { + t.Run(d.name, func(t *testing.T) { + result, err := mapsHasDotnetDll(strings.NewReader(d.maps)) + assert.NoError(t, err) + assert.Equal(t, d.result, result) + }) + } +} + +func TestDotnetDetector(t *testing.T) { + curDir, err := testutil.CurDir() + require.NoError(t, err) + + dll := filepath.Join(curDir, "testdata", "System.Runtime.dll") + cmd, err := fileopener.OpenFromAnotherProcess(t, dll) + require.NoError(t, err) + + proc := &languagedetection.Process{Pid: int32(cmd.Process.Pid)} + langInfo, err := 
NewDotnetDetector().DetectLanguage(proc) + require.NoError(t, err) + assert.Equal(t, languagemodels.Dotnet, langInfo.Name) + + self := &languagedetection.Process{Pid: int32(os.Getpid())} + _, err = NewDotnetDetector().DetectLanguage(self) + require.Error(t, err) +} diff --git a/pkg/languagedetection/internal/detectors/testdata/System.Runtime.dll b/pkg/languagedetection/internal/detectors/testdata/System.Runtime.dll new file mode 100644 index 0000000000000..421376db9e8ae --- /dev/null +++ b/pkg/languagedetection/internal/detectors/testdata/System.Runtime.dll @@ -0,0 +1 @@ +dummy diff --git a/pkg/languagedetection/privileged/privileged_detector.go b/pkg/languagedetection/privileged/privileged_detector.go index cf050a77676f5..b6447ea310c14 100644 --- a/pkg/languagedetection/privileged/privileged_detector.go +++ b/pkg/languagedetection/privileged/privileged_detector.go @@ -30,6 +30,7 @@ import ( var detectorsWithPrivilege = []languagemodels.Detector{ detectors.NewGoDetector(), + detectors.NewDotnetDetector(), } var ( @@ -97,6 +98,7 @@ func (l *LanguageDetector) DetectWithPrivileges(procs []languagemodels.Process) continue } languages[i] = lang + break } l.mux.Lock() l.binaryIDCache.Add(bin, lang) diff --git a/pkg/languagedetection/privileged/privileged_detector_test.go b/pkg/languagedetection/privileged/privileged_detector_test.go index aba97bf96595c..485a02bbe5ec9 100644 --- a/pkg/languagedetection/privileged/privileged_detector_test.go +++ b/pkg/languagedetection/privileged/privileged_detector_test.go @@ -8,6 +8,7 @@ package privileged import ( + "errors" "os" "os/exec" "path/filepath" @@ -149,3 +150,74 @@ func TestShortLivingProc(t *testing.T) { require.Equal(t, languagemodels.Language{}, res[0]) require.Zero(t, d.binaryIDCache.Len()) } + +// DummyDetector is a detector used for testing +type DummyDetector struct { + language languagemodels.LanguageName +} + +// DummyProcess is a process used for testing +type DummyProcess struct{} + +// GetPid is unused +func (p 
DummyProcess) GetPid() int32 { + return int32(os.Getpid()) +} + +// GetCommand is unused +func (p DummyProcess) GetCommand() string { + return "dummy" +} + +// GetCmdline is unused +func (p DummyProcess) GetCmdline() []string { + return []string{"dummy"} +} + +// DetectLanguage "detects" a dummy language for testing +func (d DummyDetector) DetectLanguage(_ languagemodels.Process) (languagemodels.Language, error) { + if d.language == languagemodels.Unknown { + return languagemodels.Language{}, errors.New("unable to detect") + } + + return languagemodels.Language{Name: languagemodels.LanguageName(d.language)}, nil +} + +func TestDetectorOrder(t *testing.T) { + for _, test := range []struct { + name string + detectors []languagemodels.Detector + language languagemodels.LanguageName + }{ + { + name: "stop at first good", + detectors: []languagemodels.Detector{ + DummyDetector{languagemodels.Java}, + DummyDetector{languagemodels.Python}}, + language: languagemodels.Java, + }, + { + name: "try second if first fails", + detectors: []languagemodels.Detector{ + DummyDetector{}, + DummyDetector{languagemodels.Python}}, + language: languagemodels.Python, + }, + { + name: "all fail", + detectors: []languagemodels.Detector{ + DummyDetector{}, + DummyDetector{}}, + language: languagemodels.Unknown, + }, + } { + t.Run(test.name, func(t *testing.T) { + MockPrivilegedDetectors(t, test.detectors) + d := NewLanguageDetector() + res := d.DetectWithPrivileges([]languagemodels.Process{DummyProcess{}}) + require.Len(t, res, 1) + require.NotNil(t, res[0]) + assert.Equal(t, test.language, res[0].Name) + }) + } +} diff --git a/pkg/logs/auditor/auditor.go b/pkg/logs/auditor/auditor.go index 578107bfa0e22..cad651a7c7d27 100644 --- a/pkg/logs/auditor/auditor.go +++ b/pkg/logs/auditor/auditor.go @@ -133,8 +133,7 @@ func (a *RegistryAuditor) Channel() chan *message.Payload { // GetOffset returns the last committed offset for a given identifier, // returns an empty string if it does not exist. 
func (a *RegistryAuditor) GetOffset(identifier string) string { - r := a.readOnlyRegistryCopy() - entry, exists := r[identifier] + entry, exists := a.readOnlyRegistryEntryCopy(identifier) if !exists { return "" } @@ -144,8 +143,7 @@ func (a *RegistryAuditor) GetOffset(identifier string) string { // GetTailingMode returns the last committed offset for a given identifier, // returns an empty string if it does not exist. func (a *RegistryAuditor) GetTailingMode(identifier string) string { - r := a.readOnlyRegistryCopy() - entry, exists := r[identifier] + entry, exists := a.readOnlyRegistryEntryCopy(identifier) if !exists { return "" } @@ -265,6 +263,16 @@ func (a *RegistryAuditor) readOnlyRegistryCopy() map[string]RegistryEntry { return r } +func (a *RegistryAuditor) readOnlyRegistryEntryCopy(identifier string) (RegistryEntry, bool) { + a.registryMutex.Lock() + defer a.registryMutex.Unlock() + entry, exists := a.registry[identifier] + if !exists { + return RegistryEntry{}, false + } + return *entry, true +} + // flushRegistry writes on disk the registry at the given path func (a *RegistryAuditor) flushRegistry() error { r := a.readOnlyRegistryCopy() diff --git a/pkg/logs/auditor/go.mod b/pkg/logs/auditor/go.mod index 52a3aba073a47..51a13b17c7628 100644 --- a/pkg/logs/auditor/go.mod +++ b/pkg/logs/auditor/go.mod @@ -16,6 +16,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../message github.com/DataDog/datadog-agent/pkg/logs/sources => ../sources @@ -43,28 +44,29 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 
github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/logs/client/go.mod b/pkg/logs/client/go.mod index a57d90a5a1b37..edc28c13218b3 100644 --- a/pkg/logs/client/go.mod +++ b/pkg/logs/client/go.mod @@ -18,6 +18,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../message github.com/DataDog/datadog-agent/pkg/logs/metrics => ../metrics @@ -46,7 +47,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 
github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 @@ -55,32 +56,33 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 github.com/stretchr/testify v1.9.0 golang.org/x/net v0.28.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/pkg/logs/diagnostic/go.mod b/pkg/logs/diagnostic/go.mod index 6c6616dd4d46d..e8525d5c8999f 100644 --- a/pkg/logs/diagnostic/go.mod +++ b/pkg/logs/diagnostic/go.mod @@ -17,6 +17,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/message => ../message github.com/DataDog/datadog-agent/pkg/logs/sources => ../sources 
@@ -49,26 +50,27 @@ require ( ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go index db8e9832c1961..6ea356296bf61 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/timestamp_detector_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) type testInput struct { @@ -82,8 +82,8 @@ var inputs = []testInput{ } func TestCorrectLabelIsAssigned(t *testing.T) { - tokenizer := NewTokenizer(config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes")) - timestampDetector := NewTimestampDetector(config.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold")) + tokenizer := 
NewTokenizer(pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes")) + timestampDetector := NewTimestampDetector(pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold")) for _, testInput := range inputs { context := &messageContext{ @@ -102,7 +102,7 @@ func TestCorrectLabelIsAssigned(t *testing.T) { } func printMatchUnderline(context *messageContext, input string, match MatchContext) { - maxLen := config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes") + maxLen := pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes") fmt.Printf("%.2f\t\t%v\n", match.probability, input) if match.start == match.end { diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go index d60a6031ea3db..25a22c209ecc7 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go @@ -7,7 +7,7 @@ package automultilinedetection import ( - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder/auto_multiline_detection/tokens" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -38,7 +38,7 @@ type UserSamples struct { } // NewUserSamples creates a new UserSamples instance. 
-func NewUserSamples(config config.Reader) *UserSamples { +func NewUserSamples(config model.Reader) *UserSamples { tokenizer := NewTokenizer(0) s := make([]*UserSample, 0) err := config.UnmarshalKey("logs_config.auto_multi_line_detection_custom_samples", &s) diff --git a/pkg/logs/internal/decoder/auto_multiline_handler.go b/pkg/logs/internal/decoder/auto_multiline_handler.go index bf69b41693415..16de7f50466c1 100644 --- a/pkg/logs/internal/decoder/auto_multiline_handler.go +++ b/pkg/logs/internal/decoder/auto_multiline_handler.go @@ -8,7 +8,7 @@ package decoder import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" automultilinedetection "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder/auto_multiline_detection" "github.com/DataDog/datadog-agent/pkg/logs/message" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" @@ -26,22 +26,22 @@ func NewAutoMultilineHandler(outputFn func(m *message.Message), maxContentSize i // Order is important heuristics := []automultilinedetection.Heuristic{} - heuristics = append(heuristics, automultilinedetection.NewTokenizer(config.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes"))) + heuristics = append(heuristics, automultilinedetection.NewTokenizer(pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes"))) - if config.Datadog().GetBool("logs_config.auto_multi_line.enable_json_detection") { + if pkgconfigsetup.Datadog().GetBool("logs_config.auto_multi_line.enable_json_detection") { heuristics = append(heuristics, automultilinedetection.NewJSONDetector()) } - heuristics = append(heuristics, automultilinedetection.NewUserSamples(config.Datadog())) + heuristics = append(heuristics, automultilinedetection.NewUserSamples(pkgconfigsetup.Datadog())) - if config.Datadog().GetBool("logs_config.auto_multi_line.enable_datetime_detection") { + if 
pkgconfigsetup.Datadog().GetBool("logs_config.auto_multi_line.enable_datetime_detection") { heuristics = append(heuristics, automultilinedetection.NewTimestampDetector( - config.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold"))) + pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold"))) } analyticsHeuristics := []automultilinedetection.Heuristic{automultilinedetection.NewPatternTable( - config.Datadog().GetInt("logs_config.auto_multi_line.pattern_table_max_size"), - config.Datadog().GetFloat64("logs_config.auto_multi_line.pattern_table_match_threshold"), + pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.pattern_table_max_size"), + pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line.pattern_table_match_threshold"), tailerInfo), } @@ -51,8 +51,8 @@ func NewAutoMultilineHandler(outputFn func(m *message.Message), maxContentSize i outputFn, maxContentSize, flushTimeout, - config.Datadog().GetBool("logs_config.tag_truncated_logs"), - config.Datadog().GetBool("logs_config.tag_auto_multi_line_logs"), + pkgconfigsetup.Datadog().GetBool("logs_config.tag_truncated_logs"), + pkgconfigsetup.Datadog().GetBool("logs_config.tag_auto_multi_line_logs"), tailerInfo), } } diff --git a/pkg/logs/internal/decoder/decoder.go b/pkg/logs/internal/decoder/decoder.go index 468cab5f81bee..e6186e5a86ace 100644 --- a/pkg/logs/internal/decoder/decoder.go +++ b/pkg/logs/internal/decoder/decoder.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" //nolint:revive // TODO(AML) Fix revive linter - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/framer" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers" "github.com/DataDog/datadog-agent/pkg/logs/message" @@ -83,7 +83,7 @@ func syncSourceInfo(source 
*sources.ReplaceableSource, lh *MultiLineHandler) { func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Parser, framing framer.Framing, multiLinePattern *regexp.Regexp, tailerInfo *status.InfoRegistry) *Decoder { inputChan := make(chan *message.Message) outputChan := make(chan *message.Message) - maxContentSize := config.MaxMessageSizeBytes(pkgConfig.Datadog()) + maxContentSize := config.MaxMessageSizeBytes(pkgconfigsetup.Datadog()) detectedPattern := &DetectedPattern{} outputFn := func(m *message.Message) { outputChan <- m } @@ -92,17 +92,17 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par var lineHandler LineHandler for _, rule := range source.Config().ProcessingRules { if rule.Type == config.MultiLine { - lh := NewMultiLineHandler(outputFn, rule.Regex, config.AggregationTimeout(pkgConfig.Datadog()), maxContentSize, false, tailerInfo) + lh := NewMultiLineHandler(outputFn, rule.Regex, config.AggregationTimeout(pkgconfigsetup.Datadog()), maxContentSize, false, tailerInfo) syncSourceInfo(source, lh) lineHandler = lh } } if lineHandler == nil { - if source.Config().ExperimentalAutoMultiLineEnabled(pkgConfig.Datadog()) { + if source.Config().ExperimentalAutoMultiLineEnabled(pkgconfigsetup.Datadog()) { log.Infof("Experimental Auto multi line log detection enabled") - lineHandler = NewAutoMultilineHandler(outputFn, maxContentSize, config.AggregationTimeout(pkgConfig.Datadog()), tailerInfo) + lineHandler = NewAutoMultilineHandler(outputFn, maxContentSize, config.AggregationTimeout(pkgconfigsetup.Datadog()), tailerInfo) - } else if source.Config().AutoMultiLineEnabled(pkgConfig.Datadog()) { + } else if source.Config().AutoMultiLineEnabled(pkgconfigsetup.Datadog()) { log.Infof("Auto multi line log detection enabled") if multiLinePattern != nil { @@ -111,7 +111,7 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par // Save the pattern again for the next rotation 
detectedPattern.Set(multiLinePattern) - lh := NewMultiLineHandler(outputFn, multiLinePattern, config.AggregationTimeout(pkgConfig.Datadog()), maxContentSize, true, tailerInfo) + lh := NewMultiLineHandler(outputFn, multiLinePattern, config.AggregationTimeout(pkgconfigsetup.Datadog()), maxContentSize, true, tailerInfo) syncSourceInfo(source, lh) lineHandler = lh } else { @@ -125,7 +125,7 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par // construct the lineParser, wrapping the parser var lineParser LineParser if parser.SupportsPartialLine() { - lineParser = NewMultiLineParser(lineHandler, config.AggregationTimeout(pkgConfig.Datadog()), parser, maxContentSize) + lineParser = NewMultiLineParser(lineHandler, config.AggregationTimeout(pkgconfigsetup.Datadog()), parser, maxContentSize) } else { lineParser = NewSingleLineParser(lineHandler, parser) } @@ -139,13 +139,13 @@ func NewDecoderWithFraming(source *sources.ReplaceableSource, parser parsers.Par func buildLegacyAutoMultilineHandlerFromConfig(outputFn func(*message.Message), maxContentSize int, source *sources.ReplaceableSource, detectedPattern *DetectedPattern, tailerInfo *status.InfoRegistry) *LegacyAutoMultilineHandler { linesToSample := source.Config().AutoMultiLineSampleSize if linesToSample <= 0 { - linesToSample = pkgConfig.Datadog().GetInt("logs_config.auto_multi_line_default_sample_size") + linesToSample = pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line_default_sample_size") } matchThreshold := source.Config().AutoMultiLineMatchThreshold if matchThreshold == 0 { - matchThreshold = pkgConfig.Datadog().GetFloat64("logs_config.auto_multi_line_default_match_threshold") + matchThreshold = pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line_default_match_threshold") } - additionalPatterns := pkgConfig.Datadog().GetStringSlice("logs_config.auto_multi_line_extra_patterns") + additionalPatterns := 
pkgconfigsetup.Datadog().GetStringSlice("logs_config.auto_multi_line_extra_patterns") additionalPatternsCompiled := []*regexp.Regexp{} for _, p := range additionalPatterns { @@ -157,14 +157,14 @@ func buildLegacyAutoMultilineHandlerFromConfig(outputFn func(*message.Message), additionalPatternsCompiled = append(additionalPatternsCompiled, compiled) } - matchTimeout := time.Second * pkgConfig.Datadog().GetDuration("logs_config.auto_multi_line_default_match_timeout") + matchTimeout := time.Second * pkgconfigsetup.Datadog().GetDuration("logs_config.auto_multi_line_default_match_timeout") return NewLegacyAutoMultilineHandler( outputFn, maxContentSize, linesToSample, matchThreshold, matchTimeout, - config.AggregationTimeout(pkgConfig.Datadog()), + config.AggregationTimeout(pkgconfigsetup.Datadog()), source, additionalPatternsCompiled, detectedPattern, diff --git a/pkg/logs/internal/decoder/file_decoder.go b/pkg/logs/internal/decoder/file_decoder.go index 48b8d901826e0..2cb7303f3242a 100644 --- a/pkg/logs/internal/decoder/file_decoder.go +++ b/pkg/logs/internal/decoder/file_decoder.go @@ -9,7 +9,7 @@ import ( "regexp" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/framer" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers/dockerfile" @@ -36,7 +36,7 @@ func NewDecoderFromSourceWithPattern(source *sources.ReplaceableSource, multiLin case sources.KubernetesSourceType: lineParser = kubernetes.New() case sources.DockerSourceType: - if coreConfig.Datadog().GetBool("logs_config.use_podman_logs") { + if pkgconfigsetup.Datadog().GetBool("logs_config.use_podman_logs") { // podman's on-disk logs are in kubernetes format lineParser = kubernetes.New() } else { diff --git a/pkg/logs/internal/decoder/line_handler_benchmark_test.go 
b/pkg/logs/internal/decoder/line_handler_benchmark_test.go index 85d3266c920f4..5d5ec40772c2a 100644 --- a/pkg/logs/internal/decoder/line_handler_benchmark_test.go +++ b/pkg/logs/internal/decoder/line_handler_benchmark_test.go @@ -12,7 +12,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/sources" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" @@ -24,7 +24,7 @@ func benchmarkSingleLineHandler(b *testing.B, logs int) { messages[i] = getDummyMessageWithLF(fmt.Sprintf("This is a log test line to benchmark the logs agent %d", i)) } - h := NewSingleLineHandler(func(*message.Message) {}, coreConfig.DefaultMaxMessageSizeBytes) + h := NewSingleLineHandler(func(*message.Message) {}, pkgconfigsetup.DefaultMaxMessageSizeBytes) b.ResetTimer() for n := 0; n < b.N; n++ { @@ -41,7 +41,7 @@ func benchmarkAutoMultiLineHandler(b *testing.B, logs int, line string) { } source := sources.NewReplaceableSource(sources.NewLogSource("config", &config.LogsConfig{})) - h := NewLegacyAutoMultilineHandler(func(*message.Message) {}, coreConfig.DefaultMaxMessageSizeBytes, 1000, 0.9, 30*time.Second, 1000*time.Millisecond, source, []*regexp.Regexp{}, &DetectedPattern{}, status.NewInfoRegistry()) + h := NewLegacyAutoMultilineHandler(func(*message.Message) {}, pkgconfigsetup.DefaultMaxMessageSizeBytes, 1000, 0.9, 30*time.Second, 1000*time.Millisecond, source, []*regexp.Regexp{}, &DetectedPattern{}, status.NewInfoRegistry()) b.ResetTimer() for n := 0; n < b.N; n++ { diff --git a/pkg/logs/internal/decoder/multiline_handler.go b/pkg/logs/internal/decoder/multiline_handler.go index 5fdeb2a80ce3f..d776a44bafea3 100644 --- a/pkg/logs/internal/decoder/multiline_handler.go +++ b/pkg/logs/internal/decoder/multiline_handler.go @@ -10,7 +10,7 @@ 
import ( "regexp" "time" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -163,7 +163,7 @@ func (h *MultiLineHandler) sendBuffer() { } msg := message.NewRawMessage(content, h.status, h.linesLen, h.timestamp) msg.ParsingExtra.IsTruncated = h.isBufferTruncated - if h.isBufferTruncated && coreConfig.Datadog().GetBool("logs_config.tag_truncated_logs") { + if h.isBufferTruncated && pkgconfigsetup.Datadog().GetBool("logs_config.tag_truncated_logs") { msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedTag) } h.outputFn(msg) diff --git a/pkg/logs/internal/decoder/single_line_handler.go b/pkg/logs/internal/decoder/single_line_handler.go index 4f2b192561c24..6d37920361c34 100644 --- a/pkg/logs/internal/decoder/single_line_handler.go +++ b/pkg/logs/internal/decoder/single_line_handler.go @@ -9,7 +9,7 @@ import ( "bytes" "time" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" ) @@ -38,7 +38,7 @@ func (h *SingleLineHandler) flush() { } func addTruncatedTag(msg *message.Message) { - if coreConfig.Datadog().GetBool("logs_config.tag_truncated_logs") { + if pkgconfigsetup.Datadog().GetBool("logs_config.tag_truncated_logs") { msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedTag) } } diff --git a/pkg/logs/internal/tag/local_provider.go b/pkg/logs/internal/tag/local_provider.go index fb0898b48cde6..c0e49730b8e73 100644 --- a/pkg/logs/internal/tag/local_provider.go +++ b/pkg/logs/internal/tag/local_provider.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" hostMetadataUtils 
"github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/benbjohnson/clock" ) @@ -38,12 +38,12 @@ func newLocalProviderWithClock(t []string, clock clock.Clock) Provider { expectedTags: t, } - if config.IsExpectedTagsSet(coreConfig.Datadog()) { - p.expectedTags = append(p.tags, hostMetadataUtils.Get(context.TODO(), false, coreConfig.Datadog()).System...) + if config.IsExpectedTagsSet(pkgconfigsetup.Datadog()) { + p.expectedTags = append(p.tags, hostMetadataUtils.Get(context.TODO(), false, pkgconfigsetup.Datadog()).System...) // expected tags deadline is based on the agent start time, which may have been earlier // than the current time. - expectedTagsDeadline := coreConfig.StartTime.Add(coreConfig.Datadog().GetDuration("logs_config.expected_tags_duration")) + expectedTagsDeadline := pkgconfigsetup.StartTime.Add(pkgconfigsetup.Datadog().GetDuration("logs_config.expected_tags_duration")) // reset submitExpectedTags after deadline elapsed clock.AfterFunc(expectedTagsDeadline.Sub(clock.Now()), func() { diff --git a/pkg/logs/internal/tag/local_provider_test.go b/pkg/logs/internal/tag/local_provider_test.go index 5a8ed5e1f5280..77c0d50a9a3ac 100644 --- a/pkg/logs/internal/tag/local_provider_test.go +++ b/pkg/logs/internal/tag/local_provider_test.go @@ -13,8 +13,8 @@ import ( "github.com/benbjohnson/clock" "github.com/stretchr/testify/assert" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestLocalProviderShouldReturnEmptyList(t *testing.T) { @@ -36,10 +36,10 @@ func TestLocalProviderExpectedTags(t *testing.T) { mockConfig := configmock.New(t) clock := clock.NewMock() - oldStartTime := coreConfig.StartTime - coreConfig.StartTime = clock.Now() + oldStartTime := 
pkgconfigsetup.StartTime + pkgconfigsetup.StartTime = clock.Now() defer func() { - coreConfig.StartTime = oldStartTime + pkgconfigsetup.StartTime = oldStartTime }() tags := []string{"tag1:value1", "tag2", "tag3"} diff --git a/pkg/logs/internal/tag/provider.go b/pkg/logs/internal/tag/provider.go index b26b34bad479f..7196bacb02646 100644 --- a/pkg/logs/internal/tag/provider.go +++ b/pkg/logs/internal/tag/provider.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -46,7 +46,7 @@ func NewProvider(entityID string, tagAdder EntityTagAdder) Provider { func newProviderWithClock(entityID string, clock clock.Clock, tagAdder EntityTagAdder) Provider { p := &provider{ entityID: entityID, - taggerWarmupDuration: config.TaggerWarmupDuration(pkgConfig.Datadog()), + taggerWarmupDuration: config.TaggerWarmupDuration(pkgconfigsetup.Datadog()), localTagProvider: newLocalProviderWithClock([]string{}, clock), clock: clock, tagAdder: tagAdder, diff --git a/pkg/logs/internal/tag/provider_benchmark_test.go b/pkg/logs/internal/tag/provider_benchmark_test.go index 2ebc319fd9785..8b2ece36e2d83 100644 --- a/pkg/logs/internal/tag/provider_benchmark_test.go +++ b/pkg/logs/internal/tag/provider_benchmark_test.go @@ -10,16 +10,16 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" model "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func setupConfig(t testing.TB, tags []string) (model.Config, time.Time) { mockConfig := configmock.New(t) - startTime := config.StartTime - config.StartTime = time.Now() + startTime := 
pkgconfigsetup.StartTime + pkgconfigsetup.StartTime = time.Now() mockConfig.SetWithoutSource("tags", tags) @@ -37,7 +37,7 @@ func BenchmarkProviderExpectedTags(b *testing.B) { m, start := setupConfig(b, []string{"tag1:value1", "tag2", "tag3"}) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() defer m.SetWithoutSource("tags", nil) @@ -58,7 +58,7 @@ func BenchmarkProviderExpectedTagsEmptySlice(b *testing.B) { m, start := setupConfig(b, []string{}) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() if len(m.GetStringSlice("tags")) > 0 { @@ -81,7 +81,7 @@ func BenchmarkProviderExpectedTagsNil(b *testing.B) { m, start := setupConfig(b, nil) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() if len(m.GetStringSlice("tags")) > 0 { @@ -104,7 +104,7 @@ func BenchmarkProviderNoExpectedTags(b *testing.B) { m, start := setupConfig(b, []string{"tag1:value1", "tag2", "tag3"}) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() defer m.SetWithoutSource("tags", nil) @@ -124,7 +124,7 @@ func BenchmarkProviderNoExpectedTagsNil(b *testing.B) { m, start := setupConfig(b, nil) defer func() { - config.StartTime = start + pkgconfigsetup.StartTime = start }() defer m.SetWithoutSource("tags", nil) diff --git a/pkg/logs/internal/tag/provider_test.go b/pkg/logs/internal/tag/provider_test.go index 13786ec561b6d..b238d9c58b542 100644 --- a/pkg/logs/internal/tag/provider_test.go +++ b/pkg/logs/internal/tag/provider_test.go @@ -15,8 +15,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestProviderExpectedTags(t *testing.T) { @@ -24,11 +24,11 @@ func TestProviderExpectedTags(t *testing.T) { clock := clock.NewMock() fakeTagger := taggerimpl.SetupFakeTagger(t) 
defer fakeTagger.ResetTagger() - oldStartTime := coreConfig.StartTime + oldStartTime := pkgconfigsetup.StartTime then := clock.Now() - coreConfig.StartTime = then + pkgconfigsetup.StartTime = then defer func() { - coreConfig.StartTime = oldStartTime + pkgconfigsetup.StartTime = oldStartTime }() tags := []string{"tag1:value1", "tag2", "tag3"} diff --git a/pkg/logs/internal/util/containersorpods/containers_or_pods.go b/pkg/logs/internal/util/containersorpods/containers_or_pods.go index 4ee689bff2768..010c34c68c710 100644 --- a/pkg/logs/internal/util/containersorpods/containers_or_pods.go +++ b/pkg/logs/internal/util/containersorpods/containers_or_pods.go @@ -11,8 +11,8 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -158,7 +158,7 @@ func (ch *chooser) start() { // preferred returns the preferred LogWhat, based on configuration func (ch *chooser) preferred() LogWhat { - if config.Datadog().GetBool("logs_config.k8s_container_use_file") { + if pkgconfigsetup.Datadog().GetBool("logs_config.k8s_container_use_file") { return LogPods } return LogContainers diff --git a/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go b/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go index 5039a6fc4d2b9..dd30cd6b255ca 100644 --- a/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go +++ b/pkg/logs/internal/util/containersorpods/containers_or_pods_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -29,7 +28,7 @@ func TestChoose(t *testing.T) { expected LogWhat, ) func(*testing.T) { return func(t *testing.T) { - config.SetFeatures(t, features...) 
+ env.SetFeatures(t, features...) mockConfig := configmock.New(t) mockConfig.SetWithoutSource("logs_config.k8s_container_use_file", k8sContainerUseFile) diff --git a/pkg/logs/launchers/container/tailerfactory/file.go b/pkg/logs/launchers/container/tailerfactory/file.go index 9ef2b0f06777c..02289d82ae94b 100644 --- a/pkg/logs/launchers/container/tailerfactory/file.go +++ b/pkg/logs/launchers/container/tailerfactory/file.go @@ -20,7 +20,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/containersorpods" "github.com/DataDog/datadog-agent/pkg/logs/launchers/container/tailerfactory/tailers" "github.com/DataDog/datadog-agent/pkg/logs/sources" @@ -33,7 +33,7 @@ import ( var podLogsBasePath = "/var/log/pods" var dockerLogsBasePathNix = "/var/lib/docker" var dockerLogsBasePathWin = "c:\\programdata\\docker" -var podmanLogsBasePath = "/var/lib/containers" +var podmanRootfullLogsBasePath = "/var/lib/containers" // makeFileTailer makes a file-based tailer for the given source, or returns // an error if it cannot do so (e.g., due to permission errors) @@ -147,7 +147,7 @@ func (tf *factory) makeDockerFileSource(source *sources.LogSource) (*sources.Log func (tf *factory) findDockerLogPath(containerID string) string { // if the user has set a custom docker data root, this will pick it up // and set it in place of the usual docker base path - overridePath := coreConfig.Datadog().GetString("logs_config.docker_path_override") + overridePath := pkgconfigsetup.Datadog().GetString("logs_config.docker_path_override") if len(overridePath) > 0 { return filepath.Join(overridePath, "containers", containerID, fmt.Sprintf("%s-json.log", containerID)) } @@ -160,7 +160,14 @@ func (tf *factory) 
findDockerLogPath(containerID string) string { default: // linux, darwin // this config flag provides temporary support for podman while it is // still recognized by AD as a "docker" runtime. - if coreConfig.Datadog().GetBool("logs_config.use_podman_logs") { + if pkgconfigsetup.Datadog().GetBool("logs_config.use_podman_logs") { + // Default path for podman rootfull containers + podmanLogsBasePath := podmanRootfullLogsBasePath + podmanDBPath := pkgconfigsetup.Datadog().GetString("podman_db_path") + // User provided a custom podman DB path, they are running rootless containers or modified the root directory. + if len(podmanDBPath) > 0 { + podmanLogsBasePath = log.ExtractPodmanRootDirFromDBPath(podmanDBPath) + } return filepath.Join( podmanLogsBasePath, "storage/overlay-containers", containerID, "userdata/ctr.log") diff --git a/pkg/logs/launchers/container/tailerfactory/file_test.go b/pkg/logs/launchers/container/tailerfactory/file_test.go index b98f9102c7308..314b804bfe8f0 100644 --- a/pkg/logs/launchers/container/tailerfactory/file_test.go +++ b/pkg/logs/launchers/container/tailerfactory/file_test.go @@ -43,7 +43,7 @@ func fileTestSetup(t *testing.T) { oldPodLogsBasePath, podLogsBasePath = podLogsBasePath, filepath.Join(tmp, "pods") oldDockerLogsBasePathNix, dockerLogsBasePathNix = dockerLogsBasePathNix, filepath.Join(tmp, "docker-nix") oldDockerLogsBasePathWin, dockerLogsBasePathWin = dockerLogsBasePathWin, filepath.Join(tmp, "docker-win") - oldPodmanLogsBasePath, podmanLogsBasePath = podmanLogsBasePath, filepath.Join(tmp, "containers") + oldPodmanLogsBasePath, podmanRootfullLogsBasePath = podmanRootfullLogsBasePath, filepath.Join(tmp, "containers") switch runtime.GOOS { case "windows": @@ -56,7 +56,7 @@ func fileTestSetup(t *testing.T) { podLogsBasePath = oldPodLogsBasePath dockerLogsBasePathNix = oldDockerLogsBasePathNix dockerLogsBasePathWin = oldDockerLogsBasePathWin - podmanLogsBasePath = oldPodmanLogsBasePath + podmanRootfullLogsBasePath = 
oldPodmanLogsBasePath }) } @@ -145,7 +145,7 @@ func TestMakeFileSource_podman_success(t *testing.T) { t.Skip("Skip on Windows due to WSL file path abstraction") } - p := filepath.Join(podmanLogsBasePath, filepath.FromSlash("storage/overlay-containers/abc/userdata/ctr.log")) + p := filepath.Join(podmanRootfullLogsBasePath, filepath.FromSlash("storage/overlay-containers/abc/userdata/ctr.log")) require.NoError(t, os.MkdirAll(filepath.Dir(p), 0o777)) require.NoError(t, os.WriteFile(p, []byte("{}"), 0o666)) @@ -178,6 +178,45 @@ func TestMakeFileSource_podman_success(t *testing.T) { require.Equal(t, source.Config.AutoMultiLineMatchThreshold, 0.321) } +func TestMakeFileSource_podman_with_db_path_success(t *testing.T) { + tmp := t.TempDir() + customPath := filepath.Join(tmp, "/custom/path/containers/storage/db.sql") + mockConfig := configmock.New(t) + mockConfig.SetWithoutSource("logs_config.use_podman_logs", true) + mockConfig.SetWithoutSource("podman_db_path", customPath) + + // On Windows, podman runs within a Linux virtual machine, so the Agent would believe it runs in a Linux environment with all the paths being nix-like. + // The real path on the system is abstracted by the Windows Subsystem for Linux layer, so this unit test is skipped. 
+ // Ref: https://github.com/containers/podman/blob/main/docs/tutorials/podman-for-windows.md + if runtime.GOOS == "windows" { + t.Skip("Skip on Windows due to WSL file path abstraction") + } + + p := filepath.Join(filepath.Join(tmp, "/custom/path/containers"), filepath.FromSlash("storage/overlay-containers/abc/userdata/ctr.log")) + require.NoError(t, os.MkdirAll(filepath.Dir(p), 0o777)) + require.NoError(t, os.WriteFile(p, []byte("{}"), 0o666)) + + tf := &factory{ + pipelineProvider: pipeline.NewMockProvider(), + cop: containersorpods.NewDecidedChooser(containersorpods.LogContainers), + } + source := sources.NewLogSource("test", &config.LogsConfig{ + Type: "podman", + Identifier: "abc", + Source: "src", + Service: "svc", + }) + child, err := tf.makeFileSource(source) + require.NoError(t, err) + require.Equal(t, source.Name, child.Name) + require.Equal(t, "file", child.Config.Type) + require.Equal(t, source.Config.Identifier, child.Config.Identifier) + require.Equal(t, p, child.Config.Path) + require.Equal(t, source.Config.Source, child.Config.Source) + require.Equal(t, source.Config.Service, child.Config.Service) + require.Equal(t, sources.DockerSourceType, child.GetSourceType()) +} + func TestMakeFileSource_docker_no_file(t *testing.T) { fileTestSetup(t) diff --git a/pkg/logs/launchers/container/tailerfactory/socket.go b/pkg/logs/launchers/container/tailerfactory/socket.go index 714be2815d44c..5e5fbdda1e7da 100644 --- a/pkg/logs/launchers/container/tailerfactory/socket.go +++ b/pkg/logs/launchers/container/tailerfactory/socket.go @@ -15,7 +15,7 @@ import ( "fmt" "time" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/launchers/container/tailerfactory/tailers" "github.com/DataDog/datadog-agent/pkg/logs/sources" ) @@ -43,7 +43,7 @@ func (tf *factory) makeSocketTailer(source *sources.LogSource) (Tailer, error) { // available at some point, so 
chances are good that tailing will succeed. pipeline := tf.pipelineProvider.NextPipelineChan() - readTimeout := time.Duration(coreConfig.Datadog().GetInt("logs_config.docker_client_read_timeout")) * time.Second + readTimeout := time.Duration(pkgconfigsetup.Datadog().GetInt("logs_config.docker_client_read_timeout")) * time.Second // apply defaults for source and service directly to the LogSource struct (!!) source.Config.Source, source.Config.Service = tf.defaultSourceAndService(source, tf.cop.Get()) diff --git a/pkg/logs/launchers/container/tailerfactory/usefile.go b/pkg/logs/launchers/container/tailerfactory/usefile.go index 0c2665602bae9..dbecd83b1f292 100644 --- a/pkg/logs/launchers/container/tailerfactory/usefile.go +++ b/pkg/logs/launchers/container/tailerfactory/usefile.go @@ -11,7 +11,7 @@ import ( "context" "fmt" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/containersorpods" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -30,12 +30,12 @@ func (tf *factory) useFile(source *sources.LogSource) bool { switch logWhat { case containersorpods.LogContainers: // docker_container_use_file is a suggestion - if !coreConfig.Datadog().GetBool("logs_config.docker_container_use_file") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.docker_container_use_file") { return false } // docker_container_force_use_file is a requirement - if coreConfig.Datadog().GetBool("logs_config.docker_container_force_use_file") { + if pkgconfigsetup.Datadog().GetBool("logs_config.docker_container_force_use_file") { return true } @@ -51,7 +51,7 @@ func (tf *factory) useFile(source *sources.LogSource) bool { return true case containersorpods.LogPods: - return coreConfig.Datadog().GetBool("logs_config.k8s_container_use_file") + return 
pkgconfigsetup.Datadog().GetBool("logs_config.k8s_container_use_file") default: // if this occurs, then sources have been arriving before the diff --git a/pkg/logs/launchers/file/launcher_test.go b/pkg/logs/launchers/file/launcher_test.go index 96c44e8031568..a0e53f93f7d3b 100644 --- a/pkg/logs/launchers/file/launcher_test.go +++ b/pkg/logs/launchers/file/launcher_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/tagger/taggerimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" auditor "github.com/DataDog/datadog-agent/pkg/logs/auditor/mock" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" "github.com/DataDog/datadog-agent/pkg/logs/launchers" @@ -80,7 +80,7 @@ func (suite *LauncherTestSuite) SetupTest() { suite.s.pipelineProvider = suite.pipelineProvider suite.s.registry = auditor.NewRegistry() suite.s.activeSources = append(suite.s.activeSources, suite.source) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) suite.s.scan() } @@ -244,7 +244,7 @@ func TestLauncherScanStartNewTailer(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Identifier: configID, Path: path}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) defer status.Clear() // create file @@ -454,7 +454,7 @@ func TestLauncherScanWithTooManyFiles(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: path}) 
launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) defer status.Clear() // test at scan @@ -544,7 +544,7 @@ func TestLauncherScanRecentFilesWithRemoval(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } @@ -602,7 +602,7 @@ func TestLauncherScanRecentFilesWithNewFiles(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } @@ -665,7 +665,7 @@ func TestLauncherFileRotation(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } @@ -732,7 +732,7 @@ func TestLauncherFileDetectionSingleScan(t *testing.T) { source := sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: logDirectory}) launcher.activeSources = append(launcher.activeSources, source) status.Clear() - status.InitStatus(pkgConfig.Datadog(), 
util.CreateSources([]*sources.LogSource{source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{source})) return launcher } diff --git a/pkg/logs/launchers/file/provider/file_provider_test.go b/pkg/logs/launchers/file/provider/file_provider_test.go index efa0ad125901d..caa20a4b5f074 100644 --- a/pkg/logs/launchers/file/provider/file_provider_test.go +++ b/pkg/logs/launchers/file/provider/file_provider_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/status" @@ -135,7 +135,7 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsAllFilesFromDirectory() { path := fmt.Sprintf("%s/1/*.log", suite.testDir) fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(3, len(files)) @@ -197,7 +197,7 @@ func (suite *ProviderTestSuite) TestFilesToTailReturnsSpecificFileWithWildcard() path := fmt.Sprintf("%s/1/?.log", suite.testDir) fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(3, len(files)) @@ -237,7 +237,7 @@ func (suite *ProviderTestSuite) TestNumberOfFilesToTailDoesNotExceedLimit() { path := fmt.Sprintf("%s/*/*.log", suite.testDir) 
fileProvider := NewFileProvider(suite.filesLimit, WildcardUseFileName) logSources := suite.newLogSources(path) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(suite.filesLimit, len(files)) suite.Equal([]string{"3 files tailed out of 5 files matching"}, logSources[0].Messages.GetMessages()) @@ -256,7 +256,7 @@ func (suite *ProviderTestSuite) TestAllWildcardPathsAreUpdated() { sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/1/*.log", suite.testDir)}), sources.NewLogSource("", &config.LogsConfig{Type: config.FileType, Path: fmt.Sprintf("%s/2/*.log", suite.testDir)}), } - status.InitStatus(pkgConfig.Datadog(), util.CreateSources(logSources)) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources(logSources)) files := fileProvider.FilesToTail(true, logSources) suite.Equal(2, len(files)) suite.Equal([]string{"2 files tailed out of 3 files matching"}, logSources[0].Messages.GetMessages()) diff --git a/pkg/logs/launchers/integration/launcher.go b/pkg/logs/launchers/integration/launcher.go index 1d8a9b15f2079..b1c2f8a3725de 100644 --- a/pkg/logs/launchers/integration/launcher.go +++ b/pkg/logs/launchers/integration/launcher.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/launchers" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" @@ -43,11 +43,11 @@ type Launcher struct { // NewLauncher creates and returns an integrations launcher, and creates the // path for integrations files to run in func NewLauncher(sources 
*sources.LogSources, integrationsLogsComp integrations.Component) *Launcher { - runPath := filepath.Join(pkgConfig.Datadog().GetString("logs_config.run_path"), "integrations") + runPath := filepath.Join(pkgconfigsetup.Datadog().GetString("logs_config.run_path"), "integrations") err := os.MkdirAll(runPath, 0755) + if err != nil { - ddLog.Warn("Unable to make integrations logs directory: ", err) - return nil + ddLog.Warn("Unable to create integrations logs directory:", err) } return &Launcher{ @@ -76,6 +76,7 @@ func (s *Launcher) run() { for { select { case cfg := <-s.addedConfigs: + sources, err := ad.CreateSources(cfg.Config) if err != nil { ddLog.Warn("Failed to create source ", err) @@ -181,7 +182,7 @@ func (s *Launcher) integrationLogFilePath(id string) string { // ensureFileSize enforces the max file size for files integrations logs // files. Files over the set size will be deleted and remade. func (s *Launcher) ensureFileSize(logFilePath string) error { - maxFileSizeSetting := pkgConfig.Datadog().GetInt64("logs_config.integrations_logs_files_max_size") + maxFileSizeSetting := pkgconfigsetup.Datadog().GetInt64("logs_config.integrations_logs_files_max_size") maxFileSizeBytes := maxFileSizeSetting * 1024 * 1024 fi, err := os.Stat(logFilePath) diff --git a/pkg/logs/launchers/integration/launcher_test.go b/pkg/logs/launchers/integration/launcher_test.go index 1eef9c6eadf3e..c69a6476c0439 100644 --- a/pkg/logs/launchers/integration/launcher_test.go +++ b/pkg/logs/launchers/integration/launcher_test.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" integrationsmock "github.com/DataDog/datadog-agent/comp/logs/integrations/mock" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util" 
"github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" @@ -50,10 +50,10 @@ func (suite *LauncherTestSuite) SetupTest() { // Override `logs_config.run_path` before calling `sources.NewLogSources()` as otherwise // it will try and create `/opt/datadog` directory and fail - pkgConfig.Datadog().SetWithoutSource("logs_config.run_path", suite.testDir) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.run_path", suite.testDir) suite.s = NewLauncher(sources.NewLogSources(), suite.integrationsComp) - status.InitStatus(pkgConfig.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) + status.InitStatus(pkgconfigsetup.Datadog(), util.CreateSources([]*sources.LogSource{suite.source})) suite.s.runPath = suite.testDir } @@ -144,3 +144,31 @@ func (suite *LauncherTestSuite) TestIntegrationLogFilePath() { func TestLauncherTestSuite(t *testing.T) { suite.Run(t, new(LauncherTestSuite)) } + +// TestReadOnlyFileSystem ensures the launcher doesn't panic in a read-only +// file system. There will be errors but it should handle them gracefully. 
+func TestReadOnlyFileSystem(t *testing.T) { + readOnlyDir := filepath.Join(t.TempDir(), "readonly") + err := os.Mkdir(readOnlyDir, 0444) + assert.Nil(t, err, "Unable to make tempdir readonly") + + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.run_path", readOnlyDir) + + integrationsComp := integrationsmock.Mock() + s := NewLauncher(sources.NewLogSources(), integrationsComp) + + // Check the launcher doesn't block on receiving channels + mockConf := &integration.Config{} + mockConf.Provider = "container" + mockConf.LogsConfig = integration.Data(`[{"type": "integration", "source": "foo", "service": "bar"}]`) + id := "123456789" + + s.Start(nil, nil, nil, nil) + integrationsComp.RegisterIntegration(id, *mockConf) + + logSample := "hello world" + integrationsComp.SendLog(logSample, id) + + // send a second log to make sure the launcher isn't blocking + integrationsComp.SendLog(logSample, id) +} diff --git a/pkg/logs/message/go.mod b/pkg/logs/message/go.mod index 9e25811e26e28..2bc00a24443cc 100644 --- a/pkg/logs/message/go.mod +++ b/pkg/logs/message/go.mod @@ -16,6 +16,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/sources => ../sources github.com/DataDog/datadog-agent/pkg/logs/status/utils => ../status/utils @@ -39,28 +40,29 @@ require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/stretchr/testify v1.9.0 ) require ( - 
github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect 
github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/logs/pipeline/go.mod b/pkg/logs/pipeline/go.mod index 4a1f156e04cae..d96d228aa6a18 100644 --- a/pkg/logs/pipeline/go.mod +++ b/pkg/logs/pipeline/go.mod @@ -18,6 +18,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ../auditor github.com/DataDog/datadog-agent/pkg/logs/client => ../client @@ -55,7 +56,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 @@ -65,7 +66,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3 
github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 github.com/hashicorp/go-multierror v1.1.1 github.com/stretchr/testify v1.9.0 @@ -74,30 +75,31 @@ require ( require ( github.com/DataDog/agent-payload/v5 v5.0.106 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect diff --git a/pkg/logs/processor/go.mod b/pkg/logs/processor/go.mod index 24e11c8274253..6c4fc83034e36 100644 --- a/pkg/logs/processor/go.mod +++ b/pkg/logs/processor/go.mod @@ -17,6 +17,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => 
../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/diagnostic => ../diagnostic github.com/DataDog/datadog-agent/pkg/logs/message => ../message @@ -47,37 +48,38 @@ require ( github.com/DataDog/agent-payload/v5 v5.0.106 github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go 
v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.13.5 // indirect diff --git a/pkg/logs/schedulers/ad/scheduler.go b/pkg/logs/schedulers/ad/scheduler.go index 2de2f525df0bd..c9bc320d5b749 100644 --- a/pkg/logs/schedulers/ad/scheduler.go +++ b/pkg/logs/schedulers/ad/scheduler.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" logsConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/util/adlistener" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" "github.com/DataDog/datadog-agent/pkg/logs/service" @@ -153,7 +153,7 @@ func CreateSources(config integration.Config) ([]*sourcesPkg.LogSource, error) { // config attached to a container label or a pod annotation configs, err = logsConfig.ParseJSON(config.LogsConfig) case names.RemoteConfig: - if pkgconfig.Datadog().GetBool("remote_configuration.agent_integrations.allow_log_config_scheduling") { + if pkgconfigsetup.Datadog().GetBool("remote_configuration.agent_integrations.allow_log_config_scheduling") { // config supplied by remote config configs, err = logsConfig.ParseJSON(config.LogsConfig) } else { diff --git a/pkg/logs/schedulers/ad/scheduler_test.go b/pkg/logs/schedulers/ad/scheduler_test.go index 477c29b7af2a6..669a1a8a21d4b 100644 --- a/pkg/logs/schedulers/ad/scheduler_test.go +++ b/pkg/logs/schedulers/ad/scheduler_test.go @@ -9,9 +9,9 @@ import ( "fmt" "testing" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" 
@@ -213,7 +213,7 @@ func TestIgnoreRemoteConfigIfDisabled(t *testing.T) { ClusterCheck: false, } configmock.New(t) - pkgconfig.Datadog().Set("remote_configuration.agent_integrations.allow_log_config_scheduling", rcLogCfgSchedEnabled, model.SourceFile) + pkgconfigsetup.Datadog().Set("remote_configuration.agent_integrations.allow_log_config_scheduling", rcLogCfgSchedEnabled, model.SourceFile) scheduler.Schedule([]integration.Config{configSource}) if rcLogCfgSchedEnabled { require.Equal(t, 1, len(spy.Events)) diff --git a/pkg/logs/schedulers/cca/scheduler.go b/pkg/logs/schedulers/cca/scheduler.go index fe54e99cd11b1..5d7c68f082763 100644 --- a/pkg/logs/schedulers/cca/scheduler.go +++ b/pkg/logs/schedulers/cca/scheduler.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery" logsConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -37,7 +37,7 @@ func New(ac autodiscovery.Component) schedulers.Scheduler { // Start implements schedulers.Scheduler#Start. func (s *Scheduler) Start(sourceMgr schedulers.SourceManager) { - if !coreConfig.Datadog().GetBool("logs_config.container_collect_all") { + if !pkgconfigsetup.Datadog().GetBool("logs_config.container_collect_all") { return } // source to collect all logs from all containers @@ -52,7 +52,7 @@ func (s *Scheduler) Start(sourceMgr schedulers.SourceManager) { // a hack! 
go func() { s.blockUntilAutoConfigRanOnce( - time.Millisecond * time.Duration(coreConfig.Datadog().GetInt("ac_load_timeout"))) + time.Millisecond * time.Duration(pkgconfigsetup.Datadog().GetInt("ac_load_timeout"))) log.Debug("Adding ContainerCollectAll source to the Logs Agent") sourceMgr.AddSource(source) close(s.added) diff --git a/pkg/logs/sds/go.mod b/pkg/logs/sds/go.mod index 03e37c5ec16da..f21ce07bddc92 100644 --- a/pkg/logs/sds/go.mod +++ b/pkg/logs/sds/go.mod @@ -18,6 +18,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/client => ../client github.com/DataDog/datadog-agent/pkg/logs/message => ../message @@ -46,36 +47,37 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 
// indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/logs/sds/rules.go b/pkg/logs/sds/rules.go index a5e32fe903cfa..43a3af4ac4fb5 100644 --- a/pkg/logs/sds/rules.go +++ b/pkg/logs/sds/rules.go @@ -50,7 +50,9 @@ type StandardRulesConfig struct { // StandardRulesDefaults contains consts defaults information for // standard rules. type StandardRulesDefaults struct { - IncludedKeywordsCharCount uint32 `json:"included_keywords_char_count"` + IncludedKeywordsCharCount uint32 `json:"included_keywords_char_count"` + ExcludedKeywordsCharCount uint32 `json:"excluded_keywords_char_count"` + ExcludedKeywords []string `json:"excluded_keywords"` } // RuleConfig of rule as sent by the Remote Configuration. @@ -67,8 +69,9 @@ type RuleConfig struct { // ProximityKeywords definition in RC config. type ProximityKeywords struct { - Keywords []string `json:"keywords"` - CharacterCount uint32 `json:"character_count"` + Keywords []string `json:"keywords"` + CharacterCount uint32 `json:"character_count"` + UseRecommendedKeywords bool `json:"use_recommended_keywords"` } // RuleDefinition definition in RC config. 
diff --git a/pkg/logs/sds/scanner.go b/pkg/logs/sds/scanner.go index c8f726b38a18d..581fe810a7fbb 100644 --- a/pkg/logs/sds/scanner.go +++ b/pkg/logs/sds/scanner.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" "sync" + "time" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -32,6 +33,8 @@ var ( "Count of SDS reconfiguration error.", telemetry.Options{DefaultMetric: true}) tlmSDSReconfigSuccess = telemetry.NewCounterWithOpts("sds", "reconfiguration_success", []string{"pipeline", "type"}, "Count of SDS reconfiguration success.", telemetry.Options{DefaultMetric: true}) + tlmSDSProcessingLatency = telemetry.NewSimpleHistogram("sds", "processing_latency", "Processing latency histogram", + []float64{10, 250, 500, 2000, 5000, 10000}) // unit: us ) // Scanner wraps an SDS Scanner implementation, adds reconfiguration @@ -284,7 +287,6 @@ func interpretRCRule(userRule RuleConfig, standardRule StandardRuleConfig, defau reqCapabilitiesCount := len(stdRuleDef.RequiredCapabilities) if reqCapabilitiesCount > 0 { if reqCapabilitiesCount > 1 { - // TODO(remy): telemetry log.Warnf("Standard rule '%v' with multiple required capabilities: %d. Only the first one will be used", standardRule.Name, reqCapabilitiesCount) } received := stdRuleDef.RequiredCapabilities[0] @@ -307,20 +309,27 @@ func interpretRCRule(userRule RuleConfig, standardRule StandardRuleConfig, defau } if defToUse.Version == -1 { - // TODO(remy): telemetry return nil, fmt.Errorf("unsupported rule with no compatible definition") } - // we use the filled `CharacterCount` value to decide if we want - // to use the user provided configuration for proximity keywords - // or if we have to use the information provided in the std rules instead. 
- if userRule.IncludedKeywords.CharacterCount > 0 { - // proximity keywords configuration provided by the user - extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(userRule.IncludedKeywords.CharacterCount, userRule.IncludedKeywords.Keywords, nil) - } else if len(defToUse.DefaultIncludedKeywords) > 0 && defaults.IncludedKeywordsCharCount > 0 { - // the user has not specified proximity keywords - // use the proximity keywords provided by the standard rule if any + // If the "Use recommended keywords" checkbox has been checked, we use the default + // included keywords available in the rule (curated by Datadog). + // Otherwise: + // If some included keywords have been manually filled by the user, we use them + // Else we start using the default excluded keywords. + if userRule.IncludedKeywords.UseRecommendedKeywords { + // default included keywords extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(defaults.IncludedKeywordsCharCount, defToUse.DefaultIncludedKeywords, nil) + } else { + if len(userRule.IncludedKeywords.Keywords) > 0 && userRule.IncludedKeywords.CharacterCount > 0 { + // user provided included keywords + extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(userRule.IncludedKeywords.CharacterCount, userRule.IncludedKeywords.Keywords, nil) + } else if len(defaults.ExcludedKeywords) > 0 && defaults.ExcludedKeywordsCharCount > 0 { + // default excluded keywords + extraConfig.ProximityKeywords = sds.CreateProximityKeywordsConfig(defaults.ExcludedKeywordsCharCount, nil, defaults.ExcludedKeywords) + } else { + log.Warn("not using the recommended keywords but no keywords available for rule", userRule.Name) + } } // we've compiled all necessary information merging the standard rule and the user config @@ -358,6 +367,7 @@ func interpretRCRule(userRule RuleConfig, standardRule StandardRuleConfig, defau func (s *Scanner) Scan(event []byte, msg *message.Message) (bool, []byte, error) { s.Lock() defer s.Unlock() + start := 
time.Now() if s.Scanner == nil { return false, nil, fmt.Errorf("can't Scan with an unitialized scanner") @@ -378,6 +388,7 @@ func (s *Scanner) Scan(event []byte, msg *message.Message) (bool, []byte, error) // using a tag. msg.ProcessingTags = append(msg.ProcessingTags, ScannedTag) + tlmSDSProcessingLatency.Observe(float64(time.Since(start) / 1000)) return scanResult.Mutated, scanResult.Event, err } diff --git a/pkg/logs/sds/scanner_test.go b/pkg/logs/sds/scanner_test.go index 3ccc85fe7be4b..bf27ea97ae8e0 100644 --- a/pkg/logs/sds/scanner_test.go +++ b/pkg/logs/sds/scanner_test.go @@ -572,6 +572,12 @@ func TestCloseCycleScan(t *testing.T) { func TestInterpretRC(t *testing.T) { require := require.New(t) + defaults := StandardRulesDefaults{ + IncludedKeywordsCharCount: 10, + ExcludedKeywordsCharCount: 10, + ExcludedKeywords: []string{"trace-id"}, + } + stdRc := StandardRuleConfig{ ID: "0", Name: "Zero", @@ -593,9 +599,12 @@ func TestInterpretRC(t *testing.T) { Type: matchActionRCRedact, Placeholder: "[redacted]", }, + IncludedKeywords: ProximityKeywords{ + UseRecommendedKeywords: true, + }, } - rule, err := interpretRCRule(rc, stdRc, StandardRulesDefaults{}) + rule, err := interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok := rule.(sds.RegexRuleConfig) require.True(ok) @@ -611,7 +620,7 @@ func TestInterpretRC(t *testing.T) { RequiredCapabilities: []string{RCSecondaryValidationLuhnChecksum}, }) - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -641,7 +650,7 @@ func TestInterpretRC(t *testing.T) { }, } - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -650,8 +659,11 @@ func TestInterpretRC(t *testing.T) { require.Equal(rxRule.Pattern, "second pattern") 
require.Equal(rxRule.SecondaryValidator, sds.LuhnChecksum) + // included keywords + // ----------------- + // make sure we use the keywords proximity feature if any's configured - // in the std rule definition stdRc.Definitions = []StandardRuleDefinition{ + // in the std rule definition stdRc.Definitions = []StandardRuleDefinition{ { Version: 2, @@ -666,7 +678,7 @@ func TestInterpretRC(t *testing.T) { }, } - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{IncludedKeywordsCharCount: 10}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -681,11 +693,12 @@ func TestInterpretRC(t *testing.T) { // make sure we use the user provided information first // even if there is some in the std rule rc.IncludedKeywords = ProximityKeywords{ - Keywords: []string{"custom"}, - CharacterCount: 42, + Keywords: []string{"custom"}, + CharacterCount: 42, + UseRecommendedKeywords: false, } - rule, err = interpretRCRule(rc, stdRc, StandardRulesDefaults{IncludedKeywordsCharCount: 10}) + rule, err = interpretRCRule(rc, stdRc, defaults) require.NoError(err) rxRule, ok = rule.(sds.RegexRuleConfig) require.True(ok) @@ -696,4 +709,42 @@ func TestInterpretRC(t *testing.T) { require.NotNil(rxRule.ProximityKeywords) require.Equal(rxRule.ProximityKeywords.LookAheadCharacterCount, uint32(42)) require.Equal(rxRule.ProximityKeywords.IncludedKeywords, []string{"custom"}) + + // excluded keywords + // ----------------- + + // make sure we use the user provided information first + // even if there is some in the std rule + rc.IncludedKeywords = ProximityKeywords{ + Keywords: nil, + CharacterCount: 0, + UseRecommendedKeywords: false, + } + + // make sure we use the keywords proximity feature if any's configured + // in the std rule definition, here the excluded keywords one + stdRc.Definitions = []StandardRuleDefinition{ + { + Version: 2, + Pattern: "second pattern", + RequiredCapabilities: 
[]string{RCSecondaryValidationLuhnChecksum}, + }, + { + Version: 1, + Pattern: "first pattern", + RequiredCapabilities: nil, + }, + } + + rule, err = interpretRCRule(rc, stdRc, defaults) + require.NoError(err) + rxRule, ok = rule.(sds.RegexRuleConfig) + require.True(ok) + + require.Equal(rxRule.Id, "Zero") + require.Equal(rxRule.Pattern, "second pattern") + require.Equal(rxRule.SecondaryValidator, sds.LuhnChecksum) + require.NotNil(rxRule.ProximityKeywords) + require.Equal(rxRule.ProximityKeywords.LookAheadCharacterCount, uint32(10)) + require.Equal(rxRule.ProximityKeywords.ExcludedKeywords, []string{"trace-id"}) } diff --git a/pkg/logs/sender/go.mod b/pkg/logs/sender/go.mod index a53ad5a116d7d..e8826dfefa489 100644 --- a/pkg/logs/sender/go.mod +++ b/pkg/logs/sender/go.mod @@ -18,6 +18,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/client => ../client github.com/DataDog/datadog-agent/pkg/logs/message => ../message @@ -47,40 +48,41 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 
github.com/benbjohnson/clock v1.3.5 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/logs/sources/go.mod b/pkg/logs/sources/go.mod index aca5605c063da..001bee5ca3f81 100644 --- a/pkg/logs/sources/go.mod +++ b/pkg/logs/sources/go.mod @@ -16,6 +16,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../config/utils github.com/DataDog/datadog-agent/pkg/logs/status/utils => ../status/utils github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry @@ -38,27 +39,28 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 - 
github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 github.com/stretchr/testify v1.9.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/logs/status/status_test.go b/pkg/logs/status/status_test.go index a8465583df008..d1b3904aa5ffd 100644 --- a/pkg/logs/status/status_test.go +++ b/pkg/logs/status/status_test.go @@ -14,14 +14,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/metrics" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/util/testutils" ) func initStatus() { - InitStatus(pkgConfig.Datadog(), testutils.CreateSources([]*sources.LogSource{ + InitStatus(pkgconfigsetup.Datadog(), testutils.CreateSources([]*sources.LogSource{ sources.NewLogSource("foo", &config.LogsConfig{Type: "foo"}), sources.NewLogSource("bar", &config.LogsConfig{Type: "foo"}), sources.NewLogSource("foo", &config.LogsConfig{Type: "foo"}), diff --git a/pkg/logs/status/test_utils.go b/pkg/logs/status/test_utils.go index 0c3d6ee515605..78c248850f891 100644 --- a/pkg/logs/status/test_utils.go +++ b/pkg/logs/status/test_utils.go @@ -11,14 +11,14 @@ import ( "go.uber.org/atomic" 
"github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgConfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/metrics" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/tailers" ) // InitStatus initialize a status builder -func InitStatus(coreConfig pkgConfig.Reader, sources *sources.LogSources) { +func InitStatus(coreConfig model.Reader, sources *sources.LogSources) { var isRunning = atomic.NewUint32(StatusRunning) tracker := tailers.NewTailerTracker() endpoints, _ := config.BuildEndpoints(coreConfig, config.HTTPConnectivityFailure, "test-track", "test-proto", "test-source") diff --git a/pkg/logs/tailers/file/tailer.go b/pkg/logs/tailers/file/tailer.go index 9f76f70df132e..d0637eaf05e11 100644 --- a/pkg/logs/tailers/file/tailer.go +++ b/pkg/logs/tailers/file/tailer.go @@ -20,7 +20,7 @@ import ( "github.com/benbjohnson/clock" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" @@ -149,8 +149,8 @@ func NewTailer(opts *TailerOptions) *Tailer { } forwardContext, stopForward := context.WithCancel(context.Background()) - closeTimeout := coreConfig.Datadog().GetDuration("logs_config.close_timeout") * time.Second - windowsOpenFileTimeout := coreConfig.Datadog().GetDuration("logs_config.windows_open_file_timeout") * time.Second + closeTimeout := pkgconfigsetup.Datadog().GetDuration("logs_config.close_timeout") * time.Second + windowsOpenFileTimeout := pkgconfigsetup.Datadog().GetDuration("logs_config.windows_open_file_timeout") * time.Second bytesRead := status.NewCountInfo("Bytes Read") fileRotated := opts.Rotated diff --git a/pkg/logs/tailers/file/tailer_test.go 
b/pkg/logs/tailers/file/tailer_test.go index affdeb992ab65..ba4f5051d6a71 100644 --- a/pkg/logs/tailers/file/tailer_test.go +++ b/pkg/logs/tailers/file/tailer_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/sources" @@ -105,7 +105,7 @@ func (suite *TailerTestSuite) TestTialerTimeDurationConfig() { // To satisfy the suite level tailer suite.tailer.StartFromBeginning() - coreConfig.Datadog().SetWithoutSource("logs_config.close_timeout", 42) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.close_timeout", 42) sleepDuration := 10 * time.Millisecond info := status.NewInfoRegistry() @@ -352,10 +352,10 @@ func (suite *TailerTestSuite) TestBuildTagsFileDir() { } func (suite *TailerTestSuite) TestTruncatedTag() { - coreConfig.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", 3) - coreConfig.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", true) - defer coreConfig.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", coreConfig.DefaultMaxMessageSizeBytes) - defer coreConfig.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", false) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", 3) + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", true) + defer pkgconfigsetup.Datadog().SetWithoutSource("logs_config.max_message_size_bytes", pkgconfigsetup.DefaultMaxMessageSizeBytes) + defer pkgconfigsetup.Datadog().SetWithoutSource("logs_config.tag_truncated_logs", false) source := sources.NewLogSource("", &config.LogsConfig{ Type: config.FileType, diff --git a/pkg/logs/tailers/socket/tailer.go 
b/pkg/logs/tailers/socket/tailer.go index a2c791997f2df..ad28d251058d1 100644 --- a/pkg/logs/tailers/socket/tailer.go +++ b/pkg/logs/tailers/socket/tailer.go @@ -12,7 +12,7 @@ import ( "net" "strings" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/internal/decoder" "github.com/DataDog/datadog-agent/pkg/logs/internal/parsers/noop" "github.com/DataDog/datadog-agent/pkg/logs/message" @@ -100,7 +100,7 @@ func (t *Tailer) readForever() { } copiedTags := make([]string, len(t.source.Config.Tags)) copy(copiedTags, t.source.Config.Tags) - if ipAddress != "" && coreConfig.Datadog().GetBool("logs_config.use_sourcehost_tag") { + if ipAddress != "" && pkgconfigsetup.Datadog().GetBool("logs_config.use_sourcehost_tag") { lastColonIndex := strings.LastIndex(ipAddress, ":") var ipAddressWithoutPort string if lastColonIndex != -1 { diff --git a/pkg/logs/util/testutils/go.mod b/pkg/logs/util/testutils/go.mod index 2e1a8cc78da2c..2d3199639da6c 100644 --- a/pkg/logs/util/testutils/go.mod +++ b/pkg/logs/util/testutils/go.mod @@ -17,6 +17,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ../../../config/mock github.com/DataDog/datadog-agent/pkg/config/model => ../../../config/model github.com/DataDog/datadog-agent/pkg/config/setup => ../../../config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ../../../config/utils github.com/DataDog/datadog-agent/pkg/logs/sources => ../../sources github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface => ../../status/statusinterface @@ -41,25 +42,26 @@ replace ( require github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 require ( - github.com/DataDog/datadog-agent/comp/core/secrets v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect 
github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect diff --git a/pkg/metrics/go.mod b/pkg/metrics/go.mod index 9ddbec9712b46..c9baa39e3633a 100644 --- a/pkg/metrics/go.mod +++ b/pkg/metrics/go.mod @@ -27,7 +27,7 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/stretchr/testify v1.9.0 go.uber.org/atomic v1.11.0 ) diff --git a/pkg/metrics/go.sum b/pkg/metrics/go.sum index a8bc0739ad6c7..8ba22b6a12d97 100644 --- a/pkg/metrics/go.sum +++ b/pkg/metrics/go.sum @@ -39,10 +39,10 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0 h1:J0IEqkrB8BjtuDHofR8Q3J+Z8829Ja1Mlix9cyG8wJI= 
-github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 h1:QHx6B/VUx3rZQqrQNZI5BfypbhhGSRzCz05viyJEQmM= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0/go.mod h1:q4c7zbmdnIdSJNZuBsveTk5ZeRkSkS2g6b8zzFF1mE4= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/pkg/metrics/metricsource.go b/pkg/metrics/metricsource.go index f5cecdcd591f9..9868f5925d086 100644 --- a/pkg/metrics/metricsource.go +++ b/pkg/metrics/metricsource.go @@ -215,7 +215,6 @@ const ( MetricSourceKubeAPIserverMetrics MetricSourceKubeControllerManager MetricSourceKubeDNS - MetricSourceKubeflow MetricSourceKubeMetricsServer MetricSourceKubeProxy MetricSourceKubeScheduler @@ -292,6 +291,12 @@ const ( MetricSourceYarn MetricSourceZk MetricSourceAwsNeuron + MetricSourceTibcoEMS + MetricSourceSlurm + MetricSourceKyverno + MetricSourceKubeflow + MetricSourceAppgateSDP + MetricSourceAnyscale ) // String returns a string representation of MetricSource @@ -1051,8 +1056,6 @@ func CheckNameToMetricSource(name string) MetricSource { return MetricSourceKubeProxy case "kube_scheduler": return MetricSourceKubeScheduler - case "kubeflow": - return 
MetricSourceKubeflow case "kubelet": return MetricSourceKubelet case "kubernetes_state": @@ -1371,6 +1374,16 @@ func CheckNameToMetricSource(name string) MetricSource { return MetricSourceZenohRouter case "aws_neuron": return MetricSourceAwsNeuron + case "kyverno": + return MetricSourceKyverno + case "anyscale": + return MetricSourceAnyscale + case "appgate_sdp": + return MetricSourceAppgateSDP + case "slurm": + return MetricSourceSlurm + case "tibco_ems": + return MetricSourceTibcoEMS default: return MetricSourceUnknown } diff --git a/pkg/network/config/config.go b/pkg/network/config/config.go index 06dc994e0da48..1b8ef55fb4df8 100644 --- a/pkg/network/config/config.go +++ b/pkg/network/config/config.go @@ -14,7 +14,7 @@ import ( "github.com/cilium/ebpf/features" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -310,10 +310,6 @@ type Config struct { // EnableUSMEventStream enables USM to use the event stream instead // of netlink for receiving process events. EnableUSMEventStream bool - - // BypassEnabled is used in tests only. - // It enables a ebpf-manager feature to bypass programs on-demand for controlled visibility. 
- BypassEnabled bool } func join(pieces ...string) string { @@ -322,7 +318,7 @@ func join(pieces ...string) string { // New creates a config for the network tracer func New() *Config { - cfg := ddconfig.SystemProbe() + cfg := pkgconfigsetup.SystemProbe() sysconfig.Adjust(cfg) c := &Config{ diff --git a/pkg/network/config/config_bpf_linux_test.go b/pkg/network/config/config_bpf_linux_test.go index e37e18136da70..a3b380c89fb2a 100644 --- a/pkg/network/config/config_bpf_linux_test.go +++ b/pkg/network/config/config_bpf_linux_test.go @@ -14,11 +14,8 @@ import ( "github.com/stretchr/testify/require" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/eventmonitor" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" @@ -39,11 +36,7 @@ func TestEventStreamEnabledForSupportedKernelsLinux(t *testing.T) { opts := eventmonitor.Opts{} telemetry := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) - wmeta := fxutil.Test[workloadmeta.Component](t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, wmeta, telemetry) + evm, err := eventmonitor.NewEventMonitor(emconfig, secconfig, opts, telemetry) require.NoError(t, err) require.NoError(t, evm.Init()) } else { diff --git a/pkg/network/config/replace_rules.go b/pkg/network/config/replace_rules.go index 093ba772a0f35..ce8e1654766f0 100644 --- a/pkg/network/config/replace_rules.go +++ b/pkg/network/config/replace_rules.go @@ -10,7 +10,8 @@ import ( "fmt" "regexp" - ddconfig 
"github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // ReplaceRule specifies a replace rule. @@ -25,8 +26,8 @@ type ReplaceRule struct { Repl string `mapstructure:"repl"` } -func parseReplaceRules(cfg ddconfig.Config, key string) ([]*ReplaceRule, error) { - if !ddconfig.SystemProbe().IsSet(key) { +func parseReplaceRules(cfg model.Config, key string) ([]*ReplaceRule, error) { + if !pkgconfigsetup.SystemProbe().IsSet(key) { return nil, nil } diff --git a/pkg/network/ebpf/c/co-re/tracer-fentry.c b/pkg/network/ebpf/c/co-re/tracer-fentry.c index a5242b70662f9..09259e948047d 100644 --- a/pkg/network/ebpf/c/co-re/tracer-fentry.c +++ b/pkg/network/ebpf/c/co-re/tracer-fentry.c @@ -232,9 +232,6 @@ int BPF_PROG(tcp_close, struct sock *sk, long timeout) { conn_tuple_t t = {}; u64 pid_tgid = bpf_get_current_pid_tgid(); - // Should actually delete something only if the connection never got established - bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk); - // Get network namespace id log_debug("fentry/tcp_close: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -242,6 +239,11 @@ int BPF_PROG(tcp_close, struct sock *sk, long timeout) { } log_debug("fentry/tcp_close: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + skp_conn.tup.pid = 0; + + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); + cleanup_conn(ctx, &t, sk); return 0; } @@ -450,7 +452,15 @@ int BPF_PROG(tcp_connect, struct sock *sk) { u64 pid_tgid = bpf_get_current_pid_tgid(); log_debug("fentry/tcp_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &sk, &pid_tgid, BPF_ANY); + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, sk, 0, CONN_TYPE_TCP)) { + 
increment_telemetry_count(tcp_connect_failed_tuple); + return 0; + } + + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + pid_ts_t pid_ts = {.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); return 0; } @@ -458,19 +468,19 @@ int BPF_PROG(tcp_connect, struct sock *sk) { SEC("fentry/tcp_finish_connect") int BPF_PROG(tcp_finish_connect, struct sock *sk, struct sk_buff *skb, int rc) { RETURN_IF_NOT_IN_SYSPROBE_TASK("fentry/tcp_finish_connect"); - u64 *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &sk); - if (!pid_tgid_p) { + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, sk, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_finish_connect_failed_tuple); return 0; } - - u64 pid_tgid = *pid_tgid_p; - bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk); - log_debug("fentry/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - - conn_tuple_t t = {}; - if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + pid_ts_t *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp_conn); + if (!pid_tgid_p) { return 0; } + u64 pid_tgid = pid_tgid_p->pid_tgid; + t.pid = pid_tgid >> 32; + log_debug("fentry/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); handle_tcp_stats(&t, sk, TCP_ESTABLISHED); handle_message(&t, 0, 0, CONN_DIRECTION_OUTGOING, 0, 0, PACKET_COUNT_NONE, sk); @@ -501,6 +511,10 @@ int BPF_PROG(inet_csk_accept_exit, struct sock *_sk, int flags, int *err, bool k pb.netns = t.netns; pb.port = t.sport; add_port_bind(&pb, port_bindings); + + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + pid_ts_t pid_ts = {.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); log_debug("fexit/inet_csk_accept: netns: %u, sport: %u, dport: %u", t.netns, t.sport, 
t.dport); return 0; } diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c index ad7f5fc0ad048..7555758f40524 100644 --- a/pkg/network/ebpf/c/tracer.c +++ b/pkg/network/ebpf/c/tracer.c @@ -193,42 +193,42 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__udp_sendpage, int sent) { SEC("kprobe/tcp_done") int BPF_BYPASSABLE_KPROBE(kprobe__tcp_done, struct sock *sk) { conn_tuple_t t = {}; - u64 pid_tgid = bpf_get_current_pid_tgid(); - __u64 *failed_conn_pid = NULL; + + if (!read_conn_tuple(&t, sk, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_done_failed_tuple); + return 0; + } + log_debug("kprobe/tcp_done: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; if (!tcp_failed_connections_enabled()) { + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); return 0; } int err = 0; bpf_probe_read_kernel_with_telemetry(&err, sizeof(err), (&sk->sk_err)); if (err == 0) { + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); return 0; // no failure } if (err != TCP_CONN_FAILED_RESET && err != TCP_CONN_FAILED_TIMEOUT && err != TCP_CONN_FAILED_REFUSED) { log_debug("kprobe/tcp_done: unsupported error code: %d", err); increment_telemetry_count(unsupported_tcp_failures); + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); return 0; } - log_debug("kprobe/tcp_done: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { - return 0; - } - log_debug("kprobe/tcp_done: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); - - // connection timeouts will have 0 pids as they are cleaned up by an idle process. - // get the pid from the ongoing failure map in this case, as it should have been set in connect(). - failed_conn_pid = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &sk); + // connection timeouts will have 0 pids as they are cleaned up by an idle process. 
+ // resets can also have kernel pids are they are triggered by receiving an RST packet from the server + // get the pid from the ongoing failure map in this case, as it should have been set in connect(). else bail + pid_ts_t *failed_conn_pid = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp_conn); if (failed_conn_pid) { - if (*failed_conn_pid != pid_tgid) { - increment_telemetry_count(tcp_done_pid_mismatch); - } - bpf_probe_read_kernel_with_telemetry(&pid_tgid, sizeof(pid_tgid), failed_conn_pid); - t.pid = pid_tgid >> 32; - } - if (t.pid == 0) { + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); + t.pid = failed_conn_pid->pid_tgid >> 32; + } else { + increment_telemetry_count(tcp_done_missing_pid); return 0; } @@ -238,13 +238,12 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_done, struct sock *sk) { __u64 timestamp = bpf_ktime_get_ns(); if (bpf_map_update_with_telemetry(conn_close_flushed, &t, ×tamp, BPF_NOEXIST, -EEXIST) == 0) { cleanup_conn(ctx, &t, sk); + flush_tcp_failure(ctx, &t, err); } else { bpf_map_delete_elem(&conn_close_flushed, &t); increment_telemetry_count(double_flush_attempts_done); } - flush_tcp_failure(ctx, &t, err); - return 0; } @@ -259,11 +258,6 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_close, struct sock *sk) { conn_tuple_t t = {}; u64 pid_tgid = bpf_get_current_pid_tgid(); - // increment telemetry for connections that were never established - if (bpf_map_delete_elem(&tcp_ongoing_connect_pid, &sk) == 0) { - increment_telemetry_count(tcp_failed_connect); - } - // Get network namespace id log_debug("kprobe/tcp_close: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { @@ -277,12 +271,28 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_close, struct sock *sk) { bpf_map_update_with_telemetry(tcp_close_args, &pid_tgid, &t, BPF_ANY); } + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + skp_conn.tup.pid = 0; + + bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp_conn); + + if 
(!tcp_failed_connections_enabled()) { + cleanup_conn(ctx, &t, sk); + return 0; + } + // check if this connection was already flushed and ensure we don't flush again // upsert the timestamp to the map and delete if it already exists, flush connection otherwise // skip EEXIST errors for telemetry since it is an expected error __u64 timestamp = bpf_ktime_get_ns(); - if (!tcp_failed_connections_enabled() || (bpf_map_update_with_telemetry(conn_close_flushed, &t, ×tamp, BPF_NOEXIST, -EEXIST) == 0)) { + if (bpf_map_update_with_telemetry(conn_close_flushed, &t, ×tamp, BPF_NOEXIST, -EEXIST) == 0) { cleanup_conn(ctx, &t, sk); + int err = 0; + bpf_probe_read_kernel_with_telemetry(&err, sizeof(err), (&sk->sk_err)); + if (err == TCP_CONN_FAILED_RESET || err == TCP_CONN_FAILED_TIMEOUT || err == TCP_CONN_FAILED_REFUSED) { + increment_telemetry_count(tcp_close_target_failures); + flush_tcp_failure(ctx, &t, err); + } } else { bpf_map_delete_elem(&conn_close_flushed, &t); increment_telemetry_count(double_flush_attempts_close); @@ -942,27 +952,36 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_connect, struct sock *skp) { u64 pid_tgid = bpf_get_current_pid_tgid(); log_debug("kprobe/tcp_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp, &pid_tgid, BPF_ANY); + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, skp, 0, CONN_TYPE_TCP)) { + increment_telemetry_count(tcp_connect_failed_tuple); + return 0; + } + + skp_conn_tuple_t skp_conn = {.sk = skp, .tup = t}; + pid_ts_t pid_ts = {.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); return 0; } SEC("kprobe/tcp_finish_connect") int BPF_BYPASSABLE_KPROBE(kprobe__tcp_finish_connect, struct sock *skp) { - u64 *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp); + conn_tuple_t t = {}; + if (!read_conn_tuple(&t, skp, 0, CONN_TYPE_TCP)) { + 
increment_telemetry_count(tcp_finish_connect_failed_tuple); + return 0; + } + skp_conn_tuple_t skp_conn = {.sk = skp, .tup = t}; + pid_ts_t *pid_tgid_p = bpf_map_lookup_elem(&tcp_ongoing_connect_pid, &skp_conn); if (!pid_tgid_p) { return 0; } - u64 pid_tgid = *pid_tgid_p; - bpf_map_delete_elem(&tcp_ongoing_connect_pid, &skp); + u64 pid_tgid = pid_tgid_p->pid_tgid; + t.pid = pid_tgid >> 32; log_debug("kprobe/tcp_finish_connect: tgid: %llu, pid: %llu", pid_tgid >> 32, pid_tgid & 0xFFFFFFFF); - conn_tuple_t t = {}; - if (!read_conn_tuple(&t, skp, pid_tgid, CONN_TYPE_TCP)) { - return 0; - } - handle_tcp_stats(&t, skp, TCP_ESTABLISHED); handle_message(&t, 0, 0, CONN_DIRECTION_OUTGOING, 0, 0, PACKET_COUNT_NONE, skp); @@ -984,6 +1003,8 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__inet_csk_accept, struct sock *sk) { if (!read_conn_tuple(&t, sk, pid_tgid, CONN_TYPE_TCP)) { return 0; } + log_debug("kretprobe/inet_csk_accept: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + handle_tcp_stats(&t, sk, TCP_ESTABLISHED); handle_message(&t, 0, 0, CONN_DIRECTION_INCOMING, 0, 0, PACKET_COUNT_NONE, sk); @@ -992,7 +1013,11 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__inet_csk_accept, struct sock *sk) { pb.port = t.sport; add_port_bind(&pb, port_bindings); - log_debug("kretprobe/inet_csk_accept: netns: %u, sport: %u, dport: %u", t.netns, t.sport, t.dport); + skp_conn_tuple_t skp_conn = {.sk = sk, .tup = t}; + skp_conn.tup.pid = 0; + pid_ts_t pid_ts = {.pid_tgid = pid_tgid, .timestamp = bpf_ktime_get_ns()}; + bpf_map_update_with_telemetry(tcp_ongoing_connect_pid, &skp_conn, &pid_ts, BPF_ANY); + return 0; } diff --git a/pkg/network/ebpf/c/tracer/events.h b/pkg/network/ebpf/c/tracer/events.h index 051e5821c3c00..5e0650faa862f 100644 --- a/pkg/network/ebpf/c/tracer/events.h +++ b/pkg/network/ebpf/c/tracer/events.h @@ -151,6 +151,7 @@ static __always_inline void flush_tcp_failure(void *ctx, conn_tuple_t *tup, int u32 cpu = bpf_get_smp_processor_id(); bpf_perf_event_output(ctx, 
&conn_fail_event, cpu, &failure, sizeof(conn_failed_t)); } + increment_telemetry_count(tcp_failed_connect); } static __always_inline void flush_conn_close_if_full(void *ctx) { diff --git a/pkg/network/ebpf/c/tracer/maps.h b/pkg/network/ebpf/c/tracer/maps.h index bf00f93b9e7a3..f02066fe4b4dc 100644 --- a/pkg/network/ebpf/c/tracer/maps.h +++ b/pkg/network/ebpf/c/tracer/maps.h @@ -23,8 +23,8 @@ BPF_HASH_MAP(tcp_stats, conn_tuple_t, tcp_stats_t, 0) */ BPF_HASH_MAP(tcp_retransmits, conn_tuple_t, __u32, 0) -/* Will hold the PIDs initiating TCP connections */ -BPF_HASH_MAP(tcp_ongoing_connect_pid, struct sock *, __u64, 1024) +/* Will hold the PIDs initiating TCP connections keyed by socket + tuple. PIDs have a timestamp attached so they can age out */ +BPF_HASH_MAP(tcp_ongoing_connect_pid, skp_conn_tuple_t, pid_ts_t, 0) /* Will hold a flag to indicate that closed connections have already been flushed */ BPF_HASH_MAP(conn_close_flushed, conn_tuple_t, __u64, 8192) diff --git a/pkg/network/ebpf/c/tracer/telemetry.h b/pkg/network/ebpf/c/tracer/telemetry.h index 761088be75788..a6067f1fae501 100644 --- a/pkg/network/ebpf/c/tracer/telemetry.h +++ b/pkg/network/ebpf/c/tracer/telemetry.h @@ -24,7 +24,11 @@ enum telemetry_counter { double_flush_attempts_close, double_flush_attempts_done, unsupported_tcp_failures, - tcp_done_pid_mismatch, + tcp_done_missing_pid, + tcp_connect_failed_tuple, + tcp_done_failed_tuple, + tcp_finish_connect_failed_tuple, + tcp_close_target_failures, }; static __always_inline void increment_telemetry_count(enum telemetry_counter counter_name) { @@ -63,8 +67,20 @@ static __always_inline void increment_telemetry_count(enum telemetry_counter cou case unsupported_tcp_failures: __sync_fetch_and_add(&val->unsupported_tcp_failures, 1); break; - case tcp_done_pid_mismatch: - __sync_fetch_and_add(&val->tcp_done_pid_mismatch, 1); + case tcp_done_missing_pid: + __sync_fetch_and_add(&val->tcp_done_missing_pid, 1); + break; + case tcp_connect_failed_tuple: + 
__sync_fetch_and_add(&val->tcp_connect_failed_tuple, 1); + break; + case tcp_done_failed_tuple: + __sync_fetch_and_add(&val->tcp_done_failed_tuple, 1); + break; + case tcp_finish_connect_failed_tuple: + __sync_fetch_and_add(&val->tcp_finish_connect_failed_tuple, 1); + break; + case tcp_close_target_failures: + __sync_fetch_and_add(&val->tcp_close_target_failures, 1); break; } } diff --git a/pkg/network/ebpf/c/tracer/tracer.h b/pkg/network/ebpf/c/tracer/tracer.h index 018446b375397..cb2a96ff7a66c 100644 --- a/pkg/network/ebpf/c/tracer/tracer.h +++ b/pkg/network/ebpf/c/tracer/tracer.h @@ -114,7 +114,11 @@ typedef struct { __u64 double_flush_attempts_close; __u64 double_flush_attempts_done; __u64 unsupported_tcp_failures; - __u64 tcp_done_pid_mismatch; + __u64 tcp_done_missing_pid; + __u64 tcp_connect_failed_tuple; + __u64 tcp_done_failed_tuple; + __u64 tcp_finish_connect_failed_tuple; + __u64 tcp_close_target_failures; } telemetry_t; typedef struct { @@ -147,4 +151,14 @@ typedef struct { }; } ip_make_skb_args_t; +typedef struct { + struct sock *sk; + conn_tuple_t tup; +} skp_conn_tuple_t; + +typedef struct { + __u64 pid_tgid; + __u64 timestamp; +} pid_ts_t; + #endif diff --git a/pkg/network/ebpf/kprobe_types.go b/pkg/network/ebpf/kprobe_types.go index 4bc58cdfb864f..6745cdc0b7fee 100644 --- a/pkg/network/ebpf/kprobe_types.go +++ b/pkg/network/ebpf/kprobe_types.go @@ -21,6 +21,8 @@ type TCPStats C.tcp_stats_t type ConnStats C.conn_stats_ts_t type Conn C.conn_t type FailedConn C.conn_failed_t +type SkpConn C.skp_conn_tuple_t +type PidTs C.pid_ts_t type Batch C.batch_t type Telemetry C.telemetry_t type PortBinding C.port_binding_t diff --git a/pkg/network/ebpf/kprobe_types_linux.go b/pkg/network/ebpf/kprobe_types_linux.go index 7916d343dcd10..58cee5d1115aa 100644 --- a/pkg/network/ebpf/kprobe_types_linux.go +++ b/pkg/network/ebpf/kprobe_types_linux.go @@ -44,6 +44,14 @@ type FailedConn struct { Reason uint32 Pad_cgo_0 [4]byte } +type SkpConn struct { + Sk uint64 + Tup 
ConnTuple +} +type PidTs struct { + Tgid uint64 + Timestamp uint64 +} type Batch struct { C0 Conn C1 Conn @@ -55,17 +63,21 @@ type Batch struct { Pad_cgo_0 [2]byte } type Telemetry struct { - Tcp_failed_connect uint64 - Tcp_sent_miscounts uint64 - Unbatched_tcp_close uint64 - Unbatched_udp_close uint64 - Udp_sends_processed uint64 - Udp_sends_missed uint64 - Udp_dropped_conns uint64 - Double_flush_attempts_close uint64 - Double_flush_attempts_done uint64 - Unsupported_tcp_failures uint64 - Tcp_done_pid_mismatch uint64 + Tcp_failed_connect uint64 + Tcp_sent_miscounts uint64 + Unbatched_tcp_close uint64 + Unbatched_udp_close uint64 + Udp_sends_processed uint64 + Udp_sends_missed uint64 + Udp_dropped_conns uint64 + Double_flush_attempts_close uint64 + Double_flush_attempts_done uint64 + Unsupported_tcp_failures uint64 + Tcp_done_missing_pid uint64 + Tcp_connect_failed_tuple uint64 + Tcp_done_failed_tuple uint64 + Tcp_finish_connect_failed_tuple uint64 + Tcp_close_target_failures uint64 } type PortBinding struct { Netns uint32 @@ -77,12 +89,12 @@ type PIDFD struct { Fd uint32 } type UDPRecvSock struct { - Sk uintptr - Msg uintptr + Sk uint64 + Msg uint64 } type BindSyscallArgs struct { - Addr uintptr - Sk uintptr + Addr uint64 + Sk uint64 } type ProtocolStack struct { Api uint8 diff --git a/pkg/network/ebpf/probes/probes.go b/pkg/network/ebpf/probes/probes.go index 1d455d9c6e364..8d8cfa5247542 100644 --- a/pkg/network/ebpf/probes/probes.go +++ b/pkg/network/ebpf/probes/probes.go @@ -192,8 +192,8 @@ const ( TCPStatsMap BPFMapName = "tcp_stats" // TCPRetransmitsMap is the map storing TCP retransmits TCPRetransmitsMap BPFMapName = "tcp_retransmits" - // TCPConnectSockPidMap is the map storing the PIDs of ongoing TCP connections - TCPConnectSockPidMap BPFMapName = "tcp_ongoing_connect_pid" + // TCPOngoingConnectPid is the map storing ongoing TCP connection PIDs by (socket + tuple) + TCPOngoingConnectPid BPFMapName = "tcp_ongoing_connect_pid" // ConnCloseFlushed is the map 
storing closed connections that were already flushed ConnCloseFlushed BPFMapName = "conn_close_flushed" // ConnCloseEventMap is the map storing connection close events diff --git a/pkg/network/encoding/encoding_test.go b/pkg/network/encoding/encoding_test.go index c23a02a8d3cc2..2e9806ea59297 100644 --- a/pkg/network/encoding/encoding_test.go +++ b/pkg/network/encoding/encoding_test.go @@ -19,8 +19,8 @@ import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/network/encoding/marshal" @@ -324,7 +324,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting application/json serialization (no query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) blobWriter := getBlobWriter(t, assert, in, "application/json") @@ -346,8 +346,8 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting application/json serialization (with query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) out := getExpectedConnections(true, httpOutBlob) assert := assert.New(t) @@ 
-370,7 +370,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting empty serialization", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) @@ -401,7 +401,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting unsupported serialization format", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) @@ -457,7 +457,7 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { t.Run("requesting application/protobuf serialization (no query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) out := getExpectedConnections(false, httpOutBlob) assert := assert.New(t) @@ -473,8 +473,8 @@ func testSerialization(t *testing.T, aggregateByStatusCode bool) { }) t.Run("requesting application/protobuf serialization (with query types)", func(t *testing.T) { configmock.NewSystemProbe(t) - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) out := getExpectedConnections(true, httpOutBlob) 
assert := assert.New(t) diff --git a/pkg/network/encoding/marshal/dns.go b/pkg/network/encoding/marshal/dns.go index 0cab4bae202fd..ef945306a656b 100644 --- a/pkg/network/encoding/marshal/dns.go +++ b/pkg/network/encoding/marshal/dns.go @@ -7,7 +7,8 @@ package marshal import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/config" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/dns" ) @@ -27,8 +28,8 @@ func newDNSFormatter(conns *network.Connections, ipc ipCache) *dnsFormatter { conns: conns, ipc: ipc, domainSet: make(map[string]int), - queryTypeEnabled: config.SystemProbe().GetBool("network_config.enable_dns_by_querytype"), - dnsDomainsEnabled: config.SystemProbe().GetBool("system_probe_config.collect_dns_domains"), + queryTypeEnabled: pkgconfigsetup.SystemProbe().GetBool("network_config.enable_dns_by_querytype"), + dnsDomainsEnabled: pkgconfigsetup.SystemProbe().GetBool("system_probe_config.collect_dns_domains"), } } diff --git a/pkg/network/encoding/marshal/dns_test.go b/pkg/network/encoding/marshal/dns_test.go index c4cc2ee5b414d..18ee7b250947a 100644 --- a/pkg/network/encoding/marshal/dns_test.go +++ b/pkg/network/encoding/marshal/dns_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -50,8 +50,8 @@ func TestFormatConnectionDNS(t *testing.T) { } t.Run("DNS with collect_domains_enabled=true,enable_dns_by_querytype=false", func(t *testing.T) { - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) - 
config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) ipc := make(ipCache) formatter := newDNSFormatter(payload, ipc) @@ -80,8 +80,8 @@ func TestFormatConnectionDNS(t *testing.T) { }) t.Run("DNS with collect_domains_enabled=true,enable_dns_by_querytype=true", func(t *testing.T) { - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", true) ipc := make(ipCache) formatter := newDNSFormatter(payload, ipc) diff --git a/pkg/network/encoding/marshal/modeler.go b/pkg/network/encoding/marshal/modeler.go index 22d3c9b77c8a9..f365ed34d91b5 100644 --- a/pkg/network/encoding/marshal/modeler.go +++ b/pkg/network/encoding/marshal/modeler.go @@ -10,7 +10,7 @@ import ( model "github.com/DataDog/agent-payload/v5/process" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" ) @@ -64,9 +64,10 @@ func (c *ConnectionsModeler) Close() { func (c *ConnectionsModeler) modelConnections(builder *model.ConnectionsBuilder, conns *network.Connections) { cfgOnce.Do(func() { agentCfg = &model.AgentConfiguration{ - NpmEnabled: config.SystemProbe().GetBool("network_config.enabled"), - UsmEnabled: config.SystemProbe().GetBool("service_monitoring_config.enabled"), - CcmEnabled: config.SystemProbe().GetBool("ccm_network_config.enabled"), + NpmEnabled: pkgconfigsetup.SystemProbe().GetBool("network_config.enabled"), + UsmEnabled: 
pkgconfigsetup.SystemProbe().GetBool("service_monitoring_config.enabled"), + CcmEnabled: pkgconfigsetup.SystemProbe().GetBool("ccm_network_config.enabled"), + CsmEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enabled"), } }) @@ -86,6 +87,7 @@ func (c *ConnectionsModeler) modelConnections(builder *model.ConnectionsBuilder, w.SetNpmEnabled(agentCfg.NpmEnabled) w.SetUsmEnabled(agentCfg.UsmEnabled) w.SetCcmEnabled(agentCfg.CcmEnabled) + w.SetCsmEnabled(agentCfg.CsmEnabled) }) for _, d := range c.dnsFormatter.Domains() { builder.AddDomains(d) diff --git a/pkg/network/encoding/marshal/modeler_test.go b/pkg/network/encoding/marshal/modeler_test.go new file mode 100644 index 0000000000000..bac1f11891670 --- /dev/null +++ b/pkg/network/encoding/marshal/modeler_test.go @@ -0,0 +1,67 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package marshal + +import ( + "strconv" + "sync" + "testing" + + model "github.com/DataDog/agent-payload/v5/process" + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/network" +) + +func TestConnectionModelerAgentConfiguration(t *testing.T) { + tests := []struct { + npm, usm, ccm, csm bool + }{ + {false, false, false, false}, + {false, false, true, false}, + {false, true, false, false}, + {false, true, true, false}, + {true, false, false, false}, + {true, false, true, false}, + {true, true, false, false}, + {true, true, true, false}, + {false, false, false, true}, + {false, false, true, true}, + {false, true, false, true}, + {false, true, true, true}, + {true, false, false, true}, + {true, false, true, true}, + {true, true, false, true}, + {true, true, true, true}, + } + + for _, te := range tests { + t.Run("", func(t *testing.T) { + t.Setenv("DD_SYSTEM_PROBE_NETWORK_ENABLED", strconv.FormatBool(te.npm)) + t.Setenv("DD_SYSTEM_PROBE_SERVICE_MONITORING_ENABLED", strconv.FormatBool(te.usm)) + t.Setenv("DD_CCM_NETWORK_CONFIG_ENABLED", strconv.FormatBool(te.ccm)) + t.Setenv("DD_RUNTIME_SECURITY_CONFIG_ENABLED", strconv.FormatBool(te.csm)) + mock.NewSystemProbe(t) + cfgOnce = sync.Once{} + conns := &network.Connections{} + mod := NewConnectionsModeler(conns) + streamer := NewProtoTestStreamer[*model.Connections]() + builder := model.NewConnectionsBuilder(streamer) + expected := &model.AgentConfiguration{ + CcmEnabled: te.ccm, + CsmEnabled: te.csm, + UsmEnabled: te.usm, + NpmEnabled: te.npm, + } + + mod.modelConnections(builder, conns) + + actual := streamer.Unwrap(t, &model.Connections{}) + assert.Equal(t, expected, actual.AgentConfiguration) + }) + } +} diff --git a/pkg/network/encoding/marshal/usm.go b/pkg/network/encoding/marshal/usm.go index a9650e83b5778..bcae7a12ecc12 100644 --- a/pkg/network/encoding/marshal/usm.go +++ b/pkg/network/encoding/marshal/usm.go @@ -11,7 +11,7 @@ 
import ( "github.com/cihub/seelog" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/network/types" @@ -56,7 +56,7 @@ func GroupByConnection[K comparable, V any](protocol string, data map[K]V, keyGe lookupFn: USMLookup[K, V], // Experimental: Connection Rollups - enableConnectionRollup: config.SystemProbe().GetBool("service_monitoring_config.enable_connection_rollup"), + enableConnectionRollup: pkgconfigsetup.SystemProbe().GetBool("service_monitoring_config.enable_connection_rollup"), } // The map intended to calculate how many entries we actually need in byConnection.data, and for each entry diff --git a/pkg/network/events/monitor_windows.go b/pkg/network/events/monitor_windows.go index 3698e001eb40f..e766e0a4bfa0f 100644 --- a/pkg/network/events/monitor_windows.go +++ b/pkg/network/events/monitor_windows.go @@ -62,7 +62,7 @@ func getAPMTags(already map[string]struct{}, filename string) []*intern.Value { tags := make([]*intern.Value, 0, 3) // see if there's an app.config in the directory - appConfig := filepath.Join(dir, "app.config") + appConfig := filename + ".config" ddJSON := filepath.Join(dir, "datadog.json") if _, err := os.Stat(appConfig); err == nil { @@ -74,7 +74,7 @@ func getAPMTags(already map[string]struct{}, filename string) []*intern.Value { } } } else if !errors.Is(err, os.ErrNotExist) { - log.Warnf("Error reading app.config: %v", err) + log.Warnf("Error reading app.config: %s %v", appConfig, err) } if len(already) == len(envFilter) { // we've seen all we need, no point in looking in datadog.json diff --git a/pkg/network/gateway_lookup_linux.go b/pkg/network/gateway_lookup_linux.go index a3131f40fc085..9236bcd6bb8a9 100644 --- a/pkg/network/gateway_lookup_linux.go +++ b/pkg/network/gateway_lookup_linux.go @@ -18,7 +18,7 @@ import ( 
telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/process/util" "github.com/DataDog/datadog-agent/pkg/telemetry" @@ -73,7 +73,7 @@ func init() { func gwLookupEnabled() bool { // only enabled on AWS currently - return Cloud.IsAWS() && ddconfig.IsCloudProviderEnabled(ec2.CloudProviderName) + return Cloud.IsAWS() && pkgconfigsetup.IsCloudProviderEnabled(ec2.CloudProviderName, pkgconfigsetup.Datadog()) } // NewGatewayLookup creates a new instance of a gateway lookup using diff --git a/pkg/network/nettop/main.go b/pkg/network/nettop/main.go index ad28dda34b871..1fb0f4da992ff 100644 --- a/pkg/network/nettop/main.go +++ b/pkg/network/nettop/main.go @@ -14,7 +14,7 @@ import ( "syscall" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network" networkConfig "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/tracer" @@ -29,8 +29,8 @@ func main() { os.Exit(1) } - config.Datadog().SetConfigFile(*cfgpath) - if _, err := config.LoadWithoutSecret(); err != nil { + pkgconfigsetup.Datadog().SetConfigFile(*cfgpath) + if _, err := pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil); err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } diff --git a/pkg/network/protocols/postgres/telemetry.go b/pkg/network/protocols/postgres/telemetry.go index bfdd93030c078..9d45855c312fb 100644 --- a/pkg/network/protocols/postgres/telemetry.go +++ b/pkg/network/protocols/postgres/telemetry.go @@ -22,18 +22,81 @@ const ( numberOfBucketsSmallerThanMaxBufferSize = 3 ) +type counterStateEnum int + +const ( + tableAndOperation counterStateEnum = iota + 1 + operationNotFound + tableNameNotFound + 
tableAndOpNotFound +) + +// extractionFailureCounter stores counter when goal was achieved and counter when target not found. +type extractionFailureCounter struct { + // countTableAndOperationFound counts the number of successfully retrieved table name and operation. + countTableAndOperationFound *libtelemetry.Counter + // countOperationNotFound counts the number of unsuccessful fetches of the operation. + countOperationNotFound *libtelemetry.Counter + // countTableNameNotFound counts the number of unsuccessful fetches of the table name. + countTableNameNotFound *libtelemetry.Counter + // countTableAndOpNotFound counts the number of failed attempts to fetch both the table name and the operation. + countTableAndOpNotFound *libtelemetry.Counter +} + +// newExtractionFailureCounter creates and returns a new instance +func newExtractionFailureCounter(metricGroup *libtelemetry.MetricGroup, metricName string, tags ...string) *extractionFailureCounter { + return &extractionFailureCounter{ + countTableAndOperationFound: metricGroup.NewCounter(metricName, append(tags, "state:table_and_op")...), + countOperationNotFound: metricGroup.NewCounter(metricName, append(tags, "state:no_operation")...), + countTableNameNotFound: metricGroup.NewCounter(metricName, append(tags, "state:no_table_name")...), + countTableAndOpNotFound: metricGroup.NewCounter(metricName, append(tags, "state:no_table_no_op")...), + } +} + +// inc increments the appropriate counter based on the provided state. +func (c *extractionFailureCounter) inc(state counterStateEnum) { + switch state { + case tableAndOperation: + c.countTableAndOperationFound.Add(1) + case operationNotFound: + c.countOperationNotFound.Add(1) + case tableNameNotFound: + c.countTableNameNotFound.Add(1) + case tableAndOpNotFound: + c.countTableAndOpNotFound.Add(1) + default: + log.Errorf("unable to increment extractionFailureCounter due to undefined state: %v\n", state) + } +} + +// get returns the counter value based on the result. 
+func (c *extractionFailureCounter) get(state counterStateEnum) int64 { + switch state { + case tableAndOperation: + return c.countTableAndOperationFound.Get() + case operationNotFound: + return c.countOperationNotFound.Get() + case tableNameNotFound: + return c.countTableNameNotFound.Get() + case tableAndOpNotFound: + return c.countTableAndOpNotFound.Get() + default: + return 0 + } +} + // Telemetry is a struct to hold the telemetry for the postgres protocol type Telemetry struct { metricGroup *libtelemetry.MetricGroup // queryLengthBuckets holds the counters for the different buckets of by the query length quires - queryLengthBuckets [numberOfBuckets]*libtelemetry.Counter + queryLengthBuckets [numberOfBuckets]*extractionFailureCounter // failedTableNameExtraction holds the counter for the failed table name extraction failedTableNameExtraction *libtelemetry.Counter // failedOperationExtraction holds the counter for the failed operation extraction failedOperationExtraction *libtelemetry.Counter // firstBucketLowerBoundary is the lower boundary of the first bucket. - // We add 1 in order to include BufferSize as the upper boundary of the third bucket. + // We inc 1 in order to include BufferSize as the upper boundary of the third bucket. // Then the first three buckets will include query lengths shorter or equal to BufferSize, // and the rest will include sizes equal to or above the buffer size. 
firstBucketLowerBoundary int @@ -51,10 +114,10 @@ type Telemetry struct { // Bucket 7: BufferSize + 4*bucketLength + 1 to BufferSize + 5*bucketLength // Bucket 8: BufferSize + 5*bucketLength + 1 to BufferSize + 6*bucketLength // Bucket 9: BufferSize + 6*bucketLength + 1 to BufferSize + 7*bucketLength -func createQueryLengthBuckets(metricGroup *libtelemetry.MetricGroup) [numberOfBuckets]*libtelemetry.Counter { - var buckets [numberOfBuckets]*libtelemetry.Counter +func createQueryLengthBuckets(metricGroup *libtelemetry.MetricGroup) [numberOfBuckets]*extractionFailureCounter { + var buckets [numberOfBuckets]*extractionFailureCounter for i := 0; i < numberOfBuckets; i++ { - buckets[i] = metricGroup.NewCounter("query_length_bucket"+fmt.Sprint(i+1), libtelemetry.OptStatsd) + buckets[i] = newExtractionFailureCounter(metricGroup, "query_length_bucket"+fmt.Sprint(i+1), libtelemetry.OptStatsd) } return buckets } @@ -88,16 +151,22 @@ func (t *Telemetry) getBucketIndex(querySize int) int { func (t *Telemetry) Count(tx *ebpf.EbpfEvent, eventWrapper *EventWrapper) { querySize := int(tx.Tx.Original_query_size) - bucketIndex := t.getBucketIndex(querySize) - if bucketIndex >= 0 && bucketIndex < len(t.queryLengthBuckets) { - t.queryLengthBuckets[bucketIndex].Add(1) - } - + state := tableAndOperation if eventWrapper.Operation() == UnknownOP { t.failedOperationExtraction.Add(1) + state = operationNotFound } if eventWrapper.TableName() == "UNKNOWN" { t.failedTableNameExtraction.Add(1) + if state == operationNotFound { + state = tableAndOpNotFound + } else { + state = tableNameNotFound + } + } + bucketIndex := t.getBucketIndex(querySize) + if bucketIndex >= 0 && bucketIndex < len(t.queryLengthBuckets) { + t.queryLengthBuckets[bucketIndex].inc(state) } } diff --git a/pkg/network/protocols/postgres/telemetry_test.go b/pkg/network/protocols/postgres/telemetry_test.go index f1e5f9cf58a04..aa8aa6843e373 100644 --- a/pkg/network/protocols/postgres/telemetry_test.go +++ 
b/pkg/network/protocols/postgres/telemetry_test.go @@ -22,6 +22,7 @@ type telemetryResults struct { queryLength [bucketLength]int64 failedTableNameExtraction int64 failedOperationExtraction int64 + counterState counterStateEnum } func Test_getBucketIndex(t *testing.T) { @@ -81,6 +82,7 @@ func TestTelemetry_Count(t *testing.T) { queryLength: [bucketLength]int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, failedOperationExtraction: 10, failedTableNameExtraction: 10, + counterState: tableAndOpNotFound, }, }, { @@ -103,6 +105,7 @@ func TestTelemetry_Count(t *testing.T) { queryLength: [bucketLength]int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, failedOperationExtraction: 10, failedTableNameExtraction: 10, + counterState: tableAndOpNotFound, }, }, { @@ -125,6 +128,7 @@ func TestTelemetry_Count(t *testing.T) { queryLength: [bucketLength]int64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, failedOperationExtraction: 10, failedTableNameExtraction: 10, + counterState: tableAndOpNotFound, }, }, { @@ -134,6 +138,7 @@ func TestTelemetry_Count(t *testing.T) { expectedTelemetry: telemetryResults{ failedOperationExtraction: 1, queryLength: [bucketLength]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + counterState: operationNotFound, }, }, { @@ -143,6 +148,7 @@ func TestTelemetry_Count(t *testing.T) { expectedTelemetry: telemetryResults{ failedTableNameExtraction: 1, queryLength: [bucketLength]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + counterState: tableNameNotFound, }, }, { @@ -153,6 +159,7 @@ func TestTelemetry_Count(t *testing.T) { failedTableNameExtraction: 1, failedOperationExtraction: 1, queryLength: [bucketLength]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + counterState: tableAndOpNotFound, }, }, } @@ -188,7 +195,15 @@ func createEbpfEvent(querySize int) *ebpf.EbpfEvent { func verifyTelemetry(t *testing.T, tel *Telemetry, expected telemetryResults) { for i := 0; i < len(tel.queryLengthBuckets); i++ { - assert.Equal(t, expected.queryLength[i], tel.queryLengthBuckets[i].Get(), "queryLength for bucket %d count is incorrect", i) + 
expState := expected.counterState + expCount := expected.queryLength[i] + curCount := tel.queryLengthBuckets[i].get(expState) + + assert.Equal(t, + expCount, + curCount, + "queryLength bucket '%d': expected state '%v', expected counter '%d', actual counter '%d'", + i, expState, expCount, curCount) } assert.Equal(t, expected.failedTableNameExtraction, tel.failedTableNameExtraction.Get(), "failedTableNameExtraction count is incorrect") assert.Equal(t, expected.failedOperationExtraction, tel.failedOperationExtraction.Get(), "failedOperationExtraction count is incorrect") diff --git a/pkg/network/state_test.go b/pkg/network/state_test.go index 41d29919ae6f6..d4015c6a972ca 100644 --- a/pkg/network/state_test.go +++ b/pkg/network/state_test.go @@ -22,7 +22,7 @@ import ( "go.uber.org/atomic" "go4.org/intern" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/network/protocols" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" @@ -2859,8 +2859,8 @@ func TestDNSPIDCollision(t *testing.T) { }, } - config.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) - config.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) + pkgconfigsetup.SystemProbe().SetWithoutSource("system_probe_config.collect_dns_domains", true) + pkgconfigsetup.SystemProbe().SetWithoutSource("network_config.enable_dns_by_querytype", false) state := newDefaultState() state.RegisterClient("foo") diff --git a/pkg/network/tracer/connection/dump.go b/pkg/network/tracer/connection/dump.go index 101ab0d084cb6..a9a9b1aff1337 100644 --- a/pkg/network/tracer/connection/dump.go +++ b/pkg/network/tracer/connection/dump.go @@ -23,7 +23,7 @@ import ( ) -func dumpMapsHandler(w io.Writer, manager *manager.Manager, mapName string, currentMap *ebpf.Map) { +func dumpMapsHandler(w io.Writer, _ 
*manager.Manager, mapName string, currentMap *ebpf.Map) { switch mapName { case "connectsock_ipv6": // maps/connectsock_ipv6 (BPF_MAP_TYPE_HASH), key C.__u64, value uintptr // C.void* @@ -89,6 +89,21 @@ func dumpMapsHandler(w io.Writer, manager *manager.Manager, mapName string, curr spew.Fdump(w, key, value) } + case probes.TCPOngoingConnectPid: // maps/tcp_ongoing_connect_pid (BPF_MAP_TYPE_HASH), key SkpConnTuple, value u64 + io.WriteString(w, "Map: '"+mapName+"', key: 'SkpConnTuple', value: 'C.u64'\n") + io.WriteString(w, "This map is used to store the PID of the process that initiated the connection\n") + totalSize := 0 + info, _ := currentMap.Info() + spew.Fdump(w, info) + iter := currentMap.Iterate() + var key ddebpf.SkpConn + var value ddebpf.PidTs + for iter.Next(unsafe.Pointer(&key), unsafe.Pointer(&value)) { + totalSize++ + spew.Fdump(w, key.Tup, value) + } + io.WriteString(w, "Total entries: "+spew.Sdump(totalSize)) + case probes.ConnCloseBatchMap: // maps/conn_close_batch (BPF_MAP_TYPE_HASH), key C.__u32, value batch io.WriteString(w, "Map: '"+mapName+"', key: 'C.__u32', value: 'batch'\n") iter := currentMap.Iterate() diff --git a/pkg/network/tracer/connection/ebpf_tracer.go b/pkg/network/tracer/connection/ebpf_tracer.go index e46d8dd242161..4792e6f2aca5a 100644 --- a/pkg/network/tracer/connection/ebpf_tracer.go +++ b/pkg/network/tracer/connection/ebpf_tracer.go @@ -46,42 +46,53 @@ const ( connTracerModuleName = "network_tracer__ebpf" ) +var tcpOngoingConnectMapTTL = 30 * time.Minute.Nanoseconds() + var EbpfTracerTelemetry = struct { connections telemetry.Gauge tcpFailedConnects *prometheus.Desc - TcpSentMiscounts *prometheus.Desc + TcpSentMiscounts *prometheus.Desc unbatchedTcpClose *prometheus.Desc unbatchedUdpClose *prometheus.Desc UdpSendsProcessed *prometheus.Desc - UdpSendsMissed *prometheus.Desc - UdpDroppedConns *prometheus.Desc + UdpSendsMissed *prometheus.Desc + UdpDroppedConns *prometheus.Desc // doubleFlushAttemptsClose is a counter measuring 
the number of attempts to flush a closed connection twice from tcp_close doubleFlushAttemptsClose *prometheus.Desc // doubleFlushAttemptsDone is a counter measuring the number of attempts to flush a closed connection twice from tcp_done doubleFlushAttemptsDone *prometheus.Desc // unsupportedTcpFailures is a counter measuring the number of attempts to flush a TCP failure that is not supported unsupportedTcpFailures *prometheus.Desc - // tcpDonePidMismatch is a counter measuring the number of TCP connections with a PID mismatch between tcp_connect and tcp_done - tcpDonePidMismatch *prometheus.Desc - PidCollisions *telemetry.StatCounterWrapper - iterationDups telemetry.Counter - iterationAborts telemetry.Counter + // tcpDoneMissingPid is a counter measuring the number of TCP connections with a PID mismatch between tcp_connect and tcp_done + tcpDoneMissingPid *prometheus.Desc + tcpConnectFailedTuple *prometheus.Desc + tcpDoneFailedTuple *prometheus.Desc + tcpFinishConnectFailedTuple *prometheus.Desc + tcpCloseTargetFailures *prometheus.Desc + ongoingConnectPidCleaned telemetry.Counter + PidCollisions *telemetry.StatCounterWrapper + iterationDups telemetry.Counter + iterationAborts telemetry.Counter lastTcpFailedConnects *atomic.Int64 - LastTcpSentMiscounts *atomic.Int64 + LastTcpSentMiscounts *atomic.Int64 lastUnbatchedTcpClose *atomic.Int64 lastUnbatchedUdpClose *atomic.Int64 lastUdpSendsProcessed *atomic.Int64 - lastUdpSendsMissed *atomic.Int64 - lastUdpDroppedConns *atomic.Int64 + lastUdpSendsMissed *atomic.Int64 + lastUdpDroppedConns *atomic.Int64 // lastDoubleFlushAttemptsClose is a counter measuring the diff between the last two values of doubleFlushAttemptsClose lastDoubleFlushAttemptsClose *atomic.Int64 // lastDoubleFlushAttemptsDone is a counter measuring the diff between the last two values of doubleFlushAttemptsDone lastDoubleFlushAttemptsDone *atomic.Int64 // lastUnsupportedTcpFailures is a counter measuring the diff between the last two values of 
unsupportedTcpFailures lastUnsupportedTcpFailures *atomic.Int64 - // lastTcpDonePidMismatch is a counter measuring the diff between the last two values of tcpDonePidMismatch - lastTcpDonePidMismatch *atomic.Int64 + // lastTcpDoneMissingPid is a counter measuring the diff between the last two values of tcpDoneMissingPid + lastTcpDoneMissingPid *atomic.Int64 + lastTcpConnectFailedTuple *atomic.Int64 + lastTcpDoneFailedTuple *atomic.Int64 + lastTcpFinishConnectFailedTuple *atomic.Int64 + lastTcpCloseTargetFailures *atomic.Int64 }{ telemetry.NewGauge(connTracerModuleName, "connections", []string{"ip_proto", "family"}, "Gauge measuring the number of active connections in the EBPF map"), prometheus.NewDesc(connTracerModuleName+"__tcp_failed_connects", "Counter measuring the number of failed TCP connections in the EBPF map", nil, nil), @@ -94,7 +105,12 @@ var EbpfTracerTelemetry = struct { prometheus.NewDesc(connTracerModuleName+"__double_flush_attempts_close", "Counter measuring the number of attempts to flush a closed connection twice from tcp_close", nil, nil), prometheus.NewDesc(connTracerModuleName+"__double_flush_attempts_done", "Counter measuring the number of attempts to flush a closed connection twice from tcp_done", nil, nil), prometheus.NewDesc(connTracerModuleName+"__unsupported_tcp_failures", "Counter measuring the number of attempts to flush a TCP failure that is not supported", nil, nil), - prometheus.NewDesc(connTracerModuleName+"__tcp_done_pid_mismatch", "Counter measuring the number of TCP connections with a PID mismatch between tcp_connect and tcp_done", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_done_missing_pid", "Counter measuring the number of TCP connections with a missing PID in tcp_done", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_connect_failed_tuple", "Counter measuring the number of failed TCP connections due to tuple collisions", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_done_failed_tuple", 
"Counter measuring the number of failed TCP connections due to tuple collisions", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_finish_connect_failed_tuple", "Counter measuring the number of failed TCP connections due to tuple collisions", nil, nil), + prometheus.NewDesc(connTracerModuleName+"__tcp_close_target_failures", "Counter measuring the number of failed TCP connections in tcp_close", nil, nil), + telemetry.NewCounter(connTracerModuleName, "ongoing_connect_pid_cleaned", []string{}, "Counter measuring the number of tcp_ongoing_connect_pid entries cleaned in userspace"), telemetry.NewStatCounterWrapper(connTracerModuleName, "pid_collisions", []string{}, "Counter measuring number of process collisions"), telemetry.NewCounter(connTracerModuleName, "iteration_dups", []string{}, "Counter measuring the number of connections iterated more than once"), telemetry.NewCounter(connTracerModuleName, "iteration_aborts", []string{}, "Counter measuring how many times ebpf iteration of connection map was aborted"), @@ -109,6 +125,10 @@ var EbpfTracerTelemetry = struct { atomic.NewInt64(0), atomic.NewInt64(0), atomic.NewInt64(0), + atomic.NewInt64(0), + atomic.NewInt64(0), + atomic.NewInt64(0), + atomic.NewInt64(0), } type ebpfTracer struct { @@ -124,6 +144,9 @@ type ebpfTracer struct { // tcp failure events failedConnConsumer *failure.TCPFailedConnConsumer + // periodically clean the ongoing connection pid map + ongoingConnectCleaner *ddebpf.MapCleaner[netebpf.SkpConn, netebpf.PidTs] + removeTuple *netebpf.ConnTuple closeTracer func() @@ -161,6 +184,7 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace probes.UDPPortBindingsMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.ConnectionProtocolMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.ConnectionTupleToSocketSKBConnMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: 
manager.EditMaxEntries}, + probes.TCPOngoingConnectPid: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, }, ConstantEditors: []manager.ConstantEditor{ boolConst("tcpv6_enabled", config.CollectTCPv6Conns), @@ -244,6 +268,8 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace ch: newCookieHasher(), } + tr.setupMapCleaner(m) + tr.conns, err = maps.GetMap[netebpf.ConnTuple, netebpf.ConnStats](m, probes.ConnMap) if err != nil { tr.Stop() @@ -329,6 +355,7 @@ func (t *ebpfTracer) Stop() { _ = t.m.Stop(manager.CleanAll) t.closeConsumer.Stop() t.failedConnConsumer.Stop() + t.ongoingConnectCleaner.Stop() if t.closeTracer != nil { t.closeTracer() } @@ -501,7 +528,11 @@ func (t *ebpfTracer) Describe(ch chan<- *prometheus.Desc) { ch <- EbpfTracerTelemetry.doubleFlushAttemptsClose ch <- EbpfTracerTelemetry.doubleFlushAttemptsDone ch <- EbpfTracerTelemetry.unsupportedTcpFailures - ch <- EbpfTracerTelemetry.tcpDonePidMismatch + ch <- EbpfTracerTelemetry.tcpDoneMissingPid + ch <- EbpfTracerTelemetry.tcpConnectFailedTuple + ch <- EbpfTracerTelemetry.tcpDoneFailedTuple + ch <- EbpfTracerTelemetry.tcpFinishConnectFailedTuple + ch <- EbpfTracerTelemetry.tcpCloseTargetFailures } // Collect returns the current state of all metrics of the collector @@ -550,10 +581,25 @@ func (t *ebpfTracer) Collect(ch chan<- prometheus.Metric) { EbpfTracerTelemetry.lastUnsupportedTcpFailures.Store(int64(ebpfTelemetry.Unsupported_tcp_failures)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unsupportedTcpFailures, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_done_pid_mismatch) - EbpfTracerTelemetry.lastTcpDonePidMismatch.Load() - EbpfTracerTelemetry.lastTcpDonePidMismatch.Store(int64(ebpfTelemetry.Tcp_done_pid_mismatch)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDonePidMismatch, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Tcp_done_missing_pid) - 
EbpfTracerTelemetry.lastTcpDoneMissingPid.Load() + EbpfTracerTelemetry.lastTcpDoneMissingPid.Store(int64(ebpfTelemetry.Tcp_done_missing_pid)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDoneMissingPid, prometheus.CounterValue, float64(delta)) + + delta = int64(ebpfTelemetry.Tcp_connect_failed_tuple) - EbpfTracerTelemetry.lastTcpConnectFailedTuple.Load() + EbpfTracerTelemetry.lastTcpConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_connect_failed_tuple)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpConnectFailedTuple, prometheus.CounterValue, float64(delta)) + + delta = int64(ebpfTelemetry.Tcp_done_failed_tuple) - EbpfTracerTelemetry.lastTcpDoneFailedTuple.Load() + EbpfTracerTelemetry.lastTcpDoneFailedTuple.Store(int64(ebpfTelemetry.Tcp_done_failed_tuple)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDoneFailedTuple, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple) - EbpfTracerTelemetry.lastTcpFinishConnectFailedTuple.Load() + EbpfTracerTelemetry.lastTcpFinishConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpFinishConnectFailedTuple, prometheus.CounterValue, float64(delta)) + + delta = int64(ebpfTelemetry.Tcp_close_target_failures) - EbpfTracerTelemetry.lastTcpCloseTargetFailures.Load() + EbpfTracerTelemetry.lastTcpCloseTargetFailures.Store(int64(ebpfTelemetry.Tcp_close_target_failures)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpCloseTargetFailures, prometheus.CounterValue, float64(delta)) } // DumpMaps (for debugging purpose) returns all maps content by default or selected maps from maps parameter. 
@@ -645,6 +691,31 @@ func (t *ebpfTracer) getTCPStats(stats *netebpf.TCPStats, tuple *netebpf.ConnTup return t.tcpStats.Lookup(tuple, stats) == nil } +// setupMapCleaner sets up a map cleaner for the tcp_ongoing_connect_pid map +func (t *ebpfTracer) setupMapCleaner(m *manager.Manager) { + tcpOngoingConnectPidMap, _, err := m.GetMap(probes.TCPOngoingConnectPid) + if err != nil { + log.Errorf("error getting %v map: %s", probes.TCPOngoingConnectPid, err) + return + } + + tcpOngoingConnectPidCleaner, err := ddebpf.NewMapCleaner[netebpf.SkpConn, netebpf.PidTs](tcpOngoingConnectPidMap, 1024) + if err != nil { + log.Errorf("error creating map cleaner: %s", err) + return + } + tcpOngoingConnectPidCleaner.Clean(time.Minute*5, nil, nil, func(now int64, _ netebpf.SkpConn, val netebpf.PidTs) bool { + ts := int64(val.Timestamp) + expired := ts > 0 && now-ts > tcpOngoingConnectMapTTL + if expired { + EbpfTracerTelemetry.ongoingConnectPidCleaned.Inc() + } + return expired + }) + + t.ongoingConnectCleaner = tcpOngoingConnectPidCleaner +} + func populateConnStats(stats *network.ConnectionStats, t *netebpf.ConnTuple, s *netebpf.ConnStats, ch *cookieHasher) { *stats = network.ConnectionStats{ Pid: t.Pid, diff --git a/pkg/network/tracer/connection/failure/failed_conn_consumer.go b/pkg/network/tracer/connection/failure/failed_conn_consumer.go index b82dbd5afc76b..4df1a90f29705 100644 --- a/pkg/network/tracer/connection/failure/failed_conn_consumer.go +++ b/pkg/network/tracer/connection/failure/failed_conn_consumer.go @@ -57,7 +57,7 @@ func (c *TCPFailedConnConsumer) Stop() { c.once.Do(func() { close(c.closed) }) - c.FailedConns.mapCleaner.Stop() + c.FailedConns.connCloseFlushedCleaner.Stop() } func (c *TCPFailedConnConsumer) extractConn(data []byte) { diff --git a/pkg/network/tracer/connection/failure/matching.go b/pkg/network/tracer/connection/failure/matching.go index d02f4ddc0ebbb..4ad1c8ff31c1d 100644 --- a/pkg/network/tracer/connection/failure/matching.go +++ 
b/pkg/network/tracer/connection/failure/matching.go @@ -26,18 +26,20 @@ import ( ) var ( - telemetryModuleName = "network_tracer__tcp_failure" - mapTTL = 10 * time.Millisecond.Nanoseconds() + telemetryModuleName = "network_tracer__tcp_failure" + connClosedFlushMapTTL = 10 * time.Millisecond.Nanoseconds() ) var failureTelemetry = struct { - failedConnMatches telemetry.Counter - failedConnOrphans telemetry.Counter - failedConnsDropped telemetry.Counter + failedConnMatches telemetry.Counter + failedConnOrphans telemetry.Counter + failedConnsDropped telemetry.Counter + closedConnFlushedCleaned telemetry.Counter }{ telemetry.NewCounter(telemetryModuleName, "matches", []string{"type"}, "Counter measuring the number of successful matches of failed connections with closed connections"), telemetry.NewCounter(telemetryModuleName, "orphans", []string{}, "Counter measuring the number of orphans after associating failed connections with a closed connection"), telemetry.NewCounter(telemetryModuleName, "dropped", []string{}, "Counter measuring the number of dropped failed connections"), + telemetry.NewCounter(telemetryModuleName, "closed_conn_flushed_cleaned", []string{}, "Counter measuring the number of conn_close_flushed entries cleaned in userspace"), } // FailedConnStats is a wrapper to help document the purpose of the underlying map @@ -58,10 +60,10 @@ type FailedConnMap map[ebpf.ConnTuple]*FailedConnStats // FailedConns is a struct to hold failed connections type FailedConns struct { - FailedConnMap map[ebpf.ConnTuple]*FailedConnStats - maxFailuresBuffered uint32 - failureTuple *ebpf.ConnTuple - mapCleaner *ddebpf.MapCleaner[ebpf.ConnTuple, int64] + FailedConnMap map[ebpf.ConnTuple]*FailedConnStats + maxFailuresBuffered uint32 + failureTuple *ebpf.ConnTuple + connCloseFlushedCleaner *ddebpf.MapCleaner[ebpf.ConnTuple, int64] sync.Mutex } @@ -159,8 +161,12 @@ func (fc *FailedConns) setupMapCleaner(m *manager.Manager) { } mapCleaner.Clean(time.Second*1, nil, nil, func(now 
int64, _ ebpf.ConnTuple, val int64) bool { - return val > 0 && now-val > mapTTL + expired := val > 0 && now-val > connClosedFlushMapTTL + if expired { + failureTelemetry.closedConnFlushedCleaned.Inc() + } + return expired }) - fc.mapCleaner = mapCleaner + fc.connCloseFlushedCleaner = mapCleaner } diff --git a/pkg/network/tracer/connection/fentry/manager.go b/pkg/network/tracer/connection/fentry/manager.go index 5bf409712b057..29cb5ac920bfe 100644 --- a/pkg/network/tracer/connection/fentry/manager.go +++ b/pkg/network/tracer/connection/fentry/manager.go @@ -20,7 +20,7 @@ func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, mgr.Maps = []*manager.Map{ {Name: probes.ConnMap}, {Name: probes.TCPStatsMap}, - {Name: probes.TCPConnectSockPidMap}, + {Name: probes.TCPOngoingConnectPid}, {Name: probes.ConnCloseFlushed}, {Name: probes.ConnCloseBatchMap}, {Name: "udp_recv_sock"}, diff --git a/pkg/network/tracer/connection/kprobe/config.go b/pkg/network/tracer/connection/kprobe/config.go index 4f119bdbf7bec..e86745416eebe 100644 --- a/pkg/network/tracer/connection/kprobe/config.go +++ b/pkg/network/tracer/connection/kprobe/config.go @@ -59,10 +59,8 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes enableProbe(enabled, probes.TCPClose) enableProbe(enabled, probes.TCPCloseFlushReturn) enableProbe(enabled, probes.TCPConnect) - if c.FailedConnectionsSupported() && (runtimeTracer || coreTracer) { - enableProbe(enabled, probes.TCPDone) - enableProbe(enabled, probes.TCPDoneFlushReturn) - } + enableProbe(enabled, probes.TCPDone) + enableProbe(enabled, probes.TCPDoneFlushReturn) enableProbe(enabled, probes.TCPFinishConnect) enableProbe(enabled, probes.InetCskAcceptReturn) enableProbe(enabled, probes.InetCskListenStop) diff --git a/pkg/network/tracer/connection/kprobe/manager.go b/pkg/network/tracer/connection/kprobe/manager.go index 4582c276b01f6..e8c1448e9d11c 100644 --- a/pkg/network/tracer/connection/kprobe/manager.go +++ 
b/pkg/network/tracer/connection/kprobe/manager.go @@ -65,7 +65,7 @@ func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, mgr.Maps = []*manager.Map{ {Name: probes.ConnMap}, {Name: probes.TCPStatsMap}, - {Name: probes.TCPConnectSockPidMap}, + {Name: probes.TCPOngoingConnectPid}, {Name: probes.ConnCloseFlushed}, {Name: probes.ConnCloseBatchMap}, {Name: "udp_recv_sock"}, diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index 5017167091fca..735b26f55db9a 100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -67,14 +67,12 @@ var tracerTelemetry = struct { closedConns *telemetry.StatCounterWrapper connStatsMapSize telemetry.Gauge payloadSizePerClient telemetry.Gauge - failedConnOrphans telemetry.Counter }{ telemetry.NewCounter(tracerModuleName, "skipped_conns", []string{"ip_proto"}, "Counter measuring skipped connections"), telemetry.NewCounter(tracerModuleName, "expired_tcp_conns", []string{}, "Counter measuring expired TCP connections"), telemetry.NewStatCounterWrapper(tracerModuleName, "closed_conns", []string{"ip_proto"}, "Counter measuring closed TCP connections"), telemetry.NewGauge(tracerModuleName, "conn_stats_map_size", []string{}, "Gauge measuring the size of the active connections map"), telemetry.NewGauge(tracerModuleName, "payload_conn_count", []string{"client_id", "ip_proto"}, "Gauge measuring the number of connections in the system-probe payload"), - telemetry.NewCounter(tracerModuleName, "failed_conn_orphans", []string{}, "Counter measuring the number of orphans after associating failed connections with a closed connection"), } // Tracer implements the functionality of the network tracer diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index dfa6036127c77..c0aadb5bbe945 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -40,8 +40,8 @@ import ( "go4.org/intern" "golang.org/x/sys/unix" 
- ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" @@ -620,9 +620,9 @@ func (s *TracerSuite) TestGatewayLookupNotEnabled() { m.EXPECT().IsAWS().Return(true) network.Cloud = m - clouds := ddconfig.Datadog().Get("cloud_provider_metadata") - ddconfig.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) - defer ddconfig.Datadog().SetWithoutSource("cloud_provider_metadata", clouds) + clouds := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{}) + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", clouds) tr := setupTracer(t, cfg) require.Nil(t, tr.gwLookup) @@ -2391,8 +2391,8 @@ func checkSkipFailureConnectionsTests(t *testing.T) { if _, ok := failedConnectionsBuildModes[ebpftest.GetBuildMode()]; !ok { t.Skip("Skipping test on unsupported build mode: ", ebpftest.GetBuildMode()) } - } + func (s *TracerSuite) TestTCPFailureConnectionTimeout() { t := s.T() @@ -2445,11 +2445,76 @@ func (s *TracerSuite) TestTCPFailureConnectionTimeout() { localAddr := fmt.Sprintf("127.0.0.1:%d", port) // Check if the connection was recorded as failed due to timeout + var conn *network.ConnectionStats require.Eventually(t, func() bool { conns := getConnections(t, tr) // 110 is the errno for ETIMEDOUT - return findFailedConnection(t, localAddr, srvAddr, conns, 110) - }, 3*time.Second, 1000*time.Millisecond, "Failed connection not recorded properly") + conn = findFailedConnection(t, localAddr, srvAddr, conns, 110) + return conn != nil + }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") + + assert.Equal(t, uint32(0), conn.TCPFailures[104], "expected 
0 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(1), conn.TCPFailures[110], "expected 1 connection timeout") + assert.Equal(t, uint64(0), conn.Monotonic.SentBytes, "expected 0 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") +} + +func (s *TracerSuite) TestTCPFailureConnectionResetWithDNAT() { + t := s.T() + + checkSkipFailureConnectionsTests(t) + + cfg := testConfig() + cfg.TCPFailedConnectionsEnabled = true + tr := setupTracer(t, cfg) + + // Setup DNAT to redirect traffic from 2.2.2.2 to 1.1.1.1 + netlinktestutil.SetupDNAT(t) + + // Set up a TCP server on the translated address (1.1.1.1) + srv := tracertestutil.NewTCPServerOnAddress("1.1.1.1:80", func(c net.Conn) { + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetLinger(0) + buf := make([]byte, 10) + _, _ = c.Read(buf) + time.Sleep(10 * time.Millisecond) + } + c.Close() + }) + + require.NoError(t, srv.Run(), "error running server") + t.Cleanup(srv.Shutdown) + + // Attempt to connect to the DNAT address (2.2.2.2), which should be redirected to the server at 1.1.1.1 + serverAddr := "2.2.2.2:80" + c, err := net.Dial("tcp", serverAddr) + require.NoError(t, err, "could not connect to server: ", err) + + // Write to the server and expect a reset + _, writeErr := c.Write([]byte("ping")) + if writeErr != nil { + t.Log("Write error:", writeErr) + } + + // Read from server to ensure that the server has a chance to reset the connection + _, readErr := c.Read(make([]byte, 4)) + require.Error(t, readErr, "expected connection reset error but got none") + + // Check if the connection was recorded as reset + var conn *network.ConnectionStats + require.Eventually(t, func() bool { + // 104 is the errno for ECONNRESET + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(t, tr), 104) + return conn != nil + }, 3*time.Second, 100*time.Millisecond, "Failed connection not 
recorded properly") + + require.NoError(t, c.Close(), "error closing client connection") + assert.Equal(t, uint32(1), conn.TCPFailures[104], "expected 1 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(0), conn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(4), conn.Monotonic.SentBytes, "expected 4 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") } func setupDropTrafficRule(tb testing.TB) (ns string) { diff --git a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go index 1e001d5b5ed98..d552f29b95bec 100644 --- a/pkg/network/tracer/tracer_test.go +++ b/pkg/network/tracer/tracer_test.go @@ -33,7 +33,6 @@ import ( "github.com/stretchr/testify/suite" "golang.org/x/sync/errgroup" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" @@ -77,7 +76,7 @@ func isFentry() bool { func setupTracer(t testing.TB, cfg *config.Config) *Tracer { if isFentry() { - ddconfig.SetFeatures(t, env.ECSFargate) + env.SetFeatures(t, env.ECSFargate) // protocol classification not yet supported on fargate cfg.ProtocolClassificationEnabled = false } @@ -1260,14 +1259,22 @@ func (s *TracerSuite) TestTCPFailureConnectionRefused() { require.Error(t, err, "expected connection refused error but got none") // Check if the connection was recorded as refused + var foundConn *network.ConnectionStats require.Eventually(t, func() bool { conns := getConnections(t, tr) // Check for the refusal record - return findFailedConnectionByRemoteAddr(srvAddr, conns, 111) + foundConn = findFailedConnectionByRemoteAddr(srvAddr, conns, 111) + return foundConn != nil }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") + + assert.Equal(t, uint32(1), 
foundConn.TCPFailures[111], "expected 1 connection refused") + assert.Equal(t, uint32(0), foundConn.TCPFailures[104], "expected 0 connection reset") + assert.Equal(t, uint32(0), foundConn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(0), foundConn.Monotonic.SentBytes, "expected 0 bytes sent") + assert.Equal(t, uint64(0), foundConn.Monotonic.RecvBytes, "expected 0 bytes received") } -func (s *TracerSuite) TestTCPFailureConnectionReset() { +func (s *TracerSuite) TestTCPFailureConnectionResetWithData() { t := s.T() checkSkipFailureConnectionsTests(t) @@ -1304,27 +1311,84 @@ func (s *TracerSuite) TestTCPFailureConnectionReset() { require.Error(t, readErr, "expected connection reset error but got none") // Check if the connection was recorded as reset + var conn *network.ConnectionStats + require.Eventually(t, func() bool { + // 104 is the errno for ECONNRESET + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, getConnections(t, tr), 104) + return conn != nil + }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") + + require.NoError(t, c.Close(), "error closing client connection") + assert.Equal(t, uint32(1), conn.TCPFailures[104], "expected 1 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(0), conn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(4), conn.Monotonic.SentBytes, "expected 4 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") +} + +func (s *TracerSuite) TestTCPFailureConnectionResetNoData() { + t := s.T() + + checkSkipFailureConnectionsTests(t) + + cfg := testConfig() + cfg.TCPFailedConnectionsEnabled = true + tr := setupTracer(t, cfg) + + // Server that immediately resets the connection without any data transfer + srv := testutil.NewTCPServer(func(c net.Conn) { + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetLinger(0) + } 
+ time.Sleep(10 * time.Millisecond) + // Close the connection immediately to trigger a reset + c.Close() + }) + + require.NoError(t, srv.Run(), "error running server") + t.Cleanup(srv.Shutdown) + + serverAddr := srv.Address() + c, err := net.Dial("tcp", serverAddr) + require.NoError(t, err, "could not connect to server: ", err) + + // Wait briefly to give the server time to close the connection + time.Sleep(50 * time.Millisecond) + + // Attempt to write to the server, expecting a reset + _, writeErr := c.Write([]byte("ping")) + require.Error(t, writeErr, "expected connection reset error but got none") + + // Check if the connection was recorded as reset + var conn *network.ConnectionStats require.Eventually(t, func() bool { conns := getConnections(t, tr) // 104 is the errno for ECONNRESET - return findFailedConnection(t, c.LocalAddr().String(), serverAddr, conns, 104) + conn = findFailedConnection(t, c.LocalAddr().String(), serverAddr, conns, 104) + return conn != nil }, 3*time.Second, 100*time.Millisecond, "Failed connection not recorded properly") require.NoError(t, c.Close(), "error closing client connection") + + assert.Equal(t, uint32(1), conn.TCPFailures[104], "expected 1 connection reset") + assert.Equal(t, uint32(0), conn.TCPFailures[111], "expected 0 connection refused") + assert.Equal(t, uint32(0), conn.TCPFailures[110], "expected 0 connection timeout") + assert.Equal(t, uint64(0), conn.Monotonic.SentBytes, "expected 0 bytes sent") + assert.Equal(t, uint64(0), conn.Monotonic.RecvBytes, "expected 0 bytes received") } // findFailedConnection is a utility function to find a failed connection based on specific TCP error codes -func findFailedConnection(t *testing.T, local, remote string, conns *network.Connections, errorCode uint32) bool { // nolint:unused +func findFailedConnection(t *testing.T, local, remote string, conns *network.Connections, errorCode uint32) *network.ConnectionStats { // nolint:unused // Extract the address and port from the net.Addr 
types localAddrPort, err := netip.ParseAddrPort(local) if err != nil { t.Logf("Failed to parse local address: %v", err) - return false + return nil } remoteAddrPort, err := netip.ParseAddrPort(remote) if err != nil { t.Logf("Failed to parse remote address: %v", err) - return false + return nil } failureFilter := func(cs network.ConnectionStats) bool { @@ -1333,13 +1397,13 @@ func findFailedConnection(t *testing.T, local, remote string, conns *network.Con return localMatch && remoteMatch && cs.TCPFailures[errorCode] > 0 } - return network.FirstConnection(conns, failureFilter) != nil + return network.FirstConnection(conns, failureFilter) } // for some failed connections we don't know the local addr/port so we need to search by remote addr only -func findFailedConnectionByRemoteAddr(remoteAddr string, conns *network.Connections, errorCode uint32) bool { +func findFailedConnectionByRemoteAddr(remoteAddr string, conns *network.Connections, errorCode uint32) *network.ConnectionStats { failureFilter := func(cs network.ConnectionStats) bool { return netip.MustParseAddrPort(remoteAddr) == netip.AddrPortFrom(cs.Dest.Addr, cs.DPort) && cs.TCPFailures[errorCode] > 0 } - return network.FirstConnection(conns, failureFilter) != nil + return network.FirstConnection(conns, failureFilter) } diff --git a/pkg/network/tracer/utils_linux.go b/pkg/network/tracer/utils_linux.go index d072906eec687..afb6ca29b6416 100644 --- a/pkg/network/tracer/utils_linux.go +++ b/pkg/network/tracer/utils_linux.go @@ -14,14 +14,14 @@ import ( "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/features" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) // NeedsEBPF returns `true` if the network-tracer requires eBPF func NeedsEBPF() bool { - return !coreconfig.SystemProbe().GetBool("network_config.enable_ebpfless") + return 
!pkgconfigsetup.SystemProbe().GetBool("network_config.enable_ebpfless") } // IsTracerSupportedByOS returns whether the current kernel version supports tracer functionality diff --git a/pkg/network/usm/sharedlibraries/compile.go b/pkg/network/usm/sharedlibraries/compile.go index b33323d292444..be409112fc7d5 100644 --- a/pkg/network/usm/sharedlibraries/compile.go +++ b/pkg/network/usm/sharedlibraries/compile.go @@ -9,19 +9,19 @@ package sharedlibraries import ( + "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" - "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/process/statsd" ) //go:generate $GOPATH/bin/include_headers pkg/network/ebpf/c/runtime/shared-libraries.c pkg/ebpf/bytecode/build/runtime/shared-libraries.c pkg/ebpf/c pkg/network/ebpf/c/runtime pkg/network/ebpf/c //go:generate $GOPATH/bin/integrity pkg/ebpf/bytecode/build/runtime/shared-libraries.c pkg/ebpf/bytecode/runtime/shared-libraries.go runtime -func getRuntimeCompiledSharedLibraries(config *config.Config) (runtime.CompiledOutput, error) { - return runtime.SharedLibraries.Compile(&config.Config, getCFlags(config), statsd.Client) +func getRuntimeCompiledSharedLibraries(config *ebpf.Config) (runtime.CompiledOutput, error) { + return runtime.SharedLibraries.Compile(config, getCFlags(config), statsd.Client) } -func getCFlags(config *config.Config) []string { +func getCFlags(config *ebpf.Config) []string { cflags := []string{"-g"} if config.BPFDebug { diff --git a/pkg/network/usm/sharedlibraries/ebpf.go b/pkg/network/usm/sharedlibraries/ebpf.go index 3b67a6b14d3a6..4b32417132add 100644 --- a/pkg/network/usm/sharedlibraries/ebpf.go +++ b/pkg/network/usm/sharedlibraries/ebpf.go @@ -12,6 +12,7 @@ import ( "math" "os" "runtime" + "strings" manager "github.com/DataDog/ebpf-manager" "golang.org/x/sys/unix" @@ -19,7 +20,6 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" 
"github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" - "github.com/DataDog/datadog-agent/pkg/network/config" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -38,13 +38,31 @@ const ( var traceTypes = []string{"enter", "exit"} -type ebpfProgram struct { - cfg *config.Config +// EbpfProgram represents the shared libraries eBPF program. +type EbpfProgram struct { + cfg *ddebpf.Config perfHandler *ddebpf.PerfHandler *ddebpf.Manager } -func newEBPFProgram(c *config.Config) *ebpfProgram { +// IsSupported returns true if the shared libraries monitoring is supported on the current system. +func IsSupported(cfg *ddebpf.Config) bool { + kversion, err := kernel.HostVersion() + if err != nil { + log.Warn("could not determine the current kernel version. shared libraries monitoring disabled.") + return false + } + + if strings.HasPrefix(runtime.GOARCH, "arm") { + return kversion >= kernel.VersionCode(5, 5, 0) && (cfg.EnableRuntimeCompiler || cfg.EnableCORE) + } + + // Minimum version for shared libraries monitoring is 4.14 + return kversion >= kernel.VersionCode(4, 14, 0) +} + +// NewEBPFProgram creates a new EBPFProgram to monitor shared libraries +func NewEBPFProgram(c *ddebpf.Config) *EbpfProgram { perfHandler := ddebpf.NewPerfHandler(100) pm := &manager.PerfMap{ Map: manager.Map{ @@ -74,14 +92,15 @@ func newEBPFProgram(c *config.Config) *ebpfProgram { ) } - return &ebpfProgram{ + return &EbpfProgram{ cfg: c, Manager: ddebpf.NewManager(mgr, &ebpftelemetry.ErrorsTelemetryModifier{}), perfHandler: perfHandler, } } -func (e *ebpfProgram) Init() error { +// Init initializes the eBPF program. 
+func (e *EbpfProgram) Init() error { var err error if e.cfg.EnableCORE { err = e.initCORE() @@ -110,17 +129,19 @@ func (e *ebpfProgram) Init() error { return e.initPrebuilt() } -func (e *ebpfProgram) GetPerfHandler() *ddebpf.PerfHandler { +// GetPerfHandler returns the perf handler +func (e *EbpfProgram) GetPerfHandler() *ddebpf.PerfHandler { return e.perfHandler } -func (e *ebpfProgram) Stop() { +// Stop stops the eBPF program +func (e *EbpfProgram) Stop() { ebpftelemetry.UnregisterTelemetry(e.Manager.Manager) e.Manager.Stop(manager.CleanAll) //nolint:errcheck e.perfHandler.Stop() } -func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) error { +func (e *EbpfProgram) init(buf bytecode.AssetReader, options manager.Options) error { options.RLimit = &unix.Rlimit{ Cur: math.MaxUint64, Max: math.MaxUint64, @@ -138,12 +159,12 @@ func (e *ebpfProgram) init(buf bytecode.AssetReader, options manager.Options) er return e.InitWithOptions(buf, &options) } -func (e *ebpfProgram) initCORE() error { +func (e *EbpfProgram) initCORE() error { assetName := getAssetName("shared-libraries", e.cfg.BPFDebug) return ddebpf.LoadCOREAsset(assetName, e.init) } -func (e *ebpfProgram) initRuntimeCompiler() error { +func (e *EbpfProgram) initRuntimeCompiler() error { bc, err := getRuntimeCompiledSharedLibraries(e.cfg) if err != nil { return err @@ -152,7 +173,7 @@ func (e *ebpfProgram) initRuntimeCompiler() error { return e.init(bc, manager.Options{}) } -func (e *ebpfProgram) initPrebuilt() error { +func (e *EbpfProgram) initPrebuilt() error { bc, err := netebpf.ReadSharedLibrariesModule(e.cfg.BPFDir, e.cfg.BPFDebug) if err != nil { return err diff --git a/pkg/network/usm/sharedlibraries/testutil/testutil.go b/pkg/network/usm/sharedlibraries/testutil/testutil.go index 1cf1bcee23e3a..80f6832cd2f63 100644 --- a/pkg/network/usm/sharedlibraries/testutil/testutil.go +++ b/pkg/network/usm/sharedlibraries/testutil/testutil.go @@ -21,6 +21,7 @@ import ( 
"github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" protocolstestutil "github.com/DataDog/datadog-agent/pkg/network/protocols/testutil" usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" + "github.com/DataDog/datadog-agent/pkg/util/log" ) // mutex protecting build process @@ -35,6 +36,7 @@ func OpenFromProcess(t *testing.T, programExecutable string, paths ...string) (* cmd.Stderr = patternScanner require.NoError(t, cmd.Start()) + log.Infof("exec prog=%s, paths=%v | PID = %d", programExecutable, paths, cmd.Process.Pid) t.Cleanup(func() { if cmd.Process == nil { @@ -50,7 +52,7 @@ func OpenFromProcess(t *testing.T, programExecutable string, paths ...string) (* case <-time.After(time.Second * 5): patternScanner.PrintLogs(t) // please don't use t.Fatalf() here as we could test if it failed later - return nil, fmt.Errorf("couldn't luanch process in time") + return nil, fmt.Errorf("couldn't launch process in time") } } } diff --git a/pkg/network/usm/sharedlibraries/types.go b/pkg/network/usm/sharedlibraries/types.go index 906bcb7188967..066b69fbc2b0a 100644 --- a/pkg/network/usm/sharedlibraries/types.go +++ b/pkg/network/usm/sharedlibraries/types.go @@ -12,8 +12,8 @@ package sharedlibraries */ import "C" -type libPath C.lib_path_t +type LibPath C.lib_path_t const ( - libPathMaxSize = C.LIB_PATH_MAX_SIZE + LibPathMaxSize = C.LIB_PATH_MAX_SIZE ) diff --git a/pkg/network/usm/sharedlibraries/types_linux.go b/pkg/network/usm/sharedlibraries/types_linux.go index c857c249155e7..3240185a07632 100644 --- a/pkg/network/usm/sharedlibraries/types_linux.go +++ b/pkg/network/usm/sharedlibraries/types_linux.go @@ -3,12 +3,12 @@ package sharedlibraries -type libPath struct { +type LibPath struct { Pid uint32 Len uint32 Buf [120]byte } const ( - libPathMaxSize = 0x78 + LibPathMaxSize = 0x78 ) diff --git a/pkg/network/usm/sharedlibraries/watcher.go b/pkg/network/usm/sharedlibraries/watcher.go index 1342e2c8fd50b..ab0a9a4bfff81 100644 --- 
a/pkg/network/usm/sharedlibraries/watcher.go +++ b/pkg/network/usm/sharedlibraries/watcher.go @@ -32,11 +32,13 @@ const ( scanTerminatedProcessesInterval = 30 * time.Second ) -func toLibPath(data []byte) libPath { - return *(*libPath)(unsafe.Pointer(&data[0])) +// ToLibPath casts the perf event data to the LibPath structure +func ToLibPath(data []byte) LibPath { + return *(*LibPath)(unsafe.Pointer(&data[0])) } -func toBytes(l *libPath) []byte { +// ToBytes converts the libpath to a byte array containing the path +func ToBytes(l *LibPath) []byte { return l.Buf[:l.Len] } @@ -56,7 +58,7 @@ type Watcher struct { loadEvents *ddebpf.PerfHandler processMonitor *monitor.ProcessMonitor registry *utils.FileRegistry - ebpfProgram *ebpfProgram + ebpfProgram *EbpfProgram // telemetry libHits *telemetry.Counter @@ -68,7 +70,7 @@ var _ utils.Attacher = &Watcher{} // NewWatcher creates a new Watcher instance func NewWatcher(cfg *config.Config, rules ...Rule) (*Watcher, error) { - ebpfProgram := newEBPFProgram(cfg) + ebpfProgram := NewEBPFProgram(&cfg.Config) err := ebpfProgram.Init() if err != nil { return nil, fmt.Errorf("error initializing shared library program: %w", err) @@ -255,7 +257,7 @@ func (w *Watcher) Start() { return } - lib := toLibPath(event.Data) + lib := ToLibPath(event.Data) if int(lib.Pid) == thisPID { // don't scan ourself event.Done() @@ -263,7 +265,7 @@ func (w *Watcher) Start() { } w.libHits.Add(1) - path := toBytes(&lib) + path := ToBytes(&lib) for _, r := range w.rules { if r.Re.Match(path) { w.libMatches.Add(1) diff --git a/pkg/network/usm/tests/tracer_classification_test.go b/pkg/network/usm/tests/tracer_classification_test.go index 582425dc6c360..fb167ab7e093c 100644 --- a/pkg/network/usm/tests/tracer_classification_test.go +++ b/pkg/network/usm/tests/tracer_classification_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" 
"github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" "github.com/DataDog/datadog-agent/pkg/network" @@ -52,7 +51,7 @@ func TestMain(m *testing.M) { func setupTracer(t testing.TB, cfg *config.Config) *tracer.Tracer { if ebpftest.GetBuildMode() == ebpftest.Fentry { - ddconfig.SetFeatures(t, env.ECSFargate) + env.SetFeatures(t, env.ECSFargate) // protocol classification not yet supported on fargate cfg.ProtocolClassificationEnabled = false } diff --git a/pkg/network/usm/testutil/generic_testutil_builder.go b/pkg/network/usm/testutil/generic_testutil_builder.go index 623aeccb284e5..899cce0aa70ca 100644 --- a/pkg/network/usm/testutil/generic_testutil_builder.go +++ b/pkg/network/usm/testutil/generic_testutil_builder.go @@ -20,9 +20,9 @@ const ( // buildGoBinary builds a Go binary and returns the path to it. // If the binary is already built (meanly in the CI), it returns the // path to the binary. -func buildGoBinary(curDir, binaryDir, buildFlags string) (string, error) { - serverSrcDir := path.Join(curDir, binaryDir) - cachedServerBinaryPath := path.Join(serverSrcDir, binaryDir) +func buildGoBinary(srcDir, outPath, buildFlags string) (string, error) { + serverSrcDir := srcDir + cachedServerBinaryPath := outPath // If there is a compiled binary already, skip the compilation. // Meant for the CI. @@ -43,12 +43,16 @@ func buildGoBinary(curDir, binaryDir, buildFlags string) (string, error) { // If the binary is already built (meanly in the CI), it returns the // path to the binary. func BuildGoBinaryWrapper(curDir, binaryDir string) (string, error) { - return buildGoBinary(curDir, binaryDir, baseLDFlags) + srcDir := path.Join(curDir, binaryDir) + outPath := path.Join(srcDir, binaryDir) + return buildGoBinary(srcDir, outPath, baseLDFlags) } // BuildGoBinaryWrapperWithoutSymbols builds a Go binary without symbols and returns the path to it. 
// If the binary is already built (meanly in the CI), it returns the // path to the binary. func BuildGoBinaryWrapperWithoutSymbols(curDir, binaryDir string) (string, error) { - return buildGoBinary(curDir, binaryDir, baseLDFlags+" -s -w") + srcDir := path.Join(curDir, binaryDir) + outPath := path.Join(srcDir, binaryDir+"-nosymbols") + return buildGoBinary(srcDir, outPath, baseLDFlags+" -s -w") } diff --git a/pkg/network/usm/utils/file_registry.go b/pkg/network/usm/utils/file_registry.go index 5ea71e3838603..2b212074a7413 100644 --- a/pkg/network/usm/utils/file_registry.go +++ b/pkg/network/usm/utils/file_registry.go @@ -78,7 +78,8 @@ func NewFilePath(procRoot, namespacedPath string, pid uint32) (FilePath, error) return FilePath{HostPath: path, ID: pathID, PID: pid}, nil } -type callback func(FilePath) error +// Callback is a function that is executed when a file becomes active or inactive +type Callback func(FilePath) error // IgnoreCB is just a dummy callback that doesn't do anything // Meant for testing purposes @@ -122,7 +123,7 @@ var ( // If no current registration exists for the given `PathIdentifier`, we execute // its *activation* callback. 
Otherwise, we increment the reference counter for // the existing registration if and only if `pid` is new; -func (r *FileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered callback) error { +func (r *FileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered Callback) error { if activationCB == nil || deactivationCB == nil { return errCallbackIsMissing } @@ -281,7 +282,7 @@ func (r *FileRegistry) Clear() { r.stopped = true } -func (r *FileRegistry) newRegistration(sampleFilePath string, deactivationCB callback) *registration { +func (r *FileRegistry) newRegistration(sampleFilePath string, deactivationCB Callback) *registration { return ®istration{ deactivationCB: deactivationCB, uniqueProcessesCount: atomic.NewInt32(1), @@ -292,7 +293,7 @@ func (r *FileRegistry) newRegistration(sampleFilePath string, deactivationCB cal type registration struct { uniqueProcessesCount *atomic.Int32 - deactivationCB callback + deactivationCB Callback // we are sharing the telemetry from FileRegistry telemetry *registryTelemetry diff --git a/pkg/networkdevice/pinger/pinger_linux.go b/pkg/networkdevice/pinger/pinger_linux.go index 059de377911ef..89476acbc920b 100644 --- a/pkg/networkdevice/pinger/pinger_linux.go +++ b/pkg/networkdevice/pinger/pinger_linux.go @@ -10,7 +10,7 @@ package pinger import ( "encoding/json" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -42,7 +42,7 @@ func (p *LinuxPinger) Ping(host string) (*Result, error) { } tu, err := net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { log.Warnf("could not initialize system-probe connection: %s", 
err.Error()) return nil, err diff --git a/pkg/networkdevice/utils/utils_test.go b/pkg/networkdevice/utils/utils_test.go index 3afbace214c6d..b1d20706b9a8f 100644 --- a/pkg/networkdevice/utils/utils_test.go +++ b/pkg/networkdevice/utils/utils_test.go @@ -9,9 +9,10 @@ import ( "fmt" "testing" - "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/version" "github.com/stretchr/testify/assert" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/version" ) func Test_CopyStrings(t *testing.T) { @@ -28,8 +29,8 @@ func Test_BoolToFloat64(t *testing.T) { } func Test_getAgentTags(t *testing.T) { - config.Datadog().SetWithoutSource("hostname", "my-host") - defer config.Datadog().SetWithoutSource("hostname", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "my-host") + defer pkgconfigsetup.Datadog().SetWithoutSource("hostname", "") assert.Equal(t, []string{ "agent_host:my-host", diff --git a/pkg/networkpath/payload/pathevent.go b/pkg/networkpath/payload/pathevent.go index 25fa9f748cffc..004d4b44303ec 100644 --- a/pkg/networkpath/payload/pathevent.go +++ b/pkg/networkpath/payload/pathevent.go @@ -62,13 +62,14 @@ type NetworkPathDestination struct { // NetworkPath encapsulates data that defines a // path between two hosts as mapped by the agent type NetworkPath struct { - Timestamp int64 `json:"timestamp"` - Namespace string `json:"namespace"` // namespace used to resolve NDM resources - PathtraceID string `json:"pathtrace_id"` - Origin PathOrigin `json:"origin"` - Protocol Protocol `json:"protocol"` - Source NetworkPathSource `json:"source"` - Destination NetworkPathDestination `json:"destination"` - Hops []NetworkPathHop `json:"hops"` - Tags []string `json:"tags,omitempty"` + Timestamp int64 `json:"timestamp"` + AgentVersion string `json:"agent_version"` + Namespace string `json:"namespace"` // namespace used to resolve NDM resources + PathtraceID string `json:"pathtrace_id"` 
+ Origin PathOrigin `json:"origin"` + Protocol Protocol `json:"protocol"` + Source NetworkPathSource `json:"source"` + Destination NetworkPathDestination `json:"destination"` + Hops []NetworkPathHop `json:"hops"` + Tags []string `json:"tags,omitempty"` } diff --git a/pkg/networkpath/traceroute/runner.go b/pkg/networkpath/traceroute/runner.go index 2e0fabbc88eec..b016da93a5a2c 100644 --- a/pkg/networkpath/traceroute/runner.go +++ b/pkg/networkpath/traceroute/runner.go @@ -15,6 +15,7 @@ import ( "sort" "time" + "github.com/DataDog/datadog-agent/pkg/version" "github.com/Datadog/dublin-traceroute/go/dublintraceroute/probes/probev4" "github.com/Datadog/dublin-traceroute/go/dublintraceroute/results" "github.com/vishvananda/netns" @@ -225,9 +226,10 @@ func (r *Runner) runTCP(cfg Config, hname string, target net.IP, maxTTL uint8, t func (r *Runner) processTCPResults(res *tcp.Results, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { traceroutePath := payload.NetworkPath{ - PathtraceID: payload.NewPathtraceID(), - Protocol: payload.ProtocolTCP, - Timestamp: time.Now().UnixMilli(), + AgentVersion: version.AgentVersion, + PathtraceID: payload.NewPathtraceID(), + Protocol: payload.ProtocolTCP, + Timestamp: time.Now().UnixMilli(), Source: payload.NetworkPathSource{ Hostname: hname, NetworkID: r.networkID, @@ -287,9 +289,10 @@ func (r *Runner) processUDPResults(res *results.Results, hname string, destinati } traceroutePath := payload.NetworkPath{ - PathtraceID: payload.NewPathtraceID(), - Protocol: payload.ProtocolUDP, - Timestamp: time.Now().UnixMilli(), + AgentVersion: version.AgentVersion, + PathtraceID: payload.NewPathtraceID(), + Protocol: payload.ProtocolUDP, + Timestamp: time.Now().UnixMilli(), Source: payload.NetworkPathSource{ Hostname: hname, NetworkID: r.networkID, diff --git a/pkg/networkpath/traceroute/tcp/utils.go b/pkg/networkpath/traceroute/tcp/utils.go index eba21aa3631c9..7eb8c5cf45222 100644 --- 
a/pkg/networkpath/traceroute/tcp/utils.go +++ b/pkg/networkpath/traceroute/tcp/utils.go @@ -151,6 +151,8 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time var icmpIP net.IP var tcpIP net.IP var icmpCode layers.ICMPv4TypeCode + var tcpFinished time.Time + var icmpFinished time.Time var port uint16 wg.Add(2) ctx, cancel := context.WithTimeout(context.Background(), timeout) @@ -158,24 +160,21 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time go func() { defer wg.Done() defer cancel() - tcpIP, port, _, tcpErr = handlePackets(ctx, tcpConn, "tcp", localIP, localPort, remoteIP, remotePort, seqNum) + tcpIP, port, _, tcpFinished, tcpErr = handlePackets(ctx, tcpConn, "tcp", localIP, localPort, remoteIP, remotePort, seqNum) }() go func() { defer wg.Done() defer cancel() - icmpIP, _, icmpCode, icmpErr = handlePackets(ctx, icmpConn, "icmp", localIP, localPort, remoteIP, remotePort, seqNum) + icmpIP, _, icmpCode, icmpFinished, icmpErr = handlePackets(ctx, icmpConn, "icmp", localIP, localPort, remoteIP, remotePort, seqNum) }() wg.Wait() - // TODO: while this is okay, we - // should do this more cleanly - finished := time.Now() if tcpErr != nil && icmpErr != nil { _, tcpCanceled := tcpErr.(canceledError) _, icmpCanceled := icmpErr.(canceledError) if icmpCanceled && tcpCanceled { log.Trace("timed out waiting for responses") - return net.IP{}, 0, 0, finished, nil + return net.IP{}, 0, 0, time.Time{}, nil } if tcpErr != nil { log.Errorf("TCP listener error: %s", tcpErr.Error()) @@ -184,34 +183,34 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time log.Errorf("ICMP listener error: %s", icmpErr.Error()) } - return net.IP{}, 0, 0, finished, multierr.Append(fmt.Errorf("tcp error: %w", tcpErr), fmt.Errorf("icmp error: %w", icmpErr)) + return net.IP{}, 0, 0, time.Time{}, multierr.Append(fmt.Errorf("tcp error: %w", tcpErr), fmt.Errorf("icmp error: %w", icmpErr)) } // if there was an error for 
TCP, but not // ICMP, return the ICMP response if tcpErr != nil { - return icmpIP, port, icmpCode, finished, nil + return icmpIP, port, icmpCode, icmpFinished, nil } // return the TCP response - return tcpIP, port, 0, finished, nil + return tcpIP, port, 0, tcpFinished, nil } // handlePackets in its current implementation should listen for the first matching // packet on the connection and then return. If no packet is received within the // timeout or if the listener is canceled, it should return a canceledError -func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, error) { +func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, time.Time, error) { buf := make([]byte, 1024) for { select { case <-ctx.Done(): - return net.IP{}, 0, 0, canceledError("listener canceled") + return net.IP{}, 0, 0, time.Time{}, canceledError("listener canceled") default: } now := time.Now() err := conn.SetReadDeadline(now.Add(time.Millisecond * 100)) if err != nil { - return net.IP{}, 0, 0, fmt.Errorf("failed to read: %w", err) + return net.IP{}, 0, 0, time.Time{}, fmt.Errorf("failed to read: %w", err) } header, packet, _, err := conn.ReadFrom(buf) if err != nil { @@ -220,8 +219,12 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo continue } } - return net.IP{}, 0, 0, err + return net.IP{}, 0, 0, time.Time{}, err } + // once we have a packet, take a timestamp to know when + // the response was received, if it matches, we will + // return this timestamp + received := time.Now() // TODO: remove listener constraint and parse all packets // in the same function return a succinct struct here if listener == "icmp" { @@ -231,7 +234,7 @@ func handlePackets(ctx 
context.Context, conn rawConnWrapper, listener string, lo continue } if icmpMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { - return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, nil + return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, received, nil } } else if listener == "tcp" { tcpResp, err := parseTCP(header, packet) @@ -240,10 +243,10 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo continue } if tcpMatch(localIP, localPort, remoteIP, remotePort, seqNum, tcpResp) { - return tcpResp.SrcIP, uint16(tcpResp.TCPResponse.SrcPort), 0, nil + return tcpResp.SrcIP, uint16(tcpResp.TCPResponse.SrcPort), 0, received, nil } } else { - return net.IP{}, 0, 0, fmt.Errorf("unsupported listener type") + return net.IP{}, 0, 0, received, fmt.Errorf("unsupported listener type") } } } @@ -258,7 +261,6 @@ func parseICMP(header *ipv4.Header, payload []byte) (*icmpResponse, error) { if header.Protocol != IPProtoICMP || header.Version != 4 || header.Src == nil || header.Dst == nil { - log.Errorf("invalid IP header for ICMP packet") return nil, fmt.Errorf("invalid IP header for ICMP packet: %+v", header) } icmpResponse.SrcIP = header.Src @@ -312,7 +314,6 @@ func parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse, error) { if header.Protocol != IPProtoTCP || header.Version != 4 || header.Src == nil || header.Dst == nil { - log.Errorf("invalid IP header for TCP packet") return nil, fmt.Errorf("invalid IP header for TCP packet: %+v", header) } tcpResponse.SrcIP = header.Src diff --git a/pkg/networkpath/traceroute/tcp/utils_test.go b/pkg/networkpath/traceroute/tcp/utils_test.go index 4bd4b996b0e3f..a7ac53e57cc68 100644 --- a/pkg/networkpath/traceroute/tcp/utils_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_test.go @@ -26,6 +26,9 @@ import ( var ( srcIP = net.ParseIP("1.2.3.4") dstIP = net.ParseIP("5.6.7.8") + + innerSrcIP = net.ParseIP("10.0.0.1") + innerDstIP = net.ParseIP("192.168.1.1") ) type ( @@ -47,6 +50,8 @@ 
type ( ) func Test_handlePackets(t *testing.T) { + _, tcpBytes := createMockTCPPacket(createMockIPv4Header(dstIP, srcIP, 6), createMockTCPLayer(443, 12345, 28394, 28395, true, true, true)) + tt := []struct { description string // input @@ -121,13 +126,47 @@ func Test_handlePackets(t *testing.T) { listener: "tcp", errMsg: "canceled", }, + { + description: "successful ICMP parsing returns IP, port, and type code", + ctxTimeout: 500 * time.Millisecond, + conn: &mockRawConn{ + header: createMockIPv4Header(srcIP, dstIP, 1), + payload: createMockICMPPacket(createMockICMPLayer(layers.ICMPv4CodeTTLExceeded), createMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), createMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), + }, + localIP: innerSrcIP, + localPort: 12345, + remoteIP: innerDstIP, + remotePort: 443, + seqNum: 28394, + listener: "icmp", + expectedIP: srcIP, + expectedPort: 0, + expectedTypeCode: layers.ICMPv4CodeTTLExceeded, + }, + { + description: "successful TCP parsing returns IP, port, and type code", + ctxTimeout: 500 * time.Millisecond, + conn: &mockRawConn{ + header: createMockIPv4Header(dstIP, srcIP, 6), + payload: tcpBytes, + }, + localIP: srcIP, + localPort: 12345, + remoteIP: dstIP, + remotePort: 443, + seqNum: 28394, + listener: "tcp", + expectedIP: dstIP, + expectedPort: 443, + expectedTypeCode: 0, + }, } for _, test := range tt { t.Run(test.description, func(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), test.ctxTimeout) defer cancel() - actualIP, actualPort, actualTypeCode, err := handlePackets(ctx, test.conn, test.listener, test.localIP, test.localPort, test.remoteIP, test.remotePort, test.seqNum) + actualIP, actualPort, actualTypeCode, _, err := handlePackets(ctx, test.conn, test.listener, test.localIP, test.localPort, test.remoteIP, test.remotePort, test.seqNum) if test.errMsg != "" { require.Error(t, err) assert.True(t, strings.Contains(err.Error(), test.errMsg)) @@ -142,8 +181,6 @@ func 
Test_handlePackets(t *testing.T) { } func Test_parseICMP(t *testing.T) { - innerSrcIP := net.ParseIP("10.0.0.1") - innerDstIP := net.ParseIP("192.168.1.1") ipv4Header := createMockIPv4Header(srcIP, dstIP, 1) icmpLayer := createMockICMPLayer(layers.ICMPv4CodeTTLExceeded) innerIPv4Layer := createMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP) diff --git a/pkg/networkpath/traceroute/traceroute_linux.go b/pkg/networkpath/traceroute/traceroute_linux.go index 1e4e0f10321cb..547f0fa2ff501 100644 --- a/pkg/networkpath/traceroute/traceroute_linux.go +++ b/pkg/networkpath/traceroute/traceroute_linux.go @@ -12,7 +12,7 @@ import ( "encoding/json" "github.com/DataDog/datadog-agent/comp/core/telemetry" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -41,7 +41,7 @@ func New(cfg Config, _ telemetry.Component) (*LinuxTraceroute, error) { // Run executes a traceroute func (l *LinuxTraceroute) Run(_ context.Context) (payload.NetworkPath, error) { tu, err := net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { log.Warnf("could not initialize system-probe connection: %s", err.Error()) return payload.NetworkPath{}, err diff --git a/pkg/networkpath/traceroute/traceroute_windows.go b/pkg/networkpath/traceroute/traceroute_windows.go index f6e1702121b3b..089f46d216766 100644 --- a/pkg/networkpath/traceroute/traceroute_windows.go +++ b/pkg/networkpath/traceroute/traceroute_windows.go @@ -12,7 +12,7 @@ import ( "encoding/json" "github.com/DataDog/datadog-agent/comp/core/telemetry" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -41,7 +41,7 @@ func New(cfg Config, _ telemetry.Component) (*WindowsTraceroute, error) { // Run executes a traceroute func (w *WindowsTraceroute) Run(_ context.Context) (payload.NetworkPath, error) { tu, err := net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { log.Warnf("could not initialize system-probe connection: %s", err.Error()) return payload.NetworkPath{}, err diff --git a/pkg/orchestrator/config/config.go b/pkg/orchestrator/config/config.go index 870b6e0e5ebf3..564de4d3abc1e 100644 --- a/pkg/orchestrator/config/config.go +++ b/pkg/orchestrator/config/config.go @@ -13,8 +13,8 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator/redact" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" @@ -86,8 +86,8 @@ func (oc *OrchestratorConfig) Load() error { } oc.OrchestratorEndpoints[0].Endpoint = URL - if key := "api_key"; config.Datadog().IsSet(key) { - oc.OrchestratorEndpoints[0].APIKey = utils.SanitizeAPIKey(config.Datadog().GetString(key)) + if key := "api_key"; pkgconfigsetup.Datadog().IsSet(key) { + oc.OrchestratorEndpoints[0].APIKey = utils.SanitizeAPIKey(pkgconfigsetup.Datadog().GetString(key)) } if err := extractOrchestratorAdditionalEndpoints(URL, &oc.OrchestratorEndpoints); err != nil { @@ -95,12 +95,12 @@ func (oc *OrchestratorConfig) Load() error { } // A custom word list to enhance the default one used by the DataScrubber - if k := 
OrchestratorNSKey("custom_sensitive_words"); config.Datadog().IsSet(k) { - oc.Scrubber.AddCustomSensitiveWords(config.Datadog().GetStringSlice(k)) + if k := OrchestratorNSKey("custom_sensitive_words"); pkgconfigsetup.Datadog().IsSet(k) { + oc.Scrubber.AddCustomSensitiveWords(pkgconfigsetup.Datadog().GetStringSlice(k)) } - if k := OrchestratorNSKey("custom_sensitive_annotations_labels"); config.Datadog().IsSet(k) { - redact.UpdateSensitiveAnnotationsAndLabels(config.Datadog().GetStringSlice(k)) + if k := OrchestratorNSKey("custom_sensitive_annotations_labels"); pkgconfigsetup.Datadog().IsSet(k) { + redact.UpdateSensitiveAnnotationsAndLabels(pkgconfigsetup.Datadog().GetStringSlice(k)) } // The maximum number of resources per message and the maximum message size. @@ -108,8 +108,8 @@ func (oc *OrchestratorConfig) Load() error { setBoundedConfigIntValue(OrchestratorNSKey("max_per_message"), maxMessageBatch, func(v int) { oc.MaxPerMessage = v }) setBoundedConfigIntValue(OrchestratorNSKey("max_message_bytes"), maxMessageSize, func(v int) { oc.MaxWeightPerMessageBytes = v }) - if k := key(processNS, "pod_queue_bytes"); config.Datadog().IsSet(k) { - if queueBytes := config.Datadog().GetInt(k); queueBytes > 0 { + if k := key(processNS, "pod_queue_bytes"); pkgconfigsetup.Datadog().IsSet(k) { + if queueBytes := pkgconfigsetup.Datadog().GetInt(k); queueBytes > 0 { oc.PodQueueBytes = queueBytes } } @@ -117,22 +117,22 @@ func (oc *OrchestratorConfig) Load() error { // Orchestrator Explorer oc.OrchestrationCollectionEnabled, oc.KubeClusterName = IsOrchestratorEnabled() - oc.CollectorDiscoveryEnabled = config.Datadog().GetBool(OrchestratorNSKey("collector_discovery.enabled")) - oc.IsScrubbingEnabled = config.Datadog().GetBool(OrchestratorNSKey("container_scrubbing.enabled")) - oc.ExtraTags = config.Datadog().GetStringSlice(OrchestratorNSKey("extra_tags")) - oc.IsManifestCollectionEnabled = config.Datadog().GetBool(OrchestratorNSKey("manifest_collection.enabled")) - 
oc.BufferedManifestEnabled = config.Datadog().GetBool(OrchestratorNSKey("manifest_collection.buffer_manifest")) - oc.ManifestBufferFlushInterval = config.Datadog().GetDuration(OrchestratorNSKey("manifest_collection.buffer_flush_interval")) + oc.CollectorDiscoveryEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("collector_discovery.enabled")) + oc.IsScrubbingEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("container_scrubbing.enabled")) + oc.ExtraTags = pkgconfigsetup.Datadog().GetStringSlice(OrchestratorNSKey("extra_tags")) + oc.IsManifestCollectionEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("manifest_collection.enabled")) + oc.BufferedManifestEnabled = pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("manifest_collection.buffer_manifest")) + oc.ManifestBufferFlushInterval = pkgconfigsetup.Datadog().GetDuration(OrchestratorNSKey("manifest_collection.buffer_flush_interval")) return nil } func extractOrchestratorAdditionalEndpoints(URL *url.URL, orchestratorEndpoints *[]apicfg.Endpoint) error { - if k := OrchestratorNSKey("orchestrator_additional_endpoints"); config.Datadog().IsSet(k) { + if k := OrchestratorNSKey("orchestrator_additional_endpoints"); pkgconfigsetup.Datadog().IsSet(k) { if err := extractEndpoints(URL, k, orchestratorEndpoints); err != nil { return err } - } else if k := key(processNS, "orchestrator_additional_endpoints"); config.Datadog().IsSet(k) { + } else if k := key(processNS, "orchestrator_additional_endpoints"); pkgconfigsetup.Datadog().IsSet(k) { if err := extractEndpoints(URL, k, orchestratorEndpoints); err != nil { return err } @@ -141,7 +141,7 @@ func extractOrchestratorAdditionalEndpoints(URL *url.URL, orchestratorEndpoints } func extractEndpoints(URL *url.URL, k string, endpoints *[]apicfg.Endpoint) error { - for endpointURL, apiKeys := range config.Datadog().GetStringMapStringSlice(k) { + for endpointURL, apiKeys := range pkgconfigsetup.Datadog().GetStringMapStringSlice(k) { u, err := 
URL.Parse(endpointURL) if err != nil { return fmt.Errorf("invalid additional endpoint url '%s': %s", endpointURL, err) @@ -160,7 +160,7 @@ func extractEndpoints(URL *url.URL, k string, endpoints *[]apicfg.Endpoint) erro func extractOrchestratorDDUrl() (*url.URL, error) { orchestratorURL := OrchestratorNSKey("orchestrator_dd_url") processURL := key(processNS, "orchestrator_dd_url") - URL, err := url.Parse(utils.GetMainEndpointBackwardCompatible(config.Datadog(), "https://orchestrator.", orchestratorURL, processURL)) + URL, err := url.Parse(utils.GetMainEndpointBackwardCompatible(pkgconfigsetup.Datadog(), "https://orchestrator.", orchestratorURL, processURL)) if err != nil { return nil, fmt.Errorf("error parsing orchestrator_dd_url: %s", err) } @@ -168,11 +168,11 @@ func extractOrchestratorDDUrl() (*url.URL, error) { } func setBoundedConfigIntValue(configKey string, upperBound int, setter func(v int)) { - if !config.Datadog().IsSet(configKey) { + if !pkgconfigsetup.Datadog().IsSet(configKey) { return } - val := config.Datadog().GetInt(configKey) + val := pkgconfigsetup.Datadog().GetInt(configKey) if val <= 0 { pkglog.Warnf("Ignoring invalid value for setting %s (<=0)", configKey) @@ -188,7 +188,7 @@ func setBoundedConfigIntValue(configKey string, upperBound int, setter func(v in // IsOrchestratorEnabled checks if orchestrator explorer features are enabled, it returns the boolean and the cluster name func IsOrchestratorEnabled() (bool, string) { - enabled := config.Datadog().GetBool(OrchestratorNSKey("enabled")) + enabled := pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("enabled")) var clusterName string if enabled { // Set clustername @@ -200,11 +200,11 @@ func IsOrchestratorEnabled() (bool, string) { // IsOrchestratorECSExplorerEnabled checks if orchestrator ecs explorer features are enabled func IsOrchestratorECSExplorerEnabled() bool { - if !config.Datadog().GetBool(OrchestratorNSKey("enabled")) { + if 
!pkgconfigsetup.Datadog().GetBool(OrchestratorNSKey("enabled")) { return false } - if !config.Datadog().GetBool("ecs_task_collection_enabled") { + if !pkgconfigsetup.Datadog().GetBool("ecs_task_collection_enabled") { return false } diff --git a/pkg/persistentcache/persistentcache.go b/pkg/persistentcache/persistentcache.go index c293a0b473fa8..8ff5092d436d0 100644 --- a/pkg/persistentcache/persistentcache.go +++ b/pkg/persistentcache/persistentcache.go @@ -12,7 +12,7 @@ import ( "regexp" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // Invalid characters to clean up @@ -22,7 +22,7 @@ var invalidChars = regexp.MustCompile("[^a-zA-Z0-9_-]") // first prefix as directory, if present. This is useful for integrations, which // use the check_id formed with $check_name:$hash func getFileForKey(key string) (string, error) { - parent := config.Datadog().GetString("run_path") + parent := pkgconfigsetup.Datadog().GetString("run_path") paths := strings.SplitN(key, ":", 2) cleanedPath := invalidChars.ReplaceAllString(paths[0], "") if len(paths) == 1 { diff --git a/pkg/process/checks/checks.go b/pkg/process/checks/checks.go index b7aabbd8c794e..ec06e082350a2 100644 --- a/pkg/process/checks/checks.go +++ b/pkg/process/checks/checks.go @@ -12,8 +12,8 @@ import ( sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -107,7 +107,7 @@ func (p CombinedRunResult) RealtimePayloads() []model.MessageBody { // All is a list of all runnable checks. 
Putting a check in here does not guarantee it will be run, // it just guarantees that the collector will be able to find the check. // If you want to add a check you MUST register it here. -func All(config, sysprobeYamlCfg ddconfig.ReaderWriter, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) []Check { +func All(config, sysprobeYamlCfg pkgconfigmodel.ReaderWriter, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) []Check { return []Check{ NewProcessCheck(config, sysprobeYamlCfg, wmeta), NewContainerCheck(config, wmeta), @@ -130,7 +130,7 @@ func RTName(checkName string) string { } } -func canEnableContainerChecks(config ddconfig.Reader, displayFeatureWarning bool) bool { +func canEnableContainerChecks(config pkgconfigmodel.Reader, displayFeatureWarning bool) bool { // The process and container checks are mutually exclusive if config.GetBool("process_config.process_collection.enabled") { return false diff --git a/pkg/process/checks/config.go b/pkg/process/checks/config.go index 160c0cb1dff0b..225e5c3e55414 100644 --- a/pkg/process/checks/config.go +++ b/pkg/process/checks/config.go @@ -6,32 +6,33 @@ package checks import ( - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) // getMaxBatchSize returns the maximum number of items (processes, containers, process_discoveries) in a check payload -var getMaxBatchSize = func(config ddconfig.Reader) int { +var getMaxBatchSize = func(config model.Reader) int { return ensureValidMaxBatchSize(config.GetInt("process_config.max_per_message")) } func ensureValidMaxBatchSize(batchSize int) int { - if batchSize <= 0 || batchSize > ddconfig.ProcessMaxPerMessageLimit { - log.Warnf("Invalid max item count per message (%d), using default value of %d", batchSize, 
ddconfig.DefaultProcessMaxPerMessage) - return ddconfig.DefaultProcessMaxPerMessage + if batchSize <= 0 || batchSize > pkgconfigsetup.ProcessMaxPerMessageLimit { + log.Warnf("Invalid max item count per message (%d), using default value of %d", batchSize, pkgconfigsetup.DefaultProcessMaxPerMessage) + return pkgconfigsetup.DefaultProcessMaxPerMessage } return batchSize } // getMaxBatchSize returns the maximum number of bytes in a check payload -var getMaxBatchBytes = func(config ddconfig.Reader) int { +var getMaxBatchBytes = func(config model.Reader) int { return ensureValidMaxBatchBytes(config.GetInt("process_config.max_message_bytes")) } func ensureValidMaxBatchBytes(batchBytes int) int { - if batchBytes <= 0 || batchBytes > ddconfig.ProcessMaxMessageBytesLimit { - log.Warnf("Invalid max byte size per message (%d), using default value of %d", batchBytes, ddconfig.DefaultProcessMaxMessageBytes) - return ddconfig.DefaultProcessMaxMessageBytes + if batchBytes <= 0 || batchBytes > pkgconfigsetup.ProcessMaxMessageBytesLimit { + log.Warnf("Invalid max byte size per message (%d), using default value of %d", batchBytes, pkgconfigsetup.DefaultProcessMaxMessageBytes) + return pkgconfigsetup.DefaultProcessMaxMessageBytes } return batchBytes } diff --git a/pkg/process/checks/config_test.go b/pkg/process/checks/config_test.go index d3a6b0fca6a2e..6f120d12da41d 100644 --- a/pkg/process/checks/config_test.go +++ b/pkg/process/checks/config_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestEnsureValidMaxBatchSize(t *testing.T) { @@ -33,17 +33,17 @@ func TestEnsureValidMaxBatchSize(t *testing.T) { { name: "invalid negative batch count", maxPerMessage: -1, - expectedMaxBatchSize: ddconfig.DefaultProcessMaxPerMessage, + expectedMaxBatchSize: pkgconfigsetup.DefaultProcessMaxPerMessage, }, { name: "invalid 0 max batch size", 
maxPerMessage: 0, - expectedMaxBatchSize: ddconfig.DefaultProcessMaxPerMessage, + expectedMaxBatchSize: pkgconfigsetup.DefaultProcessMaxPerMessage, }, { name: "invalid big max batch size", maxPerMessage: 20000, - expectedMaxBatchSize: ddconfig.DefaultProcessMaxPerMessage, + expectedMaxBatchSize: pkgconfigsetup.DefaultProcessMaxPerMessage, }, } @@ -74,17 +74,17 @@ func TestEnsureValidMaxBatchBytes(t *testing.T) { { name: "invalid negative batch size", maxMessageBytes: -1, - expectedMaxBatchBytes: ddconfig.DefaultProcessMaxMessageBytes, + expectedMaxBatchBytes: pkgconfigsetup.DefaultProcessMaxMessageBytes, }, { name: "invalid 0 max batch size", maxMessageBytes: 0, - expectedMaxBatchBytes: ddconfig.DefaultProcessMaxMessageBytes, + expectedMaxBatchBytes: pkgconfigsetup.DefaultProcessMaxMessageBytes, }, { name: "invalid big max batch size", maxMessageBytes: 20000000, - expectedMaxBatchBytes: ddconfig.DefaultProcessMaxMessageBytes, + expectedMaxBatchBytes: pkgconfigsetup.DefaultProcessMaxMessageBytes, }, } diff --git a/pkg/process/checks/container.go b/pkg/process/checks/container.go index 1e4187d46391c..590cb8f9ac359 100644 --- a/pkg/process/checks/container.go +++ b/pkg/process/checks/container.go @@ -14,7 +14,7 @@ import ( model "github.com/DataDog/agent-payload/v5/process" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/process/statsd" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" @@ -27,7 +27,7 @@ const ( ) // NewContainerCheck returns an instance of the ContainerCheck. 
-func NewContainerCheck(config ddconfig.Reader, wmeta workloadmeta.Component) *ContainerCheck { +func NewContainerCheck(config pkgconfigmodel.Reader, wmeta workloadmeta.Component) *ContainerCheck { return &ContainerCheck{ config: config, wmeta: wmeta, @@ -38,7 +38,7 @@ func NewContainerCheck(config ddconfig.Reader, wmeta workloadmeta.Component) *Co type ContainerCheck struct { sync.Mutex - config ddconfig.Reader + config pkgconfigmodel.Reader hostInfo *HostInfo containerProvider proccontainers.ContainerProvider diff --git a/pkg/process/checks/container_rt.go b/pkg/process/checks/container_rt.go index 42a9634c6f5d8..452323de8e7a5 100644 --- a/pkg/process/checks/container_rt.go +++ b/pkg/process/checks/container_rt.go @@ -11,7 +11,7 @@ import ( model "github.com/DataDog/agent-payload/v5/process" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -23,7 +23,7 @@ const ( ) // NewRTContainerCheck returns an instance of the RTContainerCheck. 
-func NewRTContainerCheck(config ddconfig.Reader, wmeta workloadmeta.Component) *RTContainerCheck { +func NewRTContainerCheck(config pkgconfigmodel.Reader, wmeta workloadmeta.Component) *RTContainerCheck { return &RTContainerCheck{ config: config, wmeta: wmeta, @@ -36,7 +36,7 @@ type RTContainerCheck struct { hostInfo *HostInfo containerProvider proccontainers.ContainerProvider lastRates map[string]*proccontainers.ContainerRateMetrics - config ddconfig.Reader + config pkgconfigmodel.Reader wmeta workloadmeta.Component } diff --git a/pkg/process/checks/enable_checks_containerized_test.go b/pkg/process/checks/enable_checks_containerized_test.go index aad1d013d9500..92111e391802d 100644 --- a/pkg/process/checks/enable_checks_containerized_test.go +++ b/pkg/process/checks/enable_checks_containerized_test.go @@ -18,7 +18,6 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector/npcollectorimpl" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -39,7 +38,7 @@ func TestContainerCheck(t *testing.T) { cfg.SetWithoutSource("process_config.process_collection.enabled", false) cfg.SetWithoutSource("process_config.container_collection.enabled", true) cfg.SetWithoutSource("process_config.disable_realtime_checks", false) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, cfg, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assertContainsCheck(t, enabledChecks, ContainerCheckName) @@ -54,7 +53,7 @@ func TestContainerCheck(t *testing.T) { cfg.SetWithoutSource("process_config.process_collection.enabled", false) cfg.SetWithoutSource("process_config.container_collection.enabled", true) 
cfg.SetWithoutSource("process_config.disable_realtime_checks", true) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, cfg, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assertContainsCheck(t, enabledChecks, ContainerCheckName) @@ -80,7 +79,7 @@ func TestContainerCheck(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("process_config.process_collection.enabled", true) cfg.SetWithoutSource("process_config.container_collection.enabled", true) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, cfg, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assertContainsCheck(t, enabledChecks, ProcessCheckName) @@ -96,7 +95,7 @@ func TestContainerCheck(t *testing.T) { cfg.SetWithoutSource("process_config.process_collection.enabled", false) cfg.SetWithoutSource("process_config.container_collection.enabled", true) cfg.SetWithoutSource("process_config.run_in_core_agent.enabled", true) - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) flavor.SetFlavor("process_agent") enabledChecks := getEnabledChecks(t, cfg, scfg, deps.WMeta, deps.NpCollector) @@ -135,7 +134,7 @@ func TestDisableRealTime(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("process_config.disable_realtime_checks", tc.disableRealtime) mockConfig.SetWithoutSource("process_config.process_discovery.enabled", false) // Not an RT check so we don't care - config.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) enabledChecks := getEnabledChecks(t, mockConfig, configmock.NewSystemProbe(t), deps.WMeta, deps.NpCollector) assert.EqualValues(tc.expectedChecks, enabledChecks) diff --git a/pkg/process/checks/enabled_checks_test.go b/pkg/process/checks/enabled_checks_test.go index 7b4aaea81c38f..c6e4bd07659bb 100644 --- a/pkg/process/checks/enabled_checks_test.go +++ b/pkg/process/checks/enabled_checks_test.go @@ -19,8 +19,8 @@ 
import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector/npcollectorimpl" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -35,7 +35,7 @@ func assertNotContainsCheck(t *testing.T, checks []string, name string) { assert.NotContains(t, checks, name) } -func getEnabledChecks(t *testing.T, cfg, sysprobeYamlConfig config.ReaderWriter, wmeta workloadmeta.Component, npCollector npcollector.Component) []string { +func getEnabledChecks(t *testing.T, cfg, sysprobeYamlConfig pkgconfigmodel.ReaderWriter, wmeta workloadmeta.Component, npCollector npcollector.Component) []string { sysprobeConfigStruct, err := sysconfig.New("", "") require.NoError(t, err) diff --git a/pkg/process/checks/host_info.go b/pkg/process/checks/host_info.go index baaf70890e647..ba5c30e08341f 100644 --- a/pkg/process/checks/host_info.go +++ b/pkg/process/checks/host_info.go @@ -17,7 +17,8 @@ import ( model "github.com/DataDog/agent-payload/v5/process" "google.golang.org/grpc" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/fargate" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -39,7 +40,7 @@ type HostInfo struct { } // CollectHostInfo collects host information -func CollectHostInfo(config config.Reader) (*HostInfo, error) { +func CollectHostInfo(config pkgconfigmodel.Reader) (*HostInfo, error) { sysInfo, err := CollectSystemInfo() if err != nil { return nil, err @@ 
-57,7 +58,7 @@ func CollectHostInfo(config config.Reader) (*HostInfo, error) { }, nil } -func resolveHostName(config config.Reader) (string, error) { +func resolveHostName(config pkgconfigmodel.Reader) (string, error) { // use the common agent hostname utility when not running in the process-agent if flavor.GetFlavor() != flavor.ProcessAgent { hostName, err := coreAgentGetHostname(context.TODO()) @@ -147,12 +148,12 @@ func getHostnameFromGRPC(ctx context.Context, grpcClientFn func(ctx context.Cont ctx, cancel := context.WithTimeout(ctx, grpcConnectionTimeout) defer cancel() - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } - ddAgentClient, err := grpcClientFn(ctx, ipcAddress, config.GetIPCPort()) + ddAgentClient, err := grpcClientFn(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return "", fmt.Errorf("cannot connect to datadog agent via grpc: %w", err) } diff --git a/pkg/process/checks/host_info_test.go b/pkg/process/checks/host_info_test.go index ad19147d649b8..3dc57ffb98e15 100644 --- a/pkg/process/checks/host_info_test.go +++ b/pkg/process/checks/host_info_test.go @@ -20,8 +20,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" pbmocks "github.com/DataDog/datadog-agent/pkg/proto/pbgo/mocks/core" "github.com/DataDog/datadog-agent/pkg/util/flavor" @@ -55,7 +55,7 @@ func TestGetHostnameFromGRPC(t *testing.T) { t.Run("hostname returns from grpc", func(t *testing.T) { hostname, err := getHostnameFromGRPC(ctx, func(_ context.Context, _, _ string, _ ...grpc.DialOption) (pb.AgentClient, error) { return mockClient, nil - }, config.DefaultGRPCConnectionTimeoutSecs*time.Second) + }, 
pkgconfigsetup.DefaultGRPCConnectionTimeoutSecs*time.Second) assert.Nil(t, err) assert.Equal(t, "unit-test-hostname", hostname) @@ -65,7 +65,7 @@ func TestGetHostnameFromGRPC(t *testing.T) { grpcErr := errors.New("no grpc client") hostname, err := getHostnameFromGRPC(ctx, func(_ context.Context, _, _ string, _ ...grpc.DialOption) (pb.AgentClient, error) { return nil, grpcErr - }, config.DefaultGRPCConnectionTimeoutSecs*time.Second) + }, pkgconfigsetup.DefaultGRPCConnectionTimeoutSecs*time.Second) assert.NotNil(t, err) assert.Equal(t, grpcErr, errors.Unwrap(err)) diff --git a/pkg/process/checks/interval.go b/pkg/process/checks/interval.go index b1f44ad6b9cd7..4e02b47ccd10a 100644 --- a/pkg/process/checks/interval.go +++ b/pkg/process/checks/interval.go @@ -8,7 +8,8 @@ package checks import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -48,7 +49,7 @@ var ( RTContainerCheckName: RTContainerCheckDefaultInterval, ConnectionsCheckName: ConnectionsCheckDefaultInterval, DiscoveryCheckName: ProcessDiscoveryCheckDefaultInterval, - ProcessEventsCheckName: config.DefaultProcessEventsCheckInterval, + ProcessEventsCheckName: pkgconfigsetup.DefaultProcessEventsCheckInterval, } configKeys = map[string]string{ @@ -66,7 +67,7 @@ func GetDefaultInterval(checkName string) time.Duration { } // GetInterval returns the configured check interval value -func GetInterval(cfg config.Reader, checkName string) time.Duration { +func GetInterval(cfg pkgconfigmodel.Reader, checkName string) time.Duration { switch checkName { case DiscoveryCheckName: // We don't need to check if the key exists since we already bound it to a default in InitConfig. 
@@ -80,10 +81,10 @@ func GetInterval(cfg config.Reader, checkName string) time.Duration { case ProcessEventsCheckName: eventsInterval := cfg.GetDuration("process_config.event_collection.interval") - if eventsInterval < config.DefaultProcessEventsMinCheckInterval { - eventsInterval = config.DefaultProcessEventsCheckInterval + if eventsInterval < pkgconfigsetup.DefaultProcessEventsMinCheckInterval { + eventsInterval = pkgconfigsetup.DefaultProcessEventsCheckInterval _ = log.Warnf("Invalid interval for process_events check (< %s) using default value of %s", - config.DefaultProcessEventsMinCheckInterval.String(), config.DefaultProcessEventsCheckInterval.String()) + pkgconfigsetup.DefaultProcessEventsMinCheckInterval.String(), pkgconfigsetup.DefaultProcessEventsCheckInterval.String()) } return eventsInterval diff --git a/pkg/process/checks/interval_test.go b/pkg/process/checks/interval_test.go index 3cfc5a26bce97..ecb73709e6a26 100644 --- a/pkg/process/checks/interval_test.go +++ b/pkg/process/checks/interval_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestLegacyIntervalDefault(t *testing.T) { @@ -137,7 +137,7 @@ func TestProcessEventsInterval(t *testing.T) { { name: "below minimum", interval: 0, - expectedInterval: config.DefaultProcessEventsCheckInterval, + expectedInterval: pkgconfigsetup.DefaultProcessEventsCheckInterval, }, } { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/process/checks/net.go b/pkg/process/checks/net.go index 5396fdd26c10f..13b33d87f9dca 100644 --- a/pkg/process/checks/net.go +++ b/pkg/process/checks/net.go @@ -21,7 +21,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" 
"github.com/DataDog/datadog-agent/comp/networkpath/npcollector" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/network/dns" "github.com/DataDog/datadog-agent/pkg/process/metadata/parser" "github.com/DataDog/datadog-agent/pkg/process/net" @@ -47,7 +47,7 @@ var ( ) // NewConnectionsCheck returns an instance of the ConnectionsCheck. -func NewConnectionsCheck(config, sysprobeYamlConfig config.Reader, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) *ConnectionsCheck { +func NewConnectionsCheck(config, sysprobeYamlConfig pkgconfigmodel.Reader, syscfg *sysconfigtypes.Config, wmeta workloadmeta.Component, npCollector npcollector.Component) *ConnectionsCheck { return &ConnectionsCheck{ config: config, syscfg: syscfg, @@ -60,8 +60,8 @@ func NewConnectionsCheck(config, sysprobeYamlConfig config.Reader, syscfg *sysco // ConnectionsCheck collects statistics about live TCP and UDP connections. 
type ConnectionsCheck struct { syscfg *sysconfigtypes.Config - sysprobeYamlConfig config.Reader - config config.Reader + sysprobeYamlConfig pkgconfigmodel.Reader + config pkgconfigmodel.Reader hostInfo *HostInfo maxConnsPerMessage int @@ -212,7 +212,7 @@ func (c *ConnectionsCheck) getConnections() (*model.Connections, error) { return tu.GetConnections(c.tracerClientID) } -func (c *ConnectionsCheck) notifyProcessConnRates(config config.Reader, conns *model.Connections) { +func (c *ConnectionsCheck) notifyProcessConnRates(config pkgconfigmodel.Reader, conns *model.Connections) { if len(c.processConnRatesTransmitter.Chs) == 0 { return } diff --git a/pkg/process/checks/process.go b/pkg/process/checks/process.go index 26685da9e15aa..5d905a135702c 100644 --- a/pkg/process/checks/process.go +++ b/pkg/process/checks/process.go @@ -18,7 +18,8 @@ import ( "go.uber.org/atomic" workloadmetacomp "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/metadata" "github.com/DataDog/datadog-agent/pkg/process/metadata/parser" "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta" @@ -43,7 +44,7 @@ const ( ) // NewProcessCheck returns an instance of the ProcessCheck. 
-func NewProcessCheck(config ddconfig.Reader, sysprobeYamlConfig ddconfig.Reader, wmeta workloadmetacomp.Component) *ProcessCheck { +func NewProcessCheck(config pkgconfigmodel.Reader, sysprobeYamlConfig pkgconfigmodel.Reader, wmeta workloadmetacomp.Component) *ProcessCheck { serviceExtractorEnabled := true useWindowsServiceName := sysprobeYamlConfig.GetBool("system_probe_config.process_service_inference.use_windows_service_name") useImprovedAlgorithm := sysprobeYamlConfig.GetBool("system_probe_config.process_service_inference.use_improved_algorithm") @@ -69,7 +70,7 @@ const ( // for live and running processes. The instance will store some state between // checks that will be used for rates, cpu calculations, etc. type ProcessCheck struct { - config ddconfig.Reader + config pkgconfigmodel.Reader probe procutil.Probe // scrubber is a DataScrubber to hide command line sensitive words @@ -157,8 +158,8 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool p.skipAmount = uint32(p.config.GetInt32("process_config.process_discovery.hint_frequency")) if p.skipAmount == 0 { log.Warnf("process_config.process_discovery.hint_frequency must be greater than 0. 
using default value %d", - ddconfig.DefaultProcessDiscoveryHintFrequency) - p.skipAmount = ddconfig.DefaultProcessDiscoveryHintFrequency + pkgconfigsetup.DefaultProcessDiscoveryHintFrequency) + p.skipAmount = pkgconfigsetup.DefaultProcessDiscoveryHintFrequency } initScrubber(p.config, p.scrubber) @@ -172,7 +173,7 @@ func (p *ProcessCheck) Init(syscfg *SysProbeConfig, info *HostInfo, oneShot bool p.extractors = append(p.extractors, p.serviceExtractor) if !oneShot && workloadmeta.Enabled(p.config) { - p.workloadMetaExtractor = workloadmeta.GetSharedWorkloadMetaExtractor(ddconfig.SystemProbe()) + p.workloadMetaExtractor = workloadmeta.GetSharedWorkloadMetaExtractor(pkgconfigsetup.SystemProbe()) // The server is only needed on the process agent if !p.config.GetBool("process_config.run_in_core_agent.enabled") && flavor.GetFlavor() == flavor.ProcessAgent { @@ -689,7 +690,7 @@ func mergeProcWithSysprobeStats(pids []int32, procs map[int32]*procutil.Process, } } -func initScrubber(config ddconfig.Reader, scrubber *procutil.DataScrubber) { +func initScrubber(config pkgconfigmodel.Reader, scrubber *procutil.DataScrubber) { // Enable/Disable the DataScrubber to obfuscate process args if config.IsSet(configScrubArgs) { scrubber.Enabled = config.GetBool(configScrubArgs) @@ -713,7 +714,7 @@ func initScrubber(config ddconfig.Reader, scrubber *procutil.DataScrubber) { } } -func initDisallowList(config ddconfig.Reader) []*regexp.Regexp { +func initDisallowList(config pkgconfigmodel.Reader) []*regexp.Regexp { var disallowList []*regexp.Regexp // A list of regex patterns that will exclude a process if matched. 
if config.IsSet(configDisallowList) { diff --git a/pkg/process/checks/process_data.go b/pkg/process/checks/process_data.go index bda9c947f37dc..90585934ff05c 100644 --- a/pkg/process/checks/process_data.go +++ b/pkg/process/checks/process_data.go @@ -8,7 +8,7 @@ package checks import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/metadata" "github.com/DataDog/datadog-agent/pkg/process/procutil" ) @@ -22,7 +22,7 @@ type ProcessData struct { } // NewProcessData returns a new ProcessData from the given config -func NewProcessData(cfg config.Reader) *ProcessData { +func NewProcessData(cfg pkgconfigmodel.Reader) *ProcessData { return &ProcessData{ probe: newProcessProbe(cfg), } diff --git a/pkg/process/checks/process_discovery_check.go b/pkg/process/checks/process_discovery_check.go index c5a3b65069b70..967d29dc8f62a 100644 --- a/pkg/process/checks/process_discovery_check.go +++ b/pkg/process/checks/process_discovery_check.go @@ -9,8 +9,8 @@ import ( "fmt" "time" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -20,7 +20,7 @@ import ( ) // NewProcessDiscoveryCheck returns an instance of the ProcessDiscoveryCheck. -func NewProcessDiscoveryCheck(config ddconfig.Reader) *ProcessDiscoveryCheck { +func NewProcessDiscoveryCheck(config pkgconfigmodel.Reader) *ProcessDiscoveryCheck { return &ProcessDiscoveryCheck{ config: config, scrubber: procutil.NewDefaultDataScrubber(), @@ -32,7 +32,7 @@ func NewProcessDiscoveryCheck(config ddconfig.Reader) *ProcessDiscoveryCheck { // It uses its own ProcessDiscovery payload. // The goal of this check is to collect information about possible integrations that may be enabled by the end user. 
type ProcessDiscoveryCheck struct { - config ddconfig.Reader + config pkgconfigmodel.Reader probe procutil.Probe scrubber *procutil.DataScrubber diff --git a/pkg/process/checks/process_discovery_check_test.go b/pkg/process/checks/process_discovery_check_test.go index d20dac12f0bfd..233b863753f43 100644 --- a/pkg/process/checks/process_discovery_check_test.go +++ b/pkg/process/checks/process_discovery_check_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/process/procutil/mocks" ) @@ -50,7 +50,7 @@ func TestProcessDiscoveryCheck(t *testing.T) { }() maxBatchSize := 10 - getMaxBatchSize = func(config.Reader) int { return maxBatchSize } + getMaxBatchSize = func(pkgconfigmodel.Reader) int { return maxBatchSize } check := NewProcessDiscoveryCheck(configmock.New(t)) check.Init( diff --git a/pkg/process/checks/process_events_fallback.go b/pkg/process/checks/process_events_fallback.go index 4b1f864357228..b49b54378843d 100644 --- a/pkg/process/checks/process_events_fallback.go +++ b/pkg/process/checks/process_events_fallback.go @@ -10,11 +10,11 @@ package checks import ( "errors" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" ) // NewProcessEventsCheck returns an instance of the ProcessEventsCheck. 
-func NewProcessEventsCheck(config config.Reader) *ProcessEventsCheck { +func NewProcessEventsCheck(config pkgconfigmodel.Reader) *ProcessEventsCheck { return &ProcessEventsCheck{ config: config, } @@ -22,7 +22,7 @@ func NewProcessEventsCheck(config config.Reader) *ProcessEventsCheck { // ProcessEventsCheck collects process lifecycle events such as exec and exit signals type ProcessEventsCheck struct { - config config.Reader + config pkgconfigmodel.Reader } // Init initializes the ProcessEventsCheck. diff --git a/pkg/process/checks/process_events_linux.go b/pkg/process/checks/process_events_linux.go index 0c9154457d73b..630b31f383112 100644 --- a/pkg/process/checks/process_events_linux.go +++ b/pkg/process/checks/process_events_linux.go @@ -16,7 +16,7 @@ import ( payload "github.com/DataDog/agent-payload/v5/process" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/events" "github.com/DataDog/datadog-agent/pkg/process/events/model" "github.com/DataDog/datadog-agent/pkg/process/statsd" @@ -24,7 +24,7 @@ import ( ) // NewProcessEventsCheck returns an instance of the ProcessEventsCheck. 
-func NewProcessEventsCheck(config ddconfig.Reader) *ProcessEventsCheck { +func NewProcessEventsCheck(config pkgconfigmodel.Reader) *ProcessEventsCheck { return &ProcessEventsCheck{ config: config, } @@ -34,7 +34,7 @@ func NewProcessEventsCheck(config ddconfig.Reader) *ProcessEventsCheck { type ProcessEventsCheck struct { initMutex sync.Mutex - config ddconfig.Reader + config pkgconfigmodel.Reader store events.Store listener *events.SysProbeListener diff --git a/pkg/process/checks/process_probe.go b/pkg/process/checks/process_probe.go index d77f622259590..2f4f94272ba01 100644 --- a/pkg/process/checks/process_probe.go +++ b/pkg/process/checks/process_probe.go @@ -8,10 +8,10 @@ package checks import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" ) -func newProcessProbe(_ config.Reader, options ...procutil.Option) procutil.Probe { +func newProcessProbe(_ pkgconfigmodel.Reader, options ...procutil.Option) procutil.Probe { return procutil.NewProcessProbe(options...) 
} diff --git a/pkg/process/checks/process_probe_windows.go b/pkg/process/checks/process_probe_windows.go index 25bc83e7c259b..b8e3e29720935 100644 --- a/pkg/process/checks/process_probe_windows.go +++ b/pkg/process/checks/process_probe_windows.go @@ -6,12 +6,12 @@ package checks import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/log" ) -func newProcessProbe(config config.Reader, options ...procutil.Option) procutil.Probe { +func newProcessProbe(config pkgconfigmodel.Reader, options ...procutil.Option) procutil.Probe { if !config.GetBool("process_config.windows.use_perf_counters") { log.Info("Using toolhelp API probe for process data collection") return procutil.NewWindowsToolhelpProbe() diff --git a/pkg/process/checks/user_nix.go b/pkg/process/checks/user_nix.go index 7e99159c48ad3..e7412aeade221 100644 --- a/pkg/process/checks/user_nix.go +++ b/pkg/process/checks/user_nix.go @@ -13,13 +13,13 @@ import ( "github.com/patrickmn/go-cache" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) //nolint:revive // TODO(PROC) Fix revive linter type LookupIdProbe struct { - config config.Reader + config pkgconfigmodel.Reader lookupIdCache *cache.Cache //nolint:revive // TODO(PROC) Fix revive linter @@ -27,7 +27,7 @@ type LookupIdProbe struct { } // NewLookupIDProbe returns a new LookupIdProbe from the config -func NewLookupIDProbe(coreConfig config.Reader) *LookupIdProbe { +func NewLookupIDProbe(coreConfig pkgconfigmodel.Reader) *LookupIdProbe { if coreConfig.GetBool("process_config.cache_lookupid") { log.Debug("Using cached calls to `user.LookupID`") } diff --git a/pkg/process/checks/user_windows.go b/pkg/process/checks/user_windows.go index b50dd2ceae7c9..6b5196a3fea3a 100644 --- 
a/pkg/process/checks/user_windows.go +++ b/pkg/process/checks/user_windows.go @@ -8,7 +8,7 @@ package checks import ( - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" ) // On Windows the LookupIdProbe does nothing since we get the user info from the process itself. @@ -19,6 +19,6 @@ type LookupIdProbe struct{} // NewLookupIDProbe returns a new LookupIdProbe // //nolint:revive // TODO(PROC) Fix revive linter -func NewLookupIDProbe(config.Reader) *LookupIdProbe { +func NewLookupIDProbe(pkgconfigmodel.Reader) *LookupIdProbe { return &LookupIdProbe{} } diff --git a/pkg/process/events/listener_linux.go b/pkg/process/events/listener_linux.go index 787886091151f..4a59d29f3bec6 100644 --- a/pkg/process/events/listener_linux.go +++ b/pkg/process/events/listener_linux.go @@ -20,7 +20,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/eventmonitor/proto/api" "github.com/DataDog/datadog-agent/pkg/process/events/model" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -45,7 +45,7 @@ type SysProbeListener struct { // NewListener returns a new SysProbeListener to listen for process events func NewListener(handler EventHandler) (*SysProbeListener, error) { - socketPath := ddconfig.SystemProbe().GetString("event_monitoring_config.socket") + socketPath := pkgconfigsetup.SystemProbe().GetString("event_monitoring_config.socket") if socketPath == "" { return nil, errors.New("event_monitoring_config.socket must be set") } diff --git a/pkg/process/events/store.go b/pkg/process/events/store.go index 62145898d19b0..4d6767ac31e3c 100644 --- a/pkg/process/events/store.go +++ b/pkg/process/events/store.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" "go.uber.org/atomic" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/events/model" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -80,7 +80,7 @@ type RingStore struct { } // readPositiveInt reads a config stored in the given key and asserts that it's a positive value -func readPositiveInt(cfg config.Reader, key string) (int, error) { +func readPositiveInt(cfg pkgconfigmodel.Reader, key string) (int, error) { i := cfg.GetInt(key) if i <= 0 { return 0, fmt.Errorf("invalid setting. %s must be > 0", key) @@ -90,7 +90,7 @@ func readPositiveInt(cfg config.Reader, key string) (int, error) { } // NewRingStore creates a new RingStore to store process events -func NewRingStore(cfg config.Reader, client statsd.ClientInterface) (Store, error) { +func NewRingStore(cfg pkgconfigmodel.Reader, client statsd.ClientInterface) (Store, error) { maxItems, err := readPositiveInt(cfg, "process_config.event_collection.store.max_items") if err != nil { return nil, err diff --git a/pkg/process/metadata/parser/service_windows_test.go b/pkg/process/metadata/parser/service_windows_test.go index fa49194059515..b1d748df5e4d1 100644 --- a/pkg/process/metadata/parser/service_windows_test.go +++ b/pkg/process/metadata/parser/service_windows_test.go @@ -13,8 +13,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/util/winutil" ) @@ -90,7 +90,7 @@ func TestWindowsExtractServiceMetadata(t *testing.T) { } func TestWindowsExtractServiceWithSCMReader(t *testing.T) { - makeServiceExtractor := func(t *testing.T, sysprobeConfig ddconfig.Reader) (*ServiceExtractor, *mockSCM) { + makeServiceExtractor := 
func(t *testing.T, sysprobeConfig pkgconfigmodel.Reader) (*ServiceExtractor, *mockSCM) { enabled := sysprobeConfig.GetBool("system_probe_config.process_service_inference.enabled") useWindowsServiceName := sysprobeConfig.GetBool("system_probe_config.process_service_inference.use_windows_service_name") useImprovedAlgorithm := sysprobeConfig.GetBool("system_probe_config.process_service_inference.use_improved_algorithm") diff --git a/pkg/process/metadata/workloadmeta/collector/process.go b/pkg/process/metadata/workloadmeta/collector/process.go index 76801fdb71a79..bc9d9ea14ea0c 100644 --- a/pkg/process/metadata/workloadmeta/collector/process.go +++ b/pkg/process/metadata/workloadmeta/collector/process.go @@ -13,7 +13,7 @@ import ( "github.com/benbjohnson/clock" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/checks" workloadmetaExtractor "github.com/DataDog/datadog-agent/pkg/process/metadata/workloadmeta" proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" @@ -26,7 +26,7 @@ const ( ) // NewProcessCollector creates a new process collector. -func NewProcessCollector(coreConfig, sysProbeConfig config.Reader) *Collector { +func NewProcessCollector(coreConfig, sysProbeConfig pkgconfigmodel.Reader) *Collector { wlmExtractor := workloadmetaExtractor.NewWorkloadMetaExtractor(sysProbeConfig) processData := checks.NewProcessData(coreConfig) @@ -45,7 +45,7 @@ func NewProcessCollector(coreConfig, sysProbeConfig config.Reader) *Collector { // Collector collects processes to send to the remote process collector in the core agent. // It is only intended to be used when language detection is enabled, and the process check is disabled. 
type Collector struct { - ddConfig config.Reader + ddConfig pkgconfigmodel.Reader processData *checks.ProcessData @@ -106,7 +106,7 @@ func (c *Collector) run(ctx context.Context, containerProvider proccontainers.Co // Additionally, if the remote process collector is not enabled in the core agent, there is no reason to collect processes. Therefore, we check `language_detection.enabled`. // We also check `process_config.run_in_core_agent.enabled` because this collector should only be used when the core agent collector is not running. // Finally, we only want to run this collector in the process agent, so if we're running as anything else we should disable the collector. -func Enabled(cfg config.Reader) bool { +func Enabled(cfg pkgconfigmodel.Reader) bool { if cfg.GetBool("process_config.process_collection.enabled") { return false } diff --git a/pkg/process/metadata/workloadmeta/extractor.go b/pkg/process/metadata/workloadmeta/extractor.go index 429c092bd5642..25db230e57263 100644 --- a/pkg/process/metadata/workloadmeta/extractor.go +++ b/pkg/process/metadata/workloadmeta/extractor.go @@ -11,7 +11,7 @@ import ( "strconv" "sync" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/languagedetection" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/process/procutil" @@ -51,7 +51,7 @@ type WorkloadMetaExtractor struct { pidToCid map[int]string - sysprobeConfig config.Reader + sysprobeConfig pkgconfigmodel.Reader } // ProcessCacheDiff holds the information about processes that have been created and deleted in the past @@ -72,7 +72,7 @@ var ( ) // GetSharedWorkloadMetaExtractor returns a shared WorkloadMetaExtractor -func GetSharedWorkloadMetaExtractor(sysprobeConfig config.Reader) *WorkloadMetaExtractor { +func GetSharedWorkloadMetaExtractor(sysprobeConfig pkgconfigmodel.Reader) *WorkloadMetaExtractor { 
initWorkloadMetaExtractor.Do(func() { sharedWorkloadMetaExtractor = NewWorkloadMetaExtractor(sysprobeConfig) }) @@ -80,7 +80,7 @@ func GetSharedWorkloadMetaExtractor(sysprobeConfig config.Reader) *WorkloadMetaE } // NewWorkloadMetaExtractor constructs the WorkloadMetaExtractor. -func NewWorkloadMetaExtractor(sysprobeConfig config.Reader) *WorkloadMetaExtractor { +func NewWorkloadMetaExtractor(sysprobeConfig pkgconfigmodel.Reader) *WorkloadMetaExtractor { log.Info("Instantiating a new WorkloadMetaExtractor") return &WorkloadMetaExtractor{ @@ -197,7 +197,7 @@ func getDifference(oldCache, newCache map[string]*ProcessEntity) []*ProcessEntit } // Enabled returns whether the extractor should be enabled -func Enabled(ddconfig config.Reader) bool { +func Enabled(ddconfig pkgconfigmodel.Reader) bool { enabled := ddconfig.GetBool("language_detection.enabled") if enabled && runtime.GOOS == "darwin" { log.Warn("Language detection is not supported on macOS") diff --git a/pkg/process/metadata/workloadmeta/grpc.go b/pkg/process/metadata/workloadmeta/grpc.go index 28592954005d6..ab4c947bd5d69 100644 --- a/pkg/process/metadata/workloadmeta/grpc.go +++ b/pkg/process/metadata/workloadmeta/grpc.go @@ -18,7 +18,8 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/telemetry" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" @@ -32,7 +33,7 @@ var DuplicateConnectionErr = errors.New("the stream was closed because another c // GRPCServer implements a gRPC server to expose Process Entities collected with a WorkloadMetaExtractor type GRPCServer struct { - config config.Reader + config pkgconfigmodel.Reader extractor *WorkloadMetaExtractor server *grpc.Server // 
The address of the server set by start(). Primarily used for testing. May be nil if start() has not been called. @@ -53,7 +54,7 @@ var ( ) // NewGRPCServer creates a new instance of a GRPCServer -func NewGRPCServer(config config.Reader, extractor *WorkloadMetaExtractor) *GRPCServer { +func NewGRPCServer(config pkgconfigmodel.Reader, extractor *WorkloadMetaExtractor) *GRPCServer { l := &GRPCServer{ config: config, extractor: extractor, @@ -201,8 +202,8 @@ func (l *GRPCServer) StreamEntities(_ *pbgo.ProcessStreamEntitiesRequest, out pb } // getListener returns a listening connection -func getListener(cfg config.Reader) (net.Listener, error) { - host, err := config.GetIPCAddress() +func getListener(cfg pkgconfigmodel.Reader) (net.Listener, error) { + host, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, err } @@ -211,11 +212,11 @@ func getListener(cfg config.Reader) (net.Listener, error) { return net.Listen("tcp", address) } -func getGRPCStreamPort(cfg config.Reader) int { +func getGRPCStreamPort(cfg pkgconfigmodel.Reader) int { grpcPort := cfg.GetInt("process_config.language_detection.grpc_port") if grpcPort <= 0 { - log.Warnf("Invalid process_config.language_detection.grpc_port -- %d, using default port %d", grpcPort, config.DefaultProcessEntityStreamPort) - grpcPort = config.DefaultProcessEntityStreamPort + log.Warnf("Invalid process_config.language_detection.grpc_port -- %d, using default port %d", grpcPort, pkgconfigsetup.DefaultProcessEntityStreamPort) + grpcPort = pkgconfigsetup.DefaultProcessEntityStreamPort } return grpcPort } diff --git a/pkg/process/metadata/workloadmeta/grpc_test.go b/pkg/process/metadata/workloadmeta/grpc_test.go index 8788abf4c098b..7f9b7bccd87a3 100644 --- a/pkg/process/metadata/workloadmeta/grpc_test.go +++ b/pkg/process/metadata/workloadmeta/grpc_test.go @@ -19,8 +19,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" 
"github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" "github.com/DataDog/datadog-agent/pkg/process/procutil" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" @@ -33,7 +33,7 @@ func TestGetGRPCStreamPort(t *testing.T) { cfg := configmock.New(t) cfg.SetWithoutSource("process_config.language_detection.grpc_port", "lorem ipsum") - assert.Equal(t, config.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) + assert.Equal(t, pkgconfigsetup.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) }) t.Run("valid port", func(t *testing.T) { @@ -45,7 +45,7 @@ func TestGetGRPCStreamPort(t *testing.T) { t.Run("default", func(t *testing.T) { cfg := configmock.New(t) - assert.Equal(t, config.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) + assert.Equal(t, pkgconfigsetup.DefaultProcessEntityStreamPort, getGRPCStreamPort(cfg)) }) } diff --git a/pkg/process/runner/collector_api_test.go b/pkg/process/runner/collector_api_test.go index 7c72976dfe59e..257b07e00d659 100644 --- a/pkg/process/runner/collector_api_test.go +++ b/pkg/process/runner/collector_api_test.go @@ -18,8 +18,8 @@ import ( "github.com/DataDog/agent-payload/v5/process" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/process/checks" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" @@ -33,7 +33,7 @@ import ( const testHostName = "test-host" -func setProcessEndpointsForTest(config ddconfig.Config, eps ...apicfg.Endpoint) { +func setProcessEndpointsForTest(config 
pkgconfigmodel.Config, eps ...apicfg.Endpoint) { additionalEps := make(map[string][]string) for i, ep := range eps { if i == 0 { @@ -46,7 +46,7 @@ func setProcessEndpointsForTest(config ddconfig.Config, eps ...apicfg.Endpoint) config.SetWithoutSource("process_config.additional_endpoints", additionalEps) } -func setProcessEventsEndpointsForTest(config ddconfig.Config, eps ...apicfg.Endpoint) { +func setProcessEventsEndpointsForTest(config pkgconfigmodel.Config, eps ...apicfg.Endpoint) { additionalEps := make(map[string][]string) for i, ep := range eps { if i == 0 { @@ -432,11 +432,11 @@ func TestMultipleAPIKeys(t *testing.T) { }) } -func runCollectorTest(t *testing.T, check checks.Check, epConfig *endpointConfig, mockConfig ddconfig.Config, tc func(c *CheckRunner, ep *mockEndpoint)) { +func runCollectorTest(t *testing.T, check checks.Check, epConfig *endpointConfig, mockConfig pkgconfigmodel.Config, tc func(c *CheckRunner, ep *mockEndpoint)) { runCollectorTestWithAPIKeys(t, check, epConfig, []string{"apiKey"}, mockConfig, tc) } -func runCollectorTestWithAPIKeys(t *testing.T, check checks.Check, epConfig *endpointConfig, apiKeys []string, mockConfig ddconfig.Config, tc func(c *CheckRunner, ep *mockEndpoint)) { +func runCollectorTestWithAPIKeys(t *testing.T, check checks.Check, epConfig *endpointConfig, apiKeys []string, mockConfig pkgconfigmodel.Config, tc func(c *CheckRunner, ep *mockEndpoint)) { ep := newMockEndpoint(t, epConfig) collectorAddr, eventsAddr := ep.start() defer ep.stop() diff --git a/pkg/process/runner/endpoint/endpoints.go b/pkg/process/runner/endpoint/endpoints.go index f85c2e929574c..7aaaf6b276d9c 100644 --- a/pkg/process/runner/endpoint/endpoints.go +++ b/pkg/process/runner/endpoint/endpoints.go @@ -10,24 +10,25 @@ import ( "fmt" "net/url" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/config/utils" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" ) // GetAPIEndpoints returns the list of api endpoints from the config -func GetAPIEndpoints(config ddconfig.Reader) (eps []apicfg.Endpoint, err error) { +func GetAPIEndpoints(config pkgconfigmodel.Reader) (eps []apicfg.Endpoint, err error) { return getAPIEndpointsWithKeys(config, "https://process.", "process_config.process_dd_url", "process_config.additional_endpoints") } // GetEventsAPIEndpoints returns the list of api event endpoints from the config -func GetEventsAPIEndpoints(config ddconfig.Reader) (eps []apicfg.Endpoint, err error) { +func GetEventsAPIEndpoints(config pkgconfigmodel.Reader) (eps []apicfg.Endpoint, err error) { return getAPIEndpointsWithKeys(config, "https://process-events.", "process_config.events_dd_url", "process_config.events_additional_endpoints") } -func getAPIEndpointsWithKeys(config ddconfig.Reader, prefix, defaultEpKey, additionalEpsKey string) (eps []apicfg.Endpoint, err error) { +func getAPIEndpointsWithKeys(config pkgconfigmodel.Reader, prefix, defaultEpKey, additionalEpsKey string) (eps []apicfg.Endpoint, err error) { // Setup main endpoint - mainEndpointURL, err := url.Parse(utils.GetMainEndpoint(ddconfig.Datadog(), prefix, defaultEpKey)) + mainEndpointURL, err := url.Parse(utils.GetMainEndpoint(pkgconfigsetup.Datadog(), prefix, defaultEpKey)) if err != nil { return nil, fmt.Errorf("error parsing %s: %s", defaultEpKey, err) } diff --git a/pkg/process/runner/endpoints_test.go b/pkg/process/runner/endpoints_test.go index 779551e949541..bfdfe816772ce 100644 --- a/pkg/process/runner/endpoints_test.go +++ b/pkg/process/runner/endpoints_test.go @@ -11,8 +11,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" ) @@ -38,7 +38,7 @@ func TestGetAPIEndpoints(t *testing.T) { expected: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), }, }, }, @@ -63,7 +63,7 @@ func TestGetAPIEndpoints(t *testing.T) { }, expected: []apicfg.Endpoint{ { - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), APIKey: "test", }, { @@ -183,13 +183,13 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { expectedEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), }, }, expectedEventsEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEventsEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEventsEndpoint), }, }, }, @@ -206,7 +206,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { expectedEventsEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEventsEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEventsEndpoint), }, }, }, @@ -217,7 +217,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { expectedEndpoints: []apicfg.Endpoint{ { APIKey: "test", - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), }, }, expectedEventsEndpoints: []apicfg.Endpoint{ @@ -249,7 +249,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { }, expectedEndpoints: []apicfg.Endpoint{ { - Endpoint: mkurl(config.DefaultProcessEndpoint), + Endpoint: mkurl(pkgconfigsetup.DefaultProcessEndpoint), APIKey: "test", }, { @@ -267,7 +267,7 @@ func TestGetConcurrentAPIEndpoints(t *testing.T) { }, expectedEventsEndpoints: []apicfg.Endpoint{ { - Endpoint: mkurl(config.DefaultProcessEventsEndpoint), + Endpoint: 
mkurl(pkgconfigsetup.DefaultProcessEventsEndpoint), APIKey: "test", }, { diff --git a/pkg/process/runner/runner.go b/pkg/process/runner/runner.go index c1dd778c270d8..ba5cda0bc8fa0 100644 --- a/pkg/process/runner/runner.go +++ b/pkg/process/runner/runner.go @@ -20,7 +20,7 @@ import ( sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/process/types" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config" "github.com/DataDog/datadog-agent/pkg/process/checks" "github.com/DataDog/datadog-agent/pkg/process/status" @@ -54,7 +54,7 @@ type Runner interface{} // CheckRunner will collect metrics from the local system and ship to the backend. type CheckRunner struct { - config ddconfig.Reader + config pkgconfigmodel.Reader sysProbeCfg *checks.SysProbeConfig hostInfo *checks.HostInfo @@ -97,7 +97,7 @@ func (l *CheckRunner) RunRealTime() bool { // NewRunner creates a new CheckRunner func NewRunner( - config ddconfig.Reader, + config pkgconfigmodel.Reader, sysCfg *sysconfigtypes.Config, hostInfo *checks.HostInfo, enabledChecks []checks.Check, @@ -119,7 +119,7 @@ func NewRunner( // NewRunnerWithChecks creates a new CheckRunner func NewRunnerWithChecks( - config ddconfig.Reader, + config pkgconfigmodel.Reader, sysProbeCfg *checks.SysProbeConfig, hostInfo *checks.HostInfo, checks []checks.Check, diff --git a/pkg/process/runner/submitter.go b/pkg/process/runner/submitter.go index a2ee141a960a7..f4fd31c1cd92c 100644 --- a/pkg/process/runner/submitter.go +++ b/pkg/process/runner/submitter.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/types" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/process/checks" "github.com/DataDog/datadog-agent/pkg/process/runner/endpoint" "github.com/DataDog/datadog-agent/pkg/process/statsd" @@ -92,22 +92,22 @@ type CheckSubmitter struct { func NewSubmitter(config config.Component, log log.Component, forwarders forwarders.Component, hostname string) (*CheckSubmitter, error) { queueBytes := config.GetInt("process_config.process_queue_bytes") if queueBytes <= 0 { - log.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, ddconfig.DefaultProcessQueueBytes) - queueBytes = ddconfig.DefaultProcessQueueBytes + log.Warnf("Invalid queue bytes size: %d. Using default value: %d", queueBytes, pkgconfigsetup.DefaultProcessQueueBytes) + queueBytes = pkgconfigsetup.DefaultProcessQueueBytes } queueSize := config.GetInt("process_config.queue_size") if queueSize <= 0 { - log.Warnf("Invalid check queue size: %d. Using default value: %d", queueSize, ddconfig.DefaultProcessQueueSize) - queueSize = ddconfig.DefaultProcessQueueSize + log.Warnf("Invalid check queue size: %d. Using default value: %d", queueSize, pkgconfigsetup.DefaultProcessQueueSize) + queueSize = pkgconfigsetup.DefaultProcessQueueSize } processResults := api.NewWeightedQueue(queueSize, int64(queueBytes)) log.Debugf("Creating process check queue with max_size=%d and max_weight=%d", processResults.MaxSize(), processResults.MaxWeight()) rtQueueSize := config.GetInt("process_config.rt_queue_size") if rtQueueSize <= 0 { - log.Warnf("Invalid rt check queue size: %d. Using default value: %d", rtQueueSize, ddconfig.DefaultProcessRTQueueSize) - rtQueueSize = ddconfig.DefaultProcessRTQueueSize + log.Warnf("Invalid rt check queue size: %d. 
Using default value: %d", rtQueueSize, pkgconfigsetup.DefaultProcessRTQueueSize) + rtQueueSize = pkgconfigsetup.DefaultProcessRTQueueSize } // reuse main queue's ProcessQueueBytes because it's unlikely that it'll reach to that size in bytes, so we don't need a separate config for it rtProcessResults := api.NewWeightedQueue(rtQueueSize, int64(queueBytes)) diff --git a/pkg/process/runner/submitter_test.go b/pkg/process/runner/submitter_test.go index 81da4347a1473..2825af8105c89 100644 --- a/pkg/process/runner/submitter_test.go +++ b/pkg/process/runner/submitter_test.go @@ -16,19 +16,21 @@ import ( "go.uber.org/fx" model "github.com/DataDog/agent-payload/v5/process" + mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" + "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/process/forwarders" "github.com/DataDog/datadog-agent/comp/process/forwarders/forwardersimpl" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" processStatsd "github.com/DataDog/datadog-agent/pkg/process/statsd" "github.com/DataDog/datadog-agent/pkg/process/util/api/headers" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/version" - mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" ) func TestNewCollectorQueueSize(t *testing.T) { @@ -42,7 +44,7 @@ func TestNewCollectorQueueSize(t *testing.T) { name: "default queue size", override: false, queueSize: 42, - expectedQueueSize: ddconfig.DefaultProcessQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueSize, }, { name: "valid queue size override", @@ -54,13 +56,13 @@ func 
TestNewCollectorQueueSize(t *testing.T) { name: "invalid negative queue size override", override: true, queueSize: -10, - expectedQueueSize: ddconfig.DefaultProcessQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueSize, }, { name: "invalid 0 queue size override", override: true, queueSize: 0, - expectedQueueSize: ddconfig.DefaultProcessQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueSize, }, } @@ -89,7 +91,7 @@ func TestNewCollectorRTQueueSize(t *testing.T) { name: "default queue size", override: false, queueSize: 2, - expectedQueueSize: ddconfig.DefaultProcessRTQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessRTQueueSize, }, { name: "valid queue size override", @@ -101,13 +103,13 @@ func TestNewCollectorRTQueueSize(t *testing.T) { name: "invalid negative size override", override: true, queueSize: -2, - expectedQueueSize: ddconfig.DefaultProcessRTQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessRTQueueSize, }, { name: "invalid 0 queue size override", override: true, queueSize: 0, - expectedQueueSize: ddconfig.DefaultProcessRTQueueSize, + expectedQueueSize: pkgconfigsetup.DefaultProcessRTQueueSize, }, } @@ -136,7 +138,7 @@ func TestNewCollectorProcessQueueBytes(t *testing.T) { name: "default queue size", override: false, queueBytes: 42000, - expectedQueueSize: ddconfig.DefaultProcessQueueBytes, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueBytes, }, { name: "valid queue size override", @@ -148,13 +150,13 @@ func TestNewCollectorProcessQueueBytes(t *testing.T) { name: "invalid negative queue size override", override: true, queueBytes: -2, - expectedQueueSize: ddconfig.DefaultProcessQueueBytes, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueBytes, }, { name: "invalid 0 queue size override", override: true, queueBytes: 0, - expectedQueueSize: ddconfig.DefaultProcessQueueBytes, + expectedQueueSize: pkgconfigsetup.DefaultProcessQueueBytes, }, } @@ -390,7 +392,7 @@ func newSubmitterDeps(t 
*testing.T) submitterDeps { return fxutil.Test[submitterDeps](t, getForwardersMockModules(t, nil)) } -func newSubmitterDepsWithConfig(t *testing.T, config ddconfig.Config) submitterDeps { +func newSubmitterDepsWithConfig(t *testing.T, config pkgconfigmodel.Config) submitterDeps { overrides := config.AllSettings() return fxutil.Test[submitterDeps](t, getForwardersMockModules(t, overrides)) } diff --git a/pkg/process/status/expvars.go b/pkg/process/status/expvars.go index f9642d22c7385..897c1014f5829 100644 --- a/pkg/process/status/expvars.go +++ b/pkg/process/status/expvars.go @@ -21,7 +21,7 @@ import ( model "github.com/DataDog/agent-payload/v5/process" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/version" @@ -247,7 +247,7 @@ func publishDropCheckPayloads() interface{} { } // InitExpvars initializes expvars -func InitExpvars(_ ddconfig.Reader, hostname string, processModuleEnabled, languageDetectionEnabled bool, eps []apicfg.Endpoint) { +func InitExpvars(_ pkgconfigmodel.Reader, hostname string, processModuleEnabled, languageDetectionEnabled bool, eps []apicfg.Endpoint) { infoOnce.Do(func() { processExpvars := expvar.NewMap("process_agent") hostString := expvar.NewString("host") diff --git a/pkg/process/util/status/status.go b/pkg/process/util/status/status.go index 2d05dbd664f2b..aea42e11f5fb6 100644 --- a/pkg/process/util/status/status.go +++ b/pkg/process/util/status/status.go @@ -16,7 +16,7 @@ import ( hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" apiutil "github.com/DataDog/datadog-agent/pkg/api/util" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/version" ) @@ 
-128,7 +128,7 @@ func OverrideTime(t time.Time) StatusOption { } } -func getCoreStatus(coreConfig ddconfig.Reader) (s CoreStatus) { +func getCoreStatus(coreConfig pkgconfigmodel.Reader) (s CoreStatus) { return CoreStatus{ AgentVersion: version.AgentVersion, GoVersion: runtime.Version(), @@ -152,7 +152,7 @@ func getExpvars(expVarURL string) (s ProcessExpvars, err error) { } // GetStatus returns a Status object with runtime information about process-agent -func GetStatus(coreConfig ddconfig.Reader, expVarURL string) (*Status, error) { +func GetStatus(coreConfig pkgconfigmodel.Reader, expVarURL string) (*Status, error) { coreStatus := getCoreStatus(coreConfig) processExpVars, err := getExpvars(expVarURL) if err != nil { diff --git a/pkg/process/util/status/status_test.go b/pkg/process/util/status/status_test.go index 5efd72f4d9585..89f37e5ee9083 100644 --- a/pkg/process/util/status/status_test.go +++ b/pkg/process/util/status/status_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" hostMetadataUtils "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" - ddconfig "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -77,7 +77,7 @@ func TestGetStatus(t *testing.T) { // Feature detection needs to run before host methods are called. 
During runtime, feature detection happens // when the datadog.yaml file is loaded cfg := configmock.New(t) - ddconfig.SetFeatures(t) + env.SetFeatures(t) cfg.SetWithoutSource("hostname", "test") // Prevents panic since feature detection has not run cfg.SetWithoutSource("language_detection.enabled", true) diff --git a/pkg/remoteconfig/state/products.go b/pkg/remoteconfig/state/products.go index 35974c5e94386..120229ecf2d39 100644 --- a/pkg/remoteconfig/state/products.go +++ b/pkg/remoteconfig/state/products.go @@ -30,6 +30,7 @@ var validProducts = map[string]struct{}{ ProductContainerAutoscalingValues: {}, ProductTesting1: {}, ProductTesting2: {}, + ProductOrchestratorK8sCRDs: {}, } const ( @@ -81,4 +82,6 @@ const ( ProductTesting1 = "TESTING1" // ProductTesting2 is a product used for testing remote config ProductTesting2 = "TESTING2" + // ProductOrchestratorK8sCRDs receives values for k8s crds + ProductOrchestratorK8sCRDs = "ORCHESTRATOR_K8S_CRDS" ) diff --git a/pkg/sbom/collectors/host/request.go b/pkg/sbom/collectors/host/request.go index 3b3481f7c2633..c30cb453ceab9 100644 --- a/pkg/sbom/collectors/host/request.go +++ b/pkg/sbom/collectors/host/request.go @@ -7,7 +7,10 @@ package host import ( "io/fs" + "os" + "path/filepath" + "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/sbom/types" ) @@ -18,11 +21,42 @@ type scanRequest struct { FS fs.FS } +type relFS struct { + root string + fs fs.FS +} + +func newFS(root string) fs.FS { + fs := os.DirFS(root) + return &relFS{root: "/", fs: fs} +} + +func (f *relFS) Open(name string) (fs.File, error) { + if filepath.IsAbs(name) { + var err error + name, err = filepath.Rel(f.root, name) + if err != nil { + return nil, err + } + } + + return f.fs.Open(name) +} + // NewScanRequest creates a new scan request func NewScanRequest(path string, fs fs.FS) types.ScanRequest { return scanRequest{Path: path, FS: fs} } +// NewHostScanRequest creates a new scan request for the root filesystem 
+func NewHostScanRequest() types.ScanRequest { + scanPath := "/" + if hostRoot := os.Getenv("HOST_ROOT"); env.IsContainerized() && hostRoot != "" { + scanPath = hostRoot + } + return NewScanRequest(scanPath, newFS("/")) +} + // Collector returns the collector name func (r scanRequest) Collector() string { return "host" diff --git a/pkg/sbom/scanner/scanner_test.go b/pkg/sbom/scanner/scanner_test.go index ba522b63cbefc..e10743f19828d 100644 --- a/pkg/sbom/scanner/scanner_test.go +++ b/pkg/sbom/scanner/scanner_test.go @@ -21,7 +21,6 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" @@ -121,7 +120,7 @@ func TestRetryLogic_Error(t *testing.T) { mockCollector.On("Type").Return(tt.st) // Set up the configuration as the default one is too slow - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.Set("sbom.scan_queue.base_backoff", "200ms", model.SourceAgentRuntime) cfg.Set("sbom.scan_queue.max_backoff", "600ms", model.SourceAgentRuntime) cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker @@ -186,7 +185,7 @@ func TestRetryLogic_ImageDeleted(t *testing.T) { mockCollector.On("Type").Return(collectors.ContainerImageScanType) // Set up the configuration as the default one is too slow - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.Set("sbom.scan_queue.base_backoff", "200ms", model.SourceAgentRuntime) cfg.Set("sbom.scan_queue.max_backoff", 
"600ms", model.SourceAgentRuntime) cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker @@ -251,7 +250,7 @@ func TestRetryChannelFull(t *testing.T) { mockCollector.On("Type").Return(collectors.ContainerImageScanType) // Set up the configuration - cfg := config.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) + cfg := model.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) cfg.Set("sbom.scan_queue.base_backoff", "200ms", model.SourceAgentRuntime) cfg.Set("sbom.scan_queue.max_backoff", "600ms", model.SourceAgentRuntime) cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker diff --git a/pkg/security/agent/client.go b/pkg/security/agent/client.go index 46325e9b60722..317cd472695c4 100644 --- a/pkg/security/agent/client.go +++ b/pkg/security/agent/client.go @@ -18,7 +18,7 @@ import ( "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials/insecure" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/proto/api" ) @@ -182,7 +182,7 @@ func (c *RuntimeSecurityClient) Close() { // NewRuntimeSecurityClient instantiates a new RuntimeSecurityClient func NewRuntimeSecurityClient() (*RuntimeSecurityClient, error) { - socketPath := coreconfig.Datadog().GetString("runtime_security_config.socket") + socketPath := pkgconfigsetup.Datadog().GetString("runtime_security_config.socket") if socketPath == "" { return nil, errors.New("runtime_security_config.socket must be set") } diff --git a/pkg/security/common/logs_context.go b/pkg/security/common/logs_context.go index a274f6bbba851..7ddddf0701a6d 100644 --- a/pkg/security/common/logs_context.go +++ b/pkg/security/common/logs_context.go @@ -10,7 +10,7 @@ import ( "fmt" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -22,7 +22,7 @@ const ( // NewLogContextCompliance returns the context fields to send compliance events to the intake func NewLogContextCompliance() (*logsconfig.Endpoints, *client.DestinationsContext, error) { - logsConfigComplianceKeys := logsconfig.NewLogsConfigKeys("compliance_config.endpoints.", pkgconfig.Datadog()) + logsConfigComplianceKeys := logsconfig.NewLogsConfigKeys("compliance_config.endpoints.", pkgconfigsetup.Datadog()) return NewLogContext(logsConfigComplianceKeys, "cspm-intake.", "compliance", logsconfig.DefaultIntakeOrigin, logsconfig.AgentJSONIntakeProtocol) } @@ -39,18 +39,18 @@ func NewLogContextRuntime(useSecRuntimeTrack bool) (*logsconfig.Endpoints, *clie trackType = "logs" } - logsRuntimeConfigKeys := logsconfig.NewLogsConfigKeys("runtime_security_config.endpoints.", pkgconfig.Datadog()) + logsRuntimeConfigKeys := logsconfig.NewLogsConfigKeys("runtime_security_config.endpoints.", pkgconfigsetup.Datadog()) return NewLogContext(logsRuntimeConfigKeys, "runtime-security-http-intake.logs.", trackType, cwsIntakeOrigin, logsconfig.DefaultIntakeProtocol) } // NewLogContext returns the context fields to send events to the intake func NewLogContext(logsConfig *logsconfig.LogsConfigKeys, endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeOrigin logsconfig.IntakeOrigin, intakeProtocol logsconfig.IntakeProtocol) (*logsconfig.Endpoints, *client.DestinationsContext, error) { - endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(pkgconfig.Datadog(), logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) + endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), logsConfig, endpointPrefix, intakeTrackType, 
intakeProtocol, intakeOrigin) if err != nil { - endpoints, err = logsconfig.BuildHTTPEndpoints(pkgconfig.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) + endpoints, err = logsconfig.BuildHTTPEndpoints(pkgconfigsetup.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) if err == nil { - httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfig.Datadog()) - endpoints, err = logsconfig.BuildEndpoints(pkgconfig.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) + httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfigsetup.Datadog()) + endpoints, err = logsconfig.BuildEndpoints(pkgconfigsetup.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) } } diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index 95a261f32963d..8ca5c7ef33ae8 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -15,7 +15,8 @@ import ( sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" pconfig "github.com/DataDog/datadog-agent/pkg/security/probe/config" @@ -156,8 +157,6 @@ type RuntimeSecurityConfig struct { SecurityProfileCacheSize int // SecurityProfileMaxCount defines the maximum number of Security Profiles that may be evaluated concurrently SecurityProfileMaxCount int - // SecurityProfileRCEnabled defines if remote-configuration is enabled - SecurityProfileRCEnabled bool // SecurityProfileDNSMatchMaxDepth defines the max depth of subdomain to be matched for DNS anomaly detection (0 to match everything) SecurityProfileDNSMatchMaxDepth int @@ 
-301,7 +300,7 @@ func NewConfig() (*Config, error) { // NewRuntimeSecurityConfig returns the runtime security (CWS) config, build from the system probe one func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { - sysconfig.Adjust(coreconfig.SystemProbe()) + sysconfig.Adjust(pkgconfigsetup.SystemProbe()) eventTypeStrings := map[string]model.EventType{} @@ -323,65 +322,65 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { } rsConfig := &RuntimeSecurityConfig{ - RuntimeEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enabled"), - FIMEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.fim_enabled"), - WindowsFilenameCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.windows_filename_cache_max"), - WindowsRegistryCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.windows_registry_cache_max"), - ETWEventsChannelSize: coreconfig.SystemProbe().GetInt("runtime_security_config.etw_events_channel_size"), - ETWEventsMaxBuffers: coreconfig.SystemProbe().GetInt("runtime_security_config.etw_events_max_buffers"), - WindowsProbeBlockOnChannelSend: coreconfig.SystemProbe().GetBool("runtime_security_config.windows_probe_block_on_channel_send"), - - SocketPath: coreconfig.SystemProbe().GetString("runtime_security_config.socket"), - EventServerBurst: coreconfig.SystemProbe().GetInt("runtime_security_config.event_server.burst"), - EventServerRate: coreconfig.SystemProbe().GetInt("runtime_security_config.event_server.rate"), - EventServerRetention: coreconfig.SystemProbe().GetDuration("runtime_security_config.event_server.retention"), - - SelfTestEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.self_test.enabled"), - SelfTestSendReport: coreconfig.SystemProbe().GetBool("runtime_security_config.self_test.send_report"), + RuntimeEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enabled"), + FIMEnabled: 
pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.fim_enabled"), + WindowsFilenameCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_filename_cache_max"), + WindowsRegistryCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.windows_registry_cache_max"), + ETWEventsChannelSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.etw_events_channel_size"), + ETWEventsMaxBuffers: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.etw_events_max_buffers"), + WindowsProbeBlockOnChannelSend: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.windows_probe_block_on_channel_send"), + + SocketPath: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.socket"), + EventServerBurst: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.event_server.burst"), + EventServerRate: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.event_server.rate"), + EventServerRetention: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.event_server.retention"), + + SelfTestEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.self_test.enabled"), + SelfTestSendReport: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.self_test.send_report"), RemoteConfigurationEnabled: isRemoteConfigEnabled(), - RemoteConfigurationDumpPolicies: coreconfig.SystemProbe().GetBool("runtime_security_config.remote_configuration.dump_policies"), + RemoteConfigurationDumpPolicies: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.remote_configuration.dump_policies"), - OnDemandEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.on_demand.enabled"), - OnDemandRateLimiterEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.on_demand.rate_limiter.enabled"), + OnDemandEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.on_demand.enabled"), + OnDemandRateLimiterEnabled: 
pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.on_demand.rate_limiter.enabled"), // policy & ruleset - PoliciesDir: coreconfig.SystemProbe().GetString("runtime_security_config.policies.dir"), - WatchPoliciesDir: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.watch_dir"), - PolicyMonitorEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.monitor.enabled"), - PolicyMonitorPerRuleEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.monitor.per_rule_enabled"), - PolicyMonitorReportInternalPolicies: coreconfig.SystemProbe().GetBool("runtime_security_config.policies.monitor.report_internal_policies"), + PoliciesDir: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.policies.dir"), + WatchPoliciesDir: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.watch_dir"), + PolicyMonitorEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.enabled"), + PolicyMonitorPerRuleEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.per_rule_enabled"), + PolicyMonitorReportInternalPolicies: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.policies.monitor.report_internal_policies"), - LogPatterns: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.log_patterns"), - LogTags: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.log_tags"), + LogPatterns: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.log_patterns"), + LogTags: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.log_tags"), // custom events - InternalMonitoringEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.internal_monitoring.enabled"), + InternalMonitoringEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.internal_monitoring.enabled"), // activity dump - ActivityDumpEnabled: 
coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.enabled"), - ActivityDumpCleanupPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.cleanup_period"), - ActivityDumpTagsResolutionPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.tags_resolution_period"), - ActivityDumpLoadControlPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.load_controller_period"), - ActivityDumpLoadControlMinDumpTimeout: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.min_timeout"), - ActivityDumpTracedCgroupsCount: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.traced_cgroups_count"), - ActivityDumpTracedEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.traced_event_types")), - ActivityDumpCgroupDumpTimeout: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.dump_duration"), - ActivityDumpRateLimiter: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.rate_limiter"), - ActivityDumpCgroupWaitListTimeout: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.cgroup_wait_list_timeout"), - ActivityDumpCgroupDifferentiateArgs: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.cgroup_differentiate_args"), - ActivityDumpLocalStorageDirectory: coreconfig.SystemProbe().GetString("runtime_security_config.activity_dump.local_storage.output_directory"), - ActivityDumpLocalStorageMaxDumpsCount: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.local_storage.max_dumps_count"), - ActivityDumpLocalStorageCompression: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.local_storage.compression"), - ActivityDumpSyscallMonitorPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.syscall_monitor.period"), - 
ActivityDumpMaxDumpCountPerWorkload: coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_count_per_workload"), - ActivityDumpTagRulesEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.tag_rules.enabled"), - ActivityDumpSilentWorkloadsDelay: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.delay"), - ActivityDumpSilentWorkloadsTicker: coreconfig.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.ticker"), - ActivityDumpWorkloadDenyList: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.workload_deny_list"), - ActivityDumpAutoSuppressionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.activity_dump.auto_suppression.enabled"), + ActivityDumpEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.enabled"), + ActivityDumpCleanupPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.cleanup_period"), + ActivityDumpTagsResolutionPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.tags_resolution_period"), + ActivityDumpLoadControlPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.load_controller_period"), + ActivityDumpLoadControlMinDumpTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.min_timeout"), + ActivityDumpTracedCgroupsCount: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.traced_cgroups_count"), + ActivityDumpTracedEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.traced_event_types")), + ActivityDumpCgroupDumpTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.dump_duration"), + ActivityDumpRateLimiter: 
pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.rate_limiter"), + ActivityDumpCgroupWaitListTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.cgroup_wait_list_timeout"), + ActivityDumpCgroupDifferentiateArgs: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.cgroup_differentiate_args"), + ActivityDumpLocalStorageDirectory: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.activity_dump.local_storage.output_directory"), + ActivityDumpLocalStorageMaxDumpsCount: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.local_storage.max_dumps_count"), + ActivityDumpLocalStorageCompression: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.local_storage.compression"), + ActivityDumpSyscallMonitorPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.syscall_monitor.period"), + ActivityDumpMaxDumpCountPerWorkload: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_count_per_workload"), + ActivityDumpTagRulesEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.tag_rules.enabled"), + ActivityDumpSilentWorkloadsDelay: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.delay"), + ActivityDumpSilentWorkloadsTicker: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.silent_workloads.ticker"), + ActivityDumpWorkloadDenyList: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.workload_deny_list"), + ActivityDumpAutoSuppressionEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.auto_suppression.enabled"), // activity dump dynamic fields ActivityDumpMaxDumpSize: func() int { - mds := coreconfig.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_size") + mds := 
pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.max_dump_size") if mds < ADMinMaxDumSize { mds = ADMinMaxDumSize } @@ -389,66 +388,65 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { }, // SBOM resolver - SBOMResolverEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.sbom.enabled"), - SBOMResolverWorkloadsCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.sbom.workloads_cache_size"), - SBOMResolverHostEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.sbom.host.enabled"), + SBOMResolverEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.sbom.enabled"), + SBOMResolverWorkloadsCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.sbom.workloads_cache_size"), + SBOMResolverHostEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.sbom.host.enabled"), // Hash resolver - HashResolverEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.hash_resolver.enabled"), - HashResolverEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.event_types")), - HashResolverMaxFileSize: coreconfig.SystemProbe().GetInt64("runtime_security_config.hash_resolver.max_file_size"), - HashResolverHashAlgorithms: parseHashAlgorithmStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.hash_algorithms")), - HashResolverMaxHashBurst: coreconfig.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_burst"), - HashResolverMaxHashRate: coreconfig.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_rate"), - HashResolverCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.hash_resolver.cache_size"), - HashResolverReplace: coreconfig.SystemProbe().GetStringMapString("runtime_security_config.hash_resolver.replace"), + HashResolverEnabled: 
pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.hash_resolver.enabled"), + HashResolverEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.event_types")), + HashResolverMaxFileSize: pkgconfigsetup.SystemProbe().GetInt64("runtime_security_config.hash_resolver.max_file_size"), + HashResolverHashAlgorithms: parseHashAlgorithmStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.hash_resolver.hash_algorithms")), + HashResolverMaxHashBurst: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_burst"), + HashResolverMaxHashRate: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.hash_resolver.max_hash_rate"), + HashResolverCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.hash_resolver.cache_size"), + HashResolverReplace: pkgconfigsetup.SystemProbe().GetStringMapString("runtime_security_config.hash_resolver.replace"), // security profiles - SecurityProfileEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.enabled"), - SecurityProfileMaxImageTags: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.max_image_tags"), - SecurityProfileDir: coreconfig.SystemProbe().GetString("runtime_security_config.security_profile.dir"), - SecurityProfileWatchDir: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.watch_dir"), - SecurityProfileCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.cache_size"), - SecurityProfileMaxCount: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.max_count"), - SecurityProfileRCEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.remote_configuration.enabled"), - SecurityProfileDNSMatchMaxDepth: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.dns_match_max_depth"), + SecurityProfileEnabled: 
pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.enabled"), + SecurityProfileMaxImageTags: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.max_image_tags"), + SecurityProfileDir: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.security_profile.dir"), + SecurityProfileWatchDir: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.watch_dir"), + SecurityProfileCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.cache_size"), + SecurityProfileMaxCount: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.max_count"), + SecurityProfileDNSMatchMaxDepth: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.dns_match_max_depth"), // auto suppression - SecurityProfileAutoSuppressionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.auto_suppression.enabled"), - SecurityProfileAutoSuppressionEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.security_profile.auto_suppression.event_types")), + SecurityProfileAutoSuppressionEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.auto_suppression.enabled"), + SecurityProfileAutoSuppressionEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.security_profile.auto_suppression.event_types")), // anomaly detection - AnomalyDetectionEventTypes: parseEventTypeStringSlice(coreconfig.SystemProbe().GetStringSlice("runtime_security_config.security_profile.anomaly_detection.event_types")), - AnomalyDetectionDefaultMinimumStablePeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.default_minimum_stable_period"), - AnomalyDetectionMinimumStablePeriods: parseEventTypeDurations(coreconfig.SystemProbe(), 
"runtime_security_config.security_profile.anomaly_detection.minimum_stable_period"), - AnomalyDetectionWorkloadWarmupPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.workload_warmup_period"), - AnomalyDetectionUnstableProfileTimeThreshold: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.unstable_profile_time_threshold"), - AnomalyDetectionUnstableProfileSizeThreshold: coreconfig.SystemProbe().GetInt64("runtime_security_config.security_profile.anomaly_detection.unstable_profile_size_threshold"), - AnomalyDetectionRateLimiterPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.rate_limiter.period"), - AnomalyDetectionRateLimiterNumKeys: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_keys"), - AnomalyDetectionRateLimiterNumEventsAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_events_allowed"), - AnomalyDetectionTagRulesEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.tag_rules.enabled"), - AnomalyDetectionSilentRuleEventsEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.silent_rule_events.enabled"), - AnomalyDetectionEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.enabled"), + AnomalyDetectionEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.security_profile.anomaly_detection.event_types")), + AnomalyDetectionDefaultMinimumStablePeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.default_minimum_stable_period"), + AnomalyDetectionMinimumStablePeriods: parseEventTypeDurations(pkgconfigsetup.SystemProbe(), 
"runtime_security_config.security_profile.anomaly_detection.minimum_stable_period"), + AnomalyDetectionWorkloadWarmupPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.workload_warmup_period"), + AnomalyDetectionUnstableProfileTimeThreshold: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.unstable_profile_time_threshold"), + AnomalyDetectionUnstableProfileSizeThreshold: pkgconfigsetup.SystemProbe().GetInt64("runtime_security_config.security_profile.anomaly_detection.unstable_profile_size_threshold"), + AnomalyDetectionRateLimiterPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.security_profile.anomaly_detection.rate_limiter.period"), + AnomalyDetectionRateLimiterNumKeys: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_keys"), + AnomalyDetectionRateLimiterNumEventsAllowed: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.security_profile.anomaly_detection.rate_limiter.num_events_allowed"), + AnomalyDetectionTagRulesEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.tag_rules.enabled"), + AnomalyDetectionSilentRuleEventsEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.silent_rule_events.enabled"), + AnomalyDetectionEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.security_profile.anomaly_detection.enabled"), // enforcement - EnforcementEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"), - EnforcementBinaryExcluded: coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"), - EnforcementRawSyscallEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"), - EnforcementRuleSourceAllowed: 
coreconfig.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"), - EnforcementDisarmerContainerEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.container.enabled"), - EnforcementDisarmerContainerMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.container.max_allowed"), - EnforcementDisarmerContainerPeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.container.period"), - EnforcementDisarmerExecutableEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.executable.enabled"), - EnforcementDisarmerExecutableMaxAllowed: coreconfig.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.executable.max_allowed"), - EnforcementDisarmerExecutablePeriod: coreconfig.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.executable.period"), + EnforcementEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.enabled"), + EnforcementBinaryExcluded: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.enforcement.exclude_binaries"), + EnforcementRawSyscallEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.raw_syscall.enabled"), + EnforcementRuleSourceAllowed: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.enforcement.rule_source_allowed"), + EnforcementDisarmerContainerEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.container.enabled"), + EnforcementDisarmerContainerMaxAllowed: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.container.max_allowed"), + EnforcementDisarmerContainerPeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.container.period"), + EnforcementDisarmerExecutableEnabled: 
pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.enforcement.disarmer.executable.enabled"), + EnforcementDisarmerExecutableMaxAllowed: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.enforcement.disarmer.executable.max_allowed"), + EnforcementDisarmerExecutablePeriod: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.enforcement.disarmer.executable.period"), // User Sessions - UserSessionsCacheSize: coreconfig.SystemProbe().GetInt("runtime_security_config.user_sessions.cache_size"), + UserSessionsCacheSize: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.user_sessions.cache_size"), // ebpf less - EBPFLessEnabled: coreconfig.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled"), - EBPFLessSocket: coreconfig.SystemProbe().GetString("runtime_security_config.ebpfless.socket"), + EBPFLessEnabled: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.ebpfless.enabled"), + EBPFLessSocket: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.ebpfless.socket"), // IMDS IMDSIPv4: parseIMDSIPv4(), @@ -468,7 +466,7 @@ func (c *RuntimeSecurityConfig) IsRuntimeEnabled() bool { // parseIMDSIPv4 returns the uint32 representation of the IMDS IP set by the configuration func parseIMDSIPv4() uint32 { - ip := coreconfig.SystemProbe().GetString("runtime_security_config.imds_ipv4") + ip := pkgconfigsetup.SystemProbe().GetString("runtime_security_config.imds_ipv4") parsedIP := net.ParseIP(ip) if parsedIP == nil { return 0 @@ -479,13 +477,13 @@ func parseIMDSIPv4() uint32 { // If RC is globally enabled, RC is enabled for CWS, unless the CWS-specific RC value is explicitly set to false func isRemoteConfigEnabled() bool { // This value defaults to true - rcEnabledInSysprobeConfig := coreconfig.SystemProbe().GetBool("runtime_security_config.remote_configuration.enabled") + rcEnabledInSysprobeConfig := pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.remote_configuration.enabled") if 
!rcEnabledInSysprobeConfig { return false } - if coreconfig.IsRemoteConfigEnabled(coreconfig.Datadog()) { + if pkgconfigsetup.IsRemoteConfigEnabled(pkgconfigsetup.Datadog()) { return true } @@ -502,13 +500,13 @@ func (c *RuntimeSecurityConfig) GetAnomalyDetectionMinimumStablePeriod(eventType // sanitize ensures that the configuration is properly setup func (c *RuntimeSecurityConfig) sanitize() error { - serviceName := utils.GetTagValue("service", configUtils.GetConfiguredTags(coreconfig.Datadog(), true)) + serviceName := utils.GetTagValue("service", configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true)) if len(serviceName) > 0 { c.HostServiceName = serviceName } if c.IMDSIPv4 == 0 { - return fmt.Errorf("invalid IPv4 address: got %v", coreconfig.SystemProbe().GetString("runtime_security_config.imds_ipv4")) + return fmt.Errorf("invalid IPv4 address: got %v", pkgconfigsetup.SystemProbe().GetString("runtime_security_config.imds_ipv4")) } if c.EnforcementDisarmerContainerEnabled && c.EnforcementDisarmerContainerMaxAllowed <= 0 { @@ -537,7 +535,7 @@ func (c *RuntimeSecurityConfig) sanitizeRuntimeSecurityConfigActivityDump() erro c.ActivityDumpTracedEventTypes = append(c.ActivityDumpTracedEventTypes, model.ExecEventType) } - if formats := coreconfig.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.local_storage.formats"); len(formats) > 0 { + if formats := pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.local_storage.formats"); len(formats) > 0 { var err error c.ActivityDumpLocalStorageFormats, err = ParseStorageFormats(formats) if err != nil { @@ -558,13 +556,13 @@ func (c *RuntimeSecurityConfig) sanitizeRuntimeSecurityConfigActivityDump() erro // ActivityDumpRemoteStorageEndpoints returns the list of activity dump remote storage endpoints parsed from the agent config func ActivityDumpRemoteStorageEndpoints(endpointPrefix string, intakeTrackType logsconfig.IntakeTrackType, intakeProtocol 
logsconfig.IntakeProtocol, intakeOrigin logsconfig.IntakeOrigin) (*logsconfig.Endpoints, error) { - logsConfig := logsconfig.NewLogsConfigKeys("runtime_security_config.activity_dump.remote_storage.endpoints.", coreconfig.Datadog()) - endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(coreconfig.Datadog(), logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) + logsConfig := logsconfig.NewLogsConfigKeys("runtime_security_config.activity_dump.remote_storage.endpoints.", pkgconfigsetup.Datadog()) + endpoints, err := logsconfig.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), logsConfig, endpointPrefix, intakeTrackType, intakeProtocol, intakeOrigin) if err != nil { - endpoints, err = logsconfig.BuildHTTPEndpoints(coreconfig.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) + endpoints, err = logsconfig.BuildHTTPEndpoints(pkgconfigsetup.Datadog(), intakeTrackType, intakeProtocol, intakeOrigin) if err == nil { - httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, coreconfig.Datadog()) - endpoints, err = logsconfig.BuildEndpoints(coreconfig.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) + httpConnectivity := logshttp.CheckConnectivity(endpoints.Main, pkgconfigsetup.Datadog()) + endpoints, err = logsconfig.BuildEndpoints(pkgconfigsetup.Datadog(), httpConnectivity, intakeTrackType, intakeProtocol, intakeOrigin) } } @@ -591,7 +589,7 @@ func ParseEvalEventType(eventType eval.EventType) model.EventType { } // parseEventTypeDurations converts a map of durations indexed by event types -func parseEventTypeDurations(cfg coreconfig.Config, prefix string) map[model.EventType]time.Duration { +func parseEventTypeDurations(cfg pkgconfigmodel.Config, prefix string) map[model.EventType]time.Duration { eventTypeMap := cfg.GetStringMap(prefix) eventTypeDurations := make(map[model.EventType]time.Duration, len(eventTypeMap)) for eventType := range eventTypeMap { diff --git 
a/pkg/security/ebpf/c/include/constants/custom.h b/pkg/security/ebpf/c/include/constants/custom.h index 181460e586ce1..b257e0f342031 100644 --- a/pkg/security/ebpf/c/include/constants/custom.h +++ b/pkg/security/ebpf/c/include/constants/custom.h @@ -12,15 +12,6 @@ #define MAX_PATH_LEN 256 #define REVISION_ARRAY_SIZE 4096 #define INODE_DISCARDER_TYPE 0 -#define BASENAME_APPROVER_TYPE 0 -#define FLAG_APPROVER_TYPE 1 - -enum MONITOR_KEYS -{ - ERPC_MONITOR_KEY = 1, - DISCARDER_MONITOR_KEY, - APPROVER_MONITOR_KEY, -}; #define PATH_ID_MAP_SIZE 512 diff --git a/pkg/security/ebpf/c/include/constants/enums.h b/pkg/security/ebpf/c/include/constants/enums.h index 859a3230465c1..ad37ad7885112 100644 --- a/pkg/security/ebpf/c/include/constants/enums.h +++ b/pkg/security/ebpf/c/include/constants/enums.h @@ -87,28 +87,35 @@ enum enum policy_mode { NO_FILTER = 0, - ACCEPT = 1, - DENY = 2, + ACCEPT, + DENY, }; -enum policy_flags +enum APPROVER_TYPE { - BASENAME = 1, - FLAGS = 2, - MODE = 4, - PARENT_NAME = 8, + BASENAME_APPROVER_TYPE = 0, + FLAG_APPROVER_TYPE, + AUID_APPROVER_TYPE, }; -enum tls_format +enum SYSCALL_STATE { - DEFAULT_TLS_FORMAT + ACCEPTED = 0, // approved and can't be discarded later + APPROVED, // approved but can be discarded later + DISCARDED, // discarded +}; + +enum MONITOR_KEYS +{ + ERPC_MONITOR_KEY = 1, + DISCARDER_MONITOR_KEY, + APPROVER_MONITOR_KEY, }; -typedef enum discard_check_state +enum tls_format { - NOT_DISCARDED, - DISCARDED, -} discard_check_state; + DEFAULT_TLS_FORMAT +}; enum bpf_cmd_def { diff --git a/pkg/security/ebpf/c/include/helpers/approvers.h b/pkg/security/ebpf/c/include/helpers/approvers.h index 02c10694c7a19..d3b68a4d9282f 100644 --- a/pkg/security/ebpf/c/include/helpers/approvers.h +++ b/pkg/security/ebpf/c/include/helpers/approvers.h @@ -20,33 +20,65 @@ void __attribute__((always_inline)) monitor_event_approved(u64 event_type, u32 a __sync_fetch_and_add(&stats->event_approved_by_basename, 1); } else if (approver_type == 
FLAG_APPROVER_TYPE) { __sync_fetch_and_add(&stats->event_approved_by_flag, 1); + } else if (approver_type == AUID_APPROVER_TYPE) { + __sync_fetch_and_add(&stats->event_approved_by_auid, 1); } } void get_dentry_name(struct dentry *dentry, void *buffer, size_t n); -int __attribute__((always_inline)) approve_by_basename(struct dentry *dentry, u64 event_type) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_by_auid(struct syscall_cache_t *syscall, u64 event_type) { + u32 pid = bpf_get_current_pid_tgid() >> 32; + struct pid_cache_t *pid_entry = (struct pid_cache_t *)bpf_map_lookup_elem(&pid_cache, &pid); + if (!pid_entry || !pid_entry->credentials.is_auid_set) { + return DISCARDED; + } + + u32 auid = pid_entry->credentials.auid; + + struct event_mask_filter_t *mask_filter = bpf_map_lookup_elem(&auid_approvers, &auid); + if (mask_filter && mask_filter->event_mask & (1 << (event_type - 1))) { + monitor_event_approved(syscall->type, AUID_APPROVER_TYPE); + return ACCEPTED; + } + + struct u32_range_filter_t *range_filter = bpf_map_lookup_elem(&auid_range_approvers, &event_type); + if (range_filter && auid >= range_filter->min && auid <= range_filter->max) { + monitor_event_approved(syscall->type, AUID_APPROVER_TYPE); + return ACCEPTED; + } + + return DISCARDED; +} + +enum SYSCALL_STATE __attribute__((always_inline)) approve_by_basename(struct dentry *dentry, u64 event_type) { struct basename_t basename = {}; get_dentry_name(dentry, &basename, sizeof(basename)); - struct basename_filter_t *filter = bpf_map_lookup_elem(&basename_approvers, &basename); + struct event_mask_filter_t *filter = bpf_map_lookup_elem(&basename_approvers, &basename); if (filter && filter->event_mask & (1 << (event_type - 1))) { monitor_event_approved(event_type, BASENAME_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) basename_approver(struct syscall_cache_t *syscall, struct dentry *dentry, u64 event_type) { - return 
approve_by_basename(dentry, event_type); -} +enum SYSCALL_STATE __attribute__((always_inline)) chmod_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->setattr.dentry, EVENT_CHMOD); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_CHMOD); + } -int __attribute__((always_inline)) chmod_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->setattr.dentry, EVENT_CHMOD); + return state; } -int __attribute__((always_inline)) chown_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->setattr.dentry, EVENT_CHOWN); +enum SYSCALL_STATE __attribute__((always_inline)) chown_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->setattr.dentry, EVENT_CHOWN); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_CHOWN); + } + + return state; } int __attribute__((always_inline)) lookup_u32_flags(void *map, u32 *flags) { @@ -65,106 +97,120 @@ int __attribute__((always_inline)) approve_mmap_by_flags(struct syscall_cache_t int exists = lookup_u32_flags(&mmap_flags_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->mmap.flags & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) approve_mmap_by_protection(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_mmap_by_protection_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&mmap_protection_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((flags == 0 && syscall->mmap.protection == 0) || (syscall->mmap.protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int 
__attribute__((always_inline)) mmap_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = 0; +enum SYSCALL_STATE __attribute__((always_inline)) mmap_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = DISCARDED; if (syscall->mmap.dentry != NULL) { - pass_to_userspace = approve_by_basename(syscall->mmap.dentry, EVENT_MMAP); + state = approve_by_basename(syscall->mmap.dentry, EVENT_MMAP); } - if (!pass_to_userspace) { - pass_to_userspace = approve_mmap_by_protection(syscall); - if (!pass_to_userspace) { - pass_to_userspace = approve_mmap_by_flags(syscall); - } + if (state == DISCARDED) { + state = approve_mmap_by_protection_flags(syscall); + } + if (state == DISCARDED) { + state = approve_mmap_by_flags(syscall); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) link_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->link.src_dentry, EVENT_LINK) || - basename_approver(syscall, syscall->link.target_dentry, EVENT_LINK); +enum SYSCALL_STATE __attribute__((always_inline)) link_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->link.src_dentry, EVENT_LINK); + if (state == DISCARDED) { + state = approve_by_basename(syscall->link.target_dentry, EVENT_LINK); + } + + return state; } -int __attribute__((always_inline)) mkdir_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->mkdir.dentry, EVENT_MKDIR); +enum SYSCALL_STATE __attribute__((always_inline)) mkdir_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->mkdir.dentry, EVENT_MKDIR); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_MKDIR); + } + + return state; } -int __attribute__((always_inline)) chdir_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->chdir.dentry, EVENT_CHDIR); +enum SYSCALL_STATE 
__attribute__((always_inline)) chdir_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->chdir.dentry, EVENT_CHDIR); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_CHDIR); + } + + return state; } -int __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_mprotect_by_vm_protection(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&mprotect_vm_protection_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->mprotect.vm_protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) approve_mprotect_by_req_protection(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_mprotect_by_req_protection(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&mprotect_req_protection_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->mprotect.req_protection & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) mprotect_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = approve_mprotect_by_vm_protection(syscall); - if (!pass_to_userspace) { - pass_to_userspace = approve_mprotect_by_req_protection(syscall); +enum SYSCALL_STATE __attribute__((always_inline)) mprotect_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_mprotect_by_vm_protection(syscall); + if (state == DISCARDED) { + state = approve_mprotect_by_req_protection(syscall); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) approve_by_flags(struct syscall_cache_t 
*syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_open_by_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&open_flags_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((flags == 0 && syscall->open.flags == 0) || ((syscall->open.flags & flags) > 0)) { @@ -174,97 +220,153 @@ int __attribute__((always_inline)) approve_by_flags(struct syscall_cache_t *sysc bpf_printk("open flags %d approved", syscall->open.flags); #endif - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) open_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = approve_by_basename(syscall->open.dentry, EVENT_OPEN); - if (!pass_to_userspace) { - pass_to_userspace = approve_by_flags(syscall); +enum SYSCALL_STATE __attribute__((always_inline)) open_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->open.dentry, EVENT_OPEN); + if (state == DISCARDED) { + state = approve_open_by_flags(syscall); + } + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_OPEN); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) rename_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->rename.src_dentry, EVENT_RENAME) || - basename_approver(syscall, syscall->rename.target_dentry, EVENT_RENAME); +enum SYSCALL_STATE __attribute__((always_inline)) rename_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->rename.src_dentry, EVENT_RENAME); + if (state == DISCARDED) { + state = approve_by_basename(syscall->rename.target_dentry, EVENT_RENAME); + } + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_RENAME); + } + + return state; } -int __attribute__((always_inline)) rmdir_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->rmdir.dentry, 
EVENT_RMDIR); +enum SYSCALL_STATE __attribute__((always_inline)) rmdir_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->rmdir.dentry, EVENT_RMDIR); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_RMDIR); + } + return state; } -int __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_splice_by_entry_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&splice_entry_flags_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->splice.pipe_entry_flag & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) approve_splice_by_exit_flags(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) approve_splice_by_exit_flags(struct syscall_cache_t *syscall) { u32 flags = 0; int exists = lookup_u32_flags(&splice_exit_flags_approvers, &flags); if (!exists) { - return 0; + return DISCARDED; } if ((syscall->splice.pipe_exit_flag & flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; } - return 0; + return DISCARDED; } -int __attribute__((always_inline)) splice_approvers(struct syscall_cache_t *syscall) { - int pass_to_userspace = 0; +enum SYSCALL_STATE __attribute__((always_inline)) splice_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = DISCARDED; if (syscall->splice.dentry != NULL) { - pass_to_userspace = approve_by_basename(syscall->splice.dentry, EVENT_SPLICE); + state = approve_by_basename(syscall->splice.dentry, EVENT_SPLICE); } - if (!pass_to_userspace) { - pass_to_userspace = approve_splice_by_exit_flags(syscall); - if (!pass_to_userspace) { - pass_to_userspace = approve_splice_by_entry_flags(syscall); - } + if 
(state == DISCARDED) { + state = approve_splice_by_exit_flags(syscall); + } + if (state == DISCARDED) { + state = approve_splice_by_entry_flags(syscall); } - return pass_to_userspace; + return state; } -int __attribute__((always_inline)) unlink_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->unlink.dentry, EVENT_UNLINK); +enum SYSCALL_STATE __attribute__((always_inline)) unlink_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->unlink.dentry, EVENT_UNLINK); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_UNLINK); + } + return state; } -int __attribute__((always_inline)) utime_approvers(struct syscall_cache_t *syscall) { - return basename_approver(syscall, syscall->setattr.dentry, EVENT_UTIME); +enum SYSCALL_STATE __attribute__((always_inline)) utime_approvers(struct syscall_cache_t *syscall) { + enum SYSCALL_STATE state = approve_by_basename(syscall->setattr.dentry, EVENT_UTIME); + if (state == DISCARDED) { + state = approve_by_auid(syscall, EVENT_UTIME); + } + return state; } -int __attribute__((always_inline)) bpf_approvers(struct syscall_cache_t *syscall) { +enum SYSCALL_STATE __attribute__((always_inline)) bpf_approvers(struct syscall_cache_t *syscall) { u32 key = 0; struct u64_flags_filter_t *filter = bpf_map_lookup_elem(&bpf_cmd_approvers, &key); if (filter == NULL || !filter->is_set) { - return 0; + return DISCARDED; } if (((1 << syscall->bpf.cmd) & filter->flags) > 0) { monitor_event_approved(syscall->type, FLAG_APPROVER_TYPE); - return 1; + return APPROVED; + } + + return DISCARDED; +} + +enum SYSCALL_STATE __attribute__((always_inline)) approve_syscall(struct syscall_cache_t *syscall, enum SYSCALL_STATE (*check_approvers)(struct syscall_cache_t *syscall)) { + if (syscall->policy.mode == NO_FILTER) { + return syscall->state = ACCEPTED; + } + + if (syscall->policy.mode == ACCEPT) { + return syscall->state = APPROVED; + } + + if 
(syscall->policy.mode == DENY) { + syscall->state = check_approvers(syscall); + } + + u32 tgid = bpf_get_current_pid_tgid() >> 32; + u64 *cookie = bpf_map_lookup_elem(&traced_pids, &tgid); + if (cookie != NULL) { + u64 now = bpf_ktime_get_ns(); + struct activity_dump_config *config = lookup_or_delete_traced_pid(tgid, now, cookie); + if (config != NULL) { + // is this event type traced ? + if (mask_has_event(config->event_mask, syscall->type) && activity_dump_rate_limiter_allow(config, *cookie, now, 0)) { + if (syscall->state == DISCARDED) { + syscall->resolver.flags |= SAVED_BY_ACTIVITY_DUMP; + } + + // force to be accepted as this event will be part of a dump + syscall->state = ACCEPTED; + } + } } - return 0; + return syscall->state; } #endif diff --git a/pkg/security/ebpf/c/include/helpers/discarders.h b/pkg/security/ebpf/c/include/helpers/discarders.h index 86bc5d3a1c96d..873356c61e9b3 100644 --- a/pkg/security/ebpf/c/include/helpers/discarders.h +++ b/pkg/security/ebpf/c/include/helpers/discarders.h @@ -222,25 +222,25 @@ int __attribute__((always_inline)) discard_inode(u64 event_type, u32 mount_id, u return 0; } -discard_check_state __attribute__((always_inline)) is_discarded_by_inode(struct is_discarded_by_inode_t *params) { +int __attribute__((always_inline)) is_discarded_by_inode(struct is_discarded_by_inode_t *params) { // start with the "normal" discarder check struct inode_discarder_t key = params->discarder; - struct inode_discarder_params_t *inode_params = (struct inode_discarder_params_t *)is_discarded(&inode_discarders, &key, params->discarder_type, params->now); + struct inode_discarder_params_t *inode_params = (struct inode_discarder_params_t *)is_discarded(&inode_discarders, &key, params->event_type, params->now); if (!inode_params) { - return NOT_DISCARDED; + return 0; } bool are_revisions_equal = inode_params->mount_revision == get_mount_discarder_revision(params->discarder.path_key.mount_id); if (!are_revisions_equal) { - return NOT_DISCARDED; + 
return 0; } u32 revision = get_discarders_revision(); if (inode_params->params.revision != revision) { - return NOT_DISCARDED; + return 0; } - return DISCARDED; + return 1; } int __attribute__((always_inline)) expire_inode_discarders(u32 mount_id, u64 inode) { @@ -283,4 +283,16 @@ int __attribute__((always_inline)) expire_inode_discarders(u32 mount_id, u64 ino return 0; } +static __attribute__((always_inline)) int is_discarded_by_pid() { + return is_runtime_discarded() && is_runtime_request(); +} + +int __attribute__((always_inline)) dentry_resolver_discarder_event_type(struct syscall_cache_t *syscall) { + if (syscall->state == ACCEPTED) { + return 0; + } + + return syscall->type; +} + #endif diff --git a/pkg/security/ebpf/c/include/helpers/exec.h b/pkg/security/ebpf/c/include/helpers/exec.h index 7c5d5ee02170f..99cc6fed57093 100644 --- a/pkg/security/ebpf/c/include/helpers/exec.h +++ b/pkg/security/ebpf/c/include/helpers/exec.h @@ -26,7 +26,7 @@ int __attribute__((always_inline)) handle_exec_event(ctx_t *ctx, struct syscall_ // resolve dentry syscall->resolver.key = syscall->exec.file.path_key; syscall->resolver.dentry = syscall->exec.dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/helpers/syscalls.h b/pkg/security/ebpf/c/include/helpers/syscalls.h index e4c41ccd5e8c2..8de9cdf42ff43 100644 --- a/pkg/security/ebpf/c/include/helpers/syscalls.h +++ b/pkg/security/ebpf/c/include/helpers/syscalls.h @@ -192,48 +192,6 @@ struct syscall_cache_t *__attribute__((always_inline)) pop_syscall(u64 type) { return syscall; } -int __attribute__((always_inline)) discard_syscall(struct syscall_cache_t *syscall) { - u64 key = bpf_get_current_pid_tgid(); - bpf_map_delete_elem(&syscalls, &key); - monitor_syscalls(syscall->type, -1); - return 0; -} - -int __attribute__((always_inline)) 
mark_as_discarded(struct syscall_cache_t *syscall) { - syscall->discarded = 1; - return 0; -} - -int __attribute__((always_inline)) filter_syscall(struct syscall_cache_t *syscall, int (*check_approvers)(struct syscall_cache_t *syscall)) { - if (syscall->policy.mode == NO_FILTER) { - return 0; - } - - char pass_to_userspace = syscall->policy.mode == ACCEPT ? 1 : 0; - - if (syscall->policy.mode == DENY) { - pass_to_userspace = check_approvers(syscall); - } - - u32 tgid = bpf_get_current_pid_tgid() >> 32; - u64 *cookie = bpf_map_lookup_elem(&traced_pids, &tgid); - if (cookie != NULL) { - u64 now = bpf_ktime_get_ns(); - struct activity_dump_config *config = lookup_or_delete_traced_pid(tgid, now, cookie); - if (config != NULL) { - // is this event type traced ? - if (mask_has_event(config->event_mask, syscall->type) && activity_dump_rate_limiter_allow(config, *cookie, now, 0)) { - if (!pass_to_userspace) { - syscall->resolver.flags |= SAVED_BY_ACTIVITY_DUMP; - } - return 0; - } - } - } - - return !pass_to_userspace; -} - // the following functions must use the {peek,pop}_current_or_impersonated_exec_syscall to retrieve the syscall context // because the task performing the exec syscall may change its pid in the flush_old_exec() kernel function diff --git a/pkg/security/ebpf/c/include/hooks/bpf.h b/pkg/security/ebpf/c/include/hooks/bpf.h index 8b1c9f3bca35c..a39e587faa9a1 100644 --- a/pkg/security/ebpf/c/include/hooks/bpf.h +++ b/pkg/security/ebpf/c/include/hooks/bpf.h @@ -50,6 +50,10 @@ __attribute__((always_inline)) void send_bpf_event(void *ctx, struct syscall_cac } HOOK_SYSCALL_ENTRY3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_BPF); struct syscall_cache_t syscall = { .policy = policy, @@ -71,8 +75,8 @@ __attribute__((always_inline)) int sys_bpf_ret(void *ctx, int retval) { return 0; } - if (filter_syscall(syscall, bpf_approvers)) { - return 
mark_as_discarded(syscall); + if (approve_syscall(syscall, bpf_approvers) == DISCARDED) { + return 0; } syscall->bpf.retval = retval; diff --git a/pkg/security/ebpf/c/include/hooks/cgroup.h b/pkg/security/ebpf/c/include/hooks/cgroup.h index aa43cc0a333a7..72c82beddf78e 100644 --- a/pkg/security/ebpf/c/include/hooks/cgroup.h +++ b/pkg/security/ebpf/c/include/hooks/cgroup.h @@ -201,7 +201,7 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { } resolver->type = EVENT_CGROUP_WRITE; - resolver->discarder_type = NO_FILTER; + resolver->discarder_event_type = 0; resolver->callback = DR_CGROUP_WRITE_CALLBACK_KPROBE_KEY; resolver->iteration = 0; resolver->ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/chdir.h b/pkg/security/ebpf/c/include/hooks/chdir.h index b424ec706ddd1..2d1e6c5db11eb 100644 --- a/pkg/security/ebpf/c/include/hooks/chdir.h +++ b/pkg/security/ebpf/c/include/hooks/chdir.h @@ -8,6 +8,10 @@ #include "helpers/syscalls.h" long __attribute__((always_inline)) trace__sys_chdir(const char *path) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_CHDIR); struct syscall_cache_t syscall = { .type = EVENT_CHDIR, @@ -53,8 +57,9 @@ int hook_set_fs_pwd(ctx_t *ctx) { set_file_inode(dentry, &syscall->chdir.file, 0); - if (filter_syscall(syscall, chdir_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, chdir_approvers) == DISCARDED) { + pop_syscall(EVENT_CHDIR); + return 0; } return 0; @@ -66,7 +71,7 @@ int __attribute__((always_inline)) sys_chdir_ret(void *ctx, int retval, int dr_t return 0; } if (IS_UNHANDLED_ERROR(retval)) { - discard_syscall(syscall); + pop_syscall(EVENT_CHDIR); return 0; } @@ -74,7 +79,7 @@ int __attribute__((always_inline)) sys_chdir_ret(void *ctx, int retval, int dr_t syscall->resolver.key = syscall->chdir.file.path_key; syscall->resolver.dentry = syscall->chdir.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_CHDIR : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = select_dr_key(dr_type, DR_CHDIR_CALLBACK_KPROBE_KEY, DR_CHDIR_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/chmod.h b/pkg/security/ebpf/c/include/hooks/chmod.h index 8afd2f1f663ab..f374c6478cc31 100644 --- a/pkg/security/ebpf/c/include/hooks/chmod.h +++ b/pkg/security/ebpf/c/include/hooks/chmod.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_chmod(const char *path, umode_t mode) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_CHMOD); struct syscall_cache_t syscall = { .type = EVENT_CHMOD, diff --git a/pkg/security/ebpf/c/include/hooks/chown.h b/pkg/security/ebpf/c/include/hooks/chown.h index 248d366aef68c..3bb4cbf243141 100644 --- a/pkg/security/ebpf/c/include/hooks/chown.h +++ b/pkg/security/ebpf/c/include/hooks/chown.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_chown(const char *filename, uid_t user, gid_t group) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_CHOWN); struct syscall_cache_t syscall = { .type = EVENT_CHOWN, diff --git a/pkg/security/ebpf/c/include/hooks/dentry_resolver.h b/pkg/security/ebpf/c/include/hooks/dentry_resolver.h index a47ecd1050c79..1304ee7c94cc8 100644 --- a/pkg/security/ebpf/c/include/hooks/dentry_resolver.h +++ b/pkg/security/ebpf/c/include/hooks/dentry_resolver.h @@ -20,7 +20,7 @@ int __attribute__((always_inline)) resolve_dentry_tail_call(void *ctx, struct de return DENTRY_ERROR; } *params = (struct is_discarded_by_inode_t){ - .discarder_type = input->discarder_type, + .event_type = input->discarder_event_type, .now = bpf_ktime_get_ns(), }; @@ -40,7 +40,7 @@ int __attribute__((always_inline)) resolve_dentry_tail_call(void 
*ctx, struct de next_key.mount_id = 0; } - if (input->discarder_type && input->iteration == 1 && i <= 3) { + if (input->discarder_event_type && input->iteration == 1 && i <= 3) { params->discarder.path_key.ino = key.ino; params->discarder.path_key.mount_id = key.mount_id; params->discarder.is_leaf = i == 0; diff --git a/pkg/security/ebpf/c/include/hooks/exec.h b/pkg/security/ebpf/c/include/hooks/exec.h index c7ff9374bc2a9..a99783248c5d9 100644 --- a/pkg/security/ebpf/c/include/hooks/exec.h +++ b/pkg/security/ebpf/c/include/hooks/exec.h @@ -80,7 +80,7 @@ int __attribute__((always_inline)) handle_interpreted_exec_event(void *ctx, stru // This overwrites the resolver fields on this syscall, but that's ok because the executed file has already been written to the map/pathnames ebpf map. syscall->resolver.key = syscall->exec.linux_binprm.interpreter; syscall->resolver.dentry = get_file_dentry(file); - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/link.h b/pkg/security/ebpf/c/include/hooks/link.h index 51a4a18ee88b2..8184b16cfd4bf 100644 --- a/pkg/security/ebpf/c/include/hooks/link.h +++ b/pkg/security/ebpf/c/include/hooks/link.h @@ -69,8 +69,9 @@ int hook_vfs_link(ctx_t *ctx) { // force a new path id to force path resolution set_file_inode(src_dentry, &syscall->link.src_file, 1); - if (filter_syscall(syscall, link_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, link_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } fill_file(src_dentry, &syscall->link.src_file); @@ -85,7 +86,7 @@ int hook_vfs_link(ctx_t *ctx) { syscall->resolver.dentry = src_dentry; syscall->resolver.key = syscall->link.src_file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_LINK : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_LINK_SRC_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -107,7 +108,8 @@ int tail_call_target_dr_link_src_callback(ctx_t *ctx) { if (syscall->resolver.ret == DENTRY_DISCARDED) { monitor_discarded(EVENT_LINK); - return mark_as_discarded(syscall); + // do not pop, we want to invalidate the inode even if the syscall is discarded + syscall->state = DISCARDED; } return 0; @@ -124,18 +126,16 @@ int __attribute__((always_inline)) sys_link_ret(void *ctx, int retval, int dr_ty return 0; } - int pass_to_userspace = !syscall->discarded && is_event_enabled(EVENT_LINK); - // invalidate user space inode, so no need to bump the discarder revision in the event if (retval >= 0) { // for hardlink we need to invalidate the discarders as the nlink counter in now > 1 expire_inode_discarders(syscall->link.src_file.path_key.mount_id, syscall->link.src_file.path_key.ino); } - if (pass_to_userspace) { + if (syscall->state != DISCARDED && is_event_enabled(EVENT_LINK)) { syscall->resolver.dentry = syscall->link.target_dentry; syscall->resolver.key = syscall->link.target_file.path_key; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_LINK_DST_CALLBACK_KPROBE_KEY, DR_LINK_DST_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mkdir.h b/pkg/security/ebpf/c/include/hooks/mkdir.h index 0047972df52df..c736528d7222f 100644 --- a/pkg/security/ebpf/c/include/hooks/mkdir.h +++ b/pkg/security/ebpf/c/include/hooks/mkdir.h @@ -8,6 +8,10 @@ #include "helpers/syscalls.h" long __attribute__((always_inline)) trace__sys_mkdir(u8 async, umode_t mode) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_MKDIR); struct 
syscall_cache_t syscall = { .type = EVENT_MKDIR, @@ -51,8 +55,8 @@ int hook_vfs_mkdir(ctx_t *ctx) { syscall->mkdir.file.path_key.mount_id = get_path_mount_id(syscall->mkdir.path); - if (filter_syscall(syscall, mkdir_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, mkdir_approvers) == DISCARDED) { + pop_syscall(EVENT_MKDIR); } return 0; @@ -64,7 +68,7 @@ int __attribute__((always_inline)) sys_mkdir_ret(void *ctx, int retval, int dr_t return 0; } if (IS_UNHANDLED_ERROR(retval)) { - discard_syscall(syscall); + pop_syscall(EVENT_MKDIR); return 0; } @@ -73,7 +77,7 @@ int __attribute__((always_inline)) sys_mkdir_ret(void *ctx, int retval, int dr_t syscall->resolver.key = syscall->mkdir.file.path_key; syscall->resolver.dentry = syscall->mkdir.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_MKDIR : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = select_dr_key(dr_type, DR_MKDIR_CALLBACK_KPROBE_KEY, DR_MKDIR_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mmap.h b/pkg/security/ebpf/c/include/hooks/mmap.h index 45392650a94ca..d15a25651e63d 100644 --- a/pkg/security/ebpf/c/include/hooks/mmap.h +++ b/pkg/security/ebpf/c/include/hooks/mmap.h @@ -9,6 +9,10 @@ HOOK_ENTRY("vm_mmap_pgoff") int hook_vm_mmap_pgoff(ctx_t *ctx) { + if (is_discarded_by_pid()) { + return 0; + } + u64 len = CTX_PARM3(ctx); u64 prot = CTX_PARM4(ctx); u64 flags = CTX_PARM5(ctx); @@ -47,13 +51,13 @@ int __attribute__((always_inline)) sys_mmap_ret(void *ctx, int retval, u64 addr) return 0; } - if (syscall->resolver.ret == DENTRY_DISCARDED) { - monitor_discarded(EVENT_MMAP); + if (approve_syscall(syscall, mmap_approvers) == DISCARDED) { return 0; } - if (filter_syscall(syscall, mmap_approvers)) { - return mark_as_discarded(syscall); + if (syscall->resolver.ret == DENTRY_DISCARDED) { + 
monitor_discarded(EVENT_MMAP); + return 0; } if (retval != -1) { @@ -101,9 +105,9 @@ int hook_security_mmap_file(ctx_t *ctx) { syscall->resolver.key = syscall->mmap.file.path_key; syscall->resolver.dentry = syscall->mmap.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? EVENT_MMAP : 0; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); resolve_dentry(ctx, DR_KPROBE_OR_FENTRY); diff --git a/pkg/security/ebpf/c/include/hooks/module.h b/pkg/security/ebpf/c/include/hooks/module.h index 905301cbeebf2..ab5bc5caea8ee 100644 --- a/pkg/security/ebpf/c/include/hooks/module.h +++ b/pkg/security/ebpf/c/include/hooks/module.h @@ -42,8 +42,8 @@ int __attribute__((always_inline)) trace_kernel_file(ctx_t *ctx, struct file *f, syscall->resolver.key = syscall->init_module.file.path_key; syscall->resolver.dentry = syscall->init_module.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_INIT_MODULE : 0; syscall->resolver.iteration = 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mount.h b/pkg/security/ebpf/c/include/hooks/mount.h index 2a56d04b1a750..00247418d7a0f 100644 --- a/pkg/security/ebpf/c/include/hooks/mount.h +++ b/pkg/security/ebpf/c/include/hooks/mount.h @@ -171,7 +171,7 @@ void __attribute__((always_inline)) handle_new_mount(void *ctx, struct syscall_c syscall->resolver.key = syscall->mount.root_key; syscall->resolver.dentry = root_dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_MOUNT_STAGE_ONE_CALLBACK_KPROBE_KEY, DR_MOUNT_STAGE_ONE_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -189,7 +189,7 @@ int __attribute__((always_inline)) dr_mount_stage_one_callback(void *ctx, int dr syscall->resolver.key = syscall->mount.mountpoint_key; syscall->resolver.dentry = syscall->mount.mountpoint_dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_MOUNT_STAGE_TWO_CALLBACK_KPROBE_KEY, DR_MOUNT_STAGE_TWO_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/mprotect.h b/pkg/security/ebpf/c/include/hooks/mprotect.h index 0a6107c12c13b..7bfb5c657d54f 100644 --- a/pkg/security/ebpf/c/include/hooks/mprotect.h +++ b/pkg/security/ebpf/c/include/hooks/mprotect.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY0(mprotect) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_MPROTECT); struct syscall_cache_t syscall = { .type = EVENT_MPROTECT, @@ -42,8 +46,8 @@ int __attribute__((always_inline)) sys_mprotect_ret(void *ctx, 
int retval) { return 0; } - if (filter_syscall(syscall, mprotect_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, mprotect_approvers) == DISCARDED) { + return 0; } struct mprotect_event_t event = { diff --git a/pkg/security/ebpf/c/include/hooks/open.h b/pkg/security/ebpf/c/include/hooks/open.h index 3d48368979c93..d41d105fdbbf7 100644 --- a/pkg/security/ebpf/c/include/hooks/open.h +++ b/pkg/security/ebpf/c/include/hooks/open.h @@ -11,6 +11,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_openat2(const char *path, u8 async, int flags, umode_t mode, u64 pid_tgid) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_OPEN); struct syscall_cache_t syscall = { .type = EVENT_OPEN, @@ -85,9 +89,8 @@ int __attribute__((always_inline)) handle_open_event(struct syscall_cache_t *sys set_file_inode(dentry, &syscall->open.file, 0); - if (filter_syscall(syscall, open_approvers)) { - return mark_as_discarded(syscall); - } + // do not pop, we want to keep track of the mount ref counter later in the stack + approve_syscall(syscall, open_approvers); return 0; } @@ -112,9 +115,8 @@ int __attribute__((always_inline)) handle_truncate_path_dentry(struct path *path set_file_inode(dentry, &syscall->open.file, 0); - if (filter_syscall(syscall, open_approvers)) { - return mark_as_discarded(syscall); - } + // do not pop, we want to keep track of the mount ref counter later in the stack + approve_syscall(syscall, open_approvers); return 0; } @@ -237,14 +239,14 @@ int __attribute__((always_inline)) sys_open_ret(void *ctx, int retval, int dr_ty // increase mount ref inc_mount_ref(syscall->open.file.path_key.mount_id); - if (syscall->discarded) { + if (syscall->state == DISCARDED) { pop_syscall(EVENT_OPEN); return 0; } syscall->resolver.key = syscall->open.file.path_key; syscall->resolver.dentry = syscall->open.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_OPEN : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = select_dr_key(dr_type, DR_OPEN_CALLBACK_KPROBE_KEY, DR_OPEN_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/rename.h b/pkg/security/ebpf/c/include/hooks/rename.h index 4cef2525162b7..3cd813cac99f2 100644 --- a/pkg/security/ebpf/c/include/hooks/rename.h +++ b/pkg/security/ebpf/c/include/hooks/rename.h @@ -90,14 +90,15 @@ int hook_vfs_rename(ctx_t *ctx) { } // always return after any invalidate_inode call - if (filter_syscall(syscall, rename_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, rename_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } // the mount id of path_key is resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. syscall->resolver.dentry = syscall->rename.src_dentry; syscall->resolver.key = syscall->rename.src_file.path_key; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = DR_NO_CALLBACK; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -128,8 +129,6 @@ int __attribute__((always_inline)) sys_rename_ret(void *ctx, int retval, int dr_ expire_inode_discarders(syscall->rename.target_file.path_key.mount_id, inode); } - int pass_to_userspace = !syscall->discarded && is_event_enabled(EVENT_RENAME); - // invalid discarder + path_id if (retval >= 0) { expire_inode_discarders(syscall->rename.target_file.path_key.mount_id, syscall->rename.target_file.path_key.ino); @@ -141,11 +140,11 @@ int __attribute__((always_inline)) sys_rename_ret(void *ctx, int retval, int dr_ } } - if (pass_to_userspace) { + if (syscall->state != DISCARDED && is_event_enabled(EVENT_RENAME)) { // for centos7, use src dentry for target resolution as the 
pointers have been swapped syscall->resolver.key = syscall->rename.target_file.path_key; syscall->resolver.dentry = syscall->rename.src_dentry; - syscall->resolver.discarder_type = 0; + syscall->resolver.discarder_event_type = 0; syscall->resolver.callback = select_dr_key(dr_type, DR_RENAME_CALLBACK_KPROBE_KEY, DR_RENAME_CALLBACK_TRACEPOINT_KEY); syscall->resolver.iteration = 0; syscall->resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/rmdir.h b/pkg/security/ebpf/c/include/hooks/rmdir.h index 71fc46ec73f05..f8b0b75a09f82 100644 --- a/pkg/security/ebpf/c/include/hooks/rmdir.h +++ b/pkg/security/ebpf/c/include/hooks/rmdir.h @@ -58,8 +58,11 @@ int hook_security_inode_rmdir(ctx_t *ctx) { key = syscall->rmdir.file.path_key; syscall->rmdir.dentry = dentry; - if (filter_syscall(syscall, rmdir_approvers)) { - return mark_as_discarded(syscall); + syscall->policy = fetch_policy(EVENT_RMDIR); + + if (approve_syscall(syscall, rmdir_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } break; @@ -77,9 +80,13 @@ int hook_security_inode_rmdir(ctx_t *ctx) { key = syscall->unlink.file.path_key; syscall->unlink.dentry = dentry; + + // fake rmdir event as we will generate an rmdir event at the end syscall->policy = fetch_policy(EVENT_RMDIR); + + if (approve_syscall(syscall, rmdir_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } break; @@ -90,7 +97,7 @@ int hook_security_inode_rmdir(ctx_t *ctx) { if (dentry != NULL) { syscall->resolver.key = key; syscall->resolver.dentry = dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
syscall->type : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_SECURITY_INODE_RMDIR_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -111,8 +118,9 @@ int tail_call_target_dr_security_inode_rmdir_callback(ctx_t *ctx) { } if (syscall->resolver.ret == DENTRY_DISCARDED) { - monitor_discarded(EVENT_RMDIR); - return mark_as_discarded(syscall); + monitor_discarded(syscall->type); + // do not pop, we want to invalidate the inode even if the syscall is discarded + syscall->state = DISCARDED; } return 0; } @@ -127,8 +135,7 @@ int __attribute__((always_inline)) sys_rmdir_ret(void *ctx, int retval) { return 0; } - int pass_to_userspace = !syscall->discarded && is_event_enabled(EVENT_RMDIR); - if (pass_to_userspace) { + if (syscall->state != DISCARDED && is_event_enabled(EVENT_RMDIR)) { struct rmdir_event_t event = { .syscall.retval = retval, .event.flags = syscall->async ? EVENT_FLAGS_ASYNC : 0, diff --git a/pkg/security/ebpf/c/include/hooks/selinux.h b/pkg/security/ebpf/c/include/hooks/selinux.h index aa9d7084823b9..f6efc208eb1bb 100644 --- a/pkg/security/ebpf/c/include/hooks/selinux.h +++ b/pkg/security/ebpf/c/include/hooks/selinux.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) handle_selinux_event(void *ctx, struct file *file, const char *buf, size_t count, enum selinux_source_event_t source_event) { + if (is_discarded_by_pid()) { + return 0; + } + struct syscall_cache_t syscall = { .type = EVENT_SELINUX, .policy = fetch_policy(EVENT_SELINUX), @@ -55,7 +59,7 @@ int __attribute__((always_inline)) handle_selinux_event(void *ctx, struct file * syscall.resolver.key = syscall.selinux.file.path_key; syscall.resolver.dentry = syscall.selinux.dentry; - syscall.resolver.discarder_type = syscall.policy.mode != NO_FILTER ? 
EVENT_SELINUX : 0; + syscall.resolver.discarder_event_type = dentry_resolver_discarder_event_type(&syscall); syscall.resolver.callback = DR_SELINUX_CALLBACK_KPROBE_KEY; syscall.resolver.iteration = 0; syscall.resolver.ret = 0; diff --git a/pkg/security/ebpf/c/include/hooks/setattr.h b/pkg/security/ebpf/c/include/hooks/setattr.h index d271283fee02d..4a4b3729283e6 100644 --- a/pkg/security/ebpf/c/include/hooks/setattr.h +++ b/pkg/security/ebpf/c/include/hooks/setattr.h @@ -63,20 +63,23 @@ int hook_security_inode_setattr(ctx_t *ctx) { u64 event_type = 0; switch (syscall->type) { case EVENT_UTIME: - if (filter_syscall(syscall, utime_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, utime_approvers) == DISCARDED) { + pop_syscall(EVENT_UTIME); + return 0; } event_type = EVENT_UTIME; break; case EVENT_CHMOD: - if (filter_syscall(syscall, chmod_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, chmod_approvers) == DISCARDED) { + pop_syscall(EVENT_CHMOD); + return 0; } event_type = EVENT_CHMOD; break; case EVENT_CHOWN: - if (filter_syscall(syscall, chown_approvers)) { - return discard_syscall(syscall); + if (approve_syscall(syscall, chown_approvers) == DISCARDED) { + pop_syscall(EVENT_CHOWN); + return 0; } event_type = EVENT_CHOWN; break; @@ -84,7 +87,7 @@ int hook_security_inode_setattr(ctx_t *ctx) { syscall->resolver.dentry = syscall->setattr.dentry; syscall->resolver.key = syscall->setattr.file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
event_type : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_SETATTR_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -106,7 +109,7 @@ int tail_call_target_dr_setattr_callback(ctx_t *ctx) { if (syscall->resolver.ret == DENTRY_DISCARDED) { monitor_discarded(syscall->type); - return discard_syscall(syscall); + pop_syscall(syscall->resolver.discarder_event_type); } return 0; diff --git a/pkg/security/ebpf/c/include/hooks/setxattr.h b/pkg/security/ebpf/c/include/hooks/setxattr.h index f97a33e160f3b..1590a5507199e 100644 --- a/pkg/security/ebpf/c/include/hooks/setxattr.h +++ b/pkg/security/ebpf/c/include/hooks/setxattr.h @@ -7,6 +7,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_setxattr(const char *xattr_name) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_SETXATTR); struct syscall_cache_t syscall = { .type = EVENT_SETXATTR, @@ -84,7 +88,7 @@ int __attribute__((always_inline)) trace__vfs_setxattr(ctx_t *ctx, u64 event_typ // the mount id of path_key is resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. syscall->resolver.dentry = syscall->xattr.dentry; syscall->resolver.key = syscall->xattr.file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
event_type : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_SETXATTR_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -106,7 +110,7 @@ int tail_call_target_dr_setxattr_callback(ctx_t *ctx) { if (syscall->resolver.ret == DENTRY_DISCARDED) { monitor_discarded(EVENT_SETXATTR); - return discard_syscall(syscall); + pop_syscall(EVENT_SETXATTR); } return 0; diff --git a/pkg/security/ebpf/c/include/hooks/signal.h b/pkg/security/ebpf/c/include/hooks/signal.h index 8b6d11fb80206..92dbd145d7992 100644 --- a/pkg/security/ebpf/c/include/hooks/signal.h +++ b/pkg/security/ebpf/c/include/hooks/signal.h @@ -6,6 +6,10 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY2(kill, int, pid, int, type) { + if (is_discarded_by_pid()) { + return 0; + } + /* TODO: implement the event for pid equal to 0 or -1. */ if (pid < 1) { return 0; diff --git a/pkg/security/ebpf/c/include/hooks/splice.h b/pkg/security/ebpf/c/include/hooks/splice.h index 7dff0490400e7..4d289c94ab615 100644 --- a/pkg/security/ebpf/c/include/hooks/splice.h +++ b/pkg/security/ebpf/c/include/hooks/splice.h @@ -9,6 +9,10 @@ #include "helpers/syscalls.h" HOOK_SYSCALL_ENTRY0(splice) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_SPLICE); struct syscall_cache_t syscall = { .type = EVENT_SPLICE, @@ -50,9 +54,9 @@ int rethook_get_pipe_info(ctx_t *ctx) { syscall->splice.file_found = 1; syscall->resolver.key = syscall->splice.file.path_key; syscall->resolver.dentry = syscall->splice.dentry; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_SPLICE : 0; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); resolve_dentry(ctx, DR_KPROBE_OR_FENTRY); @@ -87,8 +91,8 @@ int __attribute__((always_inline)) sys_splice_ret(void *ctx, int retval) { syscall->splice.pipe_exit_flag = get_pipe_last_buffer_flags(syscall->splice.pipe_info, syscall->splice.bufs); } - if (filter_syscall(syscall, splice_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, splice_approvers) == DISCARDED) { + return 0; } struct splice_event_t event = { diff --git a/pkg/security/ebpf/c/include/hooks/unlink.h b/pkg/security/ebpf/c/include/hooks/unlink.h index 55f97e367494a..aefe174c7465a 100644 --- a/pkg/security/ebpf/c/include/hooks/unlink.h +++ b/pkg/security/ebpf/c/include/hooks/unlink.h @@ -69,14 +69,15 @@ int hook_vfs_unlink(ctx_t *ctx) { set_file_inode(dentry, &syscall->unlink.file, 1); fill_file(dentry, &syscall->unlink.file); - if (filter_syscall(syscall, unlink_approvers)) { - return mark_as_discarded(syscall); + if (approve_syscall(syscall, unlink_approvers) == DISCARDED) { + // do not pop, we want to invalidate the inode even if the syscall is discarded + return 0; } // the mount id of path_key is resolved by kprobe/mnt_want_write. It is already set by the time we reach this probe. syscall->resolver.dentry = dentry; syscall->resolver.key = syscall->unlink.file.path_key; - syscall->resolver.discarder_type = syscall->policy.mode != NO_FILTER ? 
EVENT_UNLINK : 0; + syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); syscall->resolver.callback = DR_UNLINK_CALLBACK_KPROBE_KEY; syscall->resolver.iteration = 0; syscall->resolver.ret = 0; @@ -96,8 +97,10 @@ int tail_call_target_dr_unlink_callback(ctx_t *ctx) { return 0; } - if (syscall->resolver.ret < 0) { - return mark_as_discarded(syscall); + if (syscall->resolver.ret == DENTRY_DISCARDED) { + monitor_discarded(EVENT_UNLINK); + // do not pop, we want to invalidate the inode even if the syscall is discarded + syscall->state = DISCARDED; } return 0; @@ -114,10 +117,7 @@ int __attribute__((always_inline)) sys_unlink_ret(void *ctx, int retval) { } u64 enabled_events = get_enabled_events(); - int pass_to_userspace = !syscall->discarded && - (mask_has_event(enabled_events, EVENT_UNLINK) || - mask_has_event(enabled_events, EVENT_RMDIR)); - if (pass_to_userspace) { + if (syscall->state != DISCARDED && (mask_has_event(enabled_events, EVENT_UNLINK) || mask_has_event(enabled_events, EVENT_RMDIR))) { if (syscall->unlink.flags & AT_REMOVEDIR) { struct rmdir_event_t event = { .syscall.retval = retval, diff --git a/pkg/security/ebpf/c/include/hooks/utimes.h b/pkg/security/ebpf/c/include/hooks/utimes.h index 88bdc8d44c8ae..8cb865dc7f998 100644 --- a/pkg/security/ebpf/c/include/hooks/utimes.h +++ b/pkg/security/ebpf/c/include/hooks/utimes.h @@ -6,6 +6,10 @@ #include "helpers/syscalls.h" int __attribute__((always_inline)) trace__sys_utimes(const char *filename) { + if (is_discarded_by_pid()) { + return 0; + } + struct policy_t policy = fetch_policy(EVENT_UTIME); struct syscall_cache_t syscall = { .type = EVENT_UTIME, diff --git a/pkg/security/ebpf/c/include/maps.h b/pkg/security/ebpf/c/include/maps.h index 91cca71ffe8ec..e860c70dc5b72 100644 --- a/pkg/security/ebpf/c/include/maps.h +++ b/pkg/security/ebpf/c/include/maps.h @@ -32,12 +32,14 @@ BPF_HASH_MAP(activity_dump_config_defaults, u32, struct activity_dump_config, 1) 
BPF_HASH_MAP(traced_cgroups, container_id_t, u64, 1) // max entries will be overridden at runtime BPF_HASH_MAP(cgroup_wait_list, container_id_t, u64, 1) // max entries will be overridden at runtime BPF_HASH_MAP(traced_pids, u32, u64, 8192) // max entries will be overridden at runtime -BPF_HASH_MAP(basename_approvers, struct basename_t, struct basename_filter_t, 255) +BPF_HASH_MAP(basename_approvers, struct basename_t, struct event_mask_filter_t, 255) BPF_HASH_MAP(register_netdevice_cache, u64, struct register_netdevice_cache_t, 1024) BPF_HASH_MAP(netdevice_lookup_cache, u64, struct device_ifindex_t, 1024) BPF_HASH_MAP(fd_link_pid, u8, u32, 1) BPF_HASH_MAP(security_profiles, container_id_t, struct security_profile_t, 1) // max entries will be overriden at runtime BPF_HASH_MAP(secprofs_syscalls, u64, struct security_profile_syscalls_t, 1) // max entries will be overriden at runtime +BPF_HASH_MAP(auid_approvers, u32, struct event_mask_filter_t, 128) +BPF_HASH_MAP(auid_range_approvers, u32, struct u32_range_filter_t, EVENT_MAX) BPF_LRU_MAP(activity_dump_rate_limiters, u64, struct activity_dump_rate_limiter_ctx, 1) // max entries will be overridden at runtime BPF_LRU_MAP(mount_ref, u32, struct mount_ref_t, 64000) diff --git a/pkg/security/ebpf/c/include/structs/dentry_resolver.h b/pkg/security/ebpf/c/include/structs/dentry_resolver.h index d60012aa0e38d..032d5053d3f7d 100644 --- a/pkg/security/ebpf/c/include/structs/dentry_resolver.h +++ b/pkg/security/ebpf/c/include/structs/dentry_resolver.h @@ -33,7 +33,7 @@ struct dentry_resolver_input_t { struct path_key_t key; struct path_key_t original_key; struct dentry *dentry; - u64 discarder_type; + u64 discarder_event_type; s64 sysretval; int callback; int ret; diff --git a/pkg/security/ebpf/c/include/structs/filter.h b/pkg/security/ebpf/c/include/structs/filter.h index 9d876d0a89ef9..78f99c0d19c00 100644 --- a/pkg/security/ebpf/c/include/structs/filter.h +++ b/pkg/security/ebpf/c/include/structs/filter.h @@ -14,13 +14,14 @@ 
struct policy_t { struct approver_stats_t { u64 event_approved_by_basename; u64 event_approved_by_flag; + u64 event_approved_by_auid; }; struct basename_t { char value[BASENAME_FILTER_SIZE]; }; -struct basename_filter_t { +struct event_mask_filter_t { u64 event_mask; }; @@ -34,6 +35,11 @@ struct u64_flags_filter_t { u8 is_set; }; +struct u32_range_filter_t { + u32 min; + u32 max; +}; + // Discarders struct discarder_stats_t { @@ -61,7 +67,7 @@ struct inode_discarder_t { }; struct is_discarded_by_inode_t { - u64 discarder_type; + u64 event_type; struct inode_discarder_t discarder; u64 now; }; diff --git a/pkg/security/ebpf/c/include/structs/syscalls.h b/pkg/security/ebpf/c/include/structs/syscalls.h index 3fe9851827624..7217ec8a54810 100644 --- a/pkg/security/ebpf/c/include/structs/syscalls.h +++ b/pkg/security/ebpf/c/include/structs/syscalls.h @@ -32,7 +32,7 @@ struct syscall_table_key_t { struct syscall_cache_t { struct policy_t policy; u64 type; - u8 discarded; + enum SYSCALL_STATE state; u8 async; u32 ctx_id; struct dentry_resolver_input_t resolver; diff --git a/pkg/security/ebpf/c/include/tests/discarders_test.h b/pkg/security/ebpf/c/include/tests/discarders_test.h index 074f179b8e7e7..6738e64eca392 100644 --- a/pkg/security/ebpf/c/include/tests/discarders_test.h +++ b/pkg/security/ebpf/c/include/tests/discarders_test.h @@ -6,7 +6,7 @@ int __attribute__((always_inline)) _is_discarded_by_inode(u64 event_type, u32 mount_id, u64 inode) { struct is_discarded_by_inode_t params = { - .discarder_type = event_type, + .event_type = event_type, .discarder = { .path_key.ino = inode, .path_key.mount_id = mount_id, diff --git a/pkg/security/ebpf/map.go b/pkg/security/ebpf/map.go index 99e39dff3dbf5..cb2fcf84ae9f7 100644 --- a/pkg/security/ebpf/map.go +++ b/pkg/security/ebpf/map.go @@ -126,6 +126,25 @@ func NewUint64FlagsMapItem(i uint64) *Uint64FlagsMapItem { return &item } +// UInt32RangeMapItem defines a uint32 range map item +type UInt32RangeMapItem struct { + Min uint32 
+ Max uint32 +} + +// MarshalBinary returns the binary representation of a UInt32RangeMapItem +func (i *UInt32RangeMapItem) MarshalBinary() ([]byte, error) { + b := make([]byte, 8) + binary.NativeEndian.PutUint32(b, i.Min) + binary.NativeEndian.PutUint32(b[4:], i.Max) + return b, nil +} + +// NewUInt32RangeMapItem returns a new UInt32RangeMapItem +func NewUInt32RangeMapItem(min, max uint32) *UInt32RangeMapItem { + return &UInt32RangeMapItem{Min: min, Max: max} +} + // Zero table items var ( ZeroUint8MapItem = BytesMapItem([]byte{0}) diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go index 4f2c298989e75..863a5dedc01a5 100644 --- a/pkg/security/module/cws.go +++ b/pkg/security/module/cws.go @@ -320,3 +320,11 @@ func (c *CWSConsumer) statsSender() { func (c *CWSConsumer) GetRuleEngine() *rulesmodule.RuleEngine { return c.ruleEngine } + +// PrepareForFunctionalTests tweaks the module to be ready for functional tests +// currently it: +// - disables the container running telemetry +func (c *CWSConsumer) PrepareForFunctionalTests() { + // no need for container running telemetry in functional tests + c.crtelemetry = nil +} diff --git a/pkg/security/module/msg_sender.go b/pkg/security/module/msg_sender.go index b19499faf47a0..eddd3ebf67f0d 100644 --- a/pkg/security/module/msg_sender.go +++ b/pkg/security/module/msg_sender.go @@ -9,7 +9,7 @@ package module import ( "fmt" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/reporter" @@ -73,7 +73,7 @@ func (ds *DirectMsgSender) Send(msg *api.SecurityEventMessage, _ func(*api.Secur // NewDirectMsgSender returns a new direct sender func NewDirectMsgSender(stopper startstop.Stopper) (*DirectMsgSender, error) { - useSecRuntimeTrack := 
pkgconfig.SystemProbe().GetBool("runtime_security_config.use_secruntime_track") + useSecRuntimeTrack := pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.use_secruntime_track") endpoints, destinationsCtx, err := common.NewLogContextRuntime(useSecRuntimeTrack) if err != nil { diff --git a/pkg/security/module/server.go b/pkg/security/module/server.go index d07dd25e5e871..dc314d9d42f10 100644 --- a/pkg/security/module/server.go +++ b/pkg/security/module/server.go @@ -21,7 +21,7 @@ import ( "github.com/mailru/easyjson" "go.uber.org/atomic" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" @@ -32,7 +32,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/probe/selftests" "github.com/DataDog/datadog-agent/pkg/security/proto/api" "github.com/DataDog/datadog-agent/pkg/security/rules/monitor" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -44,48 +43,65 @@ import ( ) const ( - maxRetry = 3 + maxRetry = 10 retryDelay = time.Second ) type pendingMsg struct { - ruleID string - backendEvent events.BackendEvent - eventJSON []byte - tags []string - actionReports []model.ActionReport - service string - extTagsCb func() []string - sendAfter time.Time - retry int + ruleID string + backendEvent events.BackendEvent + eventSerializer *serializers.EventSerializer + tags []string + actionReports []model.ActionReport + service string + extTagsCb func() []string + sendAfter time.Time + retry int } -func (p *pendingMsg) ToJSON() ([]byte, bool, error) { - fullyResolved := true +func (p *pendingMsg) isResolved() bool { + for _, report := 
range p.actionReports { + if !report.IsResolved() { + return false + } + } + return true +} +func (p *pendingMsg) toJSON() ([]byte, error) { p.backendEvent.RuleActions = []json.RawMessage{} for _, report := range p.actionReports { - data, resolved, err := report.ToJSON() + if patcher, ok := report.(serializers.EventSerializerPatcher); ok { + patcher.PatchEvent(p.eventSerializer) + } + + data, err := report.ToJSON() if err != nil { - return nil, false, err + return nil, err } - p.backendEvent.RuleActions = append(p.backendEvent.RuleActions, data) - if !resolved { - fullyResolved = false + if len(data) > 0 { + p.backendEvent.RuleActions = append(p.backendEvent.RuleActions, data) } } backendEventJSON, err := easyjson.Marshal(p.backendEvent) if err != nil { - return nil, false, err + return nil, err } - data := append(backendEventJSON[:len(backendEventJSON)-1], ',') - data = append(data, p.eventJSON[1:]...) + eventJSON, err := p.eventSerializer.ToJSON() + if err != nil { + return nil, err + } - return data, fullyResolved, nil + return mergeJSON(backendEventJSON, eventJSON), nil +} + +func mergeJSON(j1, j2 []byte) []byte { + data := append(j1[:len(j1)-1], ',') + return append(data, j2[1:]...) 
} // APIServer represents a gRPC server in charge of receiving events sent by @@ -191,7 +207,7 @@ func (a *APIServer) dequeue(now time.Time, cb func(msg *pendingMsg) bool) { seclog.Errorf("failed to sent event, max retry reached: %d", msg.retry) return true } - seclog.Debugf("failed to sent event, retry %d/%d", msg.retry, maxRetry) + seclog.Tracef("failed to sent event, retry %d/%d", msg.retry, maxRetry) msg.sendAfter = now.Add(retryDelay) msg.retry++ @@ -229,17 +245,17 @@ func (a *APIServer) start(ctx context.Context) { } } - data, resolved, err := msg.ToJSON() + // not fully resolved, retry + if !msg.isResolved() && msg.retry < maxRetry { + return false + } + + data, err := msg.toJSON() if err != nil { seclog.Errorf("failed to marshal event context: %v", err) return true } - // not fully resolved, retry - if !resolved && msg.retry < maxRetry { - return false - } - seclog.Tracef("Sending event message for rule `%s` to security-agent `%s`", msg.ruleID, string(data)) m := &api.SecurityEventMessage{ @@ -278,7 +294,7 @@ func (a *APIServer) GetConfig(_ context.Context, _ *api.GetConfigParams) (*api.S } // SendEvent forwards events sent by the runtime security module to Datadog -func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() []string, service string) { +func (a *APIServer) SendEvent(rule *rules.Rule, event events.Event, extTagsCb func() []string, service string) { backendEvent := events.BackendEvent{ Title: rule.Def.Description, AgentContext: events.AgentContext{ @@ -296,13 +312,7 @@ func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() backendEvent.AgentContext.PolicyVersion = policy.Def.Version } - eventJSON, err := marshalEvent(e, rule.Opts) - if err != nil { - seclog.Errorf("failed to marshal event: %v", err) - return - } - - seclog.Tracef("Prepare event message for rule `%s` : `%s`", rule.ID, string(eventJSON)) + seclog.Tracef("Prepare event message for rule `%s`", rule.ID) // no retention if there is no 
ext tags to resolve retention := a.retention @@ -310,51 +320,78 @@ func (a *APIServer) SendEvent(rule *rules.Rule, e events.Event, extTagsCb func() retention = 0 } - // get type tags + container tags if already resolved, see ResolveContainerTags - eventTags := e.GetTags() - ruleID := rule.Def.ID if rule.Def.GroupID != "" { ruleID = rule.Def.GroupID } - eventActionReports := e.GetActionReports() - actionReports := make([]model.ActionReport, 0, len(eventActionReports)) - for _, ar := range eventActionReports { - if ar.IsMatchingRule(rule.ID) { - actionReports = append(actionReports, ar) + // get type tags + container tags if already resolved, see ResolveContainerTags + eventTags := event.GetTags() + + tags := []string{"rule_id:" + ruleID} + tags = append(tags, rule.Tags...) + tags = append(tags, eventTags...) + tags = append(tags, common.QueryAccountIDTag()) + + // model event or custom event ? if model event use queuing so that tags and actions can be handled + if ev, ok := event.(*model.Event); ok { + //return serializers.MarshalEvent(ev, opts) + eventActionReports := ev.GetActionReports() + actionReports := make([]model.ActionReport, 0, len(eventActionReports)) + for _, ar := range eventActionReports { + if ar.IsMatchingRule(rule.ID) { + actionReports = append(actionReports, ar) + } } - } - msg := &pendingMsg{ - ruleID: ruleID, - backendEvent: backendEvent, - eventJSON: eventJSON, - extTagsCb: extTagsCb, - service: service, - sendAfter: time.Now().Add(retention), - tags: make([]string, 0, 1+len(rule.Tags)+len(eventTags)+1), - actionReports: actionReports, - } + msg := &pendingMsg{ + ruleID: ruleID, + backendEvent: backendEvent, + eventSerializer: serializers.NewEventSerializer(ev, rule.Opts), + extTagsCb: extTagsCb, + service: service, + sendAfter: time.Now().Add(retention), + tags: tags, + actionReports: actionReports, + } - msg.tags = append(msg.tags, "rule_id:"+ruleID) - msg.tags = append(msg.tags, rule.Tags...) - msg.tags = append(msg.tags, eventTags...) 
- msg.tags = append(msg.tags, common.QueryAccountIDTag()) + a.enqueue(msg) + } else { + var ( + backendEventJSON []byte + eventJSON []byte + err error + ) + backendEventJSON, err = easyjson.Marshal(backendEvent) + if err != nil { + seclog.Errorf("failed to marshal event: %v", err) + } - a.enqueue(msg) -} + if ev, ok := event.(events.EventMarshaler); ok { + if eventJSON, err = ev.ToJSON(); err != nil { + seclog.Errorf("failed to marshal event: %v", err) + return + } + } else { + if eventJSON, err = json.Marshal(event); err != nil { + seclog.Errorf("failed to marshal event: %v", err) + return + } + } -func marshalEvent(event events.Event, opts *eval.Opts) ([]byte, error) { - if ev, ok := event.(*model.Event); ok { - return serializers.MarshalEvent(ev, opts) - } + data := mergeJSON(backendEventJSON, eventJSON) - if ev, ok := event.(events.EventMarshaler); ok { - return ev.ToJSON() - } + seclog.Tracef("Sending event message for rule `%s` to security-agent `%s`", ruleID, string(data)) - return json.Marshal(event) + m := &api.SecurityEventMessage{ + RuleID: ruleID, + Data: data, + Service: service, + Tags: tags, + } + + a.msgSender.Send(m, a.expireEvent) + } } // expireEvent updates the count of expired messages for the appropriate rule @@ -508,7 +545,7 @@ func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, msgSen } if as.msgSender == nil { - if pkgconfig.SystemProbe().GetBool("runtime_security_config.direct_send_from_system_probe") { + if pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.direct_send_from_system_probe") { msgSender, err := NewDirectMsgSender(stopper) if err != nil { log.Errorf("failed to setup direct reporter: %v", err) diff --git a/pkg/security/probe/actions.go b/pkg/security/probe/actions.go index 12ecf8d37987d..4f1ac2a46621e 100644 --- a/pkg/security/probe/actions.go +++ b/pkg/security/probe/actions.go @@ -28,10 +28,10 @@ type KillActionReport struct { DetectedAt time.Time KilledAt time.Time ExitedAt time.Time - Rule 
*rules.Rule // internal resolved bool + rule *rules.Rule } // JKillActionReport used to serialize date @@ -47,13 +47,19 @@ type JKillActionReport struct { TTR string `json:"ttr,omitempty"` } -// ToJSON marshal the action -func (k *KillActionReport) ToJSON() ([]byte, bool, error) { +// IsResolved return if the action is resolved +func (k *KillActionReport) IsResolved() bool { k.RLock() defer k.RUnlock() // for sigkill wait for exit - resolved := k.Signal != "SIGKILL" || k.resolved + return k.Signal != "SIGKILL" || k.resolved +} + +// ToJSON marshal the action +func (k *KillActionReport) ToJSON() ([]byte, error) { + k.RLock() + defer k.RUnlock() jk := JKillActionReport{ Type: rules.KillAction, @@ -71,10 +77,10 @@ func (k *KillActionReport) ToJSON() ([]byte, bool, error) { data, err := utils.MarshalEasyJSON(jk) if err != nil { - return nil, false, err + return nil, err } - return data, resolved, nil + return data, nil } // IsMatchingRule returns true if this action report is targeted at the given rule ID @@ -82,5 +88,5 @@ func (k *KillActionReport) IsMatchingRule(ruleID eval.RuleID) bool { k.RLock() defer k.RUnlock() - return k.Rule.ID == ruleID + return k.rule.ID == ruleID } diff --git a/pkg/security/probe/actions_linux.go b/pkg/security/probe/actions_linux.go new file mode 100644 index 0000000000000..54126bb2b12f5 --- /dev/null +++ b/pkg/security/probe/actions_linux.go @@ -0,0 +1,91 @@ +//go:generate go run github.com/mailru/easyjson/easyjson -gen_build_flags=-mod=mod -no_std_marshalers -build_tags linux $GOFILE + +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package probe holds probe related files +package probe + +import ( + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/serializers" + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +const ( + // HashTriggerTimeout hash triggered because of a timeout + HashTriggerTimeout = "timeout" + // HashTriggerProcessExit hash triggered on process exit + HashTriggerProcessExit = "process_exit" +) + +// HashActionReport defines a hash action reports +// easyjson:json +type HashActionReport struct { + sync.RWMutex + + Type string `json:"type"` + Path string `json:"path"` + State string `json:"state"` + Trigger string `json:"trigger"` + + // internal + resolved bool + rule *rules.Rule + pid uint32 + seenAt time.Time + fileEvent model.FileEvent + crtID containerutils.ContainerID + eventType model.EventType +} + +// IsResolved return if the action is resolved +func (k *HashActionReport) IsResolved() bool { + k.RLock() + defer k.RUnlock() + + return k.resolved +} + +// ToJSON marshal the action +func (k *HashActionReport) ToJSON() ([]byte, error) { + k.Lock() + defer k.Unlock() + + k.Type = rules.HashAction + k.Path = k.fileEvent.PathnameStr + k.State = k.fileEvent.HashState.String() + + data, err := utils.MarshalEasyJSON(k) + if err != nil { + return nil, err + } + + return data, nil +} + +// IsMatchingRule returns true if this action report is targeted at the given rule ID +func (k *HashActionReport) IsMatchingRule(ruleID eval.RuleID) bool { + k.RLock() + defer k.RUnlock() + + return k.rule.ID == ruleID +} + +// PatchEvent implements the EventSerializerPatcher interface +func (k *HashActionReport) PatchEvent(ev *serializers.EventSerializer) { + if ev.FileEventSerializer == nil { + return + } 
+ + ev.FileEventSerializer.HashState = k.fileEvent.HashState.String() + ev.FileEventSerializer.Hashes = k.fileEvent.Hashes +} diff --git a/pkg/security/probe/actions_linux_easyjson.go b/pkg/security/probe/actions_linux_easyjson.go new file mode 100644 index 0000000000000..5ab1d450c2823 --- /dev/null +++ b/pkg/security/probe/actions_linux_easyjson.go @@ -0,0 +1,95 @@ +//go:build linux +// +build linux + +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. + +package probe + +import ( + json "encoding/json" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjson7cab6e30DecodeGithubComDataDogDatadogAgentPkgSecurityProbe(in *jlexer.Lexer, out *HashActionReport) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "type": + out.Type = string(in.String()) + case "path": + out.Path = string(in.String()) + case "state": + out.State = string(in.String()) + case "trigger": + out.Trigger = string(in.String()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson7cab6e30EncodeGithubComDataDogDatadogAgentPkgSecurityProbe(out *jwriter.Writer, in HashActionReport) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"type\":" + out.RawString(prefix[1:]) + out.String(string(in.Type)) + } + { + const prefix string = ",\"path\":" + out.RawString(prefix) + out.String(string(in.Path)) + } + { + const prefix string = ",\"state\":" + out.RawString(prefix) + out.String(string(in.State)) + } + { + const prefix 
string = ",\"trigger\":" + out.RawString(prefix) + out.String(string(in.Trigger)) + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v HashActionReport) MarshalEasyJSON(w *jwriter.Writer) { + easyjson7cab6e30EncodeGithubComDataDogDatadogAgentPkgSecurityProbe(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *HashActionReport) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson7cab6e30DecodeGithubComDataDogDatadogAgentPkgSecurityProbe(l, v) +} diff --git a/pkg/security/probe/config/config.go b/pkg/security/probe/config/config.go index 5ddd7ab15913c..879a231222153 100644 --- a/pkg/security/probe/config/config.go +++ b/pkg/security/probe/config/config.go @@ -14,8 +14,8 @@ import ( "time" sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -141,7 +141,7 @@ type Config struct { // NewConfig returns a new Config object func NewConfig() (*Config, error) { - sysconfig.Adjust(coreconfig.SystemProbe()) + sysconfig.Adjust(pkgconfigsetup.SystemProbe()) setEnv() @@ -173,8 +173,8 @@ func NewConfig() (*Config, error) { SyscallsMonitorEnabled: getBool("syscalls_monitor.enabled"), // event server - SocketPath: coreconfig.SystemProbe().GetString(join(evNS, "socket")), - EventServerBurst: coreconfig.SystemProbe().GetInt(join(evNS, "event_server.burst")), + SocketPath: pkgconfigsetup.SystemProbe().GetString(join(evNS, "socket")), + EventServerBurst: pkgconfigsetup.SystemProbe().GetInt(join(evNS, "event_server.burst")), // runtime compilation RuntimeCompilationEnabled: getBool("runtime_compilation.enabled"), @@ -267,41 +267,41 @@ func getAllKeys(key string) (string, string) { 
func isSet(key string) bool { deprecatedKey, newKey := getAllKeys(key) - return coreconfig.SystemProbe().IsSet(deprecatedKey) || coreconfig.SystemProbe().IsSet(newKey) + return pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) || pkgconfigsetup.SystemProbe().IsSet(newKey) } func getBool(key string) bool { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetBool(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetBool(deprecatedKey) } - return coreconfig.SystemProbe().GetBool(newKey) + return pkgconfigsetup.SystemProbe().GetBool(newKey) } func getInt(key string) int { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetInt(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetInt(deprecatedKey) } - return coreconfig.SystemProbe().GetInt(newKey) + return pkgconfigsetup.SystemProbe().GetInt(newKey) } func getString(key string) string { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetString(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetString(deprecatedKey) } - return coreconfig.SystemProbe().GetString(newKey) + return pkgconfigsetup.SystemProbe().GetString(newKey) } func getStringSlice(key string) []string { deprecatedKey, newKey := getAllKeys(key) - if coreconfig.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s 
instead", deprecatedKey, newKey) - return coreconfig.SystemProbe().GetStringSlice(deprecatedKey) + return pkgconfigsetup.SystemProbe().GetStringSlice(deprecatedKey) } - return coreconfig.SystemProbe().GetStringSlice(newKey) + return pkgconfigsetup.SystemProbe().GetStringSlice(newKey) } diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants.json index 7bedf2df1b260..d0c78dd49e524 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants.json @@ -18253,6 +18253,13 @@ "uname_release": "4.14.35-2047.539.5.el7uek.aarch64", "cindex": 89 }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.540.4.1.el7uek.aarch64", + "cindex": 89 + }, { "distrib": "ol", "version": "7", @@ -19065,6 +19072,13 @@ "uname_release": "3.10.0-1160.119.1.0.2.el7.x86_64", "cindex": 93 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-1160.119.1.0.3.el7.x86_64", + "cindex": 93 + }, { "distrib": "ol", "version": "7", @@ -23671,6 +23685,13 @@ "uname_release": "4.14.35-2047.540.3.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.540.4.1.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", @@ -23685,6 +23706,20 @@ "uname_release": "4.14.35-2047.541.1.el7uek.x86_64", "cindex": 96 }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.541.2.el7uek.x86_64", + "cindex": 96 + }, + { + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.541.3.el7uek.x86_64", + "cindex": 96 + }, { "distrib": "ol", "version": "7", diff --git a/pkg/security/probe/field_handlers_ebpf.go b/pkg/security/probe/field_handlers_ebpf.go index b9cb03e12cfc7..da30fd83e235f 100644 --- a/pkg/security/probe/field_handlers_ebpf.go +++ 
b/pkg/security/probe/field_handlers_ebpf.go @@ -178,7 +178,10 @@ func (fh *EBPFFieldHandlers) ResolveMountRootPath(ev *model.Event, e *model.Moun func (fh *EBPFFieldHandlers) ResolveContainerContext(ev *model.Event) (*model.ContainerContext, bool) { if ev.ContainerContext.ContainerID != "" && !ev.ContainerContext.Resolved { if containerContext, _ := fh.resolvers.CGroupResolver.GetWorkload(string(ev.ContainerContext.ContainerID)); containerContext != nil { - ev.ContainerContext = &containerContext.ContainerContext + if containerContext.CGroupFlags.IsContainer() { + ev.ContainerContext = &containerContext.ContainerContext + } + ev.ContainerContext.Resolved = true } } @@ -239,12 +242,18 @@ func (fh *EBPFFieldHandlers) ResolveProcessArgv0(_ *model.Event, process *model. // ResolveProcessArgs resolves the args of the event func (fh *EBPFFieldHandlers) ResolveProcessArgs(ev *model.Event, process *model.Process) string { - return strings.Join(fh.ResolveProcessArgv(ev, process), " ") + if process.Args == "" { + process.Args = strings.Join(fh.ResolveProcessArgv(ev, process), " ") + } + return process.Args } // ResolveProcessArgsScrubbed resolves the args of the event func (fh *EBPFFieldHandlers) ResolveProcessArgsScrubbed(ev *model.Event, process *model.Process) string { - return strings.Join(fh.ResolveProcessArgvScrubbed(ev, process), " ") + if process.ArgsScrubbed == "" { + process.ArgsScrubbed = strings.Join(fh.ResolveProcessArgvScrubbed(ev, process), " ") + } + return process.ArgsScrubbed } // ResolveProcessArgv resolves the unscrubbed args of the process as an array. Use with caution. 
@@ -552,7 +561,11 @@ func (fh *EBPFFieldHandlers) ResolveCGroupManager(ev *model.Event, _ *model.CGro func (fh *EBPFFieldHandlers) ResolveContainerID(ev *model.Event, e *model.ContainerContext) string { if len(e.ContainerID) == 0 { if entry, _ := fh.ResolveProcessCacheEntry(ev); entry != nil { - e.ContainerID = containerutils.ContainerID(entry.ContainerID) + if entry.CGroup.CGroupFlags.IsContainer() { + e.ContainerID = containerutils.ContainerID(entry.ContainerID) + } else { + e.ContainerID = "" + } return string(e.ContainerID) } } diff --git a/pkg/security/probe/file_hasher.go b/pkg/security/probe/file_hasher.go new file mode 100644 index 0000000000000..d99f32a32d0f3 --- /dev/null +++ b/pkg/security/probe/file_hasher.go @@ -0,0 +1,117 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +// Package probe holds probe related files +package probe + +import ( + "slices" + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/resolvers/hash" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/utils" +) + +const ( + defaultHashActionFlushDelay = 5 * time.Second +) + +// FileHasher defines a file hasher structure +type FileHasher struct { + sync.Mutex + + cfg *config.Config + resolver *hash.Resolver + + pendingReports []*HashActionReport +} + +// NewFileHasher returns a new FileHasher +func NewFileHasher(cfg *config.Config, resolver *hash.Resolver) *FileHasher { + return &FileHasher{ + cfg: cfg, + resolver: resolver, + } +} + +// AddPendingReports add a pending reports +func (p *FileHasher) AddPendingReports(report *HashActionReport) { + p.Lock() + defer p.Unlock() + 
+ p.pendingReports = append(p.pendingReports, report) +} + +func (p *FileHasher) hash(report *HashActionReport) { + p.resolver.HashFileEvent(report.eventType, report.crtID, report.pid, &report.fileEvent) + report.resolved = true +} + +// FlushPendingReports flush pending reports +func (p *FileHasher) FlushPendingReports() { + p.Lock() + defer p.Unlock() + + p.pendingReports = slices.DeleteFunc(p.pendingReports, func(report *HashActionReport) bool { + report.Lock() + defer report.Unlock() + + if time.Now().After(report.seenAt.Add(defaultHashActionFlushDelay)) { + report.Trigger = HashTriggerTimeout + p.hash(report) + return true + } + return false + }) +} + +// HandleProcessExited handles process exited events +func (p *FileHasher) HandleProcessExited(event *model.Event) { + p.Lock() + defer p.Unlock() + + p.pendingReports = slices.DeleteFunc(p.pendingReports, func(report *HashActionReport) bool { + report.Lock() + defer report.Unlock() + + if report.pid == event.ProcessContext.Pid { + report.Trigger = HashTriggerProcessExit + p.hash(report) + return true + } + return false + }) +} + +// HashAndReport hash and report +func (p *FileHasher) HashAndReport(rule *rules.Rule, ev *model.Event) { + eventType := ev.GetEventType() + + // only open events are supported + if eventType != model.FileOpenEventType && eventType != model.ExecEventType { + return + } + + if ev.ProcessContext.Pid == utils.Getpid() { + return + } + + report := &HashActionReport{ + rule: rule, + pid: ev.ProcessContext.Pid, + crtID: ev.ProcessContext.ContainerID, + seenAt: ev.Timestamp, + fileEvent: ev.Open.File, + eventType: eventType, + } + ev.ActionReports = append(ev.ActionReports, report) + p.pendingReports = append(p.pendingReports, report) +} diff --git a/pkg/security/probe/kfilters/approvers.go b/pkg/security/probe/kfilters/approvers.go index f902b9ccd0e83..1314a54e958bb 100644 --- a/pkg/security/probe/kfilters/approvers.go +++ b/pkg/security/probe/kfilters/approvers.go @@ -17,19 +17,29 @@ import 
( "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -// BasenameApproverKernelMapName defines the basename approver kernel map name -const BasenameApproverKernelMapName = "basename_approvers" +const ( + // BasenameApproverKernelMapName defines the basename approver kernel map name + BasenameApproverKernelMapName = "basename_approvers" + + // BasenameApproverType is the type of basename approver + BasenameApproverType = "basename" + // FlagApproverType is the type of flags approver + FlagApproverType = "flag" + // AUIDApproverType is the type of auid approver + AUIDApproverType = "auid" +) -type kfiltersGetter func(approvers rules.Approvers) (ActiveKFilters, error) +type kfiltersGetter func(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) // KFilterGetters var contains all the kfilter getters var KFilterGetters = make(map[eval.EventType]kfiltersGetter) func newBasenameKFilter(tableName string, eventType model.EventType, basename string) (activeKFilter, error) { return &eventMaskEntry{ - tableName: tableName, - tableKey: ebpf.NewStringMapItem(basename, BasenameFilterSize), - eventMask: uint64(1 << (eventType - 1)), + approverType: BasenameApproverType, + tableName: tableName, + tableKey: ebpf.NewStringMapItem(basename, BasenameFilterSize), + eventMask: uint64(1 << (eventType - 1)), }, nil } @@ -59,10 +69,11 @@ func newKFilterWithUInt32Flags(tableName string, flags ...uint32) (activeKFilter } return &arrayEntry{ - tableName: tableName, - index: uint32(0), - value: ebpf.NewUint32FlagsMapItem(bitmask), - zeroValue: ebpf.Uint32FlagsZeroMapItem, + approverType: FlagApproverType, + tableName: tableName, + index: uint32(0), + value: ebpf.NewUint32FlagsMapItem(bitmask), + zeroValue: ebpf.Uint32FlagsZeroMapItem, }, nil } @@ -73,10 +84,11 @@ func newKFilterWithUInt64Flags(tableName string, flags ...uint64) (activeKFilter } return &arrayEntry{ - tableName: tableName, - index: uint32(0), - value: ebpf.NewUint64FlagsMapItem(bitmask), - zeroValue: 
ebpf.Uint64FlagsZeroMapItem, + approverType: FlagApproverType, + tableName: tableName, + index: uint32(0), + value: ebpf.NewUint64FlagsMapItem(bitmask), + zeroValue: ebpf.Uint64FlagsZeroMapItem, }, nil } @@ -92,7 +104,9 @@ func getEnumsKFilters(tableName string, enums ...uint64) (activeKFilter, error) return newKFilterWithUInt64Flags(tableName, flags...) } -func getBasenameKFilters(eventType model.EventType, field string, approvers rules.Approvers) ([]activeKFilter, error) { +func getBasenameKFilters(eventType model.EventType, field string, approvers rules.Approvers) ([]activeKFilter, []eval.Field, error) { + var fieldHandled []eval.Field + stringValues := func(fvs rules.FilterValues) []string { var values []string for _, v := range fvs { @@ -112,63 +126,66 @@ func getBasenameKFilters(eventType model.EventType, field string, approvers rule case prefix + model.NameSuffix: activeKFilters, err := newBasenameKFilters(BasenameApproverKernelMapName, eventType, stringValues(values)...) if err != nil { - return nil, err + return nil, nil, err } kfilters = append(kfilters, activeKFilters...) 
- + fieldHandled = append(fieldHandled, field) case prefix + model.PathSuffix: for _, value := range stringValues(values) { basename := path.Base(value) activeKFilter, err := newBasenameKFilter(BasenameApproverKernelMapName, eventType, basename) if err != nil { - return nil, err + return nil, nil, err } kfilters = append(kfilters, activeKFilter) } + fieldHandled = append(fieldHandled, field) } } - return kfilters, nil + return kfilters, fieldHandled, nil } -func basenameKFilterGetter(event model.EventType) kfiltersGetter { - return func(approvers rules.Approvers) (ActiveKFilters, error) { - basenameKFilters, err := getBasenameKFilters(event, "file", approvers) - if err != nil { - return nil, err - } - return newActiveKFilters(basenameKFilters...), nil - } -} +func fimKFiltersGetter(eventType model.EventType, fields []eval.Field) kfiltersGetter { + return func(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + var ( + kfilters []activeKFilter + fieldHandled []eval.Field + ) -func basenameskfiltersGetter(event model.EventType, field1, field2 string) kfiltersGetter { - return func(approvers rules.Approvers) (ActiveKFilters, error) { - basenameKFilters, err := getBasenameKFilters(event, field1, approvers) - if err != nil { - return nil, err + for _, field := range fields { + kfilter, handled, err := getBasenameKFilters(eventType, field, approvers) + if err != nil { + return nil, nil, err + } + kfilters = append(kfilters, kfilter...) + fieldHandled = append(fieldHandled, handled...) } - basenameKFilters2, err := getBasenameKFilters(event, field2, approvers) + + kfs, handled, err := getProcessKFilters(model.FileOpenEventType, approvers) if err != nil { - return nil, err + return nil, nil, err } - basenameKFilters = append(basenameKFilters, basenameKFilters2...) - return newActiveKFilters(basenameKFilters...), nil + kfilters = append(kfilters, kfs...) + fieldHandled = append(fieldHandled, handled...) 
+ + return newActiveKFilters(kfilters...), fieldHandled, nil } } func init() { - KFilterGetters["chmod"] = basenameKFilterGetter(model.FileChmodEventType) - KFilterGetters["chown"] = basenameKFilterGetter(model.FileChownEventType) - KFilterGetters["link"] = basenameskfiltersGetter(model.FileLinkEventType, "file", "file.destination") - KFilterGetters["mkdir"] = basenameKFilterGetter(model.FileMkdirEventType) - KFilterGetters["open"] = openOnNewApprovers - KFilterGetters["rename"] = basenameskfiltersGetter(model.FileRenameEventType, "file", "file.destination") - KFilterGetters["rmdir"] = basenameKFilterGetter(model.FileRmdirEventType) - KFilterGetters["unlink"] = basenameKFilterGetter(model.FileUnlinkEventType) - KFilterGetters["utimes"] = basenameKFilterGetter(model.FileUtimesEventType) - KFilterGetters["mmap"] = mmapKFilters - KFilterGetters["mprotect"] = mprotectKFilters - KFilterGetters["splice"] = spliceKFilters - KFilterGetters["chdir"] = basenameKFilterGetter(model.FileChdirEventType) - KFilterGetters["bpf"] = bpfKFilters + KFilterGetters["chmod"] = fimKFiltersGetter(model.FileChmodEventType, []eval.Field{"file"}) + KFilterGetters["chown"] = fimKFiltersGetter(model.FileChownEventType, []eval.Field{"file"}) + KFilterGetters["link"] = fimKFiltersGetter(model.FileLinkEventType, []eval.Field{"file", "file.destination"}) + KFilterGetters["mkdir"] = fimKFiltersGetter(model.FileMkdirEventType, []eval.Field{"file"}) + KFilterGetters["open"] = openKFiltersGetter + KFilterGetters["rename"] = fimKFiltersGetter(model.FileRenameEventType, []eval.Field{"file", "file.destination"}) + KFilterGetters["rmdir"] = fimKFiltersGetter(model.FileRmdirEventType, []eval.Field{"file"}) + KFilterGetters["unlink"] = fimKFiltersGetter(model.FileUnlinkEventType, []eval.Field{"file"}) + KFilterGetters["utimes"] = fimKFiltersGetter(model.FileUtimesEventType, []eval.Field{"file"}) + KFilterGetters["mmap"] = mmapKFiltersGetter + KFilterGetters["mprotect"] = mprotectKFiltersGetter + 
KFilterGetters["splice"] = spliceKFiltersGetter + KFilterGetters["chdir"] = fimKFiltersGetter(model.FileChdirEventType, []eval.Field{"file"}) + KFilterGetters["bpf"] = bpfKFiltersGetter } diff --git a/pkg/security/probe/kfilters/approvers_test.go b/pkg/security/probe/kfilters/approvers_test.go index b0d867f88f14b..0298b2b972f67 100644 --- a/pkg/security/probe/kfilters/approvers_test.go +++ b/pkg/security/probe/kfilters/approvers_test.go @@ -11,6 +11,7 @@ package kfilters import ( "testing" + "github.com/DataDog/datadog-agent/pkg/security/ebpf" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -82,3 +83,116 @@ func TestApproverGlob(t *testing.T) { t.Fatalf("expected approver not found: %v", values) } } + +func TestApproverFlags(t *testing.T) { + enabled := map[eval.EventType]bool{"*": true} + + ruleOpts, evalOpts := rules.NewBothOpts(enabled) + + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) + rules.AddTestRuleExpr(t, rs, `open.flags & (O_SYNC | O_NOCTTY) > 0`) + capabilities, exists := allCapabilities["open"] + if !exists { + t.Fatal("no capabilities for open") + } + approvers, err := rs.GetEventTypeApprovers("open", capabilities) + if err != nil { + t.Fatal(err) + } + if values, exists := approvers["open.flags"]; !exists || len(values) != 1 { + t.Fatalf("expected approver not found: %v", values) + } +} + +func TestApproverWildcardBasename(t *testing.T) { + enabled := map[eval.EventType]bool{"*": true} + + ruleOpts, evalOpts := rules.NewBothOpts(enabled) + + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) + rules.AddTestRuleExpr(t, rs, `open.file.path =~ "/var/run/secrets/*"`) + capabilities, exists := allCapabilities["open"] + if !exists { + t.Fatal("no capabilities for open") + } + approvers, err := rs.GetEventTypeApprovers("open", capabilities) + if err != nil { + 
t.Fatal(err) + } + if values, exists := approvers["open.file.path"]; exists || len(values) != 0 { + t.Fatalf("unexpected approver found: %v", values) + } +} + +func TestApproverAUIDRange(t *testing.T) { + enabled := map[eval.EventType]bool{"*": true} + + ruleOpts, evalOpts := rules.NewBothOpts(enabled) + + assert := func(t *testing.T, ruleDefs []string, min, max uint32) { + t.Helper() + + rs := rules.NewRuleSet(&model.Model{}, newFakeEvent, ruleOpts, evalOpts) + rules.AddTestRuleExpr(t, rs, ruleDefs...) + + capabilities, exists := allCapabilities["open"] + if !exists { + t.Fatal("no capabilities for open") + } + approvers, err := rs.GetEventTypeApprovers("open", capabilities) + if err != nil { + t.Fatal(err) + } + if values, exists := approvers["process.auid"]; !exists { + t.Fatalf("expected approver not found: %+v", values) + } + + kfilters, _, err := KFilterGetters["open"](approvers) + if err != nil { + t.Fatal(err) + } + if len(kfilters) != 1 { + + if min != 0 && max != 0 { + t.Fatalf("expected kfilter not found: %+v", kfilters) + } else { + // no kfilter expected + return + } + } + + key := makeEntryKey(auidRangeApproversTable, model.FileOpenEventType) + entry := kfilters[key] + if entry == nil { + t.Fatalf("expected kfilter not found: %+v => %+v", key, kfilters) + } + + value := entry.(*hashEntry).value.(*ebpf.UInt32RangeMapItem) + if value.Min != min || value.Max != max { + t.Fatalf("expected kfilter not found: %+v => %+v", kfilters, value) + } + } + + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid > 1000 && process.auid < 2000`}, 0, maxAUID) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid > 1000`}, 1001, maxAUID) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid < 1000`}, 0, 999) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid >= 1000 && process.auid <= 2000`}, 0, maxAUID) + assert(t, []string{`open.file.path =~ "/tmp/*" && process.auid >= 1000`}, 1000, maxAUID) + assert(t, 
[]string{`open.file.path =~ "/tmp/*" && process.auid <= 1000`}, 0, 1000) + + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid > 1000`, + `open.file.path =~ "/tmp/*" && process.auid < 500`, + }, 0, maxAUID) + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid >= 1000`, + `open.file.path =~ "/tmp/*" && process.auid > 1500`, + }, 1000, maxAUID) + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid < 1000`, + `open.file.path =~ "/tmp/*" && process.auid < 500`, + }, 0, 999) + assert(t, []string{ + `open.file.path =~ "/tmp/*" && process.auid != AUDIT_AUID_UNSET`, + }, 0, maxAUID) +} diff --git a/pkg/security/probe/kfilters/bpf.go b/pkg/security/probe/kfilters/bpf.go index df39a91182744..3b17b394a3097 100644 --- a/pkg/security/probe/kfilters/bpf.go +++ b/pkg/security/probe/kfilters/bpf.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -22,20 +20,22 @@ var bpfCapabilities = rules.FieldCapabilities{ }, } -func bpfKFilters(approvers rules.Approvers) (ActiveKFilters, error) { - var bpfKFilters []activeKFilter +func bpfKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + var ( + kfilters []activeKFilter + fieldHandled []eval.Field + ) for field, values := range approvers { switch field { case "bpf.cmd": kfilter, err := getEnumsKFilters("bpf_cmd_approvers", uintValues[uint64](values)...) 
if err != nil { - return nil, err + return nil, nil, err } - bpfKFilters = append(bpfKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(bpfKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/capabilities_linux.go b/pkg/security/probe/kfilters/capabilities_linux.go index 187273f6066ce..75f6d00cf5036 100644 --- a/pkg/security/probe/kfilters/capabilities_linux.go +++ b/pkg/security/probe/kfilters/capabilities_linux.go @@ -29,56 +29,46 @@ func validateBasenameFilter(value rules.FilterValue) bool { return false } -func oneBasenameCapabilities(event string) rules.FieldCapabilities { - return rules.FieldCapabilities{ - { - Field: event + ".file.path", - TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - }, - { - Field: event + ".file.name", - TypeBitmask: eval.ScalarValueType, - }, +func buildBasenameCapabilities(event string, fields ...string) rules.FieldCapabilities { + var caps rules.FieldCapabilities + + for _, field := range fields { + caps = append(caps, rules.FieldCapabilities{ + { + Field: event + "." + field + ".path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, + }, + { + Field: event + "." + field + ".name", + TypeBitmask: eval.ScalarValueType, + }, + }...) } + return caps } -func twoBasenameCapabilities(event string, field1, field2 string) rules.FieldCapabilities { - return rules.FieldCapabilities{ - { - Field: event + "." + field1 + ".path", - TypeBitmask: eval.ScalarValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - }, - { - Field: event + "." + field1 + ".name", - TypeBitmask: eval.ScalarValueType, - }, - { - Field: event + "." 
+ field2 + ".path", - TypeBitmask: eval.ScalarValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - }, - { - Field: event + "." + field2 + ".name", - TypeBitmask: eval.ScalarValueType, - }, +func mergeCapabilities(caps ...rules.FieldCapabilities) rules.FieldCapabilities { + var result rules.FieldCapabilities + for _, c := range caps { + result = append(result, c...) } + return result } func init() { - allCapabilities["chmod"] = oneBasenameCapabilities("chmod") - allCapabilities["chown"] = oneBasenameCapabilities("chown") - allCapabilities["link"] = twoBasenameCapabilities("link", "file", "file.destination") - allCapabilities["mkdir"] = oneBasenameCapabilities("mkdir") + allCapabilities["chmod"] = mergeCapabilities(buildBasenameCapabilities("chmod", "file"), processCapabilities) + allCapabilities["chown"] = mergeCapabilities(buildBasenameCapabilities("chown", "file"), processCapabilities) + allCapabilities["link"] = mergeCapabilities(buildBasenameCapabilities("link", "file", "file.destination"), processCapabilities) + allCapabilities["mkdir"] = mergeCapabilities(buildBasenameCapabilities("mkdir", "file"), processCapabilities) allCapabilities["open"] = openCapabilities - allCapabilities["rename"] = twoBasenameCapabilities("rename", "file", "file.destination") - allCapabilities["rmdir"] = oneBasenameCapabilities("rmdir") - allCapabilities["unlink"] = oneBasenameCapabilities("unlink") - allCapabilities["utimes"] = oneBasenameCapabilities("utimes") + allCapabilities["rename"] = mergeCapabilities(buildBasenameCapabilities("rename", "file", "file.destination"), processCapabilities) + allCapabilities["rmdir"] = mergeCapabilities(buildBasenameCapabilities("rmdir", "file"), processCapabilities) + allCapabilities["unlink"] = mergeCapabilities(buildBasenameCapabilities("unlink", "file"), processCapabilities) + allCapabilities["utimes"] = mergeCapabilities(buildBasenameCapabilities("utimes", "file"), processCapabilities) allCapabilities["mmap"] = 
mmapCapabilities allCapabilities["mprotect"] = mprotectCapabilities allCapabilities["splice"] = spliceCapabilities - allCapabilities["chdir"] = oneBasenameCapabilities("chdir") + allCapabilities["chdir"] = mergeCapabilities(buildBasenameCapabilities("chdir", "file"), processCapabilities) allCapabilities["bpf"] = bpfCapabilities } diff --git a/pkg/security/probe/kfilters/kfilters.go b/pkg/security/probe/kfilters/kfilters.go index 596d0fbd06772..b7288e6775a1e 100644 --- a/pkg/security/probe/kfilters/kfilters.go +++ b/pkg/security/probe/kfilters/kfilters.go @@ -12,7 +12,7 @@ type FilterPolicy struct { Mode PolicyMode } -// Bytes returns the binary representation of a FilterPolicy -func (f *FilterPolicy) Bytes() ([]byte, error) { +// MarshalBinary returns the binary representation of a FilterPolicy +func (f *FilterPolicy) MarshalBinary() ([]byte, error) { return []byte{uint8(f.Mode)}, nil } diff --git a/pkg/security/probe/kfilters/kfilters_bpf.go b/pkg/security/probe/kfilters/kfilters_bpf.go index da8a0c4251c3b..1714cb24439a4 100644 --- a/pkg/security/probe/kfilters/kfilters_bpf.go +++ b/pkg/security/probe/kfilters/kfilters_bpf.go @@ -21,6 +21,7 @@ type activeKFilter interface { Apply(*manager.Manager) error Key() interface{} GetTableName() string + GetApproverType() string } // ActiveKFilters defines kfilter map @@ -66,24 +67,43 @@ type entryKey struct { key interface{} } +func makeEntryKey(tableName string, tableKey interface{}) entryKey { + mb, ok := tableKey.(encoding.BinaryMarshaler) + if !ok { + return entryKey{ + tableName: tableName, + key: tableKey, + } + } + + data, _ := mb.MarshalBinary() + + return entryKey{ + tableName: tableName, + key: hex.EncodeToString(data), + } +} + type arrayEntry struct { - tableName string - index interface{} - value interface{} - zeroValue interface{} + approverType string + tableName string + index interface{} + value interface{} + zeroValue interface{} } func (e *arrayEntry) Key() interface{} { - return entryKey{ - tableName: 
e.tableName, - key: e.index, - } + return makeEntryKey(e.tableName, e.index) } func (e *arrayEntry) GetTableName() string { return e.tableName } +func (e *arrayEntry) GetApproverType() string { + return e.approverType +} + func (e *arrayEntry) Remove(manager *manager.Manager) error { table, err := managerhelper.Map(manager, e.tableName) if err != nil { @@ -101,32 +121,24 @@ func (e *arrayEntry) Apply(manager *manager.Manager) error { } type eventMaskEntry struct { - tableName string - tableKey interface{} - eventMask uint64 + approverType string + tableName string + tableKey interface{} + eventMask uint64 } func (e *eventMaskEntry) Key() interface{} { - mb, ok := e.tableKey.(encoding.BinaryMarshaler) - if !ok { - return entryKey{ - tableName: e.tableName, - key: e.tableKey, - } - } - - data, _ := mb.MarshalBinary() - - return entryKey{ - tableName: e.tableName, - key: hex.EncodeToString(data), - } + return makeEntryKey(e.tableName, e.tableKey) } func (e *eventMaskEntry) GetTableName() string { return e.tableName } +func (e *eventMaskEntry) GetApproverType() string { + return e.approverType +} + func (e *eventMaskEntry) Remove(manager *manager.Manager) error { table, err := managerhelper.Map(manager, e.tableName) if err != nil { @@ -154,3 +166,38 @@ func (e *eventMaskEntry) Apply(manager *manager.Manager) error { } return table.Put(e.tableKey, eventMask) } + +type hashEntry struct { + approverType string + tableName string + tableKey interface{} + value interface{} +} + +func (e *hashEntry) Key() interface{} { + return makeEntryKey(e.tableName, e.tableKey) +} + +func (e *hashEntry) GetTableName() string { + return e.tableName +} + +func (e *hashEntry) GetApproverType() string { + return e.approverType +} + +func (e *hashEntry) Remove(manager *manager.Manager) error { + table, err := managerhelper.Map(manager, e.tableName) + if err != nil { + return err + } + return table.Delete(e.tableKey) +} + +func (e *hashEntry) Apply(manager *manager.Manager) error { + table, 
err := managerhelper.Map(manager, e.tableName) + if err != nil { + return err + } + return table.Put(e.tableKey, e.value) +} diff --git a/pkg/security/probe/kfilters/mmap.go b/pkg/security/probe/kfilters/mmap.go index 130ed05e8a8a6..e17f2e60809be 100644 --- a/pkg/security/probe/kfilters/mmap.go +++ b/pkg/security/probe/kfilters/mmap.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -36,30 +34,29 @@ var mmapCapabilities = rules.FieldCapabilities{ }, } -func mmapKFilters(approvers rules.Approvers) (ActiveKFilters, error) { - mmapKFilters, err := getBasenameKFilters(model.MMapEventType, "file", approvers) +func mmapKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + kfilters, fieldHandled, err := getBasenameKFilters(model.MMapEventType, "file", approvers) if err != nil { - return nil, err + return nil, nil, err } for field, values := range approvers { switch field { - case "mmap.file.name", "mmap.file.path": // already handled by getBasenameKFilters case "mmap.flags": kfilter, err := getFlagsKFilter("mmap_flags_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - mmapKFilters = append(mmapKFilters, kfilter) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) case "mmap.protection": kfilter, err := getFlagsKFilter("mmap_protection_approvers", uintValues[uint32](values)...) 
if err != nil { - return nil, err + return nil, nil, err } - mmapKFilters = append(mmapKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(mmapKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/mprotect.go b/pkg/security/probe/kfilters/mprotect.go index dc1c10be3532b..e721683c748cc 100644 --- a/pkg/security/probe/kfilters/mprotect.go +++ b/pkg/security/probe/kfilters/mprotect.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -26,26 +24,29 @@ var mprotectCapabilities = rules.FieldCapabilities{ }, } -func mprotectKFilters(approvers rules.Approvers) (ActiveKFilters, error) { - var mprotectKFilters []activeKFilter +func mprotectKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + var ( + kfilters []activeKFilter + fieldHandled []eval.Field + ) for field, values := range approvers { switch field { case "mprotect.vm_protection": kfilter, err := getFlagsKFilter("mprotect_vm_protection_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - mprotectKFilters = append(mprotectKFilters, kfilter) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) case "mprotect.req_protection": kfilter, err := getFlagsKFilter("mprotect_req_protection_approvers", uintValues[uint32](values)...) 
if err != nil { - return nil, err + return nil, nil, err } - mprotectKFilters = append(mprotectKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(mprotectKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/open.go b/pkg/security/probe/kfilters/open.go index 5a4ad806bb8ec..412524252d119 100644 --- a/pkg/security/probe/kfilters/open.go +++ b/pkg/security/probe/kfilters/open.go @@ -9,52 +9,56 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) -var openCapabilities = rules.FieldCapabilities{ - { - Field: "open.flags", - TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, - }, - { - Field: "open.file.path", - TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, - ValidateFnc: validateBasenameFilter, - FilterWeight: 15, +var openCapabilities = mergeCapabilities( + rules.FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + }, + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType | eval.GlobValueType, + ValidateFnc: validateBasenameFilter, + FilterWeight: 15, + }, + { + Field: "open.file.name", + TypeBitmask: eval.ScalarValueType, + FilterWeight: 300, + }, }, - { - Field: "open.file.name", - TypeBitmask: eval.ScalarValueType, - FilterWeight: 10, - }, -} + processCapabilities, +) -func openOnNewApprovers(approvers rules.Approvers) (ActiveKFilters, error) { - openKFilters, err := getBasenameKFilters(model.FileOpenEventType, "file", approvers) +func openKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + kfilters, fieldHandled, err := 
getBasenameKFilters(model.FileOpenEventType, "file", approvers) if err != nil { - return nil, err + return nil, nil, err } for field, values := range approvers { switch field { - case "open.file.name", "open.file.path": // already handled by getBasenameKFilters case "open.flags": kfilter, err := getFlagsKFilter("open_flags_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - openKFilters = append(openKFilters, kfilter) - - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } + } + kfs, handled, err := getProcessKFilters(model.FileOpenEventType, approvers) + if err != nil { + return nil, nil, err } + kfilters = append(kfilters, kfs...) + fieldHandled = append(fieldHandled, handled...) - return newActiveKFilters(openKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/kfilters/process.go b/pkg/security/probe/kfilters/process.go new file mode 100644 index 0000000000000..14d538765d882 --- /dev/null +++ b/pkg/security/probe/kfilters/process.go @@ -0,0 +1,95 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +// Package kfilters holds kfilters related files +package kfilters + +import ( + "github.com/DataDog/datadog-agent/pkg/security/ebpf" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + +const ( + auidField = "process.auid" + maxAUID = model.AuditUIDUnset - 1 + auidApproversTable = "auid_approvers" + auidRangeApproversTable = "auid_range_approvers" +) + +var processCapabilities = rules.FieldCapabilities{ + { + Field: "process.auid", + TypeBitmask: eval.ScalarValueType | eval.RangeValueType, + FilterMode: rules.ApproverOnlyMode, + RangeFilterValue: &rules.RangeFilterValue{Min: 0, Max: maxAUID}, + FilterWeight: 100, + // convert `!= model.AuditUIDUnset`` to the max range + HandleNotApproverValue: func(fieldValueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) { + if fieldValueType != eval.ScalarValueType { + return fieldValueType, value, false + } + + if i, ok := value.(int); ok && uint32(i) == model.AuditUIDUnset { + return eval.RangeValueType, rules.RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, true + } + + return fieldValueType, value, false + }, + }, +} + +func getProcessKFilters(eventType model.EventType, approvers rules.Approvers) ([]activeKFilter, []eval.Field, error) { + var fieldHandled []eval.Field + + values, exists := approvers[auidField] + if !exists { + return nil, nil, nil + } + + var ( + kfilters []activeKFilter + auidRange = rules.RangeFilterValue{Min: 0, Max: maxAUID} + auidRangeSet bool + ) + + for _, value := range values { + switch value.Type { + case eval.ScalarValueType: + kfilters = append(kfilters, &eventMaskEntry{ + tableName: auidApproversTable, + tableKey: ebpf.Uint32MapItem(value.Value.(int)), + eventMask: uint64(1 << (eventType - 1)), + }) + case eval.RangeValueType: + min, max := value.Value.(rules.RangeFilterValue).Min, 
value.Value.(rules.RangeFilterValue).Max + if !auidRangeSet || auidRange.Min > min { + auidRange.Min = min + } + if !auidRangeSet || auidRange.Max < max { + auidRange.Max = max + } + auidRangeSet = true + } + } + + if auidRangeSet { + kfilters = append(kfilters, &hashEntry{ + approverType: AUIDApproverType, + tableName: auidRangeApproversTable, + tableKey: eventType, + value: ebpf.NewUInt32RangeMapItem(uint32(auidRange.Min), uint32(auidRange.Max)), + }) + } + + if len(kfilters) > 0 { + fieldHandled = append(fieldHandled, auidField) + } + + return kfilters, fieldHandled, nil +} diff --git a/pkg/security/probe/kfilters/splice.go b/pkg/security/probe/kfilters/splice.go index 2d6e9a4573ec8..bf49f7cb3b024 100644 --- a/pkg/security/probe/kfilters/splice.go +++ b/pkg/security/probe/kfilters/splice.go @@ -9,8 +9,6 @@ package kfilters import ( - "fmt" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" @@ -36,30 +34,29 @@ var spliceCapabilities = rules.FieldCapabilities{ }, } -func spliceKFilters(approvers rules.Approvers) (ActiveKFilters, error) { - spliceKFilters, err := getBasenameKFilters(model.SpliceEventType, "file", approvers) +func spliceKFiltersGetter(approvers rules.Approvers) (ActiveKFilters, []eval.Field, error) { + kfilters, fieldHandled, err := getBasenameKFilters(model.SpliceEventType, "file", approvers) if err != nil { - return nil, err + return nil, nil, err } for field, values := range approvers { switch field { - case "splice.file.name", "splice.file.path": // already handled by getBasenameKFilters case "splice.pipe_entry_flag": kfilter, err := getFlagsKFilter("splice_entry_flags_approvers", uintValues[uint32](values)...) 
if err != nil { - return nil, err + return nil, nil, err } - spliceKFilters = append(spliceKFilters, kfilter) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) case "splice.pipe_exit_flag": kfilter, err := getFlagsKFilter("splice_exit_flags_approvers", uintValues[uint32](values)...) if err != nil { - return nil, err + return nil, nil, err } - spliceKFilters = append(spliceKFilters, kfilter) - default: - return nil, fmt.Errorf("unknown field '%s'", field) + kfilters = append(kfilters, kfilter) + fieldHandled = append(fieldHandled, field) } } - return newActiveKFilters(spliceKFilters...), nil + return newActiveKFilters(kfilters...), fieldHandled, nil } diff --git a/pkg/security/probe/monitors/approver/approver_monitor.go b/pkg/security/probe/monitors/approver/approver_monitor.go index 7f6443d1df63b..b377bcab85705 100644 --- a/pkg/security/probe/monitors/approver/approver_monitor.go +++ b/pkg/security/probe/monitors/approver/approver_monitor.go @@ -14,6 +14,7 @@ import ( manager "github.com/DataDog/ebpf-manager" lib "github.com/cilium/ebpf" + "github.com/DataDog/datadog-agent/pkg/security/probe/kfilters" "github.com/DataDog/datadog-agent/pkg/security/probe/managerhelper" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -28,6 +29,7 @@ import ( type Stats struct { EventApprovedByBasename uint64 EventApprovedByFlag uint64 + EventApprovedByAUID uint64 } // Monitor defines an approver monitor @@ -58,6 +60,7 @@ func (d *Monitor) SendStats() error { for _, stat := range statsAcrossAllCPUs { statsByEventType[eventType].EventApprovedByBasename += stat.EventApprovedByBasename statsByEventType[eventType].EventApprovedByFlag += stat.EventApprovedByFlag + statsByEventType[eventType].EventApprovedByAUID += stat.EventApprovedByAUID } } @@ -68,16 +71,22 @@ func (d *Monitor) SendStats() error { eventTypeTag := fmt.Sprintf("event_type:%s", model.EventType(eventType).String()) tagsForBasenameApprovedEvents := []string{ - 
"approver_type:basename", + "approver_type:" + kfilters.BasenameApproverType, eventTypeTag, } + _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByBasename), tagsForBasenameApprovedEvents, 1.0) + tagsForFlagApprovedEvents := []string{ - "approver_type:flag", + "approver_type:" + kfilters.FlagApproverType, eventTypeTag, } - - _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByBasename), tagsForBasenameApprovedEvents, 1.0) _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByFlag), tagsForFlagApprovedEvents, 1.0) + + tagsForAUIDApprovedEvents := []string{ + "approver_type:" + kfilters.AUIDApproverType, + eventTypeTag, + } + _ = d.statsdClient.Count(metrics.MetricEventApproved, int64(stats.EventApprovedByAUID), tagsForAUIDApprovedEvents, 1.0) } for i := uint32(0); i != uint32(model.LastApproverEventType); i++ { _ = buffer.Put(i, d.statsZero) diff --git a/pkg/security/probe/opts_others.go b/pkg/security/probe/opts_others.go new file mode 100644 index 0000000000000..59cf8dccb1453 --- /dev/null +++ b/pkg/security/probe/opts_others.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !linux && !windows + +// Package probe holds probe related files +package probe + +// Opts defines some probe options +type Opts struct { + // DontDiscardRuntime do not discard the runtime. 
Mostly used by functional tests + DontDiscardRuntime bool +} diff --git a/pkg/security/probe/opts_windows.go b/pkg/security/probe/opts_windows.go index 7558f1758456b..8d079df75832f 100644 --- a/pkg/security/probe/opts_windows.go +++ b/pkg/security/probe/opts_windows.go @@ -14,6 +14,9 @@ import ( // Opts defines some probe options type Opts struct { + // DontDiscardRuntime do not discard the runtime. Mostly used by functional tests + DontDiscardRuntime bool + // StatsdClient to be used for probe stats StatsdClient statsd.ClientInterface diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index ca88215596b59..f90e7f883bd73 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -32,7 +32,6 @@ import ( "github.com/DataDog/ebpf-manager/tracefs" "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/config/env" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" @@ -149,6 +148,9 @@ type EBPFProbe struct { // On demand onDemandManager *OnDemandProbesManager onDemandRateLimiter *rate.Limiter + + // hash action + fileHasher *FileHasher } // GetProfileManager returns the Profile Managers @@ -856,8 +858,9 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { // update mount pid mapping p.Resolvers.MountResolver.DelPid(event.Exit.Pid) - // update kill action reports + // update action reports p.processKiller.HandleProcessExited(event) + p.fileHasher.HandleProcessExited(event) case model.SetuidEventType: // the process context may be incorrect, do not modify it if event.Error != nil { @@ -1001,7 +1004,8 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { } case model.IMDSEventType: if _, err = event.IMDS.UnmarshalBinary(data[offset:]); err != nil { - seclog.Errorf("failed to decode IMDS event: %s (offset %d, len %d)", err, 
offset, len(data)) + // it's very possible we can't parse the IMDS body, as such let's put it as debug for now + seclog.Debugf("failed to decode IMDS event: %s (offset %d, len %d)", err, offset, len(data)) return } defer p.Resolvers.ProcessResolver.UpdateAWSSecurityCredentials(event.PIDContext.Pid, event) @@ -1037,8 +1041,9 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { p.Resolvers.ProcessResolver.DeleteEntry(event.ProcessContext.Pid, event.ResolveEventTime()) } - // flush pending kill actions + // flush pending actions p.processKiller.FlushPendingReports() + p.fileHasher.FlushPendingReports() } // AddDiscarderPushedCallback add a callback to the list of func that have to be called when a discarder is pushed to kernel @@ -1103,16 +1108,20 @@ func (p *EBPFProbe) ApplyFilterPolicy(eventType eval.EventType, mode kfilters.Po return table.Put(ebpf.Uint32MapItem(et), policy) } -// SetApprovers applies approvers and removes the unused ones -func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Approvers) error { +// setApprovers applies approvers and removes the unused ones +func (p *EBPFProbe) setApprovers(eventType eval.EventType, approvers rules.Approvers) error { kfiltersGetter, exists := kfilters.KFilterGetters[eventType] if !exists { return nil } - newKFilters, err := kfiltersGetter(approvers) + newKFilters, fieldHandled, err := kfiltersGetter(approvers) if err != nil { - seclog.Errorf("Error while adding approvers fallback in-kernel policy to `%s` for `%s`: %s", kfilters.PolicyModeAccept, eventType, err) + return err + } + + if len(approvers) != len(fieldHandled) { + return fmt.Errorf("all the approvers should be handled : %v vs %v", approvers, fieldHandled) } type tag struct { @@ -1127,7 +1136,7 @@ func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Appro return err } - approverType := getApproverType(newKFilter.GetTableName()) + approverType := newKFilter.GetApproverType() 
approverAddedMetricCounter[tag{eventType, approverType}]++ } @@ -1139,7 +1148,7 @@ func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Appro return err } - approverType := getApproverType(previousKFilter.GetTableName()) + approverType := previousKFilter.GetApproverType() approverAddedMetricCounter[tag{eventType, approverType}]-- if approverAddedMetricCounter[tag{eventType, approverType}] <= 0 { delete(approverAddedMetricCounter, tag{eventType, approverType}) @@ -1162,16 +1171,6 @@ func (p *EBPFProbe) SetApprovers(eventType eval.EventType, approvers rules.Appro return nil } -func getApproverType(tableName string) string { - approverType := "flag" - - if tableName == kfilters.BasenameApproverKernelMapName { - approverType = "basename" - } - - return approverType -} - func (p *EBPFProbe) isNeededForActivityDump(eventType eval.EventType) bool { if p.config.RuntimeSecurity.ActivityDumpEnabled { for _, e := range p.profileManagers.GetActivityDumpTracedEventTypes() { @@ -1429,6 +1428,9 @@ func (err QueuedNetworkDeviceError) Error() string { func (p *EBPFProbe) pushNewTCClassifierRequest(device model.NetDevice) { select { + case <-p.ctx.Done(): + // the probe is stopping, do not push the new tc classifier request + return case p.newTCNetDevices <- device: // do nothing default: @@ -1568,11 +1570,16 @@ func (p *EBPFProbe) ApplyRuleSet(rs *rules.RuleSet) (*kfilters.ApplyRuleSetRepor } for eventType, report := range ars.Policies { - if err := p.ApplyFilterPolicy(eventType, report.Mode); err != nil { - return nil, err - } - if err := p.SetApprovers(eventType, report.Approvers); err != nil { - return nil, err + if err := p.setApprovers(eventType, report.Approvers); err != nil { + seclog.Errorf("Error while adding approvers fallback in-kernel policy to `%s` for `%s`: %s", kfilters.PolicyModeAccept, eventType, err) + + if err := p.ApplyFilterPolicy(eventType, kfilters.PolicyModeAccept); err != nil { + return nil, err + } + } else { + if err := 
p.ApplyFilterPolicy(eventType, report.Mode); err != nil { + return nil, err + } } } @@ -1641,7 +1648,7 @@ func (p *EBPFProbe) EnableEnforcement(state bool) { } // NewEBPFProbe instantiates a new runtime security agent probe -func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*EBPFProbe, error) { +func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, telemetry telemetry.Component) (*EBPFProbe, error) { nerpc, err := erpc.NewERPC() if err != nil { return nil, err @@ -1897,11 +1904,13 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, wmeta workload TTYFallbackEnabled: probe.Opts.TTYFallbackEnabled, } - p.Resolvers, err = resolvers.NewEBPFResolvers(config, p.Manager, probe.StatsdClient, probe.scrubber, p.Erpc, resolversOpts, wmeta, telemetry) + p.Resolvers, err = resolvers.NewEBPFResolvers(config, p.Manager, probe.StatsdClient, probe.scrubber, p.Erpc, resolversOpts, telemetry) if err != nil { return nil, err } + p.fileHasher = NewFileHasher(config, p.Resolvers.HashResolver) + hostname, err := utils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" @@ -2194,8 +2203,7 @@ func (p *EBPFProbe) HandleActions(ctx *eval.Context, rule *rules.Rule) { p.probe.DispatchCustomEvent(rule, event) } case action.Def.Hash != nil: - // force the resolution as it will force the hash resolution as well - ev.ResolveFields() + p.fileHasher.HashAndReport(rule, ev) } } } diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index b2ee591f4becf..9d4c409d2fcb1 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -77,8 +77,13 @@ type EBPFLessProbe struct { cancelFnc context.CancelFunc fieldHandlers *EBPFLessFieldHandlers clients map[net.Conn]*client - processKiller *ProcessKiller wg sync.WaitGroup + + // kill action + processKiller *ProcessKiller + + // hash action + fileHasher *FileHasher } // 
GetProfileManager returns the Profile Managers @@ -325,14 +330,16 @@ func (p *EBPFLessProbe) handleSyscallMsg(cl *client, syscallMsg *ebpfless.Syscal event.Exit.Code = syscallMsg.Exit.Code defer p.Resolvers.ProcessResolver.DeleteEntry(process.CacheResolverKey{Pid: syscallMsg.PID, NSID: cl.nsID}, event.ProcessContext.ExitTime) - // update kill action reports + // update action reports p.processKiller.HandleProcessExited(event) + p.fileHasher.HandleProcessExited(event) } p.DispatchEvent(event) - // flush pending kill actions + // flush pending actions p.processKiller.FlushPendingReports() + p.fileHasher.FlushPendingReports() } // DispatchEvent sends an event to the probe event handler @@ -598,8 +605,7 @@ func (p *EBPFLessProbe) HandleActions(ctx *eval.Context, rule *rules.Rule) { return p.processKiller.KillFromUserspace(pid, sig, ev) }) case action.Def.Hash != nil: - // force the resolution as it will force the hash resolution as well - ev.ResolveFields() + p.fileHasher.HashAndReport(rule, ev) } } } @@ -673,6 +679,8 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry return nil, err } + p.fileHasher = NewFileHasher(config, p.Resolvers.HashResolver) + hostname, err := utils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" diff --git a/pkg/security/probe/probe_linux.go b/pkg/security/probe/probe_linux.go index 173560f8dc302..020e7977dae4e 100644 --- a/pkg/security/probe/probe_linux.go +++ b/pkg/security/probe/probe_linux.go @@ -8,7 +8,6 @@ package probe import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/security/config" ) @@ -20,7 +19,7 @@ const ( ) // NewProbe instantiates a new runtime security agent probe -func NewProbe(config *config.Config, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*Probe, error) { +func NewProbe(config *config.Config, opts Opts, telemetry 
telemetry.Component) (*Probe, error) { opts.normalize() p := newProbe(config, opts) @@ -32,7 +31,7 @@ func NewProbe(config *config.Config, opts Opts, wmeta workloadmeta.Component, te } p.PlatformProbe = pp } else { - pp, err := NewEBPFProbe(p, config, opts, wmeta, telemetry) + pp, err := NewEBPFProbe(p, config, opts, telemetry) if err != nil { return nil, err } diff --git a/pkg/security/probe/probe_others.go b/pkg/security/probe/probe_others.go index 099b646d32bd4..ad1166c45c23d 100644 --- a/pkg/security/probe/probe_others.go +++ b/pkg/security/probe/probe_others.go @@ -39,6 +39,7 @@ type PlatformProbe struct { // Probe represents the runtime security probe type Probe struct { Config *config.Config + Opts Opts } // Origin returns origin diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index d1989f88b9f0c..7ee01709717bf 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -20,7 +20,6 @@ import ( lru "github.com/hashicorp/golang-lru/v2" "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/etw" etwimpl "github.com/DataDog/datadog-agent/comp/etw/impl" "github.com/DataDog/datadog-agent/pkg/security/config" @@ -1395,7 +1394,7 @@ func (p *WindowsProbe) EnableEnforcement(state bool) { } // NewProbe instantiates a new runtime security agent probe -func NewProbe(config *config.Config, opts Opts, _ workloadmeta.Component, telemetry telemetry.Component) (*Probe, error) { +func NewProbe(config *config.Config, opts Opts, telemetry telemetry.Component) (*Probe, error) { opts.normalize() p := newProbe(config, opts) diff --git a/pkg/security/probe/process_killer.go b/pkg/security/probe/process_killer.go index d9b80b6e4bec1..2fdaa3f0a53fe 100644 --- a/pkg/security/probe/process_killer.go +++ b/pkg/security/probe/process_killer.go @@ -10,6 +10,7 @@ package probe import ( "context" + "fmt" 
"slices" "sync" "time" @@ -131,26 +132,26 @@ func (p *ProcessKiller) HandleProcessExited(event *model.Event) { }) } -func (p *ProcessKiller) isKillAllowed(pids []uint32, paths []string) bool { +func (p *ProcessKiller) isKillAllowed(pids []uint32, paths []string) (bool, error) { p.Lock() if !p.enabled { p.Unlock() - return false + return false, fmt.Errorf("the enforcement capability is disabled") } p.Unlock() for i, pid := range pids { if pid <= 1 || pid == utils.Getpid() { - return false + return false, fmt.Errorf("process with pid %d cannot be killed", pid) } if slices.ContainsFunc(p.binariesExcluded, func(glob *eval.Glob) bool { return glob.Matches(paths[i]) }) { - return false + return false, fmt.Errorf("process `%s`(%d) is protected", paths[i], pid) } } - return true + return true, nil } func (p *ProcessKiller) isRuleAllowed(rule *rules.Rule) bool { @@ -215,8 +216,8 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R } // if one pids is not allowed don't kill anything - if !p.isKillAllowed(pids, paths) { - log.Warnf("unable to kill, some processes are protected: %v, %v", pids, paths) + if killAllowed, err := p.isKillAllowed(pids, paths); !killAllowed { + log.Warnf("unable to kill: %v", err) return } @@ -254,7 +255,7 @@ func (p *ProcessKiller) KillAndReport(scope string, signal string, rule *rules.R CreatedAt: ev.ProcessContext.ExecTime, DetectedAt: ev.ResolveEventTime(), KilledAt: killedAt, - Rule: rule, + rule: rule, } ev.ActionReports = append(ev.ActionReports, report) p.pendingReports = append(p.pendingReports, report) diff --git a/pkg/security/probe/process_killer_test.go b/pkg/security/probe/process_killer_test.go index d9cb203df81cd..091b8b19d2592 100644 --- a/pkg/security/probe/process_killer_test.go +++ b/pkg/security/probe/process_killer_test.go @@ -29,13 +29,25 @@ func TestProcessKillerExclusion(t *testing.T) { }, }, ) - assert.Nil(t, err) - assert.True(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, 
[]string{"/usr/bin/date"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/bin/dd"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/usr/sbin/sudo"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid()}, []string{"/usr/bin/date"})) - assert.False(t, p.isKillAllowed([]uint32{1}, []string{"/usr/bin/date"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/opt/datadog-agent/bin/agent/agent"})) - assert.False(t, p.isKillAllowed([]uint32{utils.Getpid() + 1}, []string{"/opt/datadog-packages/datadog-agent/v1.0.0/bin/agent/agent"})) + + pid := utils.Getpid() + tests := []struct { + pids []uint32 + paths []string + expectedResult bool + }{ + {[]uint32{pid + 1}, []string{"/usr/bin/date"}, true}, + {[]uint32{pid + 1}, []string{"/usr/bin/dd"}, false}, + {[]uint32{pid + 1}, []string{"/usr/sbin/sudo"}, false}, + {[]uint32{pid}, []string{"/usr/bin/date"}, false}, + {[]uint32{1}, []string{"/usr/bin/date"}, false}, + {[]uint32{pid + 1}, []string{"/opt/datadog-agent/bin/agent/agent"}, false}, + {[]uint32{pid + 1}, []string{"/opt/datadog-packages/datadog-agent/v1.0.0/bin/agent/agent"}, false}, + } + + for _, test := range tests { + isKilledAllowed, _ := p.isKillAllowed(test.pids, test.paths) + assert.Equal(t, test.expectedResult, isKilledAllowed) + } } diff --git a/pkg/security/rconfig/policies.go b/pkg/security/rconfig/policies.go index 3d8576481e07a..e7fe1ea3129ca 100644 --- a/pkg/security/rconfig/policies.go +++ b/pkg/security/rconfig/policies.go @@ -18,8 +18,8 @@ import ( "github.com/skydive-project/go-debouncer" "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/remote/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" 
"github.com/DataDog/datadog-agent/pkg/security/utils" @@ -53,16 +53,16 @@ func NewRCPolicyProvider(dumpPolicies bool, setEnforcementCallback func(bool)) ( return nil, fmt.Errorf("failed to parse agent version: %w", err) } - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("failed to get ipc address: %w", err) } - c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, + c, err := client.NewGRPCClient(ipcAddress, pkgconfigsetup.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(pkgconfigsetup.Datadog()) }, client.WithAgent(agentName, agentVersion.String()), client.WithProducts(state.ProductCWSDD, state.ProductCWSCustom), client.WithPollInterval(securityAgentRCPollInterval), - client.WithDirectorRootOverride(config.Datadog().GetString("site"), config.Datadog().GetString("remote_configuration.director_root")), + client.WithDirectorRootOverride(pkgconfigsetup.Datadog().GetString("site"), pkgconfigsetup.Datadog().GetString("remote_configuration.director_root")), ) if err != nil { return nil, err diff --git a/pkg/security/rconfig/profiles.go b/pkg/security/rconfig/profiles.go deleted file mode 100644 index 5441d8621ed5b..0000000000000 --- a/pkg/security/rconfig/profiles.go +++ /dev/null @@ -1,150 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build linux - -// Package rconfig holds rconfig related files -package rconfig - -import ( - "context" - "encoding/json" - "fmt" - "sync" - - proto "github.com/DataDog/agent-payload/v5/cws/dumpsv1" - "github.com/DataDog/datadog-go/v5/statsd" - - "github.com/DataDog/datadog-agent/pkg/api/security" - "github.com/DataDog/datadog-agent/pkg/config" - "github.com/DataDog/datadog-agent/pkg/config/remote/client" - "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" - cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" - "github.com/DataDog/datadog-agent/pkg/security/utils" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -const ( - // image name/image tag separator - separator = ":::" -) - -// ProfileConfig defines a profile config -type ProfileConfig struct { - Tags []string - Profile []byte -} - -// RCProfileProvider defines a RC profile provider -type RCProfileProvider struct { - sync.RWMutex - - client *client.Client - - onNewProfileCallback func(selector cgroupModel.WorkloadSelector, profile *proto.SecurityProfile) -} - -// Stop stops the client -func (r *RCProfileProvider) Stop() error { - r.client.Close() - return nil -} - -func (r *RCProfileProvider) rcProfilesUpdateCallback(configs map[string]state.RawConfig, _ func(string, state.ApplyStatus)) { - for _, config := range configs { - var profCfg ProfileConfig - if err := json.Unmarshal(config.Config, &profCfg); err != nil { - log.Errorf("couldn't decode json profile: %s", err) - continue - } - - profile := &proto.SecurityProfile{} - if err := profile.UnmarshalVT([]byte(profCfg.Profile)); err != nil { - log.Errorf("couldn't decode protobuf profile: %s", err) - continue - } - - selector, err := cgroupModel.NewWorkloadSelector(profile.Selector.ImageName, profile.Selector.ImageTag) - if err != nil { - log.Errorf("selector error %s/%s: %v", profile.Selector.ImageName, profile.Selector.ImageTag, err) - continue - } - - log.Tracef("got a new profile for %v : %v", selector, 
profile) - r.onNewProfileCallback(selector, profile) - } -} - -// Start starts the Remote Config profile provider and subscribes to updates -func (r *RCProfileProvider) Start(ctx context.Context) error { - log.Info("remote-config profile provider started") - - r.client.Start() - r.client.Subscribe(state.ProductCWSProfiles, r.rcProfilesUpdateCallback) - - go func() { - <-ctx.Done() - _ = r.Stop() - }() - - return nil -} - -func selectorToTag(selector *cgroupModel.WorkloadSelector) string { - return selector.Image + separator + selector.Tag -} - -// UpdateWorkloadSelectors updates the selectors used to query profiles -func (r *RCProfileProvider) UpdateWorkloadSelectors(selectors []cgroupModel.WorkloadSelector) { - r.Lock() - defer r.Unlock() - - log.Tracef("updating workload selector: %v", selectors) - - var tags []string - - for _, selector := range selectors { - tags = append(tags, selectorToTag(&selector)) - } - - r.client.SetCWSWorkloads(tags) -} - -// SetOnNewProfileCallback sets the onNewProfileCallback function -func (r *RCProfileProvider) SetOnNewProfileCallback(onNewProfileCallback func(selector cgroupModel.WorkloadSelector, profile *proto.SecurityProfile)) { - r.onNewProfileCallback = onNewProfileCallback -} - -// SendStats sends the metrics of the directory provider -func (r *RCProfileProvider) SendStats(_ statsd.ClientInterface) error { - return nil -} - -// NewRCProfileProvider returns a new Remote Config based policy provider -func NewRCProfileProvider() (*RCProfileProvider, error) { - agentVersion, err := utils.GetAgentSemverVersion() - if err != nil { - return nil, fmt.Errorf("failed to parse agent version: %v", err) - } - - ipcAddress, err := config.GetIPCAddress() - if err != nil { - return nil, fmt.Errorf("failed to get ipc address: %w", err) - } - - c, err := client.NewGRPCClient(ipcAddress, config.GetIPCPort(), func() (string, error) { return security.FetchAuthToken(config.Datadog()) }, - client.WithAgent(agentName, agentVersion.String()), - 
client.WithProducts(state.ProductCWSProfiles), - client.WithPollInterval(securityAgentRCPollInterval)) - if err != nil { - return nil, err - } - - r := &RCProfileProvider{ - client: c, - } - - return r, nil -} diff --git a/pkg/security/reporter/reporter.go b/pkg/security/reporter/reporter.go index b4d40000fcc06..1c8a4f71bf5a0 100644 --- a/pkg/security/reporter/reporter.go +++ b/pkg/security/reporter/reporter.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" @@ -52,7 +52,7 @@ func newReporter(hostname string, stopper startstop.Stopper, sourceName, sourceT stopper.Add(auditor) // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfig.Datadog()) + pipelineProvider := pipeline.NewProvider(logsconfig.NumberOfPipelines, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) pipelineProvider.Start() stopper.Add(pipelineProvider) diff --git a/pkg/security/resolvers/cgroup/model/model.go b/pkg/security/resolvers/cgroup/model/model.go index 5e2b8bcc6ba9d..15a9046510363 100644 --- a/pkg/security/resolvers/cgroup/model/model.go +++ b/pkg/security/resolvers/cgroup/model/model.go @@ -76,7 +76,7 @@ type CacheEntry struct { sync.RWMutex Deleted *atomic.Bool WorkloadSelector WorkloadSelector - 
PIDs map[uint32]int8 + PIDs map[uint32]bool } // NewCacheEntry returns a new instance of a CacheEntry @@ -90,11 +90,11 @@ func NewCacheEntry(containerID string, cgroupFlags uint64, pids ...uint32) (*Cac ContainerContext: model.ContainerContext{ ContainerID: containerutils.ContainerID(containerID), }, - PIDs: make(map[uint32]int8, 10), + PIDs: make(map[uint32]bool, 10), } for _, pid := range pids { - newCGroup.PIDs[pid] = 1 + newCGroup.PIDs[pid] = true } return &newCGroup, nil } @@ -127,7 +127,7 @@ func (cgce *CacheEntry) AddPID(pid uint32) { cgce.Lock() defer cgce.Unlock() - cgce.PIDs[pid] = 1 + cgce.PIDs[pid] = true } // SetTags sets the tags for the provided workload diff --git a/pkg/security/resolvers/cgroup/resolver.go b/pkg/security/resolvers/cgroup/resolver.go index 6f09ea32771fc..1e60d8de0c1f8 100644 --- a/pkg/security/resolvers/cgroup/resolver.go +++ b/pkg/security/resolvers/cgroup/resolver.go @@ -30,6 +30,8 @@ const ( WorkloadSelectorResolved Event = iota // CGroupDeleted is used to notify that a cgroup was deleted CGroupDeleted + // CGroupCreated new croup created + CGroupCreated // CGroupMaxEvent is used cap the event ID CGroupMaxEvent ) @@ -137,6 +139,13 @@ func (cr *Resolver) AddPID(process *model.ProcessCacheEntry) { // add the new CGroup to the cache cr.workloads.Add(string(process.ContainerID), newCGroup) + // notify listeners + cr.listenersLock.Lock() + for _, l := range cr.listeners[CGroupCreated] { + l(newCGroup) + } + cr.listenersLock.Unlock() + // check the tags of this workload cr.checkTags(newCGroup) } diff --git a/pkg/security/resolvers/hash/resolver_linux.go b/pkg/security/resolvers/hash/resolver_linux.go index d4ab18efb6f8d..7de1ef8e8a13c 100644 --- a/pkg/security/resolvers/hash/resolver_linux.go +++ b/pkg/security/resolvers/hash/resolver_linux.go @@ -28,6 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" 
"github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" ) @@ -160,9 +161,17 @@ func NewResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.ClientInte // ComputeHashesFromEvent calls ComputeHashes using the provided event func (resolver *Resolver) ComputeHashesFromEvent(event *model.Event, file *model.FileEvent) []string { + if !resolver.opts.Enabled { + return nil + } + // resolve FileEvent event.FieldHandlers.ResolveFilePath(event, file) - return resolver.ComputeHashes(event.GetEventType(), &event.ProcessContext.Process, file) + + process := event.ProcessContext.Process + resolver.HashFileEvent(event.GetEventType(), process.ContainerID, process.Pid, file) + + return file.Hashes } // ComputeHashes computes the hashes of the provided file event. @@ -172,23 +181,8 @@ func (resolver *Resolver) ComputeHashes(eventType model.EventType, process *mode return nil } - // check state - if file.HashState == model.Done { - return file.Hashes - } - if file.HashState != model.NoHash && file.HashState != model.HashWasRateLimited { - // this file was already processed and an error occurred, nothing else to do - return nil - } - - // check if the resolver is allowed to hash this event type - if !slices.Contains(resolver.opts.EventTypes, eventType) { - file.HashState = model.EventTypeNotConfigured - resolver.hashMiss[eventType][model.EventTypeNotConfigured].Inc() - return nil - } + resolver.HashFileEvent(eventType, process.ContainerID, process.Pid, file) - resolver.hash(eventType, process, file) return file.Hashes } @@ -227,8 +221,28 @@ func getFileInfo(path string) (fs.FileMode, int64, fileUniqKey, error) { return utils.UnixStatModeToGoFileMode(stat.Mode), stat.Size, fkey, nil } -// hash hashes the provided file event -func (resolver *Resolver) hash(eventType model.EventType, process 
*model.Process, file *model.FileEvent) { +// HashFileEvent hashes the provided file event +func (resolver *Resolver) HashFileEvent(eventType model.EventType, ctrID containerutils.ContainerID, pid uint32, file *model.FileEvent) { + if !resolver.opts.Enabled { + return + } + + // check state + if file.HashState == model.Done { + return + } + if file.HashState != model.NoHash && file.HashState != model.HashWasRateLimited { + // this file was already processed and an error occurred, nothing else to do + return + } + + // check if the resolver is allowed to hash this event type + if !slices.Contains(resolver.opts.EventTypes, eventType) { + file.HashState = model.EventTypeNotConfigured + resolver.hashMiss[eventType][model.EventTypeNotConfigured].Inc() + return + } + if !file.IsPathnameStrResolved || len(file.PathnameStr) == 0 { resolver.hashMiss[eventType][model.PathnameResolutionError].Inc() file.HashState = model.PathnameResolutionError @@ -244,7 +258,7 @@ func (resolver *Resolver) hash(eventType model.EventType, process *model.Process // check if the hash(es) of this file is in cache fileKey := LRUCacheKey{ path: file.PathnameStr, - containerID: string(process.ContainerID), + containerID: string(ctrID), inode: file.Inode, pathID: file.PathKey.PathID, } @@ -267,9 +281,10 @@ func (resolver *Resolver) hash(eventType model.EventType, process *model.Process return } - rootPIDs := []uint32{process.Pid} + // add pid one for hash resolution outside of a container + rootPIDs := []uint32{1, pid} if resolver.cgroupResolver != nil { - w, ok := resolver.cgroupResolver.GetWorkload(string(process.ContainerID)) + w, ok := resolver.cgroupResolver.GetWorkload(string(ctrID)) if ok { rootPIDs = w.GetPIDs() } diff --git a/pkg/security/resolvers/resolvers_ebpf.go b/pkg/security/resolvers/resolvers_ebpf.go index 0b4d84fa2cbeb..b3ca8bb549b2b 100644 --- a/pkg/security/resolvers/resolvers_ebpf.go +++ b/pkg/security/resolvers/resolvers_ebpf.go @@ -18,7 +18,6 @@ import ( manager 
"github.com/DataDog/ebpf-manager" "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/probe/erpc" @@ -64,7 +63,7 @@ type EBPFResolvers struct { } // NewEBPFResolvers creates a new instance of EBPFResolvers -func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdClient statsd.ClientInterface, scrubber *procutil.DataScrubber, eRPC *erpc.ERPC, opts Opts, wmeta workloadmeta.Component, telemetry telemetry.Component) (*EBPFResolvers, error) { +func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdClient statsd.ClientInterface, scrubber *procutil.DataScrubber, eRPC *erpc.ERPC, opts Opts, telemetry telemetry.Component) (*EBPFResolvers, error) { dentryResolver, err := dentry.NewResolver(config.Probe, statsdClient, eRPC) if err != nil { return nil, err @@ -85,7 +84,7 @@ func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdCli var sbomResolver *sbom.Resolver if config.RuntimeSecurity.SBOMResolverEnabled { - sbomResolver, err = sbom.NewSBOMResolver(config.RuntimeSecurity, statsdClient, wmeta) + sbomResolver, err = sbom.NewSBOMResolver(config.RuntimeSecurity, statsdClient) if err != nil { return nil, err } diff --git a/pkg/security/resolvers/sbom/resolver.go b/pkg/security/resolvers/sbom/resolver.go index 2a64784d6b793..8b03e0831f76a 100644 --- a/pkg/security/resolvers/sbom/resolver.go +++ b/pkg/security/resolvers/sbom/resolver.go @@ -26,7 +26,7 @@ import ( "go.uber.org/atomic" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" 
"github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host" @@ -152,8 +152,8 @@ type Resolver struct { } // NewSBOMResolver returns a new instance of Resolver -func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*Resolver, error) { - sbomScanner, err := sbomscanner.CreateGlobalScanner(coreconfig.SystemProbe(), optional.NewOption(wmeta)) +func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface) (*Resolver, error) { + sbomScanner, err := sbomscanner.CreateGlobalScanner(pkgconfigsetup.SystemProbe(), optional.NewNoneOption[workloadmeta.Component]()) if err != nil { return nil, err } @@ -208,7 +208,7 @@ func (r *Resolver) prepareContextTags() { r.contextTags = append(r.contextTags, fmt.Sprintf("host:%s", r.hostname)) // merge tags from config - for _, tag := range configUtils.GetConfiguredTags(coreconfig.Datadog(), true) { + for _, tag := range configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true) { if strings.HasPrefix(tag, "host") { continue } diff --git a/pkg/security/resolvers/sbom/resolver_unsupported.go b/pkg/security/resolvers/sbom/resolver_unsupported.go index 43583a80e7f78..a9b05d167589a 100644 --- a/pkg/security/resolvers/sbom/resolver_unsupported.go +++ b/pkg/security/resolvers/sbom/resolver_unsupported.go @@ -13,7 +13,6 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/security/config" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -24,7 +23,7 @@ type Resolver struct { } // NewSBOMResolver returns a new instance of Resolver -func NewSBOMResolver(_ *config.RuntimeSecurityConfig, _ statsd.ClientInterface, _ workloadmeta.Component) (*Resolver, error) { +func NewSBOMResolver(_ 
*config.RuntimeSecurityConfig, _ statsd.ClientInterface) (*Resolver, error) { return &Resolver{}, nil } diff --git a/pkg/security/resolvers/tags/resolver.go b/pkg/security/resolvers/tags/resolver.go index 57dd03cbb3caf..ab0285529e524 100644 --- a/pkg/security/resolvers/tags/resolver.go +++ b/pkg/security/resolvers/tags/resolver.go @@ -13,7 +13,7 @@ import ( taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry" - rootconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/probe/config" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -96,12 +96,12 @@ func (t *DefaultResolver) Stop() error { // NewResolver returns a new tags resolver func NewResolver(config *config.Config, telemetry telemetry.Component) Resolver { if config.RemoteTaggerEnabled { - options, err := remote.NodeAgentOptionsForSecurityResolvers(rootconfig.Datadog()) + options, err := remote.NodeAgentOptionsForSecurityResolvers(pkgconfigsetup.Datadog()) if err != nil { log.Errorf("unable to configure the remote tagger: %s", err) } else { return &DefaultResolver{ - tagger: remote.NewTagger(options, rootconfig.Datadog(), taggerTelemetry.NewStore(telemetry)), + tagger: remote.NewTagger(options, pkgconfigsetup.Datadog(), taggerTelemetry.NewStore(telemetry)), } } } diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index 2c24d48d230a8..43ef3ea0dc0d3 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -63,6 +63,7 @@ type RuleEngine struct { eventSender events.EventSender rulesetListeners []rules.RuleSetListener AutoSuppression autosuppression.AutoSuppression + pid uint32 } // APIServer defines the API server @@ -85,6 +86,7 @@ func NewRuleEngine(evm 
*eventmonitor.EventMonitor, config *config.RuntimeSecurit policyLoader: rules.NewPolicyLoader(), statsdClient: statsdClient, rulesetListeners: rulesetListeners, + pid: utils.Getpid(), } engine.AutoSuppression.Init(autosuppression.Opts{ @@ -516,6 +518,11 @@ func (e *RuleEngine) SetRulesetLoadedCallback(cb func(es *rules.RuleSet, err *mu // HandleEvent is called by the probe when an event arrives from the kernel func (e *RuleEngine) HandleEvent(event *model.Event) { + // don't eval event originating from myself + if !e.probe.Opts.DontDiscardRuntime && event.ProcessContext != nil && event.ProcessContext.Pid == e.pid { + return + } + // event already marked with an error, skip it if event.Error != nil { return diff --git a/pkg/security/secl/compiler/eval/eval_operators.go b/pkg/security/secl/compiler/eval/eval_operators.go index 9b8f3ee330b0f..515a1393c3c22 100644 --- a/pkg/security/secl/compiler/eval/eval_operators.go +++ b/pkg/security/secl/compiler/eval/eval_operators.go @@ -478,13 +478,13 @@ func GreaterThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -549,13 +549,13 @@ func GreaterOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEv isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: 
RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -620,13 +620,13 @@ func LesserThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEvaluator, isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -691,13 +691,13 @@ func LesserOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEva isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -762,13 +762,13 @@ func DurationLesserThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolEv isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: 
RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -833,13 +833,13 @@ func DurationLesserOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) ( isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -904,13 +904,13 @@ func DurationGreaterThan(a *IntEvaluator, b *IntEvaluator, state *State) (*BoolE isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -975,13 +975,13 @@ func DurationGreaterOrEqualThan(a *IntEvaluator, b *IntEvaluator, state *State) isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: 
RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1117,13 +1117,13 @@ func DurationLesserThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, sta isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1188,13 +1188,13 @@ func DurationLesserOrEqualThanArithmeticOperation(a *IntEvaluator, b *IntEvaluat isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1259,13 +1259,13 @@ func DurationGreaterThanArithmeticOperation(a *IntEvaluator, b *IntEvaluator, st isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: 
RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } @@ -1330,13 +1330,13 @@ func DurationGreaterOrEqualThanArithmeticOperation(a *IntEvaluator, b *IntEvalua isDc := isArithmDeterministic(a, b, state) if a.Field != "" { - if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(a.Field, FieldValue{Value: b.Value, Type: RangeValueType}); err != nil { return nil, err } } if b.Field != "" { - if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: ScalarValueType}); err != nil { + if err := state.UpdateFieldValues(b.Field, FieldValue{Value: a.Value, Type: RangeValueType}); err != nil { return nil, err } } diff --git a/pkg/security/secl/compiler/eval/field.go b/pkg/security/secl/compiler/eval/field.go index 8646c1aa3c0d7..2b9f6f1cc5297 100644 --- a/pkg/security/secl/compiler/eval/field.go +++ b/pkg/security/secl/compiler/eval/field.go @@ -23,6 +23,7 @@ const ( BitmaskValueType FieldValueType = 1 << 4 VariableValueType FieldValueType = 1 << 5 IPNetValueType FieldValueType = 1 << 6 + RangeValueType FieldValueType = 1 << 7 ) // MarshalJSON returns the JSON encoding of the FieldValueType @@ -51,6 +52,8 @@ func (t FieldValueType) String() string { return "variable" case IPNetValueType: return "ip" + case RangeValueType: + return "range" } return "" diff --git a/pkg/security/secl/compiler/eval/state.go b/pkg/security/secl/compiler/eval/state.go index 2a8793fd1e19a..d7273496b2ff3 100644 --- a/pkg/security/secl/compiler/eval/state.go +++ b/pkg/security/secl/compiler/eval/state.go @@ -55,7 +55,7 @@ func (s *State) UpdateFieldValues(field Field, value FieldValue) error { for _, v := range values { // compare only 
comparable switch v.Value.(type) { - case int, uint, int64, uint64, string: + case int, uint, int64, uint64, string, bool: if v == value { return nil } diff --git a/pkg/security/secl/compiler/generators/operators/operators.go b/pkg/security/secl/compiler/generators/operators/operators.go index f826fe42cb76d..36748ff062670 100644 --- a/pkg/security/secl/compiler/generators/operators/operators.go +++ b/pkg/security/secl/compiler/generators/operators/operators.go @@ -28,6 +28,7 @@ type Operator struct { ArrayType string ValueType string Commutative bool + RangeLimit string } func main() { @@ -360,7 +361,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare(">"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "GreaterOrEqualThan", @@ -369,7 +370,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare(">="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "LesserThan", @@ -378,7 +379,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare("<"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "LesserOrEqualThan", @@ -387,7 +388,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: stdCompare("<="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationLesserThan", @@ -396,7 +397,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare("<"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationLesserOrEqualThan", @@ -405,7 
+406,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare("<="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterThan", @@ -414,7 +415,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare(">"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterOrEqualThan", @@ -423,7 +424,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompare(">="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationEqual", @@ -441,7 +442,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompareArithmeticOperation("<"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationLesserOrEqualThanArithmeticOperation", @@ -450,7 +451,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompareArithmeticOperation("<="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterThanArithmeticOperation", @@ -459,7 +460,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: durationCompareArithmeticOperation(">"), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationGreaterOrEqualThanArithmeticOperation", @@ -468,7 +469,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ FuncReturnType: "BoolEvaluator", EvalReturnType: "bool", Op: 
durationCompareArithmeticOperation(">="), - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationEqualArithmeticOperation", @@ -509,7 +510,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare(">"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "IntArrayGreaterOrEqualThan", @@ -519,7 +520,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare(">="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "IntArrayLesserThan", @@ -529,7 +530,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare("<"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "IntArrayLesserOrEqualThan", @@ -539,7 +540,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: stdCompare("<="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationArrayLesserThan", @@ -549,7 +550,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare("<"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationArrayLesserOrEqualThan", @@ -559,7 +560,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare("<="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, { FuncName: "DurationArrayGreaterThan", @@ -569,7 +570,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare(">"), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: 
"RangeValueType", }, { FuncName: "DurationArrayGreaterOrEqualThan", @@ -579,7 +580,7 @@ func {{ .FuncName }}(a *{{ .Arg1Type }}, b *{{ .Arg2Type }}, state *State) (*{{ EvalReturnType: "bool", Op: durationCompare(">="), ArrayType: "int", - ValueType: "ScalarValueType", + ValueType: "RangeValueType", }, }, } diff --git a/pkg/security/secl/containerutils/cgroup.go b/pkg/security/secl/containerutils/cgroup.go index 734fc74438526..15a341057a0d1 100644 --- a/pkg/security/secl/containerutils/cgroup.go +++ b/pkg/security/secl/containerutils/cgroup.go @@ -67,7 +67,7 @@ func GetContainerFromCgroup(cgroup string) (string, CGroupFlags) { // GetCgroupFromContainer infers the container runtime from a cgroup name func GetCgroupFromContainer(id ContainerID, flags CGroupFlags) CGroupID { for runtimePrefix, runtimeFlag := range RuntimePrefixes { - if uint64(flags)&0b111 == uint64(runtimeFlag) { + if flags&CGroupManagerMask == CGroupFlags(runtimeFlag) { return CGroupID(runtimePrefix + string(id)) } } diff --git a/pkg/security/secl/containerutils/helpers.go b/pkg/security/secl/containerutils/helpers.go index ce2d8c7d3d24b..701e4f9cd1d9f 100644 --- a/pkg/security/secl/containerutils/helpers.go +++ b/pkg/security/secl/containerutils/helpers.go @@ -80,7 +80,7 @@ func FindContainerID(s string) (string, uint64) { // GetCGroupContext returns the cgroup ID and the sanitized container ID from a container id/flags tuple func GetCGroupContext(containerID ContainerID, cgroupFlags CGroupFlags) (CGroupID, ContainerID) { cgroupID := GetCgroupFromContainer(containerID, cgroupFlags) - if cgroupFlags&0b111 == 0 { + if !cgroupFlags.IsContainer() { containerID = "" } return CGroupID(cgroupID), ContainerID(containerID) diff --git a/pkg/security/secl/containerutils/types.go b/pkg/security/secl/containerutils/types.go index fbda75c7caebb..bc55b8539aec0 100644 --- a/pkg/security/secl/containerutils/types.go +++ b/pkg/security/secl/containerutils/types.go @@ -14,3 +14,11 @@ type CGroupID string // 
CGroupFlags represents the flags of a cgroup type CGroupFlags uint64 + +// CGroupManagerMask holds the bitmask for the cgroup manager +const CGroupManagerMask CGroupFlags = 0b111 + +// IsContainer returns whether a cgroup maps to a container +func (f CGroupFlags) IsContainer() bool { + return (f&CGroupManagerMask != 0) && ((f & CGroupManagerMask) != CGroupFlags(CGroupManagerSystemd)) +} diff --git a/pkg/security/secl/model/field_handlers_unix.go b/pkg/security/secl/model/field_handlers_unix.go index 80efe8248372b..32ff972cfee5c 100644 --- a/pkg/security/secl/model/field_handlers_unix.go +++ b/pkg/security/secl/model/field_handlers_unix.go @@ -36,7 +36,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent) _ = ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent) _ = ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.NetworkContext.Device) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.BaseEvent.ProcessContext.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.BaseEvent.ProcessContext.Process) + } _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.BaseEvent.ProcessContext.Process) _ = ev.FieldHandlers.ResolveProcessArgv(ev, &ev.BaseEvent.ProcessContext.Process) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.BaseEvent.ProcessContext.Process) @@ -112,7 +114,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) } if ev.BaseEvent.ProcessContext.HasParent() { - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent) + } } if ev.BaseEvent.ProcessContext.HasParent() { _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) @@ -358,7 +362,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = 
ev.FieldHandlers.ResolveK8SUID(ev, &ev.Exec.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Exec.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exec.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exec.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exec.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exec.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exec.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exec.Process) @@ -440,7 +446,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUID(ev, &ev.Exit.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Exit.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exit.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exit.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exit.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exit.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exit.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exit.Process) @@ -638,7 +646,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.PTrace.Tracee.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.PTrace.Tracee.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.PTrace.Tracee.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, &ev.PTrace.Tracee.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.PTrace.Tracee.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.PTrace.Tracee.Process) @@ -733,7 +743,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.PTrace.Tracee.Parent) } if ev.PTrace.Tracee.HasParent() { - 
_ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent) + } } if ev.PTrace.Tracee.HasParent() { _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.PTrace.Tracee.Parent) @@ -907,7 +919,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Process.UserSession) _ = ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Process.UserSession) _ = ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.Signal.Target.Process) - _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.Signal.Target.Process) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.Signal.Target.Process) + } _ = ev.FieldHandlers.ResolveProcessArgv(ev, &ev.Signal.Target.Process) _ = ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.Signal.Target.Process) _ = ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.Signal.Target.Process) @@ -1002,7 +1016,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Signal.Target.Parent) } if ev.Signal.Target.HasParent() { - _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent) + if !forADs { + _ = ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent) + } } if ev.Signal.Target.HasParent() { _ = ev.FieldHandlers.ResolveProcessArgv(ev, ev.Signal.Target.Parent) diff --git a/pkg/security/secl/model/model.go b/pkg/security/secl/model/model.go index 77e44b0b6a86d..d8aebe7770aa5 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -313,8 +313,9 @@ type MatchedRule struct { // ActionReport defines an action report type ActionReport interface { - ToJSON() ([]byte, bool, error) + ToJSON() ([]byte, error) IsMatchingRule(ruleID eval.RuleID) bool + IsResolved() bool } // NewMatchedRule return a new MatchedRule instance diff --git a/pkg/security/secl/model/model_unix.go b/pkg/security/secl/model/model_unix.go index 
63131fd329014..f721919aa87db 100644 --- a/pkg/security/secl/model/model_unix.go +++ b/pkg/security/secl/model/model_unix.go @@ -241,7 +241,7 @@ type Process struct { // defined to generate accessors, ArgsTruncated and EnvsTruncated are used during by unmarshaller Argv0 string `field:"argv0,handler:ResolveProcessArgv0,weight:100"` // SECLDoc[argv0] Definition:`First argument of the process` - Args string `field:"args,handler:ResolveProcessArgs,weight:500"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` + Args string `field:"args,handler:ResolveProcessArgs,weight:500,opts:skip_ad"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` Argv []string `field:"argv,handler:ResolveProcessArgv,weight:500; cmdargv,handler:ResolveProcessCmdArgv,opts:getters_only; args_flags,handler:ResolveProcessArgsFlags,opts:helper; args_options,handler:ResolveProcessArgsOptions,opts:helper"` // SECLDoc[argv] Definition:`Arguments of the process (as an array, excluding argv0)` Example:`exec.argv in ["127.0.0.1"]` Description:`Matches any process that has this IP address as one of its arguments.` SECLDoc[args_flags] Definition:`Flags in the process arguments` Example:`exec.args_flags in ["s"] && exec.args_flags in ["V"]` Description:`Matches any process with both "-s" and "-V" flags in its arguments. 
Also matches "-sV".` SECLDoc[args_options] Definition:`Argument of the process as options` Example:`exec.args_options in ["p=0-1024"]` Description:`Matches any process that has either "-p 0-1024" or "--p=0-1024" in its arguments.` ArgsTruncated bool `field:"args_truncated,handler:ResolveProcessArgsTruncated"` // SECLDoc[args_truncated] Definition:`Indicator of arguments truncation` Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` diff --git a/pkg/security/secl/rules/approvers.go b/pkg/security/secl/rules/approvers.go index 7e470d55684a7..3f3bc89c69c33 100644 --- a/pkg/security/secl/rules/approvers.go +++ b/pkg/security/secl/rules/approvers.go @@ -8,6 +8,7 @@ package rules import ( "errors" + "math" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" ) @@ -15,8 +16,7 @@ import ( // Approvers are just filter values indexed by field type Approvers map[eval.Field]FilterValues -// isAnApprover returns whether the given value is an approver for the given rule -func isAnApprover(event eval.Event, ctx *eval.Context, rule *Rule, field eval.Field, value interface{}) (bool, error) { +func partialEval(event eval.Event, ctx *eval.Context, rule *Rule, field eval.Field, value interface{}) (bool, error) { var readOnlyError *eval.ErrFieldReadOnly if err := event.SetFieldValue(field, value); err != nil { if errors.As(err, &readOnlyError) { @@ -24,28 +24,141 @@ func isAnApprover(event eval.Event, ctx *eval.Context, rule *Rule, field eval.Fi } return false, err } - origResult, err := rule.PartialEval(ctx, field) + return rule.PartialEval(ctx, field) +} + +func isAnIntLesserEqualThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + min := math.MinInt + if fieldCap.RangeFilterValue != nil { + min = fieldCap.RangeFilterValue.Min + } + + maxResult, err := partialEval(event, ctx, rule, fieldCap.Field, 
value) if err != nil { - return false, err + return false, RangeFilterValue{}, err + } + if !maxResult { + return false, RangeFilterValue{}, nil } - notValue, err := eval.NotOfValue(value) + result, err := partialEval(event, ctx, rule, fieldCap.Field, value.(int)+1) + return !result, RangeFilterValue{Min: min, Max: value.(int)}, err +} + +func isAnIntLesserThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + min := math.MinInt + if fieldCap.RangeFilterValue != nil { + min = fieldCap.RangeFilterValue.Min + } + + maxResult, err := partialEval(event, ctx, rule, fieldCap.Field, value.(int)-1) if err != nil { - return false, err + return false, RangeFilterValue{}, err + } + if !maxResult { + return false, RangeFilterValue{}, nil } - if err := event.SetFieldValue(field, notValue); err != nil { - if errors.As(err, &readOnlyError) { + result, err := partialEval(event, ctx, rule, fieldCap.Field, value) + return !result, RangeFilterValue{Min: min, Max: value.(int) - 1}, err +} + +func isAnIntGreaterEqualThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + max := math.MaxInt + if fieldCap.RangeFilterValue != nil { + max = fieldCap.RangeFilterValue.Max + } + + minResult, err := partialEval(event, ctx, rule, fieldCap.Field, value) + if err != nil { + return false, RangeFilterValue{}, err + } + if !minResult { + return false, RangeFilterValue{}, nil + } + + result, err := partialEval(event, ctx, rule, fieldCap.Field, value.(int)-1) + return !result, RangeFilterValue{Min: value.(int), Max: max}, err +} + +func isAnIntGreaterThanApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, value interface{}) (bool, interface{}, error) { + max := math.MaxInt + if fieldCap.RangeFilterValue != nil { + max = fieldCap.RangeFilterValue.Max + } + + minResult, err := partialEval(event, ctx, rule, 
fieldCap.Field, value.(int)+1) + if err != nil { + return false, RangeFilterValue{}, err + } + if !minResult { + return false, RangeFilterValue{}, nil + } + + result, err := partialEval(event, ctx, rule, fieldCap.Field, value) + return !result, RangeFilterValue{Min: value.(int) + 1, Max: max}, err +} + +// isAnApprover returns whether the given value is an approver for the given rule +func isAnApprover(event eval.Event, ctx *eval.Context, rule *Rule, fieldCap FieldCapability, fieldValueType eval.FieldValueType, value interface{}) (bool, eval.FieldValueType, interface{}, error) { + if fieldValueType == eval.RangeValueType { + isAnApprover, approverValue, err := isAnIntLesserEqualThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + isAnApprover, approverValue, err = isAnIntLesserThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + isAnApprover, approverValue, err = isAnIntGreaterEqualThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + isAnApprover, approverValue, err = isAnIntGreaterThanApprover(event, ctx, rule, fieldCap, value) + if isAnApprover || err != nil { + return isAnApprover, eval.RangeValueType, approverValue, err + } + } + + isaaFnc := func(v1, v2 interface{}) (bool, error) { + origResult, err := partialEval(event, ctx, rule, fieldCap.Field, v1) + if err != nil { + return false, err + } + if !origResult { return false, nil } - return false, err + + notResult, err := partialEval(event, ctx, rule, fieldCap.Field, v2) + if err != nil { + return false, err + } + return origResult != notResult, nil } - notResult, err := rule.PartialEval(ctx, field) + + notValue, err := eval.NotOfValue(value) if err != nil { - return false, err + return false, fieldValueType, value, err + 
} + + result, err := isaaFnc(value, notValue) + if result || err != nil { + return result, fieldValueType, value, err } - return origResult && !notResult, nil + if fieldCap.HandleNotApproverValue == nil { + return false, fieldValueType, value, err + } + + result, err = isaaFnc(notValue, value) + if result { + fieldValueType, value, ok := fieldCap.HandleNotApproverValue(fieldValueType, value) + if ok { + return true, fieldValueType, value, nil + } + } + + return false, fieldValueType, value, err } func bitmaskCombinations(bitmasks []int) []int { @@ -96,7 +209,6 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) ctx := eval.NewContext(event) - // for each rule we should at least find one approver otherwise we will return no approver for the field for _, rule := range rules { var ( bestFilterField eval.Field @@ -105,6 +217,7 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) bestFilterMode FilterMode ) + LOOP: for _, fieldCap := range fieldCaps { field := fieldCap.Field @@ -112,15 +225,27 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) var bitmasks []int for _, value := range rule.GetFieldValues(field) { + // TODO: handle range for bitmask field, for now ignore range value + if fieldCap.TypeBitmask&eval.BitmaskValueType == eval.BitmaskValueType && value.Type == eval.RangeValueType { + continue + } + + if !fieldCap.TypeMatches(value.Type) { + continue LOOP + } + switch value.Type { - case eval.ScalarValueType, eval.PatternValueType, eval.GlobValueType: - isAnApprover, err := isAnApprover(event, ctx, rule, field, value.Value) + case eval.ScalarValueType, eval.PatternValueType, eval.GlobValueType, eval.RangeValueType: + isAnApprover, approverValueType, approverValue, err := isAnApprover(event, ctx, rule, fieldCap, value.Type, value.Value) if err != nil { return nil, err } - if isAnApprover { - filterValues = filterValues.Merge(FilterValue{Field: field, Value: value.Value, 
Type: value.Type, Mode: fieldCap.FilterMode}) + filterValue := FilterValue{Field: field, Value: approverValue, Type: approverValueType, Mode: fieldCap.FilterMode} + if !fieldCap.Validate(filterValue) { + continue LOOP + } + filterValues = filterValues.Merge(filterValue) } case eval.BitmaskValueType: bitmasks = append(bitmasks, value.Value.(int)) @@ -128,17 +253,21 @@ func getApprovers(rules []*Rule, event eval.Event, fieldCaps FieldCapabilities) } for _, bitmask := range bitmaskCombinations(bitmasks) { - isAnApprover, err := isAnApprover(event, ctx, rule, field, bitmask) + isAnApprover, _, _, err := isAnApprover(event, ctx, rule, fieldCap, eval.BitmaskValueType, bitmask) if err != nil { return nil, err } if isAnApprover { - filterValues = filterValues.Merge(FilterValue{Field: field, Value: bitmask, Type: eval.BitmaskValueType}) + filterValue := FilterValue{Field: field, Value: bitmask, Type: eval.BitmaskValueType} + if !fieldCap.Validate(filterValue) { + continue LOOP + } + filterValues = filterValues.Merge(filterValue) } } - if len(filterValues) == 0 || !fieldCaps.Validate(filterValues) { + if len(filterValues) == 0 { continue } diff --git a/pkg/security/secl/rules/capabilities.go b/pkg/security/secl/rules/capabilities.go index fc9bea2221953..98948f6f00476 100644 --- a/pkg/security/secl/rules/capabilities.go +++ b/pkg/security/secl/rules/capabilities.go @@ -25,45 +25,40 @@ type FieldCapabilities []FieldCapability // FieldCapability represents a field and the type of its value (scalar, pattern, bitmask, ...) 
type FieldCapability struct { - Field eval.Field - TypeBitmask eval.FieldValueType - ValidateFnc func(FilterValue) bool - FilterWeight int - FilterMode FilterMode + Field eval.Field + TypeBitmask eval.FieldValueType + ValidateFnc func(FilterValue) bool + FilterWeight int + FilterMode FilterMode + RangeFilterValue *RangeFilterValue + HandleNotApproverValue func(valueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) } -// GetFields returns all the fields of FieldCapabilities -func (fcs FieldCapabilities) GetFields() []eval.Field { - var fields []eval.Field - for _, fc := range fcs { - fields = append(fields, fc.Field) - } - return fields +// TypeMatches return if a type is supported +func (fc FieldCapability) TypeMatches(kind eval.FieldValueType) bool { + return kind&fc.TypeBitmask != 0 } -// Validate ensures all the filter values match field capabilities -func (fcs FieldCapabilities) Validate(filterValues FilterValues) bool { - for _, filterValue := range filterValues { - var found bool - for _, fc := range fcs { - if filterValue.Field != fc.Field || filterValue.Type&fc.TypeBitmask == 0 { - continue - } - - if fc.ValidateFnc != nil { - if !fc.ValidateFnc(filterValue) { - continue - } - } - - found = true - break - } +// Validate validate the filter value +func (fc FieldCapability) Validate(filterValue FilterValue) bool { + if filterValue.Field != fc.Field || !fc.TypeMatches(filterValue.Type) { + return false + } - if !found { + if fc.ValidateFnc != nil { + if !fc.ValidateFnc(filterValue) { return false } } return true } + +// GetFields returns all the fields of FieldCapabilities +func (fcs FieldCapabilities) GetFields() []eval.Field { + var fields []eval.Field + for _, fc := range fcs { + fields = append(fields, fc.Field) + } + return fields +} diff --git a/pkg/security/secl/rules/errors.go b/pkg/security/secl/rules/errors.go index 8440b00eed692..8cbbc71a06ddc 100644 --- a/pkg/security/secl/rules/errors.go +++ 
b/pkg/security/secl/rules/errors.go @@ -186,3 +186,13 @@ type ErrFieldNotAvailable struct { func (e *ErrFieldNotAvailable) Error() string { return fmt.Sprintf("field `%s` not available for event type `%v`, available for `%v`", e.Field, e.EventType, e.RestrictedTo) } + +// ErrActionNotAvailable is returned when an action is not available +type ErrActionNotAvailable struct { + ActionName string + EventType eval.EventType +} + +func (e *ErrActionNotAvailable) Error() string { + return fmt.Sprintf("action `%s` not available for event type `%v`", e.ActionName, e.EventType) +} diff --git a/pkg/security/secl/rules/filter_values.go b/pkg/security/secl/rules/filter_values.go index 7c0e6fd7a2fa7..58defbc125a3a 100644 --- a/pkg/security/secl/rules/filter_values.go +++ b/pkg/security/secl/rules/filter_values.go @@ -34,3 +34,9 @@ LOOP: return fv } + +// RangeFilterValue defines a range value +type RangeFilterValue struct { + Min int + Max int +} diff --git a/pkg/security/secl/rules/model.go b/pkg/security/secl/rules/model.go index 30783522b272c..ef3a25685acd6 100644 --- a/pkg/security/secl/rules/model.go +++ b/pkg/security/secl/rules/model.go @@ -60,20 +60,20 @@ type RuleID = string // RuleDefinition holds the definition of a rule type RuleDefinition struct { - ID RuleID `yaml:"id" json:"id"` - Version string `yaml:"version" json:"version,omitempty"` + ID RuleID `yaml:"id,omitempty" json:"id"` + Version string `yaml:"version,omitempty" json:"version,omitempty"` Expression string `yaml:"expression" json:"expression,omitempty"` - Description string `yaml:"description" json:"description,omitempty"` - Tags map[string]string `yaml:"tags" json:"tags,omitempty"` - AgentVersionConstraint string `yaml:"agent_version" json:"agent_version,omitempty"` - Filters []string `yaml:"filters" json:"filters,omitempty"` - Disabled bool `yaml:"disabled" json:"disabled,omitempty"` - Combine CombinePolicy `yaml:"combine" json:"combine,omitempty" jsonschema:"enum=override"` - OverrideOptions 
OverrideOptions `yaml:"override_options" json:"override_options,omitempty"` - Actions []*ActionDefinition `yaml:"actions" json:"actions,omitempty"` - Every time.Duration `yaml:"every" json:"every,omitempty"` - Silent bool `yaml:"silent" json:"silent,omitempty"` - GroupID string `yaml:"group_id" json:"group_id,omitempty"` + Description string `yaml:"description,omitempty" json:"description,omitempty"` + Tags map[string]string `yaml:"tags,omitempty" json:"tags,omitempty"` + AgentVersionConstraint string `yaml:"agent_version,omitempty" json:"agent_version,omitempty"` + Filters []string `yaml:"filters,omitempty" json:"filters,omitempty"` + Disabled bool `yaml:"disabled,omitempty" json:"disabled,omitempty"` + Combine CombinePolicy `yaml:"combine,omitempty" json:"combine,omitempty" jsonschema:"enum=override"` + OverrideOptions OverrideOptions `yaml:"override_options,omitempty" json:"override_options,omitempty"` + Actions []*ActionDefinition `yaml:"actions,omitempty" json:"actions,omitempty"` + Every time.Duration `yaml:"every,omitempty" json:"every,omitempty"` + Silent bool `yaml:"silent,omitempty" json:"silent,omitempty"` + GroupID string `yaml:"group_id,omitempty" json:"group_id,omitempty"` } // GetTag returns the tag value associated with a tag key @@ -89,8 +89,14 @@ func (rd *RuleDefinition) GetTag(tagKey string) (string, bool) { type ActionName = string const ( - // KillAction name a the kill action + // KillAction name of the kill action KillAction ActionName = "kill" + // SetAction name of the set action + SetAction ActionName = "set" + // CoreDumpAction name of the core dump action + CoreDumpAction ActionName = "coredump" + // HashAction name of the hash action + HashAction ActionName = "hash" ) // ActionDefinition describes a rule action section @@ -102,6 +108,22 @@ type ActionDefinition struct { Hash *HashDefinition `yaml:"hash" json:"hash,omitempty" jsonschema:"oneof_required=HashAction"` } +// Name returns the name of the action +func (a *ActionDefinition) 
Name() ActionName { + switch { + case a.Set != nil: + return SetAction + case a.Kill != nil: + return KillAction + case a.CoreDump != nil: + return CoreDumpAction + case a.Hash != nil: + return HashAction + default: + return "" + } +} + // Scope describes the scope variables type Scope string @@ -148,8 +170,8 @@ type HookPointArg struct { // PolicyDef represents a policy file definition type PolicyDef struct { - Version string `yaml:"version" json:"version"` - Macros []*MacroDefinition `yaml:"macros" json:"macros,omitempty"` + Version string `yaml:"version,omitempty" json:"version"` + Macros []*MacroDefinition `yaml:"macros,omitempty" json:"macros,omitempty"` Rules []*RuleDefinition `yaml:"rules" json:"rules"` - OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks" json:"hooks,omitempty"` + OnDemandHookPoints []OnDemandHookPoint `yaml:"hooks,omitempty" json:"hooks,omitempty"` } diff --git a/pkg/security/secl/rules/policy_loader.go b/pkg/security/secl/rules/policy_loader.go index 07d2b19c44c63..42aea077c9397 100644 --- a/pkg/security/secl/rules/policy_loader.go +++ b/pkg/security/secl/rules/policy_loader.go @@ -15,9 +15,10 @@ import ( ) const ( - PolicyProviderTypeDir = "file" // PolicyProviderTypeDir defines directory policy provider - PolicyProviderTypeRC = "remote-config" // PolicyProviderTypeRC defines RC policy provider - PolicyProviderTypeBundled = "bundled" // PolicyProviderTypeBundled defines the bundled policy provider + PolicyProviderTypeDir = "file" // PolicyProviderTypeDir defines directory policy provider + PolicyProviderTypeRC = "remote-config" // PolicyProviderTypeRC defines RC policy provider + PolicyProviderTypeBundled = "bundled" // PolicyProviderTypeBundled defines the bundled policy provider + PolicyProviderTypeWorkload = "workload" // PolicyProviderTypeWorkload defines the workload policy provider ) var ( diff --git a/pkg/security/secl/rules/ruleset.go b/pkg/security/secl/rules/ruleset.go index 8b53f15a27992..21188d1bd221b 100644 --- 
a/pkg/security/secl/rules/ruleset.go +++ b/pkg/security/secl/rules/ruleset.go @@ -284,6 +284,13 @@ func GetRuleEventType(rule *eval.Rule) (eval.EventType, error) { return eventType, nil } +func (rs *RuleSet) isActionAvailable(eventType eval.EventType, action *Action) bool { + if action.Def.Name() == HashAction && eventType != model.FileOpenEventType.String() && eventType != model.ExecEventType.String() { + return false + } + return true +} + // AddRule creates the rule evaluator and adds it to the bucket of its events func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule) (*eval.Rule, error) { if pRule.Def.Disabled { @@ -339,6 +346,10 @@ func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule } for _, action := range rule.PolicyRule.Actions { + if !rs.isActionAvailable(eventType, action) { + return nil, &ErrRuleLoad{Rule: pRule, Err: &ErrActionNotAvailable{ActionName: action.Def.Name(), EventType: eventType}} + } + // compile action filter if action.Def.Filter != nil { if err := action.CompileFilter(parsingContext, rs.model, rs.evalOpts); err != nil { diff --git a/pkg/security/secl/rules/ruleset_test.go b/pkg/security/secl/rules/ruleset_test.go index 520d2ca426cdc..8221163152fd4 100644 --- a/pkg/security/secl/rules/ruleset_test.go +++ b/pkg/security/secl/rules/ruleset_test.go @@ -9,6 +9,7 @@ package rules import ( + "math" "reflect" "strings" "syscall" @@ -419,7 +420,7 @@ func TestRuleSetApprovers9(t *testing.T) { caps := FieldCapabilities{ { Field: "open.flags", - TypeBitmask: eval.ScalarValueType, + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, }, { Field: "open.file.path", @@ -745,6 +746,323 @@ func TestRuleSetApprovers20(t *testing.T) { } } +func TestRuleSetApprovers21(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 || open.flags&2 > 0`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) 
+ + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers22(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 || open.flags > 0`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 0 { + t.Fatalf("shouldn't get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers23(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 && open.flags > 0`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers24(t *testing.T) { + exprs := []string{ + `open.flags&1 > 0 && open.flags > 2`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 0 { + t.Fatalf("shouldn't get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers25(t *testing.T) { + exprs := []string{ + `open.flags&(O_CREAT|O_WRONLY) == (O_CREAT|O_WRONLY)`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) 
+ + caps := FieldCapabilities{ + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetApprovers26(t *testing.T) { + exprs := []string{ + `open.file.path in [~"/proc/*/mem"] && open.file.path not in ["/proc/${process.pid}/mem", "/proc/self/mem"]`, + } + + rs := newRuleSet() + AddTestRuleExpr(t, rs, exprs...) + + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.GlobValueType, + FilterWeight: 3, + }, + } + + approvers, _ := rs.GetEventTypeApprovers("open", caps) + if len(approvers) != 1 { + t.Fatalf("should get approvers: %v", approvers) + } +} + +func TestRuleSetAUDApprovers(t *testing.T) { + caps := FieldCapabilities{ + { + Field: "open.file.path", + TypeBitmask: eval.ScalarValueType | eval.PatternValueType, + }, + { + Field: "open.flags", + TypeBitmask: eval.ScalarValueType | eval.BitmaskValueType, + }, + { + Field: "process.auid", + TypeBitmask: eval.ScalarValueType | eval.RangeValueType, + FilterMode: ApproverOnlyMode, + RangeFilterValue: &RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, + FilterWeight: 10, + HandleNotApproverValue: func(fieldValueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) { + if fieldValueType != eval.ScalarValueType { + return fieldValueType, value, false + } + + if i, ok := value.(int); ok && uint32(i) == model.AuditUIDUnset { + return eval.RangeValueType, RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, true + } + + return fieldValueType, value, false + }, + }, + } + + getApprovers := func(exprs []string) Approvers { + handler := &testHandler{ + filters: make(map[string]testFieldValues), + } + + rs := newRuleSet() + rs.AddListener(handler) + + AddTestRuleExpr(t, rs, exprs...) 
+ + approvers, _ := rs.GetEventTypeApprovers("open", caps) + return approvers + } + + t.Run("equal", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid == 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 || approvers["process.auid"][0].Value != 1000 { + t.Fatalf("should get an approver`: %v", approvers) + } + }) + + t.Run("not-equal", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid != 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 0 { + t.Fatalf("shouldn't get an approver`: %v", approvers) + } + }) + + t.Run("not-equal-unset", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid != AUDIT_AUID_UNSET`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != model.AuditUIDUnset-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("lesser-equal-than", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid <= 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != 1000 { + t.Fatalf("unexpected range") + } + }) + + t.Run("lesser-than", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid < 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != 999 { + t.Fatalf("unexpected range") + } + }) + + t.Run("greater-equal-than", 
func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid >= 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1000 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("greater-than", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid > 1000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1001 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("greater-equal-than-and", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid >= 1000 && process.auid != AUDIT_AUID_UNSET`, + `open.flags&O_WRONLY > 0`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 2 || len(approvers["process.auid"]) != 2 && len(approvers["open.flags"]) != 1 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1000 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + }) + + t.Run("lesser-and-greater", func(t *testing.T) { + exprs := []string{ + `open.file.path != "" && process.auid > 1000 && process.auid < 4000`, + } + + approvers := getApprovers(exprs) + if len(approvers) != 1 || len(approvers["process.auid"]) != 2 { + t.Fatalf("should get an approver`: %v", approvers) + } + + rge := approvers["process.auid"][0].Value.(RangeFilterValue) + if rge.Min != 1001 || rge.Max != math.MaxUint32-1 { + t.Fatalf("unexpected range") + } + + rge = approvers["process.auid"][1].Value.(RangeFilterValue) + if rge.Min != 0 || rge.Max != 3999 { + t.Fatalf("unexpected 
range") + } + }) +} + func TestGetRuleEventType(t *testing.T) { t.Run("ok", func(t *testing.T) { rule := eval.NewRule("aaa", `open.file.name == "test"`, &eval.Opts{}) diff --git a/pkg/security/security_profile/activity_tree/activity_tree.go b/pkg/security/security_profile/activity_tree/activity_tree.go index b4d24ae1b2b9b..a32685df87a5f 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree.go +++ b/pkg/security/security_profile/activity_tree/activity_tree.go @@ -854,3 +854,66 @@ func (at *ActivityTree) EvictImageTag(imageTag string) { } at.ProcessNodes = newProcessNodes } + +func (at *ActivityTree) visitProcessNode(processNode *ProcessNode, cb func(processNode *ProcessNode)) { + for _, pn := range processNode.Children { + at.visitProcessNode(pn, cb) + } + cb(processNode) +} + +func (at *ActivityTree) visitFileNode(fileNode *FileNode, cb func(fileNode *FileNode)) { + if len(fileNode.Children) == 0 { + cb(fileNode) + return + } + + for _, file := range fileNode.Children { + at.visitFileNode(file, cb) + } +} + +func (at *ActivityTree) visit(cb func(processNode *ProcessNode)) { + for _, pn := range at.ProcessNodes { + at.visitProcessNode(pn, cb) + } +} + +// ExtractPaths returns the exec / fim, exec / parent paths +func (at *ActivityTree) ExtractPaths() (map[string][]string, map[string][]string) { + + fimPathsperExecPath := make(map[string][]string) + execAndParent := make(map[string][]string) + + at.visit(func(processNode *ProcessNode) { + var fimPaths []string + for _, file := range processNode.Files { + at.visitFileNode(file, func(fileNode *FileNode) { + path := fileNode.File.PathnameStr + if len(path) > 0 { + if strings.Contains(path, "*") { + fimPaths = append(fimPaths, `~"`+path+`"`) + } else { + fimPaths = append(fimPaths, `"`+path+`"`) + } + } + }) + } + execPath := fmt.Sprintf("\"%s\"", processNode.Process.FileEvent.PathnameStr) + paths, ok := fimPathsperExecPath[execPath] + if ok { + fimPathsperExecPath[execPath] = append(paths, fimPaths...) 
+ } else { + fimPathsperExecPath[execPath] = fimPaths + } + p, pp := extractExecAndParent(processNode) + tmp, ok := execAndParent[p] + if ok { + execAndParent[p] = append(tmp, pp) + } else { + execAndParent[p] = []string{pp} + } + }) + + return fimPathsperExecPath, execAndParent +} diff --git a/pkg/security/security_profile/activity_tree/process_node.go b/pkg/security/security_profile/activity_tree/process_node.go index 450774141ee70..a51951c3a7dfd 100644 --- a/pkg/security/security_profile/activity_tree/process_node.go +++ b/pkg/security/security_profile/activity_tree/process_node.go @@ -168,7 +168,6 @@ func (pn *ProcessNode) scrubAndReleaseArgsEnvs(resolver *sprocess.EBPFResolver) // Matches return true if the process fields used to generate the dump are identical with the provided model.Process func (pn *ProcessNode) Matches(entry *model.Process, matchArgs bool, normalize bool) bool { if normalize { - // should convert /var/run/1234/runc.pid + /var/run/54321/runc.pic into /var/run/*/runc.pid match := utils.PathPatternMatch(pn.Process.FileEvent.PathnameStr, entry.FileEvent.PathnameStr, utils.PathPatternMatchOpts{WildcardLimit: 3, PrefixNodeRequired: 1, SuffixNodeRequired: 1, NodeSizeLimit: 8}) if !match { return false diff --git a/pkg/security/security_profile/activity_tree/process_node_snapshot.go b/pkg/security/security_profile/activity_tree/process_node_snapshot.go index e4fce84b39828..cb057962f25c5 100644 --- a/pkg/security/security_profile/activity_tree/process_node_snapshot.go +++ b/pkg/security/security_profile/activity_tree/process_node_snapshot.go @@ -50,8 +50,6 @@ func (pn *ProcessNode) snapshot(owner Owner, stats *Stats, newEvent func() *mode // snapshot files if owner.IsEventTypeValid(model.FileOpenEventType) { pn.snapshotAllFiles(p, stats, newEvent, reducer) - } else { - pn.snapshotMemoryMappedFiles(p, stats, newEvent, reducer) } // snapshot sockets @@ -109,16 +107,6 @@ func (pn *ProcessNode) snapshotAllFiles(p *process.Process, stats *Stats, newEve 
pn.addFiles(files, stats, newEvent, reducer) } -func (pn *ProcessNode) snapshotMemoryMappedFiles(p *process.Process, stats *Stats, newEvent func() *model.Event, reducer *PathsReducer) { - // list the mmaped files of the process - mmapedFiles, err := getMemoryMappedFiles(p.Pid, pn.Process.FileEvent.PathnameStr) - if err != nil { - seclog.Warnf("error while listing memory maps (pid: %v): %s", p.Pid, err) - } - - pn.addFiles(mmapedFiles, stats, newEvent, reducer) -} - func (pn *ProcessNode) addFiles(files []string, stats *Stats, newEvent func() *model.Event, reducer *PathsReducer) { // list the mmaped files of the process slices.Sort(files) diff --git a/pkg/security/security_profile/activity_tree/utils.go b/pkg/security/security_profile/activity_tree/utils.go index d5ecac9fce466..ecf0c4e8bddfe 100644 --- a/pkg/security/security_profile/activity_tree/utils.go +++ b/pkg/security/security_profile/activity_tree/utils.go @@ -40,3 +40,15 @@ func AppendIfNotPresent(slice []string, toAdd string) ([]string, bool) { } return slice, false } + +func extractExecAndParent(processNode *ProcessNode) (string, string) { + processPath := processNode.Process.FileEvent.PathnameStr + + var parentPath string + if parent := processNode.GetParent(); parent != nil { + if parentNode, ok := parent.(*ProcessNode); ok { + parentPath = parentNode.Process.FileEvent.PathnameStr + } + } + return processPath, parentPath +} diff --git a/pkg/security/security_profile/dump/activity_dump.go b/pkg/security/security_profile/dump/activity_dump.go index 6984b01ecac2c..5ff1dfd8a5026 100644 --- a/pkg/security/security_profile/dump/activity_dump.go +++ b/pkg/security/security_profile/dump/activity_dump.go @@ -31,6 +31,7 @@ import ( cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" stime "github.com/DataDog/datadog-agent/pkg/security/resolvers/time" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" 
"github.com/DataDog/datadog-agent/pkg/security/seclog" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" mtdt "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree/metadata" @@ -102,6 +103,17 @@ type ActivityDumpHeader struct { DNSNames *utils.StringKeys `json:"dns_names"` } +// SECLRuleOpts defines SECL rules options +type SECLRuleOpts struct { + EnableKill bool + AllowList bool + Lineage bool + ImageName string + ImageTag string + Service string + FIM bool +} + // NewActivityDumpLoadConfig returns a new instance of ActivityDumpLoadConfig func NewActivityDumpLoadConfig(evt []model.EventType, timeout time.Duration, waitListTimeout time.Duration, rate int, start time.Time, resolver *stime.Resolver) *model.ActivityDumpLoadConfig { adlc := &model.ActivityDumpLoadConfig{ @@ -884,3 +896,130 @@ func (ad *ActivityDump) DecodeJSON(reader io.Reader) error { return nil } + +// LoadActivityDumpsFromFiles load ads from a file or a directory +func LoadActivityDumpsFromFiles(path string) ([]*ActivityDump, error) { + + fileInfo, err := os.Stat(path) + if os.IsNotExist(err) { + return nil, fmt.Errorf("the path %s does not exist", path) + } else if err != nil { + return nil, fmt.Errorf("error checking the path: %s", err) + } + + if fileInfo.IsDir() { + dir, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open directory: %s", err) + } + defer dir.Close() + + // Read the directory contents + files, err := dir.Readdir(-1) + if err != nil { + return nil, fmt.Errorf("failed to read directory: %s", err) + } + + ads := []*ActivityDump{} + for _, file := range files { + ad, err := fileToActivityDump(filepath.Join(path, file.Name())) + if err != nil { + return nil, fmt.Errorf("couldn't decode secdump: %w", err) + } + ads = append(ads, ad) + } + return ads, nil + + } + // It's a file otherwise + ad, err := fileToActivityDump(path) + if err != nil { + return nil, fmt.Errorf("couldn't decode secdump: 
%w", err) + } + return []*ActivityDump{ad}, nil + +} + +func fileToActivityDump(filepath string) (*ActivityDump, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, fmt.Errorf("couldn't open secdump: %w", err) + } + defer f.Close() + ad := NewEmptyActivityDump(nil) + err = ad.DecodeProtobuf(f) + if err != nil { + return nil, fmt.Errorf("couldn't decode secdump: %w", err) + } + return ad, nil +} + +// GenerateRules return rules from activity dumps +func GenerateRules(ads []*ActivityDump, opts SECLRuleOpts) []*rules.RuleDefinition { + + var ruleDefs []*rules.RuleDefinition + groupID := getGroupID(opts) + + var execs []string + lineage := make(map[string][]string) + fims := make(map[string][]string) + + for _, ad := range ads { + fimPathsperExecPath, execAndParent := ad.ActivityTree.ExtractPaths() + + for execPath, fimPaths := range fimPathsperExecPath { + execs = append(execs, execPath) + tmp, ok := fims[execPath] + if ok { + fims[execPath] = append(tmp, fimPaths...) + } else { + fims[execPath] = fimPaths + } + } + + for p, pp := range execAndParent { + tmp, ok := lineage[p] + if ok { + lineage[p] = append(tmp, pp...) 
+ } else { + lineage[p] = pp + } + } + } + + // add exec rules + if opts.AllowList { + ruleDefs = append(ruleDefs, addRule(fmt.Sprintf(`exec.file.path not in [%s]`, strings.Join(execs, ", ")), groupID, opts)) + } + + // add fim rules + if opts.FIM { + for exec, paths := range fims { + if len(paths) != 0 { + ruleDefs = append(ruleDefs, addRule(fmt.Sprintf(`open.file.path not in [%s] && process.file.path == %s`, strings.Join(paths, ", "), exec), groupID, opts)) + } + } + } + + // add lineage + if opts.Lineage { + var ( + parentOp = "==" + ctxOp = "!=" + ) + var expressions []string + for p, pp := range lineage { + for _, ppp := range pp { + if ppp == "" { + parentOp = "!=" + ctxOp = "==" + } + expressions = append(expressions, fmt.Sprintf(`exec.file.path == "%s" && process.parent.file.path %s "%s" && process.parent.container.id %s ""`, p, parentOp, ppp, ctxOp)) + } + } + + ruleDefs = append(ruleDefs, addRule(fmt.Sprintf(`!(%s)`, strings.Join(expressions, " || ")), groupID, opts)) + + } + return ruleDefs +} diff --git a/pkg/security/security_profile/dump/local_storage.go b/pkg/security/security_profile/dump/local_storage.go index a2d3af607eb08..1c1a29a6a6b86 100644 --- a/pkg/security/security_profile/dump/local_storage.go +++ b/pkg/security/security_profile/dump/local_storage.go @@ -193,26 +193,32 @@ func (storage *ActivityDumpLocalStorage) Persist(request config.StorageRequest, // create output file _ = os.MkdirAll(request.OutputDirectory, 0400) - file, err := os.Create(outputPath) + tmpOutputPath := outputPath + ".tmp" + + file, err := os.Create(tmpOutputPath) if err != nil { - return fmt.Errorf("couldn't persist to file [%s]: %w", outputPath, err) + return fmt.Errorf("couldn't persist to file [%s]: %w", tmpOutputPath, err) } defer file.Close() // set output file access mode - if err = os.Chmod(outputPath, 0400); err != nil { - return fmt.Errorf("couldn't set mod for file [%s]: %w", outputPath, err) + if err := os.Chmod(tmpOutputPath, 0400); err != nil { + return 
fmt.Errorf("couldn't set mod for file [%s]: %w", tmpOutputPath, err) } // persist data to disk - if _, err = file.Write(raw.Bytes()); err != nil { - return fmt.Errorf("couldn't write to file [%s]: %w", outputPath, err) + if _, err := file.Write(raw.Bytes()); err != nil { + return fmt.Errorf("couldn't write to file [%s]: %w", tmpOutputPath, err) } - if err = file.Close(); err != nil { + if err := file.Close(); err != nil { return fmt.Errorf("could not close file [%s]: %w", file.Name(), err) } + if err := os.Rename(tmpOutputPath, outputPath); err != nil { + return fmt.Errorf("could not rename file from [%s] to [%s]: %w", tmpOutputPath, outputPath, err) + } + seclog.Infof("[%s] file for [%s] written at: [%s]", request.Format, ad.GetSelectorStr(), outputPath) return nil } diff --git a/pkg/security/security_profile/dump/manager.go b/pkg/security/security_profile/dump/manager.go index e98d13dcad655..d7c9506c16762 100644 --- a/pkg/security/security_profile/dump/manager.go +++ b/pkg/security/security_profile/dump/manager.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" manager "github.com/DataDog/ebpf-manager" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" @@ -338,7 +338,7 @@ func (adm *ActivityDumpManager) prepareContextTags() { adm.contextTags = append(adm.contextTags, fmt.Sprintf("host:%s", adm.hostname)) // merge tags from config - for _, tag := range configUtils.GetConfiguredTags(coreconfig.Datadog(), true) { + for _, tag := range configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true) { if strings.HasPrefix(tag, "host") { continue } @@ -432,7 +432,7 @@ func (adm *ActivityDumpManager) HandleCGroupTracingEvent(event *model.CgroupTrac defer adm.Unlock() if 
len(event.ContainerContext.ContainerID) == 0 { - seclog.Errorf("received a cgroup tracing event with an empty container ID") + seclog.Warnf("received a cgroup tracing event with an empty container ID") return } @@ -690,7 +690,7 @@ func (pces *processCacheEntrySearcher) SearchTracedProcessCacheEntry(entry *mode // compute the list of ancestors, we need to start inserting them from the root ancestors := []*model.ProcessCacheEntry{entry} parent := pces.getNextAncestorBinaryOrArgv0(&entry.ProcessContext) - for parent != nil && pces.ad.MatchesSelector(entry) { + for parent != nil && pces.ad.MatchesSelector(parent) { ancestors = append(ancestors, parent) parent = pces.getNextAncestorBinaryOrArgv0(&parent.ProcessContext) } @@ -700,8 +700,8 @@ func (pces *processCacheEntrySearcher) SearchTracedProcessCacheEntry(entry *mode for _, parent = range ancestors { node, _, err := pces.ad.ActivityTree.CreateProcessNode(parent, imageTag, activity_tree.Snapshot, false, pces.adm.resolvers) if err != nil { - // if one of the parents wasn't inserted, leave now - break + // try to insert the other ancestors as we might find a valid root node in the lineage + continue } if node != nil { // This step is important to populate the kernel space "traced_pids" map. 
Some traced event types use this diff --git a/pkg/security/security_profile/dump/remote_storage.go b/pkg/security/security_profile/dump/remote_storage.go index 9c9c8958eb303..b952f77e3124c 100644 --- a/pkg/security/security_profile/dump/remote_storage.go +++ b/pkg/security/security_profile/dump/remote_storage.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" - pkgconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/seclog" @@ -51,7 +51,7 @@ func NewActivityDumpRemoteStorage() (ActivityDumpStorage, error) { storage := &ActivityDumpRemoteStorage{ tooLargeEntities: make(map[tooLargeEntityStatsEntry]*atomic.Uint64), client: &http.Client{ - Transport: ddhttputil.CreateHTTPTransport(pkgconfig.Datadog()), + Transport: ddhttputil.CreateHTTPTransport(pkgconfigsetup.Datadog()), }, } diff --git a/pkg/security/security_profile/dump/utils.go b/pkg/security/security_profile/dump/utils.go new file mode 100644 index 0000000000000..52d8c1e7d8433 --- /dev/null +++ b/pkg/security/security_profile/dump/utils.go @@ -0,0 +1,73 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +package dump + +import ( + "fmt" + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/google/uuid" +) + +func addRule(expression string, groupID string, opts SECLRuleOpts) *rules.RuleDefinition { + ruleDef := &rules.RuleDefinition{ + Expression: expression, + GroupID: groupID, + ID: strings.Replace(uuid.New().String(), "-", "_", -1), + } + applyContext(ruleDef, opts) + if opts.EnableKill { + applyKillAction(ruleDef) + } + return ruleDef +} + +func applyKillAction(ruleDef *rules.RuleDefinition) { + ruleDef.Actions = []*rules.ActionDefinition{ + { + Kill: &rules.KillDefinition{ + Signal: "SIGKILL", + }, + }, + } +} + +func applyContext(ruleDef *rules.RuleDefinition, opts SECLRuleOpts) { + var context []string + + if opts.ImageName != "" { + context = append(context, fmt.Sprintf(`container.tags == "image_name:%s"`, opts.ImageName)) + } + if opts.ImageTag != "" { + context = append(context, fmt.Sprintf(`container.tags == "image_tag:%s"`, opts.ImageTag)) + } + if opts.Service != "" { + context = append(context, fmt.Sprintf(`process.envp == "DD_SERVICE=%s"`, opts.Service)) + } + + if len(context) == 0 { + return + } + + ruleDef.Expression = fmt.Sprintf("%s && (%s)", ruleDef.Expression, strings.Join(context, " && ")) +} + +func getGroupID(opts SECLRuleOpts) string { + groupID := "rules_" + if len(opts.ImageName) != 0 { + groupID = fmt.Sprintf("%s%s", groupID, opts.ImageName) + } else { + groupID = fmt.Sprintf("%s%s", groupID, strings.Replace(uuid.New().String(), "-", "_", -1)) // It should be unique so that we can target it at least, but ImageName should be always set + } + if len(opts.ImageTag) != 0 { + groupID = fmt.Sprintf("%s_%s", groupID, opts.ImageTag) + } + + return groupID +} diff --git a/pkg/security/security_profile/profile/manager.go b/pkg/security/security_profile/profile/manager.go index 79ff0079f94cc..c256ae847b488 100644 --- a/pkg/security/security_profile/profile/manager.go +++ 
b/pkg/security/security_profile/profile/manager.go @@ -28,7 +28,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/proto/api" - "github.com/DataDog/datadog-agent/pkg/security/rconfig" "github.com/DataDog/datadog-agent/pkg/security/resolvers" "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" @@ -182,15 +181,6 @@ func NewSecurityProfileManager(config *config.Config, statsdClient statsd.Client m.onLocalStorageCleanup = dirProvider.OnLocalStorageCleanup } - // instantiate remote-config provider - if config.RuntimeSecurity.RemoteConfigurationEnabled && config.RuntimeSecurity.SecurityProfileRCEnabled { - rcProvider, err := rconfig.NewRCProfileProvider() - if err != nil { - return nil, fmt.Errorf("couldn't instantiate a new security profile remote-config provider: %w", err) - } - m.providers = append(m.providers, rcProvider) - } - m.initMetricsMap() // register the manager to the provider(s) @@ -686,6 +676,7 @@ func (m *SecurityProfileManager) persistProfile(profile *SecurityProfile) error filename := profile.Metadata.Name + ".profile" outputPath := path.Join(m.config.RuntimeSecurity.SecurityProfileDir, filename) + tmpOutputPath := outputPath + ".tmp" // create output directory and output file, truncate existing file if a profile already exists err = os.MkdirAll(m.config.RuntimeSecurity.SecurityProfileDir, 0400) @@ -693,20 +684,24 @@ func (m *SecurityProfileManager) persistProfile(profile *SecurityProfile) error return fmt.Errorf("couldn't ensure directory [%s] exists: %w", m.config.RuntimeSecurity.SecurityProfileDir, err) } - file, err := os.OpenFile(outputPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0400) + file, err := os.OpenFile(tmpOutputPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0400) if err != nil { return fmt.Errorf("couldn't persist profile to file 
[%s]: %w", outputPath, err) } defer file.Close() - if _, err = file.Write(raw); err != nil { - return fmt.Errorf("couldn't write profile to file [%s]: %w", outputPath, err) + if _, err := file.Write(raw); err != nil { + return fmt.Errorf("couldn't write profile to file [%s]: %w", tmpOutputPath, err) } - if err = file.Close(); err != nil { + if err := file.Close(); err != nil { return fmt.Errorf("error trying to close profile file [%s]: %w", file.Name(), err) } + if err := os.Rename(tmpOutputPath, outputPath); err != nil { + return fmt.Errorf("couldn't rename profile file [%s] to [%s]: %w", tmpOutputPath, outputPath, err) + } + seclog.Infof("[profile] file for %s written at: [%s]", profile.selector.String(), outputPath) return nil diff --git a/pkg/security/serializers/helpers.go b/pkg/security/serializers/helpers.go index d4528f46f03ed..7ec853525ab30 100644 --- a/pkg/security/serializers/helpers.go +++ b/pkg/security/serializers/helpers.go @@ -7,17 +7,9 @@ package serializers // nolint: deadcode, unused -func getUint64Pointer(i *uint64) *uint64 { - if *i == 0 { +func createNumPointer[I uint32 | uint64](i I) *I { + if i == 0 { return nil } - return i -} - -// nolint: deadcode, unused -func getUint32Pointer(i *uint32) *uint32 { - if *i == 0 { - return nil - } - return i + return &i } diff --git a/pkg/config/consts.go b/pkg/security/serializers/patcher.go similarity index 59% rename from pkg/config/consts.go rename to pkg/security/serializers/patcher.go index 29200ba9132b4..80c5a8dce8a7f 100644 --- a/pkg/config/consts.go +++ b/pkg/security/serializers/patcher.go @@ -3,9 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package config +package serializers -const ( - // ClusterIDCacheKey is the key name for the orchestrator cluster id in the agent in-mem cache - ClusterIDCacheKey = "orchestratorClusterID" -) +// EventSerializerPatcher defines an event serializer patcher +type EventSerializerPatcher interface { + PatchEvent(*EventSerializer) +} diff --git a/pkg/security/serializers/serializers_linux.go b/pkg/security/serializers/serializers_linux.go index 2c855e9c34625..15e9b7bb701c9 100644 --- a/pkg/security/serializers/serializers_linux.go +++ b/pkg/security/serializers/serializers_linux.go @@ -658,15 +658,14 @@ func newFileSerializer(fe *model.FileEvent, e *model.Event, forceInode ...uint64 inode = forceInode[0] } - mode := uint32(fe.FileFields.Mode) fs := &FileSerializer{ Path: e.FieldHandlers.ResolveFilePath(e, fe), PathResolutionError: fe.GetPathResolutionError(), Name: e.FieldHandlers.ResolveFileBasename(e, fe), - Inode: getUint64Pointer(&inode), - MountID: getUint32Pointer(&fe.MountID), + Inode: createNumPointer(inode), + MountID: createNumPointer(fe.MountID), Filesystem: e.FieldHandlers.ResolveFileFilesystem(e, fe), - Mode: getUint32Pointer(&mode), // only used by open events + Mode: createNumPointer(uint32(fe.FileFields.Mode)), // only used by open events UID: int64(fe.UID), GID: int64(fe.GID), User: e.FieldHandlers.ResolveFileFieldsUser(e, &fe.FileFields), @@ -727,7 +726,7 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer Pid: ps.Pid, Tid: ps.Tid, - PPid: getUint32Pointer(&ps.PPid), + PPid: createNumPointer(ps.PPid), Comm: ps.Comm, TTY: ps.TTYName, Executable: newFileSerializer(&ps.FileEvent, e), @@ -946,13 +945,13 @@ func newMountEventSerializer(e *model.Event) *MountEventSerializer { mountSerializer := &MountEventSerializer{ MountPoint: &FileSerializer{ Path: e.GetMountRootPath(), - MountID: &e.Mount.ParentPathKey.MountID, - Inode: &e.Mount.ParentPathKey.Inode, + MountID: createNumPointer(e.Mount.ParentPathKey.MountID), + Inode: 
createNumPointer(e.Mount.ParentPathKey.Inode), }, Root: &FileSerializer{ Path: e.GetMountMountpointPath(), - MountID: &e.Mount.RootPathKey.MountID, - Inode: &e.Mount.RootPathKey.Inode, + MountID: createNumPointer(e.Mount.RootPathKey.MountID), + Inode: createNumPointer(e.Mount.RootPathKey.Inode), }, MountID: e.Mount.MountID, ParentMountID: e.Mount.ParentPathKey.MountID, @@ -1174,7 +1173,7 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { s.FileEventSerializer = &FileEventSerializer{ FileSerializer: *newFileSerializer(&event.Chmod.File, event), Destination: &FileSerializer{ - Mode: &event.Chmod.Mode, + Mode: createNumPointer(event.Chmod.Mode), }, } s.EventContextSerializer.Outcome = serializeOutcome(event.Chmod.Retval) @@ -1210,7 +1209,7 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { if event.Open.Flags&syscall.O_CREAT > 0 { s.FileEventSerializer.Destination = &FileSerializer{ - Mode: &event.Open.Mode, + Mode: createNumPointer(event.Open.Mode), } } @@ -1223,7 +1222,7 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { s.FileEventSerializer = &FileEventSerializer{ FileSerializer: *newFileSerializer(&event.Mkdir.File, event), Destination: &FileSerializer{ - Mode: &event.Mkdir.Mode, + Mode: createNumPointer(event.Mkdir.Mode), }, } s.EventContextSerializer.Outcome = serializeOutcome(event.Mkdir.Retval) diff --git a/pkg/security/serializers/serializers_windows.go b/pkg/security/serializers/serializers_windows.go index f296ee9d8a440..b40b90b610284 100644 --- a/pkg/security/serializers/serializers_windows.go +++ b/pkg/security/serializers/serializers_windows.go @@ -152,7 +152,7 @@ func newProcessSerializer(ps *model.Process, e *model.Event) *ProcessSerializer ExitTime: utils.NewEasyjsonTimeIfNotZero(ps.ExitTime), Pid: ps.Pid, - PPid: getUint32Pointer(&ps.PPid), + PPid: createNumPointer(ps.PPid), Executable: newFileSerializer(&ps.FileEvent, e), CmdLine: 
e.FieldHandlers.ResolveProcessCmdLineScrubbed(e, ps), User: e.FieldHandlers.ResolveUser(e, ps), diff --git a/pkg/security/tests/action_test.go b/pkg/security/tests/action_test.go index 5214d6961619c..89cb86ed4c956 100644 --- a/pkg/security/tests/action_test.go +++ b/pkg/security/tests/action_test.go @@ -124,8 +124,8 @@ func TestActionKill(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigusr2")]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGUSR2')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -176,11 +176,11 @@ func TestActionKill(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGKILL')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } - if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -325,11 +325,11 @@ func TestActionKillRuleSpecific(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil { - t.Error(err) 
+ if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGKILL')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } - if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -346,7 +346,7 @@ func TestActionKillRuleSpecific(t *testing.T) { jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions`); err == nil { - t.Error(errors.New("unexpected rule action")) + t.Errorf("unexpected rule action %s", string(msg.Data)) } }) @@ -455,11 +455,11 @@ func TestActionKillDisarm(t *testing.T) { validateMessageSchema(t, string(msg.Data)) jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { - if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal="sigkill")]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.signal == 'SIGKILL')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } - if _, err = jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at=~/20.*/)]`); err != nil { - t.Error(err) + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.exited_at =~ /20.*/)]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) } }) @@ -489,7 +489,7 @@ func TestActionKillDisarm(t *testing.T) { jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { if _, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions`); err == nil { - 
t.Error(errors.New("unexpected rule action")) + t.Errorf("unexpected rule action %s", string(msg.Data)) } }) @@ -563,3 +563,137 @@ func TestActionKillDisarm(t *testing.T) { }) }) } + +func TestActionHash(t *testing.T) { + SkipIfNotAvailable(t) + + if testEnvironment == DockerEnvironment { + t.Skip("skipping in docker, not sharing the same pid ns and doesn't have a container ID") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "hash_action", + Expression: `open.file.path == "{{.Root}}/test-hash-action" && open.flags&O_CREAT == O_CREAT`, + Actions: []*rules.ActionDefinition{ + { + Hash: &rules.HashDefinition{}, + }, + }, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + testFile, _, err := test.Path("test-hash-action") + if err != nil { + t.Fatal(err) + } + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + done := make(chan bool, 10) + + t.Run("open-process-exit", func(t *testing.T) { + test.msgSender.flush() + test.WaitSignal(t, func() error { + go func() { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := runSyscallTesterFunc( + timeoutCtx, t, syscallTester, + "slow-write", "2", testFile, "aaa", + ); err != nil { + t.Error(err) + } + + done <- true + }() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "hash_action") + }) + + err = retry.Do(func() error { + msg := test.msgSender.getMsg("hash_action") + if msg == nil { + return errors.New("not found") + } + validateMessageSchema(t, string(msg.Data)) + + jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.state == 'Done')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := 
jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.trigger == 'process_exit')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.file.hashes`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + }) + + return nil + }, retry.Delay(500*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + <-done + }) + + t.Run("open-timeout", func(t *testing.T) { + test.msgSender.flush() + test.WaitSignal(t, func() error { + go func() { + timeoutCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if err := runSyscallTesterFunc( + timeoutCtx, t, syscallTester, + // exceed the file hasher timeout, use fork to force an event that will trigger the flush mechanism + "slow-write", "2", testFile, "aaa", ";", "sleep", "4", ";", "fork", ";", "sleep", "1", + ); err != nil { + t.Error(err) + } + + done <- true + }() + return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "hash_action") + }) + + err = retry.Do(func() error { + msg := test.msgSender.getMsg("hash_action") + if msg == nil { + return errors.New("not found") + } + validateMessageSchema(t, string(msg.Data)) + + jsonPathValidation(test, msg.Data, func(testMod *testModule, obj interface{}) { + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.state == 'Done')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.agent.rule_actions[?(@.trigger == 'timeout')]`); err != nil || el == nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + if el, err := jsonpath.JsonPathLookup(obj, `$.file.hashes`); err != nil || el == 
nil || len(el.([]interface{})) == 0 { + t.Errorf("element not found %s => %v", string(msg.Data), err) + } + }) + + return nil + }, retry.Delay(500*time.Millisecond), retry.Attempts(30), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + <-done + }) + +} diff --git a/pkg/security/tests/activity_dumps_loadcontroller_test.go b/pkg/security/tests/activity_dumps_loadcontroller_test.go index af6d7c106d8ca..42a3c5e8fa763 100644 --- a/pkg/security/tests/activity_dumps_loadcontroller_test.go +++ b/pkg/security/tests/activity_dumps_loadcontroller_test.go @@ -12,7 +12,6 @@ import ( "fmt" "os" "path/filepath" - "slices" "testing" "time" @@ -187,15 +186,7 @@ func TestActivityDumpsLoadControllerEventTypes(t *testing.T) { if err != nil { t.Fatal(err) } - activeTypes := make([]model.EventType, len(activeEventTypes)) - for i, eventType := range activeEventTypes { - activeTypes[i] = eventType - } - if !slices.Contains(activeTypes, model.FileOpenEventType) { - // add open to the list of expected event types because mmaped files being present in the dump - activeTypes = append(activeTypes, model.FileOpenEventType) - } - if !isEventTypesStringSlicesEqual(activeTypes, presentEventTypes) { + if !isEventTypesStringSlicesEqual(activeEventTypes, presentEventTypes) { t.Fatalf("Dump's event types don't match: expected[%v] vs observed[%v]", activeEventTypes, presentEventTypes) } dump = nextDump diff --git a/pkg/security/tests/filters_test.go b/pkg/security/tests/filters_test.go index 74486100f4470..b8beffc286020 100644 --- a/pkg/security/tests/filters_test.go +++ b/pkg/security/tests/filters_test.go @@ -73,7 +73,6 @@ func TestFilterOpenBasenameApprover(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.Remove(testFile1) if err := waitForOpenProbeEvent(test, func() error { @@ -86,12 +85,29 @@ func TestFilterOpenBasenameApprover(t *testing.T) { t.Fatal(err) } - defer os.Remove(testFile2) - testFile2, _, err = test.Path("test-oba-2") if err != nil { t.Fatal(err) } + defer 
os.Remove(testFile2) + + // stats + /*err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:basename"); count != 1 { + return errors.New("expected metrics not found") + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count != 1 { + return errors.New("expected metrics not found") + } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + // reset stats + test.statsdClient.Flush()*/ if err := waitForOpenProbeEvent(test, func() error { fd2, err = openTestFile(test, testFile2, syscall.O_CREAT) @@ -103,6 +119,10 @@ func TestFilterOpenBasenameApprover(t *testing.T) { t.Fatal("shouldn't get an event") } + /*if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count > 0 { + t.Fatal("expected metrics not found") + }*/ + if err := waitForOpenProbeEvent(test, func() error { fd2, err = openTestFile(test, testFile2, syscall.O_RDONLY) if err != nil { @@ -112,6 +132,10 @@ func TestFilterOpenBasenameApprover(t *testing.T) { }, testFile2); err == nil { t.Fatal("shouldn't get an event") } + + /*if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count > 0 { + t.Fatal("expected metrics not found") + }*/ } func TestFilterOpenLeafDiscarder(t *testing.T) { @@ -121,7 +145,7 @@ func TestFilterOpenLeafDiscarder(t *testing.T) { // a discarder is created). 
rule := &rules.RuleDefinition{ ID: "test_rule", - Expression: `open.filename =~ "{{.Root}}/no-approver-*" && open.flags & (O_CREAT | O_SYNC) > 0`, + Expression: `open.file.path =~ "{{.Root}}/no-approver-*" && open.flags & (O_CREAT | O_SYNC) > 0`, } test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) @@ -326,6 +350,212 @@ func TestFilterOpenGrandParentDiscarder(t *testing.T) { testFilterOpenParentDiscarder(t, "grandparent", "parent") } +func runAUIDTest(t *testing.T, test *testModule, goSyscallTester, auidOK, auidKO string) { + var cmdWrapper *dockerCmdWrapper + cmdWrapper, err := test.StartADocker() + if err != nil { + t.Fatal(err) + } + defer cmdWrapper.stop() + + if err := waitForOpenProbeEvent(test, func() error { + args := []string{ + "-login-uid-open-test", + "-login-uid-open-path", "/tmp/test-auid", + "-login-uid-open-uid", auidOK, + } + + cmd := cmdWrapper.Command(goSyscallTester, args, []string{}) + return cmd.Run() + }, "/tmp/test-auid"); err != nil { + t.Fatal(err) + } + + // stats + /*err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:auid"); count != 1 { + return errors.New("expected metrics not found") + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count != 1 { + return errors.New("expected metrics not found") + } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + // reset stats + test.statsdClient.Flush() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count == 1 { + t.Fatal("expected metrics not found") + }*/ + + if err := waitForOpenProbeEvent(test, func() error { + args := []string{ + "-login-uid-open-test", + "-login-uid-open-path", "/tmp/test-auid", + "-login-uid-open-uid", auidKO, + } + + cmd := cmdWrapper.Command(goSyscallTester, args, []string{}) + return cmd.Run() + }, 
"/tmp/test-auid"); err == nil { + t.Fatal("shouldn't get an event") + } + + /*if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count > 0 { + t.Fatal("expected metrics not found") + }*/ +} + +func TestFilterOpenAUIDEqualApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_equal_1", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid == 1005`, + }, + { + ID: "test_equal_2", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid == 0`, + }, + { + ID: "test_equal_3", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid == AUDIT_AUID_UNSET`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + t.Run("equal-fixed-value", func(t *testing.T) { + runAUIDTest(t, test, goSyscallTester, "1005", "6000") + }) + + t.Run("equal-zero", func(t *testing.T) { + runAUIDTest(t, test, goSyscallTester, "0", "6000") + }) + + t.Run("equal-unset", func(t *testing.T) { + runAUIDTest(t, test, goSyscallTester, "-1", "6000") + }) +} + +func TestFilterOpenAUIDLesserApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_range_lesser", + 
Expression: `open.file.path =~ "/tmp/test-auid" && process.auid < 500`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + runAUIDTest(t, test, goSyscallTester, "450", "605") +} + +func TestFilterOpenAUIDGreaterApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_range_greater", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid > 1000`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + runAUIDTest(t, test, goSyscallTester, "1500", "605") +} + +func TestFilterOpenAUIDNotEqualUnsetApprover(t *testing.T) { + SkipIfNotAvailable(t) + + // skip test that are about to be run on docker (to avoid trying spawning docker in docker) + if testEnvironment == DockerEnvironment { + t.Skip("Skip test spawning docker containers on docker") + } + if _, err := whichNonFatal("docker"); err != nil { + t.Skip("Skip test where docker is unavailable") + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_equal_4", + Expression: `open.file.path =~ "/tmp/test-auid" && process.auid != AUDIT_AUID_UNSET`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + goSyscallTester, err := loadSyscallTester(t, test, "syscall_go_tester") + if err != nil { + t.Fatal(err) + } + + runAUIDTest(t, test, 
goSyscallTester, "6000", "-1") +} + func TestFilterDiscarderMask(t *testing.T) { SkipIfNotAvailable(t) @@ -413,7 +643,7 @@ func TestFilterRenameFileDiscarder(t *testing.T) { // a discarder is created). rule := &rules.RuleDefinition{ ID: "test_rule", - Expression: `open.filename =~ "{{.Root}}/a*/test"`, + Expression: `open.file.path =~ "{{.Root}}/a*/test"`, } test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) @@ -499,7 +729,7 @@ func TestFilterRenameFolderDiscarder(t *testing.T) { // a discarder is created). rule := &rules.RuleDefinition{ ID: "test_rule", - Expression: `open.filename =~ "{{.Root}}/a*/test"`, + Expression: `open.file.path =~ "{{.Root}}/a*/test"`, } test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}) @@ -608,6 +838,24 @@ func TestFilterOpenFlagsApprover(t *testing.T) { t.Fatal(err) } + // stats + /*err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:flag"); count == 0 { + return errors.New("expected approver metrics not found") + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count == 0 { + return errors.New("expected metrics not found") + } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err) + + // reset stats + test.statsdClient.Flush()*/ + if err := waitForOpenProbeEvent(test, func() error { fd, err = openTestFile(test, testFile, syscall.O_SYNC) if err != nil { @@ -618,6 +866,20 @@ func TestFilterOpenFlagsApprover(t *testing.T) { t.Fatal(err) } + /*err = retry.Do(func() error { + test.eventMonitor.SendStats() + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":approver_type:flag"); count == 0 { + return errors.New("expected metrics not found") + } + + if count := test.statsdClient.Get(metrics.MetricEventApproved + ":event_type:open"); count == 0 { + return errors.New("expected metrics not found") 
+ } + + return nil + }, retry.Delay(1*time.Second), retry.Attempts(5), retry.DelayType(retry.FixedDelay)) + assert.NoError(t, err)*/ + if err := waitForOpenProbeEvent(test, func() error { fd, err = openTestFile(test, testFile, syscall.O_RDONLY) if err != nil { @@ -796,3 +1058,52 @@ func TestFilterBpfCmd(t *testing.T) { } } } + +func TestFilterRuntimeDiscarded(t *testing.T) { + SkipIfNotAvailable(t) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_open", + Expression: `open.file.path == "{{.Root}}/no-event"`, + }, + { + ID: "test_unlink", + Expression: `unlink.file.path == "{{.Root}}/no-event"`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs, withStaticOpts(testOpts{discardRuntime: true})) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + testFile, _, err := test.Path("no-event") + if err != nil { + t.Fatal(err) + } + defer os.Remove(testFile) + + // test that we don't receive event from the kernel + if err := waitForOpenProbeEvent(test, func() error { + fd, err := openTestFile(test, testFile, syscall.O_CREAT) + if err != nil { + return err + } + return syscall.Close(fd) + }, testFile); err == nil { + t.Fatal("shouldn't get an event") + } + + // unlink aren't discarded kernel side (inode invalidation) but should be discarded before the rule evaluation + err = test.GetSignal(t, func() error { + return os.Remove(testFile) + }, func(event *model.Event, r *rules.Rule) { + t.Errorf("shouldn't get an event") + }) + + if err == nil { + t.Errorf("shouldn't get an event") + } +} diff --git a/pkg/security/tests/login_uid_test.go b/pkg/security/tests/login_uid_test.go index a71c9e75c526f..5e1e65c9d784f 100644 --- a/pkg/security/tests/login_uid_test.go +++ b/pkg/security/tests/login_uid_test.go @@ -57,8 +57,13 @@ func TestLoginUID(t *testing.T) { t.Run("login-uid-open-test", func(t *testing.T) { test.WaitSignal(t, func() error { - // run the syscall drift test command - cmd := dockerInstance.Command(goSyscallTester, 
[]string{"-login-uid-open-test"}, []string{}) + args := []string{ + "-login-uid-open-test", + "-login-uid-open-path", "/tmp/test-auid", + "-login-uid-open-uid", "1005", + } + + cmd := dockerInstance.Command(goSyscallTester, args, []string{}) _, err = cmd.CombinedOutput() return err }, func(event *model.Event, rule *rules.Rule) { @@ -69,8 +74,12 @@ func TestLoginUID(t *testing.T) { t.Run("login-uid-exec-test", func(t *testing.T) { test.WaitSignal(t, func() error { - // run the syscall drift test command - cmd := dockerInstance.Command(goSyscallTester, []string{"-login-uid-exec-test", "-login-uid-exec-path", goSyscallTester}, []string{}) + args := []string{ + "-login-uid-exec-test", + "-login-uid-exec-path", goSyscallTester, + } + + cmd := dockerInstance.Command(goSyscallTester, args, []string{}) out, err := cmd.CombinedOutput() if err != nil { t.Logf("command exited with an error: out:'%s' err:'%v'", string(out), err) diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go index 7de491a609fb9..faa73ac357fea 100644 --- a/pkg/security/tests/main_linux.go +++ b/pkg/security/tests/main_linux.go @@ -59,6 +59,7 @@ func SkipIfNotAvailable(t *testing.T) { "~TestOpen", "~TestUnlink", "~TestActionKill", + "~TestActionHash", "~TestRmdir", "~TestRename", "~TestMkdir", diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index dcd853df9e258..1b4b9f85399b4 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -26,10 +26,9 @@ import ( "time" "unsafe" + "gopkg.in/yaml.v3" + spconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - "github.com/DataDog/datadog-agent/comp/core/telemetry" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "go.uber.org/fx" emconfig "github.com/DataDog/datadog-agent/pkg/eventmonitor/config" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -643,7 +642,7 @@ func assertFieldStringArrayIndexedOneOf(tb 
*testing.T, e *model.Event, field str return false } -func setTestPolicy(dir string, onDemandProbes []rules.OnDemandHookPoint, macros []*rules.MacroDefinition, rules []*rules.RuleDefinition) (string, error) { +func setTestPolicy(dir string, onDemandProbes []rules.OnDemandHookPoint, macroDefs []*rules.MacroDefinition, ruleDefs []*rules.RuleDefinition) (string, error) { testPolicyFile, err := os.Create(path.Join(dir, "secagent-policy.policy")) if err != nil { return "", err @@ -654,21 +653,19 @@ func setTestPolicy(dir string, onDemandProbes []rules.OnDemandHookPoint, macros return err } - tmpl, err := template.New("test-policy").Parse(testPolicy) - if err != nil { - return "", fail(err) + policyDef := &rules.PolicyDef{ + Version: "1.2.3", + Macros: macroDefs, + Rules: ruleDefs, + OnDemandHookPoints: onDemandProbes, } - buffer := new(bytes.Buffer) - if err := tmpl.Execute(buffer, map[string]interface{}{ - "OnDemandProbes": onDemandProbes, - "Rules": rules, - "Macros": macros, - }); err != nil { + testPolicy, err := yaml.Marshal(policyDef) + if err != nil { return "", fail(err) } - _, err = testPolicyFile.Write(buffer.Bytes()) + _, err = testPolicyFile.Write(testPolicy) if err != nil { return "", fail(err) } @@ -888,10 +885,3 @@ func jsonPathValidation(testMod *testModule, data []byte, fnc func(testMod *test fnc(testMod, obj) } - -type testModuleFxDeps struct { - fx.In - - Telemetry telemetry.Component - WMeta workloadmeta.Component -} diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index da0946b338ce6..4785123c286b1 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -34,9 +34,6 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" - "github.com/DataDog/datadog-agent/comp/core" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" ebpftelemetry 
"github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/eventmonitor" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -57,7 +54,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/serializers" "github.com/DataDog/datadog-agent/pkg/security/tests/statsdclient" "github.com/DataDog/datadog-agent/pkg/security/utils" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" utilkernel "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -208,61 +204,6 @@ runtime_security_config: period: {{.EnforcementDisarmerExecutablePeriod}} ` -const testPolicy = `--- -version: 1.2.3 - -hooks: -{{range $OnDemandProbe := .OnDemandProbes}} - - name: {{$OnDemandProbe.Name}} - syscall: {{$OnDemandProbe.IsSyscall}} - args: -{{range $Arg := $OnDemandProbe.Args}} - - n: {{$Arg.N}} - kind: {{$Arg.Kind}} -{{end}} -{{end}} - -macros: -{{range $Macro := .Macros}} - - id: {{$Macro.ID}} - expression: >- - {{$Macro.Expression}} -{{end}} - -rules: -{{range $Rule := .Rules}} - - id: {{$Rule.ID}} - version: {{$Rule.Version}} - expression: >- - {{$Rule.Expression}} - disabled: {{$Rule.Disabled}} - tags: -{{- range $Tag, $Val := .Tags}} - {{$Tag}}: {{$Val}} -{{- end}} - actions: -{{- range $Action := .Actions}} -{{- if $Action.Set}} - - set: - name: {{$Action.Set.Name}} - {{- if $Action.Set.Value}} - value: {{$Action.Set.Value}} - {{- else if $Action.Set.Field}} - field: {{$Action.Set.Field}} - {{- end}} - scope: {{$Action.Set.Scope}} - append: {{$Action.Set.Append}} -{{- end}} -{{- if $Action.Kill}} - - kill: - {{- if $Action.Kill.Signal}} - signal: {{$Action.Kill.Signal}} - {{- end}} -{{- end}} -{{- end}} -{{end}} -` - const ( // HostEnvironment for the Host environment HostEnvironment = "host" @@ -807,12 +748,11 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman emopts.ProbeOpts.TagsResolver = NewFakeResolverDifferentImageNames() } - fxDeps := 
fxutil.Test[testModuleFxDeps]( - t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry) + if opts.staticOpts.discardRuntime { + emopts.ProbeOpts.DontDiscardRuntime = false + } + + testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, nil) if err != nil { return nil, err } @@ -822,10 +762,13 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman if !opts.staticOpts.disableRuntimeSecurity { msgSender := newFakeMsgSender(testMod) - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod, MsgSender: msgSender}) + cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod, MsgSender: msgSender}) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } + // disable containers telemetry + cws.PrepareForFunctionalTests() + testMod.cws = cws testMod.ruleEngine = cws.GetRuleEngine() testMod.msgSender = msgSender diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go index 972437a8ca435..052350b7be636 100644 --- a/pkg/security/tests/module_tester_windows.go +++ b/pkg/security/tests/module_tester_windows.go @@ -18,9 +18,6 @@ import ( "github.com/hashicorp/go-multierror" - "github.com/DataDog/datadog-agent/comp/core" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/eventmonitor" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" @@ -31,7 +28,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" 
"github.com/DataDog/datadog-agent/pkg/security/tests/statsdclient" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -40,49 +36,6 @@ var ( testActivityDumpLoadControllerPeriod = time.Second * 10 ) -const testPolicy = `--- -version: 1.2.3 - -macros: -{{range $Macro := .Macros}} - - id: {{$Macro.ID}} - expression: >- - {{$Macro.Expression}} -{{end}} - -rules: -{{range $Rule := .Rules}} - - id: {{$Rule.ID}} - version: {{$Rule.Version}} - expression: >- - {{$Rule.Expression}} - tags: -{{- range $Tag, $Val := .Tags}} - {{$Tag}}: {{$Val}} -{{- end}} - actions: -{{- range $Action := .Actions}} -{{- if $Action.Set}} - - set: - name: {{$Action.Set.Name}} - {{- if $Action.Set.Value}} - value: {{$Action.Set.Value}} - {{- else if $Action.Set.Field}} - field: {{$Action.Set.Field}} - {{- end}} - scope: {{$Action.Set.Scope}} - append: {{$Action.Set.Append}} -{{- end}} -{{- if $Action.Kill}} - - kill: - {{- if $Action.Kill.Signal}} - signal: {{$Action.Kill.Signal}} - {{- end}} -{{- end}} -{{- end}} -{{end}} -` - const testConfig = `--- log_level: DEBUG @@ -279,12 +232,7 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] StatsdClient: statsdClient, }, } - fxDeps := fxutil.Test[testModuleFxDeps]( - t, - core.MockBundle(), - wmmock.MockModule(workloadmeta.NewParams()), - ) - testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, fxDeps.WMeta, fxDeps.Telemetry) + testMod.eventMonitor, err = eventmonitor.NewEventMonitor(emconfig, secconfig, emopts, nil) if err != nil { return nil, err } @@ -292,10 +240,12 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] var ruleSetloadedErr *multierror.Error if !opts.staticOpts.disableRuntimeSecurity { - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, fxDeps.WMeta, module.Opts{EventSender: testMod}) + cws, err := module.NewCWSConsumer(testMod.eventMonitor, 
secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod}) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } + cws.PrepareForFunctionalTests() + testMod.cws = cws testMod.ruleEngine = cws.GetRuleEngine() diff --git a/pkg/security/tests/rmdir_test.go b/pkg/security/tests/rmdir_test.go index fc309a20e447f..071395b56c01d 100644 --- a/pkg/security/tests/rmdir_test.go +++ b/pkg/security/tests/rmdir_test.go @@ -182,21 +182,26 @@ func TestRmdirInvalidate(t *testing.T) { } defer test.Close() - for i := 0; i != 5; i++ { - testFile, _, err := test.Path(fmt.Sprintf("test-rmdir-%d", i)) - if err != nil { - t.Fatal(err) - } + ifSyscallSupported("SYS_RMDIR", func(t *testing.T, syscallNB uintptr) { + for i := 0; i != 5; i++ { + testFile, testFilePtr, err := test.Path(fmt.Sprintf("test-rmdir-%d", i)) + if err != nil { + t.Fatal(err) + } - if err := syscall.Mkdir(testFile, 0777); err != nil { - t.Fatal(err) - } + if err := syscall.Mkdir(testFile, 0777); err != nil { + t.Fatal(err) + } - test.WaitSignal(t, func() error { - return syscall.Rmdir(testFile) - }, func(event *model.Event, rule *rules.Rule) { - assert.Equal(t, "rmdir", event.GetType(), "wrong event type") - assertFieldEqual(t, event, "rmdir.file.path", testFile) - }) - } + test.WaitSignal(t, func() error { + if _, _, errno := syscall.Syscall(syscallNB, uintptr(testFilePtr), 0, 0); errno != 0 { + return error(errno) + } + return nil + }, func(event *model.Event, rule *rules.Rule) { + assert.Equal(t, "rmdir", event.GetType(), "wrong event type") + assertFieldEqual(t, event, "rmdir.file.path", testFile) + }) + } + }) } diff --git a/pkg/security/tests/schemas/agent_context.json b/pkg/security/tests/schemas/agent_context.json index 93cef1abb240e..daf8233065056 100644 --- a/pkg/security/tests/schemas/agent_context.json +++ b/pkg/security/tests/schemas/agent_context.json @@ -33,6 +33,9 @@ "oneOf": [ { "$ref": "/schemas/kill.schema.json" + }, + { + "$ref": "/schemas/hash.schema.json" } ] } diff 
--git a/pkg/security/tests/schemas/hash.schema.json b/pkg/security/tests/schemas/hash.schema.json new file mode 100644 index 0000000000000..1415e736a96b8 --- /dev/null +++ b/pkg/security/tests/schemas/hash.schema.json @@ -0,0 +1,25 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "kill.json", + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "path": { + "type": "string" + }, + "state": { + "type": "string" + }, + "trigger": { + "type": "string" + } + }, + "required": [ + "type", + "path", + "state", + "trigger" + ] +} \ No newline at end of file diff --git a/pkg/security/tests/syscall_tester/c/syscall_tester.c b/pkg/security/tests/syscall_tester/c/syscall_tester.c index 948ff885ab198..1a1cc06e800dc 100644 --- a/pkg/security/tests/syscall_tester/c/syscall_tester.c +++ b/pkg/security/tests/syscall_tester/c/syscall_tester.c @@ -642,8 +642,8 @@ int test_sleep(int argc, char **argv) { if (duration <= 0) { fprintf(stderr, "Please specify at a valid sleep duration\n"); } - for (int i = 0; i < duration; i++) - sleep(1); + sleep(duration); + return EXIT_SUCCESS; } @@ -659,8 +659,28 @@ int test_slow_cat(int argc, char **argv) { if (duration <= 0) { fprintf(stderr, "Please specify at a valid sleep duration\n"); } - for (int i = 0; i < duration; i++) - sleep(1); + sleep(duration); + + close(fd); + + return EXIT_SUCCESS; +} + +int test_slow_write(int argc, char **argv) { + if (argc != 4) { + fprintf(stderr, "%s: Please pass a duration in seconds, a path, and a content.\n", __FUNCTION__); + return EXIT_FAILURE; + } + + int duration = atoi(argv[1]); + int fd = open(argv[2], O_CREAT|O_WRONLY); + + if (duration <= 0) { + fprintf(stderr, "Please specify at a valid sleep duration\n"); + } + sleep(duration); + + write(fd, argv[3], strlen(argv[3])); close(fd); @@ -793,7 +813,10 @@ int main(int argc, char **argv) { exit_code = test_new_netns_exec(sub_argc, sub_argv); } else if (strcmp(cmd, "slow-cat") == 0) { exit_code = 
test_slow_cat(sub_argc, sub_argv); - } else { + } else if (strcmp(cmd, "slow-write") == 0) { + exit_code = test_slow_write(sub_argc, sub_argv); + } + else { fprintf(stderr, "Unknown command `%s`\n", cmd); exit_code = EXIT_FAILURE; } diff --git a/pkg/security/tests/syscall_tester/go/syscall_go_tester.go b/pkg/security/tests/syscall_tester/go/syscall_go_tester.go index 454ade057c8d0..c8c72ff67831b 100644 --- a/pkg/security/tests/syscall_tester/go/syscall_go_tester.go +++ b/pkg/security/tests/syscall_tester/go/syscall_go_tester.go @@ -43,6 +43,8 @@ var ( userSessionOpenPath string syscallDriftTest bool loginUIDOpenTest bool + loginUIDOpenPath string + loginUIDOpenUID int loginUIDExecTest bool loginUIDExecPath string ) @@ -231,25 +233,22 @@ func setSelfLoginUID(uid int) error { } func RunLoginUIDOpenTest() error { - if err := setSelfLoginUID(1005); err != nil { - return err + if loginUIDOpenUID != -1 { + if err := setSelfLoginUID(loginUIDOpenUID); err != nil { + return err + } } - testAUIDPath := "/tmp/test-auid" - // open test file to trigger an event - f, err := os.OpenFile(testAUIDPath, os.O_RDWR|os.O_CREATE, 0755) + f, err := os.OpenFile(loginUIDOpenPath, os.O_RDWR|os.O_CREATE, 0755) if err != nil { return fmt.Errorf("couldn't create test-auid file: %v", err) } + defer os.Remove(loginUIDOpenPath) if err = f.Close(); err != nil { return fmt.Errorf("couldn't close test file: %v", err) } - - if err = os.Remove(testAUIDPath); err != nil { - return fmt.Errorf("failed to remove test-auid file: %v", err) - } return nil } @@ -279,6 +278,8 @@ func main() { flag.BoolVar(&runIMDSTest, "run-imds-test", false, "when set, binds an IMDS server locally and sends a query to it") flag.BoolVar(&syscallDriftTest, "syscall-drift-test", false, "when set, runs the syscall drift test") flag.BoolVar(&loginUIDOpenTest, "login-uid-open-test", false, "when set, runs the login_uid open test") + flag.StringVar(&loginUIDOpenPath, "login-uid-open-path", "", "file used for the login_uid open 
test") + flag.IntVar(&loginUIDOpenUID, "login-uid-open-uid", 0, "uid used for the login_uid open test") flag.BoolVar(&loginUIDExecTest, "login-uid-exec-test", false, "when set, runs the login_uid exec test") flag.StringVar(&loginUIDExecPath, "login-uid-exec-path", "", "path to the executable to run during the login_uid exec test") diff --git a/pkg/security/tests/testopts.go b/pkg/security/tests/testopts.go index 43ef3dc1bbebf..d8eb578131e45 100644 --- a/pkg/security/tests/testopts.go +++ b/pkg/security/tests/testopts.go @@ -71,6 +71,7 @@ type testOpts struct { enforcementDisarmerExecutableMaxAllowed int enforcementDisarmerExecutablePeriod time.Duration eventServerRetention time.Duration + discardRuntime bool } type dynamicTestOpts struct { @@ -153,5 +154,6 @@ func (to testOpts) Equal(opts testOpts) bool { to.enforcementDisarmerExecutableEnabled == opts.enforcementDisarmerExecutableEnabled && to.enforcementDisarmerExecutableMaxAllowed == opts.enforcementDisarmerExecutableMaxAllowed && to.enforcementDisarmerExecutablePeriod == opts.enforcementDisarmerExecutablePeriod && - to.eventServerRetention == opts.eventServerRetention + to.eventServerRetention == opts.eventServerRetention && + to.discardRuntime == opts.discardRuntime } diff --git a/pkg/security/utils/hostname.go b/pkg/security/utils/hostname.go index 39988f41e1be3..1968802bd42ed 100644 --- a/pkg/security/utils/hostname.go +++ b/pkg/security/utils/hostname.go @@ -13,7 +13,7 @@ import ( "github.com/avast/retry-go/v4" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -60,12 +60,12 @@ func getHostnameFromAgent(ctx context.Context) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := 
pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return err } - client, err := grpc.GetDDAgentClient(ctx, ipcAddress, config.GetIPCPort()) + client, err := grpc.GetDDAgentClient(ctx, ipcAddress, pkgconfigsetup.GetIPCPort()) if err != nil { return err } diff --git a/pkg/security/utils/path_linux.go b/pkg/security/utils/path_linux.go index 4fa2cf4ea0dbb..1ba722dad45e9 100644 --- a/pkg/security/utils/path_linux.go +++ b/pkg/security/utils/path_linux.go @@ -5,6 +5,15 @@ package utils +import ( + "fmt" + "regexp" + "slices" + "strings" + + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + // PathPatternMatchOpts PathPatternMatch options type PathPatternMatchOpts struct { WildcardLimit int // max number of wildcard in the pattern @@ -108,3 +117,163 @@ func PathPatternMatch(pattern string, path string, opts PathPatternMatchOpts) bo return false } + +// PathPatternBuilder pattern builder for files +func PathPatternBuilder(pattern string, path string, opts PathPatternMatchOpts) (string, bool) { + lenMax := len(pattern) + if l := len(path); l > lenMax { + lenMax = l + } + + var ( + i, j = 0, 0 + wildcardCount, nodeCount, suffixNode = 0, 0, 0 + offsetPattern, offsetPath, size = 0, 0, 0 + patternLen, pathLen = len(pattern), len(path) + wildcard bool + result = make([]byte, lenMax) + + computeNode = func() bool { + if wildcard { + wildcardCount++ + if wildcardCount > opts.WildcardLimit { + return false + } + if nodeCount < opts.PrefixNodeRequired { + return false + } + if opts.NodeSizeLimit != 0 && j-offsetPath < opts.NodeSizeLimit { + return false + } + + result[size], result[size+1] = '/', '*' + size += 2 + + offsetPattern, suffixNode = i, 0 + } else { + copy(result[size:], pattern[offsetPattern:i]) + size += i - offsetPattern + offsetPattern = i + suffixNode++ + } + + offsetPath = j + + if i > 0 { + nodeCount++ + } + return true + } + ) + + if patternLen > 0 && pattern[0] != '/' { + return "", false + } + + if pathLen > 0 && path[0] != '/' 
{ + return "", false + } + + for i < len(pattern) && j < len(path) { + pn, ph := pattern[i], path[j] + if pn == '/' && ph == '/' { + if !computeNode() { + return "", false + } + wildcard = false + + i++ + j++ + continue + } + + if pn != ph { + wildcard = true + } + if pn != '/' { + i++ + } + if ph != '/' { + j++ + } + } + + if patternLen != i || pathLen != j { + wildcard = true + } + + for i < patternLen { + if pattern[i] == '/' { + return "", false + } + i++ + } + + for j < pathLen { + if path[j] == '/' { + return "", false + } + j++ + } + + if !computeNode() { + return "", false + } + + if opts.SuffixNodeRequired == 0 || suffixNode >= opts.SuffixNodeRequired { + return string(result[:size]), true + } + + return "", false +} + +// BuildPatterns find and build patterns for the path in the ruleset +func BuildPatterns(ruleset []*rules.RuleDefinition) []*rules.RuleDefinition { + for _, rule := range ruleset { + findAndReplacePatterns(&rule.Expression) + } + return ruleset +} + +func findAndReplacePatterns(expression *string) { + + re := regexp.MustCompile(`\[(.*?)\]`) + matches := re.FindAllStringSubmatch(*expression, -1) + for _, match := range matches { + if len(match) > 1 { + arrayContent := match[1] + paths := replacePatterns(strings.Split(arrayContent, ",")) + // reconstruct the modified array as a string + modifiedArrayString := "[" + strings.Join(paths, ", ") + "]" + // replace the original array with the modified array in the input string + *expression = strings.Replace(*expression, match[0], modifiedArrayString, 1) + } + } + +} + +func replacePatterns(paths []string) []string { + var result []string + for _, pattern := range paths { + strippedPattern := strings.Trim(pattern, `~" `) + initalLength := len(result) + for _, path := range paths { + strippedPath := strings.Trim(path, `~" `) + if pattern == path { + continue + } + finalPath, ok := PathPatternBuilder(strippedPattern, strippedPath, PathPatternMatchOpts{WildcardLimit: 1}) + if ok { + finalPath = 
fmt.Sprintf("~\"%s\"", finalPath) + result = append(result, finalPath) + } + } + if len(result) == initalLength { + result = append(result, strings.Trim(pattern, ` `)) + } + } + // remove duplicates + slices.Sort(result) + result = slices.Compact(result) + return result +} diff --git a/pkg/security/utils/path_linux_test.go b/pkg/security/utils/path_linux_test.go index b4049c1e34099..a62b29c6ddae6 100644 --- a/pkg/security/utils/path_linux_test.go +++ b/pkg/security/utils/path_linux_test.go @@ -244,7 +244,278 @@ func TestPathPatternMatch(t *testing.T) { } } -func BenchmarkPathPatternBuilder(b *testing.B) { +func TestPathPatternBuilder(t *testing.T) { + tests := []struct { + Pattern string + Path string + Opts PathPatternMatchOpts + ExpectedResult bool + ExpectedPattern string + }{ + { + Pattern: "/etc/passwd", + Path: "/etc/passwd", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/etc/passwd", + }, + { + Pattern: "/bin/baz", + Path: "/bin/baz2", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/bin/*", + }, + { + Pattern: "/abc/12312/sad", + Path: "/abc/51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/sad/", + Path: "/abc/51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/sad/", + Path: "/abc/51231/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/sad", + Path: "/abc/51231/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312", + Path: "/abc/51231/sad", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312", + Path: "/abc/51231/sad/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + 
ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/", + Path: "/abc/51231/sad/", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/abc/12312/", + Path: "/abc/51231/sad", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/12312", + Path: "/51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/*", + }, + { + Pattern: "12312", + Path: "51231", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "", + Path: "", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/bin/*", + }, + { + Pattern: "/etc/http", + Path: "/etc/passwd", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/etc/*", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/54321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/12345/runc.pid", + Path: "/var/run/5432/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/12345/12345/runc.pid", + Path: "/var/run/54321/54321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/12345/12345/runc.pid", + Path: "/var/run/54321/54321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 2}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/*/runc.pid", + }, + { + Pattern: "/12345/12345/runc.pid", + Path: "/54321/12345/runc.pid", + Opts: 
PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/*/12345/runc.pid", + }, + { + Pattern: "/var/runc/12345", + Path: "/var/runc/54321", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/runc/*", + }, + { + Pattern: "/var/runc12345", + Path: "/var/runc54321", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/*", + }, + { + Pattern: "/var/run/12345/runc.pid", + Path: "/var/run/12/45/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/66/45/runc.pid", + Path: "/var/run/12345/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/12345/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/4321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/sdfgh/runc.pid", + Path: "/var/run/hgfds/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 3}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/4321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2, SuffixNodeRequired: 1}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/runc.pid", + }, + { + Pattern: "/var/run/1234/runc.pid", + Path: "/var/run/4321/runc.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2, SuffixNodeRequired: 2}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var", + Path: "/var", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2}, + 
ExpectedResult: true, + ExpectedPattern: "/var", + }, + { + Pattern: "/var", + Path: "/var", + Opts: PathPatternMatchOpts{WildcardLimit: 1, SuffixNodeRequired: 2}, + ExpectedResult: true, + ExpectedPattern: "/var", + }, + { + Pattern: "/var/run/1234/http.pid", + Path: "/var/run/4321/http.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, NodeSizeLimit: 10}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/var/run/1234/mysql.pid", + Path: "/var/run/4321/mysql.pid", + Opts: PathPatternMatchOpts{WildcardLimit: 1, NodeSizeLimit: 4}, + ExpectedResult: true, + ExpectedPattern: "/var/run/*/mysql.pid", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1, NodeSizeLimit: 6}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2}, + ExpectedResult: false, + ExpectedPattern: "", + }, + { + Pattern: "/bin/baz2", + Path: "/bin/baz", + Opts: PathPatternMatchOpts{WildcardLimit: 1, SuffixNodeRequired: 1}, + ExpectedResult: false, + ExpectedPattern: "", + }, + } + + for _, test := range tests { + t.Run("test", func(t *testing.T) { + p, r := PathPatternBuilder(test.Pattern, test.Path, test.Opts) + assert.Equal(t, test.ExpectedPattern, p, "%s vs %s", test.Pattern, test.Path) + assert.Equal(t, test.ExpectedResult, r, "%s vs %s", test.Pattern, test.Path) + }) + } +} + +func BenchmarkPathPatternMatch(b *testing.B) { b.Run("pattern", func(b *testing.B) { for i := 0; i < b.N; i++ { PathPatternMatch("/var/run/1234/runc.pid", "/var/run/54321/runc.pid", PathPatternMatchOpts{WildcardLimit: 1, PrefixNodeRequired: 2, SuffixNodeRequired: 2}) diff --git a/pkg/serializer/go.mod b/pkg/serializer/go.mod index bbe11b5b863be..03a98a35c816b 100644 --- a/pkg/serializer/go.mod +++ b/pkg/serializer/go.mod @@ -75,7 +75,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 
github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 github.com/gogo/protobuf v1.3.2 github.com/json-iterator/go v1.1.12 github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 diff --git a/pkg/serializer/go.sum b/pkg/serializer/go.sum index d952c59d01660..8628a262989a8 100644 --- a/pkg/serializer/go.sum +++ b/pkg/serializer/go.sum @@ -4,10 +4,10 @@ github.com/DataDog/agent-payload/v5 v5.0.114 h1:qg3jfzz2/lOFKbFOw2yM6RM8eyMs4HlE github.com/DataDog/agent-payload/v5 v5.0.114/go.mod h1:COngtbYYCncpIPiE5D93QlXDH/3VAKk10jDNwGHcMRE= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0 h1:J0IEqkrB8BjtuDHofR8Q3J+Z8829Ja1Mlix9cyG8wJI= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.14.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0 h1:QHx6B/VUx3rZQqrQNZI5BfypbhhGSRzCz05viyJEQmM= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.14.0/go.mod h1:q4c7zbmdnIdSJNZuBsveTk5ZeRkSkS2g6b8zzFF1mE4= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.4 
h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/pkg/serializer/internal/metrics/origin_mapping.go b/pkg/serializer/internal/metrics/origin_mapping.go index c6d21f843d807..8e362c78c99ad 100644 --- a/pkg/serializer/internal/metrics/origin_mapping.go +++ b/pkg/serializer/internal/metrics/origin_mapping.go @@ -18,6 +18,8 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { return 10 case metrics.MetricSourceJmxCustom, metrics.MetricSourceActivemq, + metrics.MetricSourceAnyscale, + metrics.MetricSourceAppgateSDP, metrics.MetricSourceCassandra, metrics.MetricSourceConfluentPlatform, metrics.MetricSourceHazelcast, @@ -41,6 +43,7 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceNtp, metrics.MetricSourceSystemd, metrics.MetricSourceHelm, + metrics.MetricSourceKubeflow, metrics.MetricSourceKubernetesAPIServer, metrics.MetricSourceKubernetesStateCore, metrics.MetricSourceOrchestrator, @@ -215,13 +218,13 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceKubeAPIserverMetrics, metrics.MetricSourceKubeControllerManager, metrics.MetricSourceKubeDNS, - metrics.MetricSourceKubeflow, metrics.MetricSourceKubeMetricsServer, metrics.MetricSourceKubeProxy, metrics.MetricSourceKubeScheduler, metrics.MetricSourceKubelet, metrics.MetricSourceKubernetesState, metrics.MetricSourceKyototycoon, + metrics.MetricSourceKyverno, metrics.MetricSourceLighttpd, metrics.MetricSourceLinkerd, metrics.MetricSourceLinuxProcExtras, @@ -262,6 +265,7 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceScylla, metrics.MetricSourceSilk, metrics.MetricSourceSinglestore, + metrics.MetricSourceSlurm, metrics.MetricSourceSnowflake, metrics.MetricSourceSpark, metrics.MetricSourceSqlserver, @@ -274,6 
+278,7 @@ func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { metrics.MetricSourceTCPCheck, metrics.MetricSourceTeamcity, metrics.MetricSourceTeradata, + metrics.MetricSourceTibcoEMS, metrics.MetricSourceTLS, metrics.MetricSourceTokumx, metrics.MetricSourceTrafficServer, @@ -851,8 +856,18 @@ func metricSourceToOriginService(ms metrics.MetricSource) int32 { return 412 case metrics.MetricSourceAwsNeuron: return 413 + case metrics.MetricSourceAnyscale: + return 414 + case metrics.MetricSourceAppgateSDP: + return 415 case metrics.MetricSourceKubeflow: return 416 + case metrics.MetricSourceSlurm: + return 417 + case metrics.MetricSourceKyverno: + return 418 + case metrics.MetricSourceTibcoEMS: + return 419 default: return 0 } diff --git a/pkg/serverless/apikey/api_key.go b/pkg/serverless/apikey/api_key.go index f5a969dac1c7e..883123e9db92e 100644 --- a/pkg/serverless/apikey/api_key.go +++ b/pkg/serverless/apikey/api_key.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/kms" "github.com/aws/aws-sdk-go-v2/service/secretsmanager" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" datadogHttp "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -95,7 +95,7 @@ func readAPIKeyFromKMS(cipherText string) (string, error) { cfg, err := awsconfig.LoadDefaultConfig( context.TODO(), awsconfig.WithHTTPClient(&http.Client{ - Transport: datadogHttp.CreateHTTPTransport(config.Datadog()), + Transport: datadogHttp.CreateHTTPTransport(pkgconfigsetup.Datadog()), }), ) if err != nil { @@ -125,7 +125,7 @@ func readAPIKeyFromSecretsManager(arn string) (string, error) { cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithHTTPClient(&http.Client{ - Transport: datadogHttp.CreateHTTPTransport(config.Datadog()), + Transport: datadogHttp.CreateHTTPTransport(pkgconfigsetup.Datadog()), }), awsconfig.WithRegion(region), ) diff --git 
a/pkg/serverless/apikey/env.go b/pkg/serverless/apikey/env.go index a8c08a4ffb714..a14c5a19c780c 100644 --- a/pkg/serverless/apikey/env.go +++ b/pkg/serverless/apikey/env.go @@ -9,7 +9,7 @@ import ( "os" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -87,7 +87,7 @@ func HandleEnv() error { // Validate that an API key has been set, either by DD_API_KEY or read from KMS or Secrets Manager // --------------------------- - if !config.Datadog().IsSet("api_key") { + if !pkgconfigsetup.Datadog().IsSet("api_key") { // we're not reporting the error to AWS because we don't want the function // execution to be stopped. TODO(remy): discuss with AWS if there is way // of reporting non-critical init errors. diff --git a/pkg/serverless/daemon/routes.go b/pkg/serverless/daemon/routes.go index ff6bc20a68c8d..733b4a49b050c 100644 --- a/pkg/serverless/daemon/routes.go +++ b/pkg/serverless/daemon/routes.go @@ -78,6 +78,9 @@ func (s *StartInvocation) ServeHTTP(w http.ResponseWriter, r *http.Request) { log.Debug("a context has been found, sending the context to the tracer") w.Header().Set(invocationlifecycle.TraceIDHeader, fmt.Sprintf("%v", s.daemon.InvocationProcessor.GetExecutionInfo().TraceID)) w.Header().Set(invocationlifecycle.SamplingPriorityHeader, fmt.Sprintf("%v", s.daemon.InvocationProcessor.GetExecutionInfo().SamplingPriority)) + if s.daemon.InvocationProcessor.GetExecutionInfo().TraceIDUpper64Hex != "" { + w.Header().Set(invocationlifecycle.TraceTagsHeader, fmt.Sprintf("%s=%s", invocationlifecycle.Upper64BitsTag, s.daemon.InvocationProcessor.GetExecutionInfo().TraceIDUpper64Hex)) + } } } diff --git a/pkg/serverless/invocationlifecycle/constants.go b/pkg/serverless/invocationlifecycle/constants.go index 28b885d662b22..9e4c824762dc8 100644 --- a/pkg/serverless/invocationlifecycle/constants.go +++ b/pkg/serverless/invocationlifecycle/constants.go 
@@ -35,6 +35,12 @@ const ( // SamplingPriorityHeader is the header containing the sampling priority for execution and/or inferred spans SamplingPriorityHeader = "x-datadog-sampling-priority" + // TraceTagsHeader is the header containing trace tags, e.g. the upper 64 bits tag + TraceTagsHeader = "x-datadog-tags" + + // Upper64BitsTag is the tag for the upper 64 bits of the trace ID, if it exists + Upper64BitsTag = "_dd.p.tid" + // Lambda function trigger span tag values apiGateway = "api-gateway" applicationLoadBalancer = "application-load-balancer" @@ -47,4 +53,5 @@ const ( sns = "sns" sqs = "sqs" functionURL = "lambda-function-url" + stepFunction = "step-function" ) diff --git a/pkg/serverless/invocationlifecycle/init.go b/pkg/serverless/invocationlifecycle/init.go index 8ee37838ac08e..aba10065b3bce 100644 --- a/pkg/serverless/invocationlifecycle/init.go +++ b/pkg/serverless/invocationlifecycle/init.go @@ -106,6 +106,10 @@ func (lp *LifecycleProcessor) initFromDynamoDBStreamEvent(event events.DynamoDBE } func (lp *LifecycleProcessor) initFromEventBridgeEvent(event events.EventBridgeEvent) { + if !lp.DetectLambdaLibrary() && lp.InferredSpansEnabled { + lp.GetInferredSpan().EnrichInferredSpanWithEventBridgeEvent(event) + } + lp.requestHandler.event = event lp.addTag(tagFunctionTriggerEventSource, eventBridge) lp.addTag(tagFunctionTriggerEventSourceArn, event.Source) @@ -189,3 +193,7 @@ func (lp *LifecycleProcessor) initFromLambdaFunctionURLEvent(event events.Lambda lp.addTag(tagFunctionTriggerEventSourceArn, fmt.Sprintf("arn:aws:lambda:%v:%v:url:%v", region, accountID, functionName)) lp.addTags(trigger.GetTagsFromLambdaFunctionURLRequest(event)) } + +func (lp *LifecycleProcessor) initFromStepFunctionPayload(event events.StepFunctionPayload) { + lp.requestHandler.event = event +} diff --git a/pkg/serverless/invocationlifecycle/lifecycle.go b/pkg/serverless/invocationlifecycle/lifecycle.go index 90e931767cef1..2c210efdc5e08 100644 --- 
a/pkg/serverless/invocationlifecycle/lifecycle.go +++ b/pkg/serverless/invocationlifecycle/lifecycle.go @@ -94,7 +94,6 @@ func (lp *LifecycleProcessor) OnInvokeStart(startDetails *InvocationStartDetails if err != nil { log.Debugf("[lifecycle] Failed to parse event payload: %v", err) } - eventType := trigger.GetEventType(lowercaseEventPayload) if eventType == trigger.Unknown { log.Debugf("[lifecycle] Failed to extract event type") @@ -230,6 +229,22 @@ func (lp *LifecycleProcessor) OnInvokeStart(startDetails *InvocationStartDetails } ev = event lp.initFromLambdaFunctionURLEvent(event, region, account, resource) + case trigger.LegacyStepFunctionEvent: + var event events.StepFunctionEvent + if err := json.Unmarshal(payloadBytes, &event); err != nil { + log.Debugf("Failed to unmarshal %s event: %s", stepFunction, err) + break + } + ev = event.Payload + lp.initFromStepFunctionPayload(event.Payload) + case trigger.StepFunctionEvent: + var eventPayload events.StepFunctionPayload + if err := json.Unmarshal(payloadBytes, &eventPayload); err != nil { + log.Debugf("Failed to unmarshal %s event: %s", stepFunction, err) + break + } + ev = eventPayload + lp.initFromStepFunctionPayload(eventPayload) default: log.Debug("Skipping adding trigger types and inferred spans as a non-supported payload was received.") } diff --git a/pkg/serverless/invocationlifecycle/lifecycle_test.go b/pkg/serverless/invocationlifecycle/lifecycle_test.go index 1eceb0d9ccf19..934a3208e969f 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle_test.go +++ b/pkg/serverless/invocationlifecycle/lifecycle_test.go @@ -122,6 +122,78 @@ func TestStartExecutionSpanWithLambdaLibrary(t *testing.T) { assert.Equal(t, startInvocationTime, testProcessor.GetExecutionInfo().startTime) } +func TestStartExecutionSpanStepFunctionEvent(t *testing.T) { + extraTags := &logs.Tags{ + Tags: []string{"functionname:test-function"}, + } + demux := createDemultiplexer(t) + mockProcessTrace := func(*api.Payload) {} + 
mockDetectLambdaLibrary := func() bool { return false } + + eventPayload := `{"Execution":{"Id":"arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44","Input":{},"StartTime":"2024-07-30T19:55:52.976Z","Name":"bc9f281c-3daa-4e5a-9a60-471a3810bf44","RoleArn":"arn:aws:iam::425362996713:role/test-serverless-stepfunctions-dev-AgocsTestSFRole-tRkeFXScjyk4","RedriveCount":0},"StateMachine":{"Id":"arn:aws:states:us-east-1:425362996713:stateMachine:agocsTestSF","Name":"agocsTestSF"},"State":{"Name":"agocsTest1","EnteredTime":"2024-07-30T19:55:53.018Z","RetryCount":0}}` + startInvocationTime := time.Now() + startDetails := InvocationStartDetails{ + StartTime: startInvocationTime, + InvokeEventRawPayload: []byte(eventPayload), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + } + + testProcessor := LifecycleProcessor{ + ExtraTags: extraTags, + ProcessTrace: mockProcessTrace, + DetectLambdaLibrary: mockDetectLambdaLibrary, + Demux: demux, + } + + testProcessor.OnInvokeStart(&startDetails) + + assert.NotNil(t, testProcessor.GetExecutionInfo()) + + assert.Equal(t, uint64(0), testProcessor.GetExecutionInfo().SpanID) + assert.Equal(t, uint64(5744042798732701615), testProcessor.GetExecutionInfo().TraceID) + assert.Equal(t, uint64(2902498116043018663), testProcessor.GetExecutionInfo().parentID) + assert.Equal(t, sampler.SamplingPriority(1), testProcessor.GetExecutionInfo().SamplingPriority) + upper64 := testProcessor.GetExecutionInfo().TraceIDUpper64Hex + assert.Equal(t, "1914fe7789eb32be", upper64) + assert.Equal(t, startInvocationTime, testProcessor.GetExecutionInfo().startTime) +} + +func TestLegacyLambdaStartExecutionSpanStepFunctionEvent(t *testing.T) { + extraTags := &logs.Tags{ + Tags: []string{"functionname:test-function"}, + } + demux := createDemultiplexer(t) + mockProcessTrace := func(*api.Payload) {} + mockDetectLambdaLibrary := func() bool { return false } + + eventPayload := 
`{"Payload":{"Execution":{"Id":"arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44","Input":{},"StartTime":"2024-07-30T19:55:52.976Z","Name":"bc9f281c-3daa-4e5a-9a60-471a3810bf44","RoleArn":"arn:aws:iam::425362996713:role/test-serverless-stepfunctions-dev-AgocsTestSFRole-tRkeFXScjyk4","RedriveCount":0},"StateMachine":{"Id":"arn:aws:states:us-east-1:425362996713:stateMachine:agocsTestSF","Name":"agocsTestSF"},"State":{"Name":"agocsTest1","EnteredTime":"2024-07-30T19:55:53.018Z","RetryCount":0}}}` + startInvocationTime := time.Now() + startDetails := InvocationStartDetails{ + StartTime: startInvocationTime, + InvokeEventRawPayload: []byte(eventPayload), + InvokedFunctionARN: "arn:aws:lambda:us-east-1:123456789012:function:my-function", + } + + testProcessor := LifecycleProcessor{ + ExtraTags: extraTags, + ProcessTrace: mockProcessTrace, + DetectLambdaLibrary: mockDetectLambdaLibrary, + Demux: demux, + } + + testProcessor.OnInvokeStart(&startDetails) + + assert.NotNil(t, testProcessor.GetExecutionInfo()) + + assert.Equal(t, uint64(0), testProcessor.GetExecutionInfo().SpanID) + assert.Equal(t, uint64(5744042798732701615), testProcessor.GetExecutionInfo().TraceID) + assert.Equal(t, uint64(2902498116043018663), testProcessor.GetExecutionInfo().parentID) + assert.Equal(t, sampler.SamplingPriority(1), testProcessor.GetExecutionInfo().SamplingPriority) + upper64 := testProcessor.GetExecutionInfo().TraceIDUpper64Hex + assert.Equal(t, "1914fe7789eb32be", upper64) + assert.Equal(t, startInvocationTime, testProcessor.GetExecutionInfo().startTime) +} + func TestEndExecutionSpanNoLambdaLibrary(t *testing.T) { t.Setenv(functionNameEnvVar, "TestFunction") diff --git a/pkg/serverless/invocationlifecycle/trace.go b/pkg/serverless/invocationlifecycle/trace.go index de7f69eabb688..f45eeaeef8929 100644 --- a/pkg/serverless/invocationlifecycle/trace.go +++ b/pkg/serverless/invocationlifecycle/trace.go @@ -16,7 +16,7 @@ import ( json 
"github.com/json-iterator/go" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/random" "github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan" @@ -34,12 +34,13 @@ var /* const */ runtimeRegex = regexp.MustCompile(`^(dotnet|go|java|ruby)(\d+(\. // ExecutionStartInfo is saved information from when an execution span was started type ExecutionStartInfo struct { - startTime time.Time - TraceID uint64 - SpanID uint64 - parentID uint64 - requestPayload []byte - SamplingPriority sampler.SamplingPriority + startTime time.Time + TraceID uint64 + TraceIDUpper64Hex string + SpanID uint64 + parentID uint64 + requestPayload []byte + SamplingPriority sampler.SamplingPriority } // startExecutionSpan records information from the start of the invocation. @@ -63,6 +64,12 @@ func (lp *LifecycleProcessor) startExecutionSpan(event interface{}, rawPayload [ inferredSpan.Span.TraceID = traceContext.TraceID inferredSpan.Span.ParentID = traceContext.ParentID } + if traceContext.TraceIDUpper64Hex != "" { + executionContext.TraceIDUpper64Hex = traceContext.TraceIDUpper64Hex + lp.requestHandler.SetMetaTag(Upper64BitsTag, traceContext.TraceIDUpper64Hex) + } else { + delete(lp.requestHandler.triggerTags, Upper64BitsTag) + } } else { executionContext.TraceID = 0 executionContext.parentID = 0 @@ -112,9 +119,9 @@ func (lp *LifecycleProcessor) endExecutionSpan(endDetails *InvocationEndDetails) if len(langMatches) >= 2 { executionSpan.Meta["language"] = langMatches[1] } - captureLambdaPayloadEnabled := config.Datadog().GetBool("capture_lambda_payload") + captureLambdaPayloadEnabled := pkgconfigsetup.Datadog().GetBool("capture_lambda_payload") if captureLambdaPayloadEnabled { - capturePayloadMaxDepth := config.Datadog().GetInt("capture_lambda_payload_max_depth") + capturePayloadMaxDepth := 
pkgconfigsetup.Datadog().GetInt("capture_lambda_payload_max_depth") requestPayloadJSON := make(map[string]interface{}) if err := json.Unmarshal(executionContext.requestPayload, &requestPayloadJSON); err != nil { log.Debugf("[lifecycle] Failed to parse request payload: %v", err) diff --git a/pkg/serverless/invocationlifecycle/trace_test.go b/pkg/serverless/invocationlifecycle/trace_test.go index 6b45d32755165..2dda265a0479b 100644 --- a/pkg/serverless/invocationlifecycle/trace_test.go +++ b/pkg/serverless/invocationlifecycle/trace_test.go @@ -101,6 +101,21 @@ func TestStartExecutionSpan(t *testing.T) { reqHeadersWithCtx.Set("x-datadog-sampling-priority", "3") reqHeadersWithCtx.Set("traceparent", "00-00000000000000000000000000000006-0000000000000006-01") + stepFunctionEvent := events.StepFunctionPayload{ + Execution: struct { + ID string + }{ + ID: "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:aa6c9316-713a-41d4-9c30-61131716744f", + }, + State: struct { + Name string + EnteredTime string + }{ + Name: "agocsTest1", + EnteredTime: "2024-07-30T20:46:20.824Z", + }, + } + testcases := []struct { name string event interface{} @@ -315,6 +330,20 @@ func TestStartExecutionSpan(t *testing.T) { SamplingPriority: sampler.SamplingPriority(1), }, }, + { + name: "step function event", + event: stepFunctionEvent, + payload: payloadWithoutCtx, + reqHeaders: reqHeadersWithoutCtx, + infSpanEnabled: false, + propStyle: "datadog", + expectCtx: &ExecutionStartInfo{ + TraceID: 5377636026938777059, + TraceIDUpper64Hex: "6fb5c3a05c73dbfe", + parentID: 8947638978974359093, + SamplingPriority: 1, + }, + }, } for _, tc := range testcases { @@ -333,6 +362,7 @@ func TestStartExecutionSpan(t *testing.T) { requestHandler: &RequestHandler{ executionInfo: actualCtx, inferredSpans: [2]*inferredspan.InferredSpan{inferredSpan}, + triggerTags: make(map[string]string), }, } startDetails := &InvocationStartDetails{ @@ -697,6 +727,71 @@ func TestEndExecutionSpanWithTimeout(t *testing.T) { 
assert.Equal(t, "Datadog detected an Impending Timeout", executionSpan.Meta["error.msg"]) } +func TestEndExecutionSpanWithStepFunctions(t *testing.T) { + t.Setenv(functionNameEnvVar, "TestFunction") + currentExecutionInfo := &ExecutionStartInfo{} + lp := &LifecycleProcessor{ + requestHandler: &RequestHandler{ + executionInfo: currentExecutionInfo, + triggerTags: make(map[string]string), + }, + } + + lp.requestHandler.triggerTags["_dd.p.tid"] = "6fb5c3a05c73dbfe" + + startTime := time.Now() + startDetails := &InvocationStartDetails{ + StartTime: startTime, + InvokeEventHeaders: http.Header{}, + } + + stepFunctionEvent := events.StepFunctionPayload{ + Execution: struct{ ID string }(struct { + ID string `json:"id"` + }{ + ID: "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:aa6c9316-713a-41d4-9c30-61131716744f", + }), + State: struct { + Name string + EnteredTime string + }{ + Name: "agocsTest1", + EnteredTime: "2024-07-30T20:46:20.824Z", + }, + } + + lp.startExecutionSpan(stepFunctionEvent, []byte("[]"), startDetails) + + assert.Equal(t, uint64(5377636026938777059), currentExecutionInfo.TraceID) + assert.Equal(t, uint64(8947638978974359093), currentExecutionInfo.parentID) + assert.Equal(t, "6fb5c3a05c73dbfe", lp.requestHandler.triggerTags["_dd.p.tid"]) + + duration := 1 * time.Second + endTime := startTime.Add(duration) + + endDetails := &InvocationEndDetails{ + EndTime: endTime, + IsError: false, + RequestID: "test-request-id", + ResponseRawPayload: []byte(`{"response":"test response payload"}`), + ColdStart: true, + ProactiveInit: false, + Runtime: "dotnet6", + } + executionSpan := lp.endExecutionSpan(endDetails) + + assert.Equal(t, "aws.lambda", executionSpan.Name) + assert.Equal(t, "aws.lambda", executionSpan.Service) + assert.Equal(t, "TestFunction", executionSpan.Resource) + assert.Equal(t, "serverless", executionSpan.Type) + assert.Equal(t, currentExecutionInfo.TraceID, executionSpan.TraceID) + assert.Equal(t, currentExecutionInfo.SpanID, 
executionSpan.SpanID) + assert.Equal(t, startTime.UnixNano(), executionSpan.Start) + assert.Equal(t, duration.Nanoseconds(), executionSpan.Duration) + assert.Equal(t, "6fb5c3a05c73dbfe", executionSpan.Meta["_dd.p.tid"]) + +} + func TestParseLambdaPayload(t *testing.T) { assert.Equal(t, []byte(""), ParseLambdaPayload([]byte(""))) assert.Equal(t, []byte("{}"), ParseLambdaPayload([]byte("{}"))) diff --git a/pkg/serverless/metrics/enhanced_metrics.go b/pkg/serverless/metrics/enhanced_metrics.go index 3d81c71fce4a2..7320267a2f78f 100644 --- a/pkg/serverless/metrics/enhanced_metrics.go +++ b/pkg/serverless/metrics/enhanced_metrics.go @@ -63,6 +63,8 @@ const ( tmpMaxMetric = "aws.lambda.enhanced.tmp_max" fdMaxMetric = "aws.lambda.enhanced.fd_max" fdUseMetric = "aws.lambda.enhanced.fd_use" + threadsMaxMetric = "aws.lambda.enhanced.threads_max" + threadsUseMetric = "aws.lambda.enhanced.threads_use" enhancedMetricsEnvVar = "DD_ENHANCED_METRICS" // Bottlecap @@ -574,6 +576,14 @@ type generateFdEnhancedMetricsArgs struct { Time float64 } +type generateThreadEnhancedMetricsArgs struct { + ThreadsMax float64 + ThreadsUse float64 + Tags []string + Demux aggregator.Demultiplexer + Time float64 +} + // generateFdEnhancedMetrics generates enhanced metrics for the maximum number of file descriptors available and in use func generateFdEnhancedMetrics(args generateFdEnhancedMetricsArgs) { args.Demux.AggregateSample(metrics.MetricSample{ @@ -594,25 +604,61 @@ func generateFdEnhancedMetrics(args generateFdEnhancedMetricsArgs) { }) } -func SendFdEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *ServerlessMetricAgent) { +// generateThreadEnhancedMetrics generates enhanced metrics for the maximum number of threads available and in use +func generateThreadEnhancedMetrics(args generateThreadEnhancedMetricsArgs) { + args.Demux.AggregateSample(metrics.MetricSample{ + Name: threadsMaxMetric, + Value: args.ThreadsMax, + Mtype: metrics.DistributionType, + Tags: args.Tags, + 
SampleRate: 1, + Timestamp: args.Time, + }) + args.Demux.AggregateSample(metrics.MetricSample{ + Name: threadsUseMetric, + Value: args.ThreadsUse, + Mtype: metrics.DistributionType, + Tags: args.Tags, + SampleRate: 1, + Timestamp: args.Time, + }) +} + +func SendProcessEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *ServerlessMetricAgent) { if enhancedMetricsDisabled { return } - fdMaxData, err := proc.GetFileDescriptorMaxData() + pids := proc.GetPidList(proc.ProcPath) + + fdMaxData, err := proc.GetFileDescriptorMaxData(pids) if err != nil { log.Debug("Could not emit file descriptor enhanced metrics. %v", err) return } - fdUseData, err := proc.GetFileDescriptorUseData() + fdUseData, err := proc.GetFileDescriptorUseData(pids) if err != nil { log.Debugf("Could not emit file descriptor enhanced metrics. %v", err) return } + threadsMaxData, err := proc.GetThreadsMaxData(pids) + if err != nil { + log.Debugf("Could not emit thread enhanced metrics. %v", err) + return + } + + threadsUseData, err := proc.GetThreadsUseData(pids) + if err != nil { + log.Debugf("Could not emit thread enhanced metrics. %v", err) + return + } + fdMax := fdMaxData.MaximumFileHandles fdUse := fdUseData.UseFileHandles + threadsMax := threadsMaxData.ThreadsMax + threadsUse := threadsUseData.ThreadsUse ticker := time.NewTicker(1 * time.Millisecond) defer ticker.Stop() @@ -627,15 +673,31 @@ func SendFdEnhancedMetrics(sendMetrics chan bool, tags []string, metricAgent *Se Demux: metricAgent.Demux, Time: float64(time.Now().UnixNano()) / float64(time.Second), }) + generateThreadEnhancedMetrics(generateThreadEnhancedMetricsArgs{ + ThreadsMax: threadsMax, + ThreadsUse: threadsUse, + Tags: tags, + Demux: metricAgent.Demux, + Time: float64(time.Now().UnixNano()) / float64(time.Second), + }) return } case <-ticker.C: - fdUseData, err := proc.GetFileDescriptorUseData() - if err != nil { - log.Debugf("Could not emit file descriptor enhanced metrics. 
%v", err) - return + pids := proc.GetPidList(proc.ProcPath) + + fdUseData, err := proc.GetFileDescriptorUseData(pids) + if err == nil { + fdUse = math.Max(fdUse, fdUseData.UseFileHandles) + } else { + log.Debugf("Could not update file descriptor use enhanced metric. %v", err) + } + + threadsUseData, err := proc.GetThreadsUseData(pids) + if err == nil { + threadsUse = math.Max(threadsUse, threadsUseData.ThreadsUse) + } else { + log.Debugf("Could not update thread use enhanced metric. %v", err) } - fdUse = math.Max(fdUse, fdUseData.UseFileHandles) } } } diff --git a/pkg/serverless/metrics/enhanced_metrics_test.go b/pkg/serverless/metrics/enhanced_metrics_test.go index 1c274229ba145..81de716f01226 100644 --- a/pkg/serverless/metrics/enhanced_metrics_test.go +++ b/pkg/serverless/metrics/enhanced_metrics_test.go @@ -766,7 +766,43 @@ func TestSendFdEnhancedMetrics(t *testing.T) { assert.Len(t, timedMetrics, 0) } -func TestSendFdEnhancedMetricsDisabled(t *testing.T) { +func TestSendThreadEnhancedMetrics(t *testing.T) { + demux := createDemultiplexer(t) + tags := []string{"functionname:test-function"} + now := float64(time.Now().UnixNano()) / float64(time.Second) + args := generateThreadEnhancedMetricsArgs{ + ThreadsMax: 1024, + ThreadsUse: 41, + Tags: tags, + Demux: demux, + Time: now, + } + go generateThreadEnhancedMetrics(args) + generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(3, 0, 100*time.Millisecond) + assert.Equal(t, []metrics.MetricSample{ + { + Name: threadsMaxMetric, + Value: 1024, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + { + Name: threadsUseMetric, + Value: 41, + Mtype: metrics.DistributionType, + Tags: tags, + SampleRate: 1, + Timestamp: now, + }, + }, + generatedMetrics, + ) + assert.Len(t, timedMetrics, 0) +} + +func TestSendProcessEnhancedMetricsDisabled(t *testing.T) { var wg sync.WaitGroup enhancedMetricsDisabled = true demux := createDemultiplexer(t) @@ -776,7 +812,7 @@ func 
TestSendFdEnhancedMetricsDisabled(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - SendFdEnhancedMetrics(make(chan bool), tags, &metricAgent) + SendProcessEnhancedMetrics(make(chan bool), tags, &metricAgent) }() generatedMetrics, timedMetrics := demux.WaitForNumberOfSamples(1, 0, 100*time.Millisecond) diff --git a/pkg/serverless/metrics/metric.go b/pkg/serverless/metrics/metric.go index 72ac20ba15d59..9732db4e76c8f 100644 --- a/pkg/serverless/metrics/metric.go +++ b/pkg/serverless/metrics/metric.go @@ -12,8 +12,8 @@ import ( dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,7 +52,7 @@ const ( // GetMultipleEndpoints returns the api keys per domain specified in the main agent config func (m *MetricConfig) GetMultipleEndpoints() (map[string][]string, error) { - return utils.GetMultipleEndpoints(config.Datadog()) + return utils.GetMultipleEndpoints(pkgconfigsetup.Datadog()) } // NewServer returns a running DogStatsD server @@ -64,17 +64,17 @@ func (m *MetricDogStatsD) NewServer(demux aggregator.Demultiplexer) (dogstatsdSe func (c *ServerlessMetricAgent) Start(forwarderTimeout time.Duration, multipleEndpointConfig MultipleEndpointConfig, dogstatFactory DogStatsDFactory) { // prevents any UDP packets from being stuck in the buffer and not parsed during the current invocation // by setting this option to 1ms, all packets received will directly be sent to the parser - config.Datadog().Set("dogstatsd_packet_buffer_flush_timeout", 1*time.Millisecond, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("dogstatsd_packet_buffer_flush_timeout", 1*time.Millisecond, model.SourceAgentRuntime) // the invocation metric is 
also generated by Lambda Layers // we want to avoid duplicate metric - customerList := config.Datadog().GetStringSlice(statsDMetricBlocklistKey) + customerList := pkgconfigsetup.Datadog().GetStringSlice(statsDMetricBlocklistKey) // if the proxy is enabled we need to also block the errorMetric if strings.ToLower(os.Getenv(proxyEnabledEnvVar)) == "true" { - config.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklistForProxy(customerList), model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklistForProxy(customerList), model.SourceAgentRuntime) } else { - config.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklist(customerList), model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set(statsDMetricBlocklistKey, buildMetricBlocklist(customerList), model.SourceAgentRuntime) } demux := buildDemultiplexer(multipleEndpointConfig, forwarderTimeout) diff --git a/pkg/serverless/metrics/metric_test.go b/pkg/serverless/metrics/metric_test.go index 2b07a3e07f00d..9f7abdc5d8b75 100644 --- a/pkg/serverless/metrics/metric_test.go +++ b/pkg/serverless/metrics/metric_test.go @@ -23,7 +23,7 @@ import ( dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" "github.com/DataDog/datadog-agent/pkg/aggregator" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/hostname" ) @@ -39,7 +39,7 @@ func TestStartDoesNotBlock(t *testing.T) { if os.Getenv("CI") == "true" && runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" { t.Skip("TestStartDoesNotBlock is known to fail on the macOS Gitlab runners because of the already running Agent") } - config.LoadWithoutSecret() + pkgconfigsetup.LoadWithoutSecret(pkgconfigsetup.Datadog(), nil) metricAgent := &ServerlessMetricAgent{ SketchesBucketOffset: time.Second * 10, } @@ -89,9 +89,9 @@ func TestStartInvalidDogStatsD(t 
*testing.T) { func TestStartWithProxy(t *testing.T) { t.SkipNow() - originalValues := config.Datadog().GetStringSlice(statsDMetricBlocklistKey) - defer config.Datadog().SetWithoutSource(statsDMetricBlocklistKey, originalValues) - config.Datadog().SetWithoutSource(statsDMetricBlocklistKey, []string{}) + originalValues := pkgconfigsetup.Datadog().GetStringSlice(statsDMetricBlocklistKey) + defer pkgconfigsetup.Datadog().SetWithoutSource(statsDMetricBlocklistKey, originalValues) + pkgconfigsetup.Datadog().SetWithoutSource(statsDMetricBlocklistKey, []string{}) t.Setenv(proxyEnabledEnvVar, "true") @@ -106,7 +106,7 @@ func TestStartWithProxy(t *testing.T) { ErrorsMetric, } - setValues := config.Datadog().GetStringSlice(statsDMetricBlocklistKey) + setValues := pkgconfigsetup.Datadog().GetStringSlice(statsDMetricBlocklistKey) assert.Equal(t, expected, setValues) } @@ -208,7 +208,7 @@ func getAvailableUDPPort() (int, error) { func TestRaceFlushVersusParsePacket(t *testing.T) { port, err := getAvailableUDPPort() require.NoError(t, err) - config.Datadog().SetDefault("dogstatsd_port", port) + pkgconfigsetup.Datadog().SetDefault("dogstatsd_port", port) demux := aggregator.InitAndStartServerlessDemultiplexer(nil, time.Second*1000) @@ -216,7 +216,7 @@ func TestRaceFlushVersusParsePacket(t *testing.T) { require.NoError(t, err, "cannot start DSD") defer s.Stop() - url := fmt.Sprintf("127.0.0.1:%d", config.Datadog().GetInt("dogstatsd_port")) + url := fmt.Sprintf("127.0.0.1:%d", pkgconfigsetup.Datadog().GetInt("dogstatsd_port")) conn, err := net.Dial("udp", url) require.NoError(t, err, "cannot connect to DSD socket") defer conn.Close() diff --git a/pkg/serverless/otlp/otlp.go b/pkg/serverless/otlp/otlp.go index bf03f7ca40b29..fcb44c14d3c47 100644 --- a/pkg/serverless/otlp/otlp.go +++ b/pkg/serverless/otlp/otlp.go @@ -16,7 +16,7 @@ import ( "go.opentelemetry.io/collector/otelcol" coreOtlp "github.com/DataDog/datadog-agent/comp/otelcol/otlp" - 
"github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -29,7 +29,7 @@ type ServerlessOTLPAgent struct { // NewServerlessOTLPAgent creates a new ServerlessOTLPAgent with the correct // otel pipeline. func NewServerlessOTLPAgent(serializer serializer.MetricSerializer) *ServerlessOTLPAgent { - pipeline, err := coreOtlp.NewPipelineFromAgentConfig(config.Datadog(), serializer, nil, nil) + pipeline, err := coreOtlp.NewPipelineFromAgentConfig(pkgconfigsetup.Datadog(), serializer, nil, nil) if err != nil { log.Error("Error creating new otlp pipeline:", err) return nil @@ -61,7 +61,7 @@ func (o *ServerlessOTLPAgent) Stop() { // IsEnabled returns true if the OTLP endpoint should be enabled. func IsEnabled() bool { - return coreOtlp.IsEnabled(config.Datadog()) + return coreOtlp.IsEnabled(pkgconfigsetup.Datadog()) } var ( diff --git a/pkg/serverless/proc/proc.go b/pkg/serverless/proc/proc.go index b8e612de91938..2cb4e297f47be 100644 --- a/pkg/serverless/proc/proc.go +++ b/pkg/serverless/proc/proc.go @@ -27,10 +27,11 @@ const ( ProcPath = "/proc" PidLimitsPathFormat = "/%d/limits" PidFdPathFormat = "/%d/fd" + PidTaskPathFormat = "/%d/task" lambdaNetworkInterface = "vinternal_1" ) -func getPidList(procPath string) []int { +func GetPidList(procPath string) []int { files, err := os.ReadDir(procPath) pids := []int{} if err != nil { @@ -74,7 +75,7 @@ func getEnvVariablesFromPid(procPath string, pid int) map[string]string { // it returns a slice since a value could be found in more than one process func SearchProcsForEnvVariable(procPath string, envName string) []string { result := []string{} - pidList := getPidList(procPath) + pidList := GetPidList(procPath) for _, pid := range pidList { envMap := getEnvVariablesFromPid(procPath, pid) if value, ok := envMap[envName]; ok { @@ -207,12 +208,11 @@ type FileDescriptorMaxData 
struct { } // GetFileDescriptorMaxData returns the maximum limit of file descriptors the function can use -func GetFileDescriptorMaxData() (*FileDescriptorMaxData, error) { - return getFileDescriptorMaxData(ProcPath) +func GetFileDescriptorMaxData(pids []int) (*FileDescriptorMaxData, error) { + return getFileDescriptorMaxData(ProcPath, pids) } -func getFileDescriptorMaxData(path string) (*FileDescriptorMaxData, error) { - pids := getPidList(path) +func getFileDescriptorMaxData(path string, pids []int) (*FileDescriptorMaxData, error) { fdMax := math.Inf(1) for _, pid := range pids { @@ -260,12 +260,11 @@ type FileDescriptorUseData struct { } // GetFileDescriptorUseData returns the maximum number of file descriptors the function has used at a time -func GetFileDescriptorUseData() (*FileDescriptorUseData, error) { - return getFileDescriptorUseData(ProcPath) +func GetFileDescriptorUseData(pids []int) (*FileDescriptorUseData, error) { + return getFileDescriptorUseData(ProcPath, pids) } -func getFileDescriptorUseData(path string) (*FileDescriptorUseData, error) { - pids := getPidList(path) +func getFileDescriptorUseData(path string, pids []int) (*FileDescriptorUseData, error) { fdUse := 0 for _, pid := range pids { @@ -281,3 +280,84 @@ func getFileDescriptorUseData(path string) (*FileDescriptorUseData, error) { UseFileHandles: float64(fdUse), }, nil } + +type ThreadsMaxData struct { + ThreadsMax float64 +} + +// GetThreadsMaxData returns the maximum limit of threads the function can use +func GetThreadsMaxData(pids []int) (*ThreadsMaxData, error) { + return getThreadsMaxData(ProcPath, pids) +} + +func getThreadsMaxData(path string, pids []int) (*ThreadsMaxData, error) { + threadsMax := math.Inf(1) + + for _, pid := range pids { + limitsPath := fmt.Sprint(path + fmt.Sprintf(PidLimitsPathFormat, pid)) + file, err := os.Open(limitsPath) + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := 
scanner.Text() + if strings.HasPrefix(line, "Max processes") { + fields := strings.Fields(line) + if len(fields) < 5 { + log.Debugf("threads max data not found in file '%s'", limitsPath) + break + } + + threadsMaxPidStr := fields[2] + threadsMaxPid, err := strconv.Atoi(threadsMaxPidStr) + if err != nil { + log.Debugf("file descriptor max data not found in file '%s'", limitsPath) + break + } + + threadsMax = math.Min(float64(threadsMax), float64(threadsMaxPid)) + break + } + } + } + + if threadsMax != math.Inf(1) { + return &ThreadsMaxData{ + ThreadsMax: threadsMax, + }, nil + } + + return nil, fmt.Errorf("threads max data not found") +} + +type ThreadsUseData struct { + ThreadsUse float64 +} + +// GetThreadsUseData returns the maximum number of threads the function has used at a time +func GetThreadsUseData(pids []int) (*ThreadsUseData, error) { + return getThreadsUseData(ProcPath, pids) +} + +func getThreadsUseData(path string, pids []int) (*ThreadsUseData, error) { + threadCount := 0 + for _, pid := range pids { + taskPath := fmt.Sprint(path + fmt.Sprintf(PidTaskPathFormat, pid)) + files, err := os.ReadDir(taskPath) + if err != nil { + return nil, fmt.Errorf("threads use data not found in directory '%s'", taskPath) + } + for _, file := range files { + if file.IsDir() { + threadCount++ + } + } + } + + return &ThreadsUseData{ + ThreadsUse: float64(threadCount), + }, nil +} diff --git a/pkg/serverless/proc/proc_test.go b/pkg/serverless/proc/proc_test.go index 6b445db35596c..a43e700ed7112 100644 --- a/pkg/serverless/proc/proc_test.go +++ b/pkg/serverless/proc/proc_test.go @@ -15,12 +15,12 @@ import ( ) func TestGetPidListInvalid(t *testing.T) { - pids := getPidList("/incorrect/folder") + pids := GetPidList("/incorrect/folder") assert.Equal(t, 0, len(pids)) } func TestGetPidListValid(t *testing.T) { - pids := getPidList("./testData") + pids := GetPidList("./testData") sort.Ints(pids) assert.Equal(t, 2, len(pids)) assert.Equal(t, 13, pids[0]) @@ -145,30 +145,69 @@ func 
TestGetNetworkData(t *testing.T) { } func TestGetFileDescriptorMaxData(t *testing.T) { - path := "./testData/file-descriptor/valid" - fileDescriptorMaxData, err := getFileDescriptorMaxData(path) + path := "./testData/process/valid" + pids := GetPidList(path) + fileDescriptorMaxData, err := getFileDescriptorMaxData(path, pids) assert.Nil(t, err) assert.Equal(t, float64(1024), fileDescriptorMaxData.MaximumFileHandles) - path = "./testData/file-descriptor/invalid_malformed" - fileDescriptorMaxData, err = getFileDescriptorMaxData(path) + path = "./testData/process/invalid_malformed" + pids = GetPidList(path) + fileDescriptorMaxData, err = getFileDescriptorMaxData(path, pids) assert.NotNil(t, err) assert.Nil(t, fileDescriptorMaxData) - path = "./testData/file-descriptor/invalid_missing" - fileDescriptorMaxData, err = getFileDescriptorMaxData(path) + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + fileDescriptorMaxData, err = getFileDescriptorMaxData(path, pids) assert.NotNil(t, err) assert.Nil(t, fileDescriptorMaxData) } func TestGetFileDescriptorUseData(t *testing.T) { - path := "./testData/file-descriptor/valid" - fileDescriptorUseData, err := getFileDescriptorUseData(path) + path := "./testData/process/valid" + pids := GetPidList(path) + fileDescriptorUseData, err := getFileDescriptorUseData(path, pids) assert.Nil(t, err) assert.Equal(t, float64(5), fileDescriptorUseData.UseFileHandles) - path = "./testData/file-descriptor/invalid_missing" - fileDescriptorUseData, err = getFileDescriptorUseData(path) + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + fileDescriptorUseData, err = getFileDescriptorUseData(path, pids) assert.NotNil(t, err) assert.Nil(t, fileDescriptorUseData) } + +func TestGetThreadsMaxData(t *testing.T) { + path := "./testData/process/valid" + pids := GetPidList(path) + threadsMaxData, err := getThreadsMaxData(path, pids) + assert.Nil(t, err) + assert.Equal(t, float64(1024), threadsMaxData.ThreadsMax) + + 
path = "./testData/process/invalid_malformed" + pids = GetPidList(path) + threadsMaxData, err = getThreadsMaxData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, threadsMaxData) + + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + threadsMaxData, err = getThreadsMaxData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, threadsMaxData) +} + +func TestGetThreadsUseData(t *testing.T) { + path := "./testData/process/valid" + pids := GetPidList(path) + threadsUseData, err := getThreadsUseData(path, pids) + assert.Nil(t, err) + assert.Equal(t, float64(5), threadsUseData.ThreadsUse) + + path = "./testData/process/invalid_missing" + pids = GetPidList(path) + threadsUseData, err = getThreadsUseData(path, pids) + assert.NotNil(t, err) + assert.Nil(t, threadsUseData) +} diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits b/pkg/serverless/proc/testData/process/invalid_malformed/31/limits similarity index 93% rename from pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits rename to pkg/serverless/proc/testData/process/invalid_malformed/31/limits index 45c06574a388c..2436ec08542cc 100644 --- a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/31/limits +++ b/pkg/serverless/proc/testData/process/invalid_malformed/31/limits @@ -5,7 +5,7 @@ Max data size unlimited unlimited bytes Max stack size 8388608 unlimited bytes Max core file size unlimited unlimited bytes Max resident set unlimited unlimited bytes -Max processes 1024 1024 processes +Max processes 1024 Max open files 1024 1024 Max locked memory 65536 65536 bytes Max address space unlimited unlimited bytes diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits b/pkg/serverless/proc/testData/process/invalid_malformed/9/limits similarity index 98% rename from pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits rename to pkg/serverless/proc/testData/process/invalid_malformed/9/limits 
index 3ad780c33f48d..2d25ac3010bd4 100644 --- a/pkg/serverless/proc/testData/file-descriptor/invalid_malformed/9/limits +++ b/pkg/serverless/proc/testData/process/invalid_malformed/9/limits @@ -5,7 +5,7 @@ Max data size unlimited unlimited bytes Max stack size 8388608 unlimited bytes Max core file size unlimited unlimited bytes Max resident set unlimited unlimited bytes -Max processes 1024 1024 processes +Max processes 1024 1024 Max open files 1024 Max locked memory 65536 65536 bytes Max address space unlimited unlimited bytes diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits b/pkg/serverless/proc/testData/process/invalid_missing/31/limits similarity index 93% rename from pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits rename to pkg/serverless/proc/testData/process/invalid_missing/31/limits index 34925a8f557f9..c7dc2c55dc689 100644 --- a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/31/limits +++ b/pkg/serverless/proc/testData/process/invalid_missing/31/limits @@ -5,7 +5,6 @@ Max data size unlimited unlimited bytes Max stack size 8388608 unlimited bytes Max core file size unlimited unlimited bytes Max resident set unlimited unlimited bytes -Max processes 1024 1024 processes Max locked memory 65536 65536 bytes Max address space unlimited unlimited bytes Max file locks unlimited unlimited locks diff --git a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits b/pkg/serverless/proc/testData/process/invalid_missing/9/limits similarity index 92% rename from pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits rename to pkg/serverless/proc/testData/process/invalid_missing/9/limits index 17e615740c934..07de49ec476ae 100644 --- a/pkg/serverless/proc/testData/file-descriptor/invalid_missing/9/limits +++ b/pkg/serverless/proc/testData/process/invalid_missing/9/limits @@ -4,8 +4,7 @@ Max file size unlimited unlimited bytes Max data size unlimited unlimited bytes Max stack 
size 8388608 unlimited bytes Max core file size unlimited unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 1024 1024 processes +Max resident set unlimited unlimited bytes Max locked memory 65536 65536 bytes Max address space unlimited unlimited bytes Max file locks unlimited unlimited locks diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/1 b/pkg/serverless/proc/testData/process/valid/31/fd/1 similarity index 100% rename from pkg/serverless/proc/testData/file-descriptor/valid/31/fd/1 rename to pkg/serverless/proc/testData/process/valid/31/fd/1 diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/fd/2 b/pkg/serverless/proc/testData/process/valid/31/fd/2 similarity index 100% rename from pkg/serverless/proc/testData/file-descriptor/valid/31/fd/2 rename to pkg/serverless/proc/testData/process/valid/31/fd/2 diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/31/limits b/pkg/serverless/proc/testData/process/valid/31/limits similarity index 100% rename from pkg/serverless/proc/testData/file-descriptor/valid/31/limits rename to pkg/serverless/proc/testData/process/valid/31/limits diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/1 b/pkg/serverless/proc/testData/process/valid/31/task/1/.gitkeep similarity index 100% rename from pkg/serverless/proc/testData/file-descriptor/valid/9/fd/1 rename to pkg/serverless/proc/testData/process/valid/31/task/1/.gitkeep diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/2 b/pkg/serverless/proc/testData/process/valid/31/task/2/.gitkeep similarity index 100% rename from pkg/serverless/proc/testData/file-descriptor/valid/9/fd/2 rename to pkg/serverless/proc/testData/process/valid/31/task/2/.gitkeep diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/fd/3 b/pkg/serverless/proc/testData/process/valid/31/task/3 similarity index 100% rename from pkg/serverless/proc/testData/file-descriptor/valid/9/fd/3 rename to 
pkg/serverless/proc/testData/process/valid/31/task/3 diff --git a/test/e2e/containers/fake_datadog/app/__init__.py b/pkg/serverless/proc/testData/process/valid/9/fd/1 similarity index 100% rename from test/e2e/containers/fake_datadog/app/__init__.py rename to pkg/serverless/proc/testData/process/valid/9/fd/1 diff --git a/test/e2e/cws-tests/tests/lib/__init__.py b/pkg/serverless/proc/testData/process/valid/9/fd/2 similarity index 100% rename from test/e2e/cws-tests/tests/lib/__init__.py rename to pkg/serverless/proc/testData/process/valid/9/fd/2 diff --git a/pkg/serverless/proc/testData/process/valid/9/fd/3 b/pkg/serverless/proc/testData/process/valid/9/fd/3 new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/file-descriptor/valid/9/limits b/pkg/serverless/proc/testData/process/valid/9/limits similarity index 100% rename from pkg/serverless/proc/testData/file-descriptor/valid/9/limits rename to pkg/serverless/proc/testData/process/valid/9/limits diff --git a/pkg/serverless/proc/testData/process/valid/9/task/1/.gitkeep b/pkg/serverless/proc/testData/process/valid/9/task/1/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/task/2/.gitkeep b/pkg/serverless/proc/testData/process/valid/9/task/2/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/proc/testData/process/valid/9/task/3/.gitkeep b/pkg/serverless/proc/testData/process/valid/9/task/3/.gitkeep new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pkg/serverless/serverless.go b/pkg/serverless/serverless.go index 2528e71d81e7a..b1d8c0f4c9b32 100644 --- a/pkg/serverless/serverless.go +++ b/pkg/serverless/serverless.go @@ -161,8 +161,8 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64, cpuOffsetData, cpuOffsetErr := proc.GetCPUData() uptimeOffset, uptimeOffsetErr := proc.GetUptime() networkOffsetData, 
networkOffsetErr := proc.GetNetworkData() - sendFdMetrics := make(chan bool) - go metrics.SendFdEnhancedMetrics(sendFdMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent) + sendProcessMetrics := make(chan bool) + go metrics.SendProcessEnhancedMetrics(sendProcessMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent) sendTmpMetrics := make(chan bool) go metrics.SendTmpEnhancedMetrics(sendTmpMetrics, daemon.ExtraTags.Tags, daemon.MetricAgent) @@ -181,17 +181,17 @@ func callInvocationHandler(daemon *daemon.Daemon, arn string, deadlineMs int64, case <-doneChannel: break } - sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, sendTmpMetrics, sendFdMetrics) + sendSystemEnhancedMetrics(daemon, cpuOffsetErr == nil && uptimeOffsetErr == nil, networkOffsetErr == nil, uptimeOffset, cpuOffsetData, networkOffsetData, sendTmpMetrics, sendProcessMetrics) } -func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool, sendFdMetrics chan bool) { +func sendSystemEnhancedMetrics(daemon *daemon.Daemon, emitCPUMetrics, emitNetworkMetrics bool, uptimeOffset float64, cpuOffsetData *proc.CPUData, networkOffsetData *proc.NetworkData, sendTmpMetrics chan bool, sendProcessMetrics chan bool) { if daemon.MetricAgent == nil { log.Debug("Could not send system enhanced metrics") return } close(sendTmpMetrics) - close(sendFdMetrics) + close(sendProcessMetrics) if emitCPUMetrics { metrics.SendCPUEnhancedMetrics(cpuOffsetData, uptimeOffset, daemon.ExtraTags.Tags, daemon.MetricAgent.Demux) diff --git a/pkg/serverless/trace/inferredspan/constants.go b/pkg/serverless/trace/inferredspan/constants.go index d48e12c6ccc07..4f677cef63d4b 100644 --- a/pkg/serverless/trace/inferredspan/constants.go +++ b/pkg/serverless/trace/inferredspan/constants.go @@ -14,6 +14,7 @@ 
const ( connectionID = "connection_id" detailType = "detail_type" endpoint = "endpoint" + eventBridgeTime = "x-datadog-start-time" eventID = "event_id" eventName = "event_name" eventSourceArn = "event_source_arn" @@ -35,6 +36,7 @@ const ( queueName = "queuename" receiptHandle = "receipt_handle" requestID = "request_id" + resourceName = "x-datadog-resource-name" resourceNames = "resource_names" senderID = "sender_id" sentTimestamp = "SentTimestamp" diff --git a/pkg/serverless/trace/inferredspan/inferred_span.go b/pkg/serverless/trace/inferredspan/inferred_span.go index a0c8811dc1106..9f4fc89862e76 100644 --- a/pkg/serverless/trace/inferredspan/inferred_span.go +++ b/pkg/serverless/trace/inferredspan/inferred_span.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serverless/random" @@ -76,7 +76,7 @@ func FilterFunctionTags(input map[string]string) map[string]string { } // filter out DD_TAGS & DD_EXTRA_TAGS - ddTags := configUtils.GetConfiguredTags(config.Datadog(), false) + ddTags := configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false) for _, tag := range ddTags { tagParts := strings.SplitN(tag, ":", 2) if len(tagParts) != 2 { @@ -125,7 +125,7 @@ func (inferredSpan *InferredSpan) GenerateInferredSpan(startTime time.Time) { // IsInferredSpansEnabled is used to determine if we need to // generate and enrich inferred spans for a particular invocation func IsInferredSpansEnabled() bool { - return config.Datadog().GetBool("serverless.trace_enabled") && config.Datadog().GetBool("serverless.trace_managed_services") + return pkgconfigsetup.Datadog().GetBool("serverless.trace_enabled") && pkgconfigsetup.Datadog().GetBool("serverless.trace_managed_services") } // AddTagToInferredSpan is used to 
add new tags to the inferred span in diff --git a/pkg/serverless/trace/inferredspan/span_enrichment.go b/pkg/serverless/trace/inferredspan/span_enrichment.go index c469418de69ea..0425f277fdc8b 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serverless/trigger/events" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -37,7 +37,7 @@ func CreateServiceMapping(val string) map[string]string { } func init() { - serviceMappingStr := config.Datadog().GetString("serverless.service_mapping") + serviceMappingStr := pkgconfigsetup.Datadog().GetString("serverless.service_mapping") serviceMapping = CreateServiceMapping(serviceMappingStr) } @@ -293,14 +293,27 @@ func (inferredSpan *InferredSpan) EnrichInferredSpanWithEventBridgeEvent(eventPa inferredSpan.IsAsync = true inferredSpan.Span.Name = "aws.eventbridge" inferredSpan.Span.Service = serviceName - inferredSpan.Span.Start = formatISOStartTime(eventPayload.StartTime) - inferredSpan.Span.Resource = source + inferredSpan.Span.Start = eventPayload.Time.UnixNano() // use as a backup if sent timestamp isn't passed by the tracer + inferredSpan.Span.Resource = "EventBridge" // use as a backup if bus name isn't passed by the tracer inferredSpan.Span.Type = "web" inferredSpan.Span.Meta = map[string]string{ operationName: "aws.eventbridge", resourceNames: source, detailType: eventPayload.DetailType, } + + if traceContext := eventPayload.Detail.TraceContext; traceContext != nil { + // The bus name isn't included in the default AWS payload, so we use + // `x-datadog-resource-name` from the tracer if it exists. + if bus, ok := traceContext[resourceName]; ok { + inferredSpan.Span.Resource = bus + } + + // Use the `x-datadog-start-time` from the tracer if it exists. 
+ if startTime, ok := traceContext[eventBridgeTime]; ok { + inferredSpan.Span.Start = calculateStartTime(convertStringTimestamp(startTime)) + } + } } // EnrichInferredSpanWithKinesisEvent uses the parsed event diff --git a/pkg/serverless/trace/inferredspan/span_enrichment_test.go b/pkg/serverless/trace/inferredspan/span_enrichment_test.go index 8379c6bcf717c..b43bf9f5fac91 100644 --- a/pkg/serverless/trace/inferredspan/span_enrichment_test.go +++ b/pkg/serverless/trace/inferredspan/span_enrichment_test.go @@ -637,12 +637,10 @@ func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { inferredSpan := mockInferredSpan() inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) span := inferredSpan.Span - assert.Equal(t, uint64(7353030974370088224), span.TraceID) - assert.Equal(t, uint64(8048964810003407541), span.SpanID) - assert.Equal(t, formatISOStartTime("2017-12-22T18:43:48Z"), span.Start) + assert.Equal(t, int64(100*1e6), span.Start) assert.Equal(t, "eventbridge", span.Service) assert.Equal(t, "aws.eventbridge", span.Name) - assert.Equal(t, "eventbridge.custom.event.sender", span.Resource) + assert.Equal(t, "testBus", span.Resource) assert.Equal(t, "web", span.Type) assert.Equal(t, "aws.eventbridge", span.Meta[operationName]) assert.Equal(t, "eventbridge.custom.event.sender", span.Meta[resourceNames]) @@ -650,6 +648,24 @@ func TestEnrichInferredSpanWithEventBridgeEvent(t *testing.T) { assert.True(t, inferredSpan.IsAsync) } +func TestEnrichInferredSpanWithEventBridgeEventNoBus(t *testing.T) { + var eventBridgeEvent events.EventBridgeEvent + _ = json.Unmarshal(getEventFromFile("eventbridge-no-bus.json"), &eventBridgeEvent) + inferredSpan := mockInferredSpan() + inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) + span := inferredSpan.Span + assert.Equal(t, "EventBridge", span.Resource) +} + +func TestEnrichInferredSpanWithEventBridgeEventNoSentTimestamp(t *testing.T) { + var eventBridgeEvent events.EventBridgeEvent + _ = 
json.Unmarshal(getEventFromFile("eventbridge-no-timestamp.json"), &eventBridgeEvent) + inferredSpan := mockInferredSpan() + inferredSpan.EnrichInferredSpanWithEventBridgeEvent(eventBridgeEvent) + span := inferredSpan.Span + assert.Equal(t, int64(1726505925*1e9), span.Start) +} + func TestRemapsAllInferredSpanServiceNamesFromEventBridgeEvent(t *testing.T) { // Store the original service mapping origServiceMapping := GetServiceMapping() diff --git a/pkg/serverless/trace/propagation/carriers.go b/pkg/serverless/trace/propagation/carriers.go index 8a30a557d1072..f8f902370ce6c 100644 --- a/pkg/serverless/trace/propagation/carriers.go +++ b/pkg/serverless/trace/propagation/carriers.go @@ -7,6 +7,7 @@ package propagation import ( + "crypto/sha256" "encoding/base64" "errors" "fmt" @@ -36,16 +37,17 @@ const ( var rootRegex = regexp.MustCompile("Root=1-[0-9a-fA-F]{8}-00000000[0-9a-fA-F]{16}") var ( - errorAWSTraceHeaderMismatch = errors.New("AWSTraceHeader does not match expected regex") - errorAWSTraceHeaderEmpty = errors.New("AWSTraceHeader does not contain trace ID and parent ID") - errorStringNotFound = errors.New("String value not found in _datadog payload") - errorUnsupportedDataType = errors.New("Unsupported DataType in _datadog payload") - errorNoDDContextFound = errors.New("No Datadog trace context found") - errorUnsupportedPayloadType = errors.New("Unsupported type for _datadog payload") - errorUnsupportedTypeType = errors.New("Unsupported type in _datadog payload") - errorUnsupportedValueType = errors.New("Unsupported value type in _datadog payload") - errorUnsupportedTypeValue = errors.New("Unsupported Type in _datadog payload") - errorCouldNotUnmarshal = errors.New("Could not unmarshal the invocation event payload") + errorAWSTraceHeaderMismatch = errors.New("AWSTraceHeader does not match expected regex") + errorAWSTraceHeaderEmpty = errors.New("AWSTraceHeader does not contain trace ID and parent ID") + errorStringNotFound = errors.New("String value not found in 
_datadog payload") + errorUnsupportedDataType = errors.New("Unsupported DataType in _datadog payload") + errorNoDDContextFound = errors.New("No Datadog trace context found") + errorUnsupportedPayloadType = errors.New("Unsupported type for _datadog payload") + errorUnsupportedTypeType = errors.New("Unsupported type in _datadog payload") + errorUnsupportedValueType = errors.New("Unsupported value type in _datadog payload") + errorUnsupportedTypeValue = errors.New("Unsupported Type in _datadog payload") + errorCouldNotUnmarshal = errors.New("Could not unmarshal the invocation event payload") + errorNoStepFunctionContextFound = errors.New("no Step Function context found in Step Function event") ) // extractTraceContextfromAWSTraceHeader extracts trace context from the @@ -201,6 +203,16 @@ func snsEntityCarrier(event events.SNSEntity) (tracer.TextMapReader, error) { return carrier, nil } +// eventBridgeCarrier returns the tracer.TextMapReader used to extract trace +// context from the Detail field of an events.EventBridgeEvent +func eventBridgeCarrier(event events.EventBridgeEvent) (tracer.TextMapReader, error) { + traceContext := event.Detail.TraceContext + if len(traceContext) > 0 { + return tracer.TextMapCarrier(traceContext), nil + } + return nil, errorNoDDContextFound +} + type invocationPayload struct { Headers tracer.TextMapCarrier `json:"headers"` } @@ -220,3 +232,60 @@ func rawPayloadCarrier(rawPayload []byte) (tracer.TextMapReader, error) { func headersCarrier(hdrs map[string]string) (tracer.TextMapReader, error) { return tracer.TextMapCarrier(hdrs), nil } + +// extractTraceContextFromStepFunctionContext extracts the execution ARN, state name, and state entered time and uses them to generate Trace ID and Parent ID +// The logic is based on the trace context conversion in Logs To Traces, dd-trace-py, dd-trace-js, etc. 
+func extractTraceContextFromStepFunctionContext(event events.StepFunctionPayload) (*TraceContext, error) { + tc := new(TraceContext) + + execArn := event.Execution.ID + stateName := event.State.Name + stateEnteredTime := event.State.EnteredTime + + if execArn == "" || stateName == "" || stateEnteredTime == "" { + return nil, errorNoStepFunctionContextFound + } + + lowerTraceID, upperTraceID := stringToDdTraceIDs(execArn) + parentID := stringToDdSpanID(execArn, stateName, stateEnteredTime) + + tc.TraceID = lowerTraceID + tc.TraceIDUpper64Hex = upperTraceID + tc.ParentID = parentID + tc.SamplingPriority = sampler.PriorityAutoKeep + return tc, nil +} + +// stringToDdSpanID hashes the Execution ARN, state name, and state entered time to generate a 64-bit span ID +func stringToDdSpanID(execArn string, stateName string, stateEnteredTime string) uint64 { + uniqueSpanString := fmt.Sprintf("%s#%s#%s", execArn, stateName, stateEnteredTime) + spanHash := sha256.Sum256([]byte(uniqueSpanString)) + parentID := getPositiveUInt64(spanHash[0:8]) + return parentID +} + +// stringToDdTraceIDs hashes an Execution ARN to generate the lower and upper 64 bits of a 128-bit trace ID +func stringToDdTraceIDs(toHash string) (uint64, string) { + hash := sha256.Sum256([]byte(toHash)) + lower64 := getPositiveUInt64(hash[8:16]) + upper64 := getHexEncodedString(getPositiveUInt64(hash[0:8])) + return lower64, upper64 +} + +// getPositiveUInt64 converts the first 8 bytes of a byte array to a positive uint64 +func getPositiveUInt64(hashBytes []byte) uint64 { + var result uint64 + for i := 0; i < 8; i++ { + result = (result << 8) + uint64(hashBytes[i]) + } + result &= ^uint64(1 << 63) // Ensure the highest bit is always 0 + if result == 0 { + return 1 + } + return result +} + +func getHexEncodedString(toEncode uint64) string { + //return hex.EncodeToString(hashBytes[:8]) + return fmt.Sprintf("%x", toEncode) //maybe? 
+} diff --git a/pkg/serverless/trace/propagation/carriers_test.go b/pkg/serverless/trace/propagation/carriers_test.go index f21226c088fa3..1a7314e60cc47 100644 --- a/pkg/serverless/trace/propagation/carriers_test.go +++ b/pkg/serverless/trace/propagation/carriers_test.go @@ -372,6 +372,72 @@ func TestSnsEntityCarrier(t *testing.T) { } } +func TestEventBridgeCarrier(t *testing.T) { + testcases := []struct { + name string + event events.EventBridgeEvent + expMap map[string]string + expErr string + }{ + { + name: "valid_trace_context", + event: events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{ + "x-datadog-trace-id": "123456789", + "x-datadog-parent-id": "987654321", + "x-datadog-sampling-priority": "1", + }, + }, + }, + expMap: map[string]string{ + "x-datadog-trace-id": "123456789", + "x-datadog-parent-id": "987654321", + "x-datadog-sampling-priority": "1", + }, + expErr: "", + }, + { + name: "missing_trace_context", + event: events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{}, + }, + }, + expMap: nil, + expErr: "No Datadog trace context found", + }, + { + name: "nil_trace_context", + event: events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: nil, + }, + }, + expMap: nil, + expErr: "No Datadog trace context found", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + tm, err := eventBridgeCarrier(tc.event) + t.Logf("eventBridgeCarrier returned TextMapReader=%#v error=%#v", tm, err) + assert.Equal(t, tc.expErr != "", err != nil) + if tc.expErr != "" { + assert.ErrorContains(t, err, tc.expErr) + } + assert.Equal(t, tc.expMap, getMapFromCarrier(tm)) + }) + } +} + func TestExtractTraceContextfromAWSTraceHeader(t *testing.T) { ctx := func(trace, parent, priority uint64) *TraceContext { return &TraceContext{ @@ 
-735,3 +801,75 @@ func TestHeadersCarrier(t *testing.T) { }) } } + +func Test_stringToDdSpanId(t *testing.T) { + type args struct { + execArn string + stateName string + stateEnteredTime string + } + tests := []struct { + name string + args args + want uint64 + }{ + {"first Test Case", + args{ + "arn:aws:states:sa-east-1:601427271234:express:DatadogStateMachine:acaf1a67-336a-e854-1599-2a627eb2dd8a:c8baf081-31f1-464d-971f-70cb17d01111", + "step-one", + "2022-12-08T21:08:19.224Z", + }, + 4340734536022949921, + }, + { + "second Test Case", + args{ + "arn:aws:states:sa-east-1:601427271234:express:DatadogStateMachine:acaf1a67-336a-e854-1599-2a627eb2dd8a:c8baf081-31f1-464d-971f-70cb17d01111", + "step-one", + "2022-12-08T21:08:19.224Y", + }, + 981693280319792699, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, stringToDdSpanID(tt.args.execArn, tt.args.stateName, tt.args.stateEnteredTime), "stringToDdSpanID(%v, %v, %v)", tt.args.execArn, tt.args.stateName, tt.args.stateEnteredTime) + }) + } +} + +func Test_stringToDdTraceIds(t *testing.T) { + type args struct { + toHash string + } + tests := []struct { + name string + args args + expectedLower64 uint64 + expectedUpper64Hex string + }{ + { + "first Test Case", + args{ + "arn:aws:states:sa-east-1:425362996713:stateMachine:MyStateMachine-b276uka1j", + }, + 1680583253837593461, + "60ee1db79e4803f8", + }, + { + "lifecycle_test.go TestStartExecutionSpanStepFunctionEvent test case", + args{ + "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44", + }, + 5744042798732701615, + "1914fe7789eb32be", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1 := stringToDdTraceIDs(tt.args.toHash) + assert.Equalf(t, tt.expectedLower64, got, "stringToDdTraceIDs(%v)", tt.args.toHash) + assert.Equalf(t, tt.expectedUpper64Hex, got1, "stringToDdTraceIDs(%v)", tt.args.toHash) + }) + } +} diff --git 
a/pkg/serverless/trace/propagation/extractor.go b/pkg/serverless/trace/propagation/extractor.go index eb745f4f49175..d6c756ae1693f 100644 --- a/pkg/serverless/trace/propagation/extractor.go +++ b/pkg/serverless/trace/propagation/extractor.go @@ -45,9 +45,10 @@ type Extractor struct { // TraceContext stores the propagated trace context values. type TraceContext struct { - TraceID uint64 - ParentID uint64 - SamplingPriority sampler.SamplingPriority + TraceID uint64 + TraceIDUpper64Hex string + ParentID uint64 + SamplingPriority sampler.SamplingPriority } // TraceContextExtended stores the propagated trace context values plus other @@ -100,6 +101,8 @@ func (e Extractor) extract(event interface{}) (*TraceContext, error) { return nil, errorNoSNSRecordFound case events.SNSEntity: carrier, err = snsEntityCarrier(ev) + case events.EventBridgeEvent: + carrier, err = eventBridgeCarrier(ev) case events.APIGatewayProxyRequest: carrier, err = headersCarrier(ev.Headers) case events.APIGatewayV2HTTPRequest: @@ -112,6 +115,11 @@ func (e Extractor) extract(event interface{}) (*TraceContext, error) { carrier, err = headersCarrier(ev.Headers) case events.LambdaFunctionURLRequest: carrier, err = headersCarrier(ev.Headers) + case events.StepFunctionPayload: + tc, err := extractTraceContextFromStepFunctionContext(ev) + if err == nil { + return tc, nil + } default: err = errorUnsupportedExtractionType } diff --git a/pkg/serverless/trace/propagation/extractor_test.go b/pkg/serverless/trace/propagation/extractor_test.go index 5635f0714aaeb..8cdd07c6d9027 100644 --- a/pkg/serverless/trace/propagation/extractor_test.go +++ b/pkg/serverless/trace/propagation/extractor_test.go @@ -9,6 +9,7 @@ import ( "encoding/base64" "encoding/json" "errors" + "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" "net/http" "os" "testing" @@ -407,6 +408,72 @@ func TestExtractorExtract(t *testing.T) { expNoErr: true, }, + // events.EventBridgeEvent + { + name: "eventbridge-event-empty", + events: []interface{}{ + 
events.EventBridgeEvent{}, + }, + expCtx: nil, + expNoErr: false, + }, + { + name: "eventbridge-event-with-dd-headers", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: headersMapDD, + }, + }, + }, + expCtx: ddTraceContext, + expNoErr: true, + }, + { + name: "eventbridge-event-with-all-headers", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: headersMapAll, + }, + }, + }, + expCtx: ddTraceContext, + expNoErr: true, + }, + { + name: "eventbridge-event-with-w3c-headers", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: headersMapW3C, + }, + }, + }, + expCtx: w3cTraceContext, + expNoErr: true, + }, + { + name: "eventbridge-event-without-trace-context", + events: []interface{}{ + events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{}, + }, + }, + }, + expCtx: nil, + expNoErr: false, + }, + // events.APIGatewayProxyRequest: { name: "APIGatewayProxyRequest", @@ -510,6 +577,34 @@ func TestExtractorExtract(t *testing.T) { expCtx: nil, expNoErr: false, }, + + // Step Functions event + { + name: "step-function-event with no input", + events: []interface{}{ + events.StepFunctionPayload{ + Execution: struct { + ID string + }{ + ID: "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:aa6c9316-713a-41d4-9c30-61131716744f", + }, + State: struct { + Name string + EnteredTime string + }{ + Name: "agocsTest1", + EnteredTime: "2024-07-30T20:46:20.824Z", + }, + }, + }, + expCtx: &TraceContext{ + TraceID: 5377636026938777059, + TraceIDUpper64Hex: "6fb5c3a05c73dbfe", + ParentID: 8947638978974359093, + SamplingPriority: 1, + }, + expNoErr: true, + }, } for _, tc := range testcases { @@ -990,3 +1085,35 @@ func 
TestConvertStrToUint64(t *testing.T) { }) } } + +func TestEventBridgeCarrierWithW3CHeaders(t *testing.T) { + const ( + testResourceName = "test-event-bus" + testStartTime = "1632150183123456789" + ) + + event := events.EventBridgeEvent{ + Detail: struct { + TraceContext map[string]string `json:"_datadog"` + }{ + TraceContext: map[string]string{ + "traceparent": headersMapW3C["traceparent"], + "tracestate": headersMapW3C["tracestate"], + "x-datadog-resource-name": testResourceName, + "x-datadog-start-time": testStartTime, + }, + }, + } + + carrier, err := eventBridgeCarrier(event) + assert.NoError(t, err) + assert.NotNil(t, carrier) + + textMapCarrier, ok := carrier.(tracer.TextMapCarrier) + assert.True(t, ok) + + assert.Equal(t, headersMapW3C["traceparent"], textMapCarrier["traceparent"]) + assert.Equal(t, headersMapW3C["tracestate"], textMapCarrier["tracestate"]) + assert.Equal(t, testResourceName, textMapCarrier["x-datadog-resource-name"]) + assert.Equal(t, testStartTime, textMapCarrier["x-datadog-start-time"]) +} diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json b/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json index 96c11000600ae..f068b8efad806 100644 --- a/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json +++ b/pkg/serverless/trace/testdata/event_samples/eventbridge-custom.json @@ -12,7 +12,9 @@ "_datadog": { "x-datadog-trace-id": "12345", "x-datadog-parent-id": "67890", - "x-datadog-sampling-priority": "2" + "x-datadog-sampling-priority": "2", + "x-datadog-resource-name": "testBus", + "x-datadog-start-time": "100" } } } diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridge-no-bus.json b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-bus.json new file mode 100644 index 0000000000000..b0d7ede4e571b --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-bus.json @@ -0,0 +1,19 @@ +{ + "version": "0", + "id": 
"fd03f394-e769-eff5-08a8-53c228933591", + "detail-type": "testdetail", + "source": "eventbridge.custom.event.sender", + "account": "425362996713", + "time": "2021-11-04T01:37:45Z", + "region": "sa-east-1", + "resources": [], + "detail": { + "foo": "bar", + "_datadog": { + "x-datadog-trace-id": "12345", + "x-datadog-parent-id": "67890", + "x-datadog-sampling-priority": "2", + "x-datadog-start-time": "100" + } + } +} diff --git a/pkg/serverless/trace/testdata/event_samples/eventbridge-no-timestamp.json b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-timestamp.json new file mode 100644 index 0000000000000..89222beabd4b3 --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/eventbridge-no-timestamp.json @@ -0,0 +1,19 @@ +{ + "version": "0", + "id": "fd03f394-e769-eff5-08a8-53c228933591", + "detail-type": "testdetail", + "source": "eventbridge.custom.event.sender", + "account": "425362996713", + "time": "2024-09-16T16:58:45Z", + "region": "sa-east-1", + "resources": [], + "detail": { + "foo": "bar", + "_datadog": { + "x-datadog-trace-id": "12345", + "x-datadog-parent-id": "67890", + "x-datadog-sampling-priority": "2", + "x-datadog-resource-name": "testBus" + } + } +} diff --git a/pkg/serverless/trace/testdata/event_samples/stepfunction.json b/pkg/serverless/trace/testdata/event_samples/stepfunction.json new file mode 100644 index 0000000000000..74e4c010aac2d --- /dev/null +++ b/pkg/serverless/trace/testdata/event_samples/stepfunction.json @@ -0,0 +1,21 @@ +{ + "Payload": { + "Execution": { + "Id": "arn:aws:states:us-east-1:425362996713:execution:agocsTestSF:bc9f281c-3daa-4e5a-9a60-471a3810bf44", + "Input": {}, + "StartTime": "2024-07-30T19:55:52.976Z", + "Name": "bc9f281c-3daa-4e5a-9a60-471a3810bf44", + "RoleArn": "arn:aws:iam::425362996713:role/test-serverless-stepfunctions-dev-AgocsTestSFRole-tRkeFXScjyk4", + "RedriveCount": 0 + }, + "StateMachine": { + "Id": "arn:aws:states:us-east-1:425362996713:stateMachine:agocsTestSF", + "Name": 
"agocsTestSF" + }, + "State": { + "Name": "agocsTest1", + "EnteredTime": "2024-07-30T19:55:53.018Z", + "RetryCount": 0 + } + } +} diff --git a/pkg/serverless/trace/trace.go b/pkg/serverless/trace/trace.go index 1c7d901e04619..c941f113069a3 100644 --- a/pkg/serverless/trace/trace.go +++ b/pkg/serverless/trace/trace.go @@ -11,19 +11,20 @@ import ( "os" "strings" + "github.com/DataDog/datadog-go/v5/statsd" + "github.com/DataDog/datadog-agent/cmd/serverless-init/cloudservice" compcorecfg "github.com/DataDog/datadog-agent/comp/core/config" zstd "github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd" comptracecfg "github.com/DataDog/datadog-agent/comp/trace/config" - ddConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/agent" "github.com/DataDog/datadog-agent/pkg/trace/api" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-go/v5/statsd" ) // ServerlessTraceAgent represents a trace agent in a serverless context @@ -96,7 +97,7 @@ func StartServerlessTraceAgent(enabled bool, loadConfig Load, lambdaSpanChan cha // Set the serverless config option which will be used to determine if // hostname should be resolved. Skipping hostname resolution saves >1s // in load time between gRPC calls and agent commands. 
- ddConfig.Datadog().Set("serverless.enabled", true, model.SourceAgentRuntime) + pkgconfigsetup.Datadog().Set("serverless.enabled", true, model.SourceAgentRuntime) tc, confErr := loadConfig.Load() if confErr != nil { diff --git a/pkg/serverless/trigger/events.go b/pkg/serverless/trigger/events.go index 95dca9ad76348..eea104d487de1 100644 --- a/pkg/serverless/trigger/events.go +++ b/pkg/serverless/trigger/events.go @@ -76,6 +76,12 @@ const ( // LambdaFunctionURLEvent describes an event from an HTTP lambda function URL invocation LambdaFunctionURLEvent + + // StepFunctionEvent describes an event with a Step Function execution context + StepFunctionEvent + + // LegacyStepFunctionEvent describes an event with a Legacy Lambda Step Function execution context + LegacyStepFunctionEvent ) // eventParseFunc defines the signature of AWS event parsing functions @@ -110,6 +116,8 @@ var ( {isAppSyncResolverEvent, AppSyncResolverEvent}, {isEventBridgeEvent, EventBridgeEvent}, {isLambdaFunctionURLEvent, LambdaFunctionURLEvent}, + {isStepFunctionEvent, StepFunctionEvent}, + {isLegacyStepFunctionEvent, LegacyStepFunctionEvent}, // Ultimately check this is a Kong API Gateway event as a last resort. 
// This is because Kong API Gateway events are a subset of API Gateway events // as of https://github.com/Kong/kong/blob/348c980/kong/plugins/aws-lambda/request-util.lua#L248-L260 @@ -270,6 +278,32 @@ func isLambdaFunctionURLEvent(event map[string]any) bool { return strings.Contains(lambdaURL, "lambda-url") } +func isLegacyStepFunctionEvent(event map[string]any) bool { + execId := json.GetNestedValue(event, "payload", "execution", "id") + if execId == nil { + return false + } + stateName := json.GetNestedValue(event, "payload", "state", "name") + if stateName == nil { + return false + } + stateEnteredTime := json.GetNestedValue(event, "payload", "state", "enteredtime") + return stateEnteredTime != nil +} + +func isStepFunctionEvent(event map[string]any) bool { + execId := json.GetNestedValue(event, "execution", "id") + if execId == nil { + return false + } + stateName := json.GetNestedValue(event, "state", "name") + if stateName == nil { + return false + } + stateEnteredTime := json.GetNestedValue(event, "state", "enteredtime") + return stateEnteredTime != nil +} + func eventRecordsKeyExists(event map[string]any, key string) bool { records, ok := json.GetNestedValue(event, "records").([]interface{}) if !ok { @@ -336,6 +370,8 @@ func (et AWSEventType) String() string { return "EventBridgeEvent" case LambdaFunctionURLEvent: return "LambdaFunctionURLEvent" + case StepFunctionEvent: + return "StepFunctionEvent" default: return fmt.Sprintf("EventType(%d)", et) } diff --git a/pkg/serverless/trigger/events/events.go b/pkg/serverless/trigger/events/events.go index 23c0e6566d853..f3213e82ef94e 100644 --- a/pkg/serverless/trigger/events/events.go +++ b/pkg/serverless/trigger/events/events.go @@ -230,7 +230,10 @@ type KinesisRecord struct { type EventBridgeEvent struct { DetailType string `json:"detail-type"` Source string - StartTime string + Time time.Time + Detail struct { + TraceContext map[string]string `json:"_datadog"` + } } // S3Event mirrors events.S3Event type, 
removing unused fields. @@ -334,3 +337,21 @@ type LambdaFunctionURLRequestContextHTTPDescription struct { SourceIP string UserAgent string } + +// StepFunctionEvent is the event you get when you instrument a legacy Stepfunction Lambda:Invoke task state +// as recommended by https://docs.datadoghq.com/serverless/step_functions/installation?tab=custom +// This isn't an "official" event, as a default StepFunction invocation will just contain {} +type StepFunctionEvent struct { + Payload StepFunctionPayload +} + +// StepFunctionPayload is the payload of a StepFunctionEvent. It's also a non-legacy version of the `StepFunctionEvent`. +type StepFunctionPayload struct { + Execution struct { + ID string + } + State struct { + Name string + EnteredTime string + } +} diff --git a/pkg/serverless/trigger/events_test.go b/pkg/serverless/trigger/events_test.go index 9a9130f2567d6..f5462feafc4fb 100644 --- a/pkg/serverless/trigger/events_test.go +++ b/pkg/serverless/trigger/events_test.go @@ -37,6 +37,7 @@ func TestEventPayloadParsing(t *testing.T) { "sns.json": isSNSEvent, "sqs.json": isSQSEvent, "lambdaurl.json": isLambdaFunctionURLEvent, + "stepfunction.json": isStepFunctionEvent, } for testFile, testFunc := range testCases { file, err := os.Open(fmt.Sprintf("%v/%v", testDir, testFile)) @@ -115,6 +116,8 @@ func TestGetEventType(t *testing.T) { "sns.json": SNSEvent, "sqs.json": SQSEvent, "lambdaurl.json": LambdaFunctionURLEvent, + "stepfunction.json": StepFunctionEvent, + "legacystepfunction.json": LegacyStepFunctionEvent, } for testFile, expectedEventType := range testCases { diff --git a/pkg/serverless/trigger/testData/legacystepfunction.json b/pkg/serverless/trigger/testData/legacystepfunction.json new file mode 100644 index 0000000000000..70fe2e5c995d3 --- /dev/null +++ b/pkg/serverless/trigger/testData/legacystepfunction.json @@ -0,0 +1,22 @@ +{ + "FunctionName": "${lambdaArn}", + "Payload": { + "Execution": { + "Id": 
"arn:aws:states:sa-east-1:425362996713:execution:invokeJavaLambda:c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "Input": {}, + "StartTime": "2024-08-29T21:48:55.187Z", + "Name": "c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "RoleArn": "arn:aws:iam::425362996713:role/new-extension-test-java-dev-InvokeJavaLambdaRole-LtJmnJReIOTS", + "RedriveCount": 0 + }, + "StateMachine": { + "Id": "arn:aws:states:sa-east-1:425362996713:stateMachine:invokeJavaLambda", + "Name": "invokeJavaLambda" + }, + "State": { + "Name": "invoker", + "EnteredTime": "2024-08-29T21:48:55.275Z", + "RetryCount": 0 + } + } +} diff --git a/pkg/serverless/trigger/testData/stepfunction.json b/pkg/serverless/trigger/testData/stepfunction.json new file mode 100644 index 0000000000000..5cdd11d7f405e --- /dev/null +++ b/pkg/serverless/trigger/testData/stepfunction.json @@ -0,0 +1,19 @@ +{ + "Execution": { + "Id": "arn:aws:states:sa-east-1:425362996713:execution:invokeJavaLambda:c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "Input": {}, + "StartTime": "2024-08-29T21:48:55.187Z", + "Name": "c0ca8d0f-a3af-4c42-bfd4-b3b100e77f01", + "RoleArn": "arn:aws:iam::425362996713:role/new-extension-test-java-dev-InvokeJavaLambdaRole-LtJmnJReIOTS", + "RedriveCount": 0 + }, + "StateMachine": { + "Id": "arn:aws:states:sa-east-1:425362996713:stateMachine:invokeJavaLambda", + "Name": "invokeJavaLambda" + }, + "State": { + "Name": "invoker", + "EnteredTime": "2024-08-29T21:48:55.275Z", + "RetryCount": 0 + } +} diff --git a/pkg/snmp/snmp.go b/pkg/snmp/snmp.go index 6ee423b23df50..6913ae4a866a7 100644 --- a/pkg/snmp/snmp.go +++ b/pkg/snmp/snmp.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/viper" "github.com/gosnmp/gosnmp" - coreconfig "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/snmp/gosnmplib" "github.com/DataDog/datadog-agent/pkg/snmp/snmpintegration" @@ -119,13 +119,13 @@ func NewListenerConfig() (ListenerConfig, error) { 
snmpConfig.CollectDeviceMetadata = true snmpConfig.CollectTopology = true - if coreconfig.Datadog().IsSet("network_devices.autodiscovery") { - err := coreconfig.Datadog().UnmarshalKey("network_devices.autodiscovery", &snmpConfig, opt) + if pkgconfigsetup.Datadog().IsSet("network_devices.autodiscovery") { + err := pkgconfigsetup.Datadog().UnmarshalKey("network_devices.autodiscovery", &snmpConfig, opt) if err != nil { return snmpConfig, err } - } else if coreconfig.Datadog().IsSet("snmp_listener") { - err := coreconfig.Datadog().UnmarshalKey("snmp_listener", &snmpConfig, opt) + } else if pkgconfigsetup.Datadog().IsSet("snmp_listener") { + err := pkgconfigsetup.Datadog().UnmarshalKey("snmp_listener", &snmpConfig, opt) if err != nil { return snmpConfig, err } @@ -182,7 +182,7 @@ func NewListenerConfig() (ListenerConfig, error) { config.PingConfig.Timeout = firstNonNil(config.PingConfig.Timeout, snmpConfig.PingConfig.Timeout) config.PingConfig.Count = firstNonNil(config.PingConfig.Count, snmpConfig.PingConfig.Count) - config.Namespace = firstNonEmpty(config.Namespace, snmpConfig.Namespace, coreconfig.Datadog().GetString("network_devices.namespace")) + config.Namespace = firstNonEmpty(config.Namespace, snmpConfig.Namespace, pkgconfigsetup.Datadog().GetString("network_devices.namespace")) config.Community = firstNonEmpty(config.Community, config.CommunityLegacy) config.AuthKey = firstNonEmpty(config.AuthKey, config.AuthKeyLegacy) config.AuthProtocol = firstNonEmpty(config.AuthProtocol, config.AuthProtocolLegacy) diff --git a/pkg/snmp/snmp_test.go b/pkg/snmp/snmp_test.go index 5230eb3c3b608..bffe8bef122b6 100644 --- a/pkg/snmp/snmp_test.go +++ b/pkg/snmp/snmp_test.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/snmp/snmpintegration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/gosnmp/gosnmp" "github.com/stretchr/testify/assert" @@ -89,10 +89,10 @@ func 
TestBuildSNMPParams(t *testing.T) { } func TestNewListenerConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") // default collect_device_metadata should be true - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - network: 127.0.0.1/30 @@ -114,7 +114,7 @@ snmp_listener: assert.Equal(t, false, conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: false - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: collect_device_metadata: false configs: @@ -137,7 +137,7 @@ snmp_listener: assert.Equal(t, false, conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: true - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: collect_device_metadata: true configs: @@ -161,10 +161,10 @@ snmp_listener: } func TestNewNetworkDevicesListenerConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") // default collect_device_metadata should be true - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -187,7 +187,7 @@ network_devices: assert.Equal(t, false, conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: false - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: collect_device_metadata: false @@ -211,7 +211,7 @@ network_devices: assert.Equal(t, false, conf.Configs[2].CollectDeviceMetadata) // collect_device_metadata: true - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: 
collect_device_metadata: true @@ -236,10 +236,10 @@ network_devices: } func TestBothListenersConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") + pkgconfigsetup.Datadog().SetConfigType("yaml") // check that network_devices config override the snmp_listener config - err := config.Datadog().ReadConfig(strings.NewReader(` + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: collect_device_metadata: true configs: @@ -272,7 +272,7 @@ network_devices: assert.Equal(t, true, conf.Configs[2].CollectDeviceMetadata) // incorrect snmp_listener config and correct network_devices config - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - foo: bar @@ -299,7 +299,7 @@ network_devices: assert.Equal(t, true, conf.Configs[2].CollectDeviceMetadata) // incorrect snmp_listener config and correct network_devices config - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` snmp_listener: configs: - network: 127.0.0.4/30 @@ -318,8 +318,8 @@ network_devices: } func Test_LoaderConfig(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -338,7 +338,7 @@ network_devices: assert.Equal(t, "core", conf.Configs[1].Loader) assert.Equal(t, "python", conf.Configs[2].Loader) - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: loader: core @@ -361,8 +361,8 @@ network_devices: } func Test_MinCollectionInterval(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := 
pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: min_collection_interval: 60 @@ -381,8 +381,8 @@ network_devices: } func Test_Configs(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: workers: 10 @@ -438,7 +438,7 @@ network_devices: ///////////////// // legacy configs ///////////////// - err = config.Datadog().ReadConfig(strings.NewReader(` + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: allowed_failures: 15 @@ -468,8 +468,8 @@ network_devices: func Test_NamespaceConfig(t *testing.T) { // Default Namespace - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: @@ -483,8 +483,8 @@ network_devices: assert.Equal(t, "default", networkConf.Namespace) // Custom Namespace in network_devices - config.Datadog().SetConfigType("yaml") - err = config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: namespace: ponyo autodiscovery: @@ -499,8 +499,8 @@ network_devices: assert.Equal(t, "ponyo", networkConf.Namespace) // Custom Namespace in snmp_listener - config.Datadog().SetConfigType("yaml") - err = config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err = pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: namespace: totoro @@ -528,8 +528,8 @@ func TestFirstNonEmpty(t *testing.T) { } func Test_UseDeviceIDAsHostname(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := 
config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: use_device_id_as_hostname: true @@ -548,8 +548,8 @@ network_devices: } func Test_CollectTopology_withRootCollectTopologyFalse(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: collect_topology: false @@ -571,8 +571,8 @@ network_devices: } func Test_CollectTopology_withRootCollectTopologyTrue(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: collect_topology: true @@ -594,8 +594,8 @@ network_devices: } func Test_CollectTopology_withRootCollectTopologyUnset(t *testing.T) { - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(` + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(` network_devices: autodiscovery: configs: diff --git a/pkg/status/endpoints/status.go b/pkg/status/endpoints/status.go index b613cb04eb7d4..11007c87414d6 100644 --- a/pkg/status/endpoints/status.go +++ b/pkg/status/endpoints/status.go @@ -11,13 +11,13 @@ import ( "io" "github.com/DataDog/datadog-agent/comp/core/status" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" ) // PopulateStatus populates the status stats func PopulateStatus(stats map[string]interface{}) { - endpoints, err := utils.GetMultipleEndpoints(config.Datadog()) + endpoints, err := 
utils.GetMultipleEndpoints(pkgconfigsetup.Datadog()) if err != nil { stats["endpointsInfos"] = nil return diff --git a/pkg/tagger/types/types.go b/pkg/tagger/types/types.go index 51fc672461c0b..07e9f0f1d1698 100644 --- a/pkg/tagger/types/types.go +++ b/pkg/tagger/types/types.go @@ -21,10 +21,10 @@ const ( // OriginInfo contains the Origin Detection information. type OriginInfo struct { - FromUDS string // FromUDS is the origin resolved using Unix Domain Socket. - FromTag string // FromTag is the origin resolved from tags. - FromMsg string // FromMsg is the origin resolved from the message. - ExternalData string // ExternalData is the external data list. - Cardinality string // Cardinality is the cardinality of the resolved origin. - ProductOrigin ProductOrigin // ProductOrigin is the product that sent the origin information. + ContainerIDFromSocket string // ContainerIDFromSocket is the origin resolved using Unix Domain Socket. + PodUID string // PodUID is the origin resolved from the Kubernetes Pod UID. + ContainerID string // ContainerID is the origin resolved from the container ID. + ExternalData string // ExternalData is the external data list. + Cardinality string // Cardinality is the cardinality of the resolved origin. + ProductOrigin ProductOrigin // ProductOrigin is the product that sent the origin information. } diff --git a/pkg/trace/api/info.go b/pkg/trace/api/info.go index 4ea4a5e5bb509..79d1d55d7d5ee 100644 --- a/pkg/trace/api/info.go +++ b/pkg/trace/api/info.go @@ -10,8 +10,10 @@ import ( "encoding/json" "fmt" "net/http" + "slices" "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/trace/stats" ) // makeInfoHandler returns a new handler for handling the discovery endpoint. 
@@ -61,6 +63,17 @@ func (r *HTTPReceiver) makeInfoHandler() (hash string, handler http.HandlerFunc) oconf.Redis = o.Redis oconf.Memcached = o.Memcached } + + // We check that endpoints contains stats, even though we know this version of the + // agent supports it. It's conceivable that the stats endpoint could be disabled at some point + // so this is defensive against that case. + canDropP0 := !r.conf.ProbabilisticSamplerEnabled && slices.Contains(all, "/v0.6/stats") + + var spanKindsStatsComputed []string + if r.conf.ComputeStatsBySpanKind { + spanKindsStatsComputed = stats.KindsComputed + } + txt, err := json.MarshalIndent(struct { Version string `json:"version"` GitCommit string `json:"git_commit"` @@ -72,15 +85,17 @@ func (r *HTTPReceiver) makeInfoHandler() (hash string, handler http.HandlerFunc) EvpProxyAllowedHeaders []string `json:"evp_proxy_allowed_headers"` Config reducedConfig `json:"config"` PeerTags []string `json:"peer_tags"` + SpanKindsStatsComputed []string `json:"span_kinds_stats_computed"` }{ Version: r.conf.AgentVersion, GitCommit: r.conf.GitCommit, Endpoints: all, FeatureFlags: r.conf.AllFeatures(), - ClientDropP0s: true, + ClientDropP0s: canDropP0, SpanMetaStructs: true, LongRunningSpans: true, EvpProxyAllowedHeaders: EvpProxyAllowedHeaders, + SpanKindsStatsComputed: spanKindsStatsComputed, Config: reducedConfig{ DefaultEnv: r.conf.DefaultEnv, TargetTPS: r.conf.TargetTPS, diff --git a/pkg/trace/api/info_test.go b/pkg/trace/api/info_test.go index 0d82e3fd41b2c..d8fc856b6a6be 100644 --- a/pkg/trace/api/info_test.go +++ b/pkg/trace/api/info_test.go @@ -302,6 +302,7 @@ func TestInfoHandler(t *testing.T) { "long_running_spans": nil, "evp_proxy_allowed_headers": nil, "peer_tags": nil, + "span_kinds_stats_computed": nil, "config": map[string]interface{}{ "default_env": nil, "target_tps": nil, diff --git a/pkg/trace/api/otlp.go b/pkg/trace/api/otlp.go index fb1a9d20b58ee..1f645164ba6b6 100644 --- a/pkg/trace/api/otlp.go +++ b/pkg/trace/api/otlp.go @@ 
-207,7 +207,8 @@ func (o *OTLPReceiver) ReceiveResourceSpans(ctx context.Context, rspans ptrace.R if !srcok { hostFromMap(rattr, "_dd.hostname") } - env := rattr[string(semconv.AttributeDeploymentEnvironment)] + // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked + _, env := getFirstFromMap(rattr, "deployment.environment.name", semconv.AttributeDeploymentEnvironment) lang := rattr[string(semconv.AttributeTelemetrySDKLanguage)] if lang == "" { lang = fastHeaderGet(httpHeader, header.Lang) @@ -588,7 +589,8 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume return true }) if _, ok := span.Meta["env"]; !ok { - if env := span.Meta[string(semconv.AttributeDeploymentEnvironment)]; env != "" { + // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked + if _, env := getFirstFromMap(span.Meta, "deployment.environment.name", semconv.AttributeDeploymentEnvironment); env != "" { setMetaOTLP(span, "env", traceutil.NormalizeTag(env)) } } diff --git a/pkg/trace/api/otlp_test.go b/pkg/trace/api/otlp_test.go index 05a019f9e7e67..75895a36f99a5 100644 --- a/pkg/trace/api/otlp_test.go +++ b/pkg/trace/api/otlp_test.go @@ -288,6 +288,18 @@ func TestOTLPReceiveResourceSpans(t *testing.T) { require.Equal("depenv", out.Env) }, }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{"deployment.environment.name": "staging"}, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("staging", out.Env) + }, + }, { in: []testutil.OTLPResourceSpan{ { @@ -303,6 +315,21 @@ func TestOTLPReceiveResourceSpans(t *testing.T) { require.Equal("spanenv", out.Env) }, }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + {Attributes: map[string]interface{}{"deployment.environment.name": "spanenv2"}}, + 
}, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("spanenv2", out.Env) + }, + }, { in: []testutil.OTLPResourceSpan{ { diff --git a/pkg/trace/api/telemetry.go b/pkg/trace/api/telemetry.go index 4a1880cb6e570..a45ac4105042f 100644 --- a/pkg/trace/api/telemetry.go +++ b/pkg/trace/api/telemetry.go @@ -36,6 +36,7 @@ const ( awsLambda cloudResourceType = "AWSLambda" awsFargate cloudResourceType = "AWSFargate" cloudRun cloudResourceType = "GCPCloudRun" + cloudFunctions cloudResourceType = "GCPCloudFunctions" azureAppService cloudResourceType = "AzureAppService" azureContainerApp cloudResourceType = "AzureContainerApp" aws cloudProvider = "AWS" @@ -257,8 +258,9 @@ func (f *TelemetryForwarder) setRequestHeader(req *http.Request) { req.Header.Set(header.ContainerID, containerID) } if containerTags != "" { - req.Header.Set("x-datadog-container-tags", containerTags) - log.Debugf("Setting header x-datadog-container-tags=%s for telemetry proxy", containerTags) + ctagsHeader := normalizeHTTPHeader(containerTags) + req.Header.Set("X-Datadog-Container-Tags", ctagsHeader) + log.Debugf("Setting header X-Datadog-Container-Tags=%s for telemetry proxy", ctagsHeader) } if f.conf.InstallSignature.Found { req.Header.Set("DD-Agent-Install-Id", f.conf.InstallSignature.InstallID) @@ -282,6 +284,12 @@ func (f *TelemetryForwarder) setRequestHeader(req *http.Request) { if serviceName, found := f.conf.GlobalTags["service_name"]; found { req.Header.Set(cloudResourceIdentifierHeader, serviceName) } + case "cloudfunction": + req.Header.Set(cloudProviderHeader, string(gcp)) + req.Header.Set(cloudResourceTypeHeader, string(cloudFunctions)) + if serviceName, found := f.conf.GlobalTags["service_name"]; found { + req.Header.Set(cloudResourceIdentifierHeader, serviceName) + } case "appservice": req.Header.Set(cloudProviderHeader, string(azure)) req.Header.Set(cloudResourceTypeHeader, string(azureAppService)) diff --git a/pkg/trace/api/telemetry_test.go b/pkg/trace/api/telemetry_test.go 
index 5c37737da22ae..6662572cf3b6a 100644 --- a/pkg/trace/api/telemetry_test.go +++ b/pkg/trace/api/telemetry_test.go @@ -84,6 +84,7 @@ func TestTelemetryBasicProxyRequest(t *testing.T) { assert.Equal("AWS", req.Header.Get("DD-Cloud-Provider")) assert.Equal("AWSLambda", req.Header.Get("DD-Cloud-Resource-Type")) assert.Equal("test_ARN", req.Header.Get("DD-Cloud-Resource-Identifier")) + assert.Equal("key:test_value", req.Header.Get("X-Datadog-Container-Tags")) assert.Equal("/path", req.URL.Path) assert.Equal("", req.Header.Get("User-Agent")) assert.Regexp(regexp.MustCompile("trace-agent.*"), req.Header.Get("Via")) @@ -94,7 +95,11 @@ func TestTelemetryBasicProxyRequest(t *testing.T) { cfg := getTestConfig(srv.URL) cfg.GlobalTags[functionARNKeyTag] = "test_ARN" + cfg.ContainerTags = func(_ string) ([]string, error) { + return []string{"key:test\nvalue"}, nil + } recv := newTestReceiverFromConfig(cfg) + recv.telemetryForwarder.containerIDProvider = getTestContainerIDProvider() assertSendRequest(t, recv, endpointCalled) } diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index a16cdf5d8f6a9..ffbca4d83b1f8 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -21,7 +21,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/DataDog/sketches-go v1.4.2 github.com/Microsoft/go-winio v0.6.1 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index 0acbab963039c..4ba7b5e0d92e4 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -6,8 +6,8 @@ github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/ github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf 
v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0 h1:10TPqpTlIkmDPFWVIEZ4ZX3rWrCrx3rEoeoAooZr6LM= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.14.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= diff --git a/pkg/trace/stats/otel_util.go b/pkg/trace/stats/otel_util.go index 8c9a5a2970daf..1c72b6fa640b8 100644 --- a/pkg/trace/stats/otel_util.go +++ b/pkg/trace/stats/otel_util.go @@ -49,7 +49,8 @@ func OTLPTracesToConcentratorInputs( if _, exists := ignoreResNames[traceutil.GetOTelResource(otelspan, otelres)]; exists { continue } - env := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeDeploymentEnvironment) + // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked + env := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, "deployment.environment.name", semconv.AttributeDeploymentEnvironment) hostname := traceutil.GetOTelHostname(otelspan, otelres, conf.OTLPReceiver.AttributesTranslator, conf.Hostname) version := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeServiceVersion) cid := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeContainerID, semconv.AttributeK8SPodUID) diff --git 
a/pkg/trace/stats/otel_util_test.go b/pkg/trace/stats/otel_util_test.go index e2c78f3ef0b73..1422226c8ecc5 100644 --- a/pkg/trace/stats/otel_util_test.go +++ b/pkg/trace/stats/otel_util_test.go @@ -117,6 +117,15 @@ func TestProcessOTLPTraces(t *testing.T) { libname: "spring", expected: createStatsPayload(agentEnv, agentHost, "svc", "op", "http", "client", "res", agentHost, "tracer-env", "", nil, nil, true, false), }, + { + name: "new env convention", + spanName: "spanname2", + rattrs: map[string]string{"service.name": "svc", "deployment.environment.name": "new-env"}, + sattrs: map[string]any{"operation.name": "op", "resource.name": "res"}, + spanKind: ptrace.SpanKindClient, + libname: "spring", + expected: createStatsPayload(agentEnv, agentHost, "svc", "op", "http", "client", "res", agentHost, "new-env", "", nil, nil, true, false), + }, { name: "span operation name from span name with db attribute, peerTagsAggr not enabled", spanName: "spanname3", diff --git a/pkg/trace/stats/oteltest/go.mod b/pkg/trace/stats/oteltest/go.mod index e3466f3cda4f7..59c592dd08090 100644 --- a/pkg/trace/stats/oteltest/go.mod +++ b/pkg/trace/stats/oteltest/go.mod @@ -7,7 +7,7 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.104.0 diff --git a/pkg/trace/stats/oteltest/go.sum b/pkg/trace/stats/oteltest/go.sum index a928af42bb225..8c6d9031f59ae 100644 --- a/pkg/trace/stats/oteltest/go.sum +++ b/pkg/trace/stats/oteltest/go.sum @@ -4,8 +4,8 @@ github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/ github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= 
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1 h1:ZI8u3CgdMXpDplrf9/gIr13+/g/tUzUcBMk2ZhXgzLE= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.16.1/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= github.com/DataDog/sketches-go v1.4.2 h1:gppNudE9d19cQ98RYABOetxIhpTCl4m7CnbRZjvVA/o= github.com/DataDog/sketches-go v1.4.2/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= diff --git a/pkg/trace/stats/span_concentrator.go b/pkg/trace/stats/span_concentrator.go index d886b9e7495f7..e48f937f267a5 100644 --- a/pkg/trace/stats/span_concentrator.go +++ b/pkg/trace/stats/span_concentrator.go @@ -6,6 +6,7 @@ package stats import ( + "slices" "strings" "sync" "time" @@ -159,12 +160,16 @@ func (sc *SpanConcentrator) NewStatSpan( // computeStatsForSpanKind returns true if the span.kind value makes the span eligible for stats computation. func computeStatsForSpanKind(kind string) bool { k := strings.ToLower(kind) - switch k { - case "server", "consumer", "client", "producer": - return true - default: - return false - } + return slices.Contains(KindsComputed, k) +} + +// KindsComputed is the list of span kinds that will have stats computed on them +// when computeStatsByKind is enabled in the concentrator. 
+var KindsComputed = []string{ + "server", + "consumer", + "client", + "producer", } func (sc *SpanConcentrator) addSpan(s *StatSpan, aggKey PayloadAggregationKey, containerID string, containerTags []string, origin string, weight float64) { diff --git a/pkg/util/cloudproviders/alibaba/alibaba.go b/pkg/util/cloudproviders/alibaba/alibaba.go index a27ef0ee0580e..3feded8d923e0 100644 --- a/pkg/util/cloudproviders/alibaba/alibaba.go +++ b/pkg/util/cloudproviders/alibaba/alibaba.go @@ -11,7 +11,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) @@ -36,16 +36,16 @@ func IsRunningOn(ctx context.Context) bool { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "Alibaba InstanceID", Attempt: func(ctx context.Context) (interface{}, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud provider is disabled by configuration") } endpoint := metadataURL + "/latest/meta-data/instance-id" - res, err := httputils.Get(ctx, endpoint, nil, timeout, config.Datadog()) + res, err := httputils.Get(ctx, endpoint, nil, timeout, pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("Alibaba HostAliases: unable to query metadata endpoint: %s", err) } - maxLength := config.Datadog().GetInt("metadata_endpoints_max_hostname_size") + maxLength := pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size") if len(res) > maxLength { return nil, fmt.Errorf("%v gave a response with length > to %v", endpoint, maxLength) } diff --git a/pkg/util/cloudproviders/alibaba/alibaba_test.go b/pkg/util/cloudproviders/alibaba/alibaba_test.go index 3e45cdb136c1c..e0ebd5ee0c769 100644 --- a/pkg/util/cloudproviders/alibaba/alibaba_test.go +++ 
b/pkg/util/cloudproviders/alibaba/alibaba_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetHostname(t *testing.T) { @@ -51,7 +51,7 @@ func TestGetNTPHosts(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"alibaba"}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"alibaba"}) actualHosts := GetNTPHosts(ctx) assert.Equal(t, expectedHosts, actualHosts) diff --git a/pkg/util/cloudproviders/azure/azure.go b/pkg/util/cloudproviders/azure/azure.go index 7d993cda9f8e6..30982aa54957d 100644 --- a/pkg/util/cloudproviders/azure/azure.go +++ b/pkg/util/cloudproviders/azure/azure.go @@ -13,7 +13,8 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" httputils "github.com/DataDog/datadog-agent/pkg/util/http" @@ -73,7 +74,7 @@ var vmIDFetcher = cachedfetch.Fetcher{ Attempt: func(ctx context.Context) (interface{}, error) { res, err := getResponseWithMaxLength(ctx, metadataURL+"/metadata/instance/compute/vmId?api-version=2017-04-02&format=text", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return nil, fmt.Errorf("Azure HostAliases: unable to query metadata VM ID endpoint: %s", err) } @@ -150,16 +151,16 @@ func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength in } func getResponse(ctx context.Context, url string) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if 
!pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } - return httputils.Get(ctx, url, map[string]string{"Metadata": "true"}, timeout, config.Datadog()) + return httputils.Get(ctx, url, map[string]string{"Metadata": "true"}, timeout, pkgconfigsetup.Datadog()) } // GetHostname returns hostname based on Azure instance metadata. func GetHostname(ctx context.Context) (string, error) { - return getHostnameWithConfig(ctx, config.Datadog()) + return getHostnameWithConfig(ctx, pkgconfigsetup.Datadog()) } var instanceMetaFetcher = cachedfetch.Fetcher{ @@ -196,7 +197,7 @@ func getMetadata(ctx context.Context) (metadata, error) { return metadataInfo, nil } -func getHostnameWithConfig(ctx context.Context, config config.Config) (string, error) { +func getHostnameWithConfig(ctx context.Context, config model.Config) (string, error) { style := config.GetString(hostnameStyleSetting) metadata, err := getMetadata(ctx) if err != nil { diff --git a/pkg/util/cloudproviders/azure/azure_test.go b/pkg/util/cloudproviders/azure/azure_test.go index 0563d553a1cb1..b499115ea3194 100644 --- a/pkg/util/cloudproviders/azure/azure_test.go +++ b/pkg/util/cloudproviders/azure/azure_test.go @@ -17,8 +17,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetAlias(t *testing.T) { @@ -137,7 +137,7 @@ func TestGetNTPHosts(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"azure"}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"azure"}) actualHosts := GetNTPHosts(ctx) assert.Equal(t, expectedHosts, actualHosts) diff --git 
a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go index 469dc883bf55f..773aa31b06baa 100644 --- a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go +++ b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util" ) @@ -28,7 +28,7 @@ var getFqdn = util.Fqdn // //nolint:revive // TODO(PLINT) Fix revive linter func GetHostAliases(_ context.Context) ([]string, error) { - if !config.Datadog().GetBool("cloud_foundry") { + if !pkgconfigsetup.Datadog().GetBool("cloud_foundry") { log.Debugf("cloud_foundry is not enabled in the conf: no cloudfoudry host alias") return nil, nil } @@ -36,7 +36,7 @@ func GetHostAliases(_ context.Context) ([]string, error) { aliases := []string{} // Always send the bosh_id if specified - boshID := config.Datadog().GetString("bosh_id") + boshID := pkgconfigsetup.Datadog().GetString("bosh_id") if boshID != "" { aliases = append(aliases, boshID) } @@ -44,7 +44,7 @@ func GetHostAliases(_ context.Context) ([]string, error) { hostname, _ := os.Hostname() fqdn := getFqdn(hostname) - if config.Datadog().GetBool("cf_os_hostname_aliasing") { + if pkgconfigsetup.Datadog().GetBool("cf_os_hostname_aliasing") { // If set, send os hostname and fqdn as additional aliases aliases = append(aliases, hostname) if fqdn != hostname { diff --git a/pkg/util/cloudproviders/cloudfoundry/garden.go b/pkg/util/cloudproviders/cloudfoundry/garden.go index 382134d4fd03a..aa6e274edd7fd 100644 --- a/pkg/util/cloudproviders/cloudfoundry/garden.go +++ b/pkg/util/cloudproviders/cloudfoundry/garden.go @@ -14,7 +14,7 @@ import ( "code.cloudfoundry.org/garden/client" "code.cloudfoundry.org/garden/client/connection" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -78,8 +78,8 @@ type GardenUtil struct { func GetGardenUtil() (*GardenUtil, error) { globalGardenUtilLock.Lock() defer globalGardenUtilLock.Unlock() - network := config.Datadog().GetString("cloud_foundry_garden.listen_network") - address := config.Datadog().GetString("cloud_foundry_garden.listen_address") + network := pkgconfigsetup.Datadog().GetString("cloud_foundry_garden.listen_network") + address := pkgconfigsetup.Datadog().GetString("cloud_foundry_garden.listen_address") if globalGardenUtil == nil { globalGardenUtil = &GardenUtil{ cli: client.New(connection.New(network, address)), diff --git a/pkg/util/cloudproviders/cloudproviders.go b/pkg/util/cloudproviders/cloudproviders.go index 10c1bd7395351..d9de6ac5fa68c 100644 --- a/pkg/util/cloudproviders/cloudproviders.go +++ b/pkg/util/cloudproviders/cloudproviders.go @@ -12,7 +12,7 @@ import ( "sync" logcomp "github.com/DataDog/datadog-agent/comp/core/log/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -99,8 +99,13 @@ type cloudProviderAliasesDetector struct { callback func(context.Context) ([]string, error) } +// getValidHostAliases is an alias from pkg config +func getValidHostAliases(ctx context.Context) ([]string, error) { + return pkgconfigsetup.GetValidHostAliases(ctx, pkgconfigsetup.Datadog()) +} + var hostAliasesDetectors = []cloudProviderAliasesDetector{ - {name: "config", callback: config.GetValidHostAliases}, + {name: "config", callback: getValidHostAliases}, {name: alibaba.CloudProviderName, callback: alibaba.GetHostAliases}, {name: ec2.CloudProviderName, callback: ec2.GetHostAliases}, {name: azure.CloudProviderName, callback: 
azure.GetHostAliases}, diff --git a/pkg/util/cloudproviders/gce/gce.go b/pkg/util/cloudproviders/gce/gce.go index 551fc14e8c875..2aa62af44bd01 100644 --- a/pkg/util/cloudproviders/gce/gce.go +++ b/pkg/util/cloudproviders/gce/gce.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" "github.com/DataDog/datadog-agent/pkg/util/common" httputils "github.com/DataDog/datadog-agent/pkg/util/http" @@ -38,7 +38,7 @@ var hostnameFetcher = cachedfetch.Fetcher{ Name: "GCP Hostname", Attempt: func(ctx context.Context) (interface{}, error) { hostname, err := getResponseWithMaxLength(ctx, metadataURL+"/instance/hostname", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve hostname from GCE: %s", err) } @@ -76,7 +76,7 @@ var nameFetcher = cachedfetch.Fetcher{ Attempt: func(ctx context.Context) (interface{}, error) { return getResponseWithMaxLength(ctx, metadataURL+"/instance/name", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) }, } @@ -85,7 +85,7 @@ var projectIDFetcher = cachedfetch.Fetcher{ Attempt: func(ctx context.Context) (interface{}, error) { projectID, err := getResponseWithMaxLength(ctx, metadataURL+"/project/project-id", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve project ID from GCE: %s", err) } @@ -123,7 +123,7 @@ var clusterNameFetcher = cachedfetch.Fetcher{ Name: "GCP Cluster Name", Attempt: func(ctx context.Context) (interface{}, error) { clusterName, err := getResponseWithMaxLength(ctx, 
metadataURL+"/instance/attributes/cluster-name", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve clustername from GCE: %s", err) } @@ -140,7 +140,7 @@ var publicIPv4Fetcher = cachedfetch.Fetcher{ Name: "GCP Public IP", Attempt: func(ctx context.Context) (interface{}, error) { publicIPv4, err := getResponseWithMaxLength(ctx, metadataURL+"/instance/network-interfaces/0/access-configs/0/external-ip", - config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to retrieve public IPv4 from GCE: %s", err) } @@ -216,11 +216,11 @@ func getResponseWithMaxLength(ctx context.Context, endpoint string, maxLength in } func getResponse(ctx context.Context, url string) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } - res, err := httputils.Get(ctx, url, map[string]string{"Metadata-Flavor": "Google"}, config.Datadog().GetDuration("gce_metadata_timeout")*time.Millisecond, config.Datadog()) + res, err := httputils.Get(ctx, url, map[string]string{"Metadata-Flavor": "Google"}, pkgconfigsetup.Datadog().GetDuration("gce_metadata_timeout")*time.Millisecond, pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("GCE metadata API error: %s", err) } diff --git a/pkg/util/cloudproviders/gce/gce_tags.go b/pkg/util/cloudproviders/gce/gce_tags.go index 8cd524479b281..1f5b05011aab5 100644 --- a/pkg/util/cloudproviders/gce/gce_tags.go +++ b/pkg/util/cloudproviders/gce/gce_tags.go @@ -13,7 +13,7 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup 
"github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -52,7 +52,7 @@ func getCachedTags(err error) ([]string, error) { // GetTags gets the tags from the GCE api func GetTags(ctx context.Context) ([]string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud provider is disabled by configuration") } @@ -85,7 +85,7 @@ func GetTags(ctx context.Context) ([]string, error) { } if metadata.Project.ProjectID != "" { tags = append(tags, fmt.Sprintf("project:%s", metadata.Project.ProjectID)) - if config.Datadog().GetBool("gce_send_project_id_tag") { + if pkgconfigsetup.Datadog().GetBool("gce_send_project_id_tag") { tags = append(tags, fmt.Sprintf("project_id:%s", metadata.Project.ProjectID)) } } @@ -110,7 +110,7 @@ func GetTags(ctx context.Context) ([]string, error) { // isAttributeExcluded returns whether the attribute key should be excluded from the tags func isAttributeExcluded(attr string) bool { - excludedAttributes := config.Datadog().GetStringSlice("exclude_gce_tags") + excludedAttributes := pkgconfigsetup.Datadog().GetStringSlice("exclude_gce_tags") for _, excluded := range excludedAttributes { if attr == excluded { return true diff --git a/pkg/util/cloudproviders/gce/gce_tags_test.go b/pkg/util/cloudproviders/gce/gce_tags_test.go index 4a7684fa93192..2eae7092d0b92 100644 --- a/pkg/util/cloudproviders/gce/gce_tags_test.go +++ b/pkg/util/cloudproviders/gce/gce_tags_test.go @@ -19,8 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" ) @@ -107,8 +107,8 @@ func 
TestGetHostTagsWithProjectID(t *testing.T) { server := mockMetadataRequest(t) defer server.Close() defer cache.Cache.Delete(tagsCacheKey) - config.Datadog().SetWithoutSource("gce_send_project_id_tag", true) - defer config.Datadog().SetWithoutSource("gce_send_project_id_tag", false) + pkgconfigsetup.Datadog().SetWithoutSource("gce_send_project_id_tag", true) + defer pkgconfigsetup.Datadog().SetWithoutSource("gce_send_project_id_tag", false) tags, err := GetTags(ctx) require.NoError(t, err) testTags(t, tags, expectedTagsWithProjectID) diff --git a/pkg/util/cloudproviders/gce/gce_test.go b/pkg/util/cloudproviders/gce/gce_test.go index 2503edd0d95d1..501b5a8a1acde 100644 --- a/pkg/util/cloudproviders/gce/gce_test.go +++ b/pkg/util/cloudproviders/gce/gce_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func reset() { @@ -230,7 +230,7 @@ func TestGetNTPHosts(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"gcp"}) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"gcp"}) actualHosts := GetNTPHosts(ctx) assert.Equal(t, expectedHosts, actualHosts) diff --git a/pkg/util/cloudproviders/ibm/ibm.go b/pkg/util/cloudproviders/ibm/ibm.go index f0cce17ce3cc5..00a7ef0c34e81 100644 --- a/pkg/util/cloudproviders/ibm/ibm.go +++ b/pkg/util/cloudproviders/ibm/ibm.go @@ -11,7 +11,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -49,7 +49,7 @@ func getToken(ctx context.Context) (string, time.Time, error) { "Metadata-Flavor": "ibm", }, 
[]byte("{\"expires_in\": 3600}"), - config.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, config.Datadog()) + pkgconfigsetup.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, pkgconfigsetup.Datadog()) if err != nil { token.ExpirationDate = time.Now() return "", time.Time{}, err @@ -82,7 +82,7 @@ func IsRunningOn(ctx context.Context) bool { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "IBM instance name", Attempt: func(ctx context.Context) (interface{}, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("IBM cloud provider is disabled by configuration") } @@ -96,7 +96,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ map[string]string{ "Authorization": fmt.Sprintf("Bearer %s", t), }, - config.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, config.Datadog()) + pkgconfigsetup.Datadog().GetDuration("ibm_metadata_timeout")*time.Second, pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("IBM HostAliases: unable to query metadata endpoint: %s", err) } diff --git a/pkg/util/cloudproviders/kubernetes/kubernetes.go b/pkg/util/cloudproviders/kubernetes/kubernetes.go index 4354f5edcbd4a..20adb6239171e 100644 --- a/pkg/util/cloudproviders/kubernetes/kubernetes.go +++ b/pkg/util/cloudproviders/kubernetes/kubernetes.go @@ -11,8 +11,8 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/hostinfo" ) @@ -34,7 +34,7 @@ func GetHostAliases(ctx context.Context) ([]string, error) { return nil, fmt.Errorf("failed to get node annotations: %w", err) } - for _, annotation := range config.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases") { + for _, annotation := range 
pkgconfigsetup.Datadog().GetStringSlice("kubernetes_node_annotations_as_host_aliases") { if value, found := annotations[annotation]; found { aliases = append(aliases, value) } diff --git a/pkg/util/cloudproviders/network.go b/pkg/util/cloudproviders/network.go index c183ea96ce06f..80f9b1ae06ad0 100644 --- a/pkg/util/cloudproviders/network.go +++ b/pkg/util/cloudproviders/network.go @@ -9,7 +9,7 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" "github.com/DataDog/datadog-agent/pkg/util/ec2" @@ -31,7 +31,7 @@ func GetNetworkID(ctx context.Context) (string, error) { networkIDCacheKey, func() (string, error) { // the id from configuration - if networkID := config.Datadog().GetString("network.id"); networkID != "" { + if networkID := pkgconfigsetup.Datadog().GetString("network.id"); networkID != "" { log.Debugf("GetNetworkID: using configured network ID: %s", networkID) return networkID, nil } diff --git a/pkg/util/cloudproviders/oracle/oracle.go b/pkg/util/cloudproviders/oracle/oracle.go index 407dbce972997..a63f595fb8794 100644 --- a/pkg/util/cloudproviders/oracle/oracle.go +++ b/pkg/util/cloudproviders/oracle/oracle.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) @@ -35,12 +35,12 @@ func IsRunningOn(ctx context.Context) bool { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "Oracle InstanceID", Attempt: func(ctx context.Context) (interface{}, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("Oracle cloud 
provider is disabled by configuration") } endpoint := metadataURL + "/opc/v2/instance/id" - res, err := httputils.Get(ctx, endpoint, map[string]string{"Authorization": "Bearer Oracle"}, timeout, config.Datadog()) + res, err := httputils.Get(ctx, endpoint, map[string]string{"Authorization": "Bearer Oracle"}, timeout, pkgconfigsetup.Datadog()) if err != nil { return nil, fmt.Errorf("Oracle HostAliases: unable to query metadata endpoint: %s", err) } @@ -49,7 +49,7 @@ var instanceIDFetcher = cachedfetch.Fetcher{ return nil, fmt.Errorf("Oracle '%s' returned empty id", endpoint) } - maxLength := config.Datadog().GetInt("metadata_endpoints_max_hostname_size") + maxLength := pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size") if len(res) > maxLength { return nil, fmt.Errorf("%v gave a response with length > to %v", endpoint, maxLength) } diff --git a/pkg/util/cloudproviders/oracle/oracle_test.go b/pkg/util/cloudproviders/oracle/oracle_test.go index b429c66cac894..202623a29d277 100644 --- a/pkg/util/cloudproviders/oracle/oracle_test.go +++ b/pkg/util/cloudproviders/oracle/oracle_test.go @@ -15,13 +15,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetHostAliases(t *testing.T) { - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) ctx := context.Background() expected := "ocid1.instance.oc1.iad.anuwcljte6cuweqcz7sarpn43hst2kaaaxbbbccbaaa6vpd66tvcyhgiifsq" @@ -45,9 +45,9 @@ 
func TestGetHostAliases(t *testing.T) { } func TestGetNTPHosts(t *testing.T) { - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"oracle"}) ctx := context.Background() expectedHosts := []string{"169.254.169.254"} diff --git a/pkg/util/cloudproviders/tencent/tencent.go b/pkg/util/cloudproviders/tencent/tencent.go index cc792f30cb553..798ce358dcf76 100644 --- a/pkg/util/cloudproviders/tencent/tencent.go +++ b/pkg/util/cloudproviders/tencent/tencent.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) @@ -44,7 +44,7 @@ func GetHostAliases(ctx context.Context) ([]string, error) { var instanceIDFetcher = cachedfetch.Fetcher{ Name: "Tencent InstanceID", Attempt: func(ctx context.Context) (interface{}, error) { - res, err := getMetadataItemWithMaxLength(ctx, metadataURL+"/meta-data/instance-id", config.Datadog().GetInt("metadata_endpoints_max_hostname_size")) + res, err := getMetadataItemWithMaxLength(ctx, metadataURL+"/meta-data/instance-id", pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size")) if err != nil { return "", fmt.Errorf("unable to get TencentCloud CVM instanceID: %s", err) } @@ -79,11 +79,11 @@ func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, maxLengt } func getMetadataItem(ctx context.Context, endpoint string) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if 
!pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } - res, err := httputils.Get(ctx, endpoint, nil, timeout, config.Datadog()) + res, err := httputils.Get(ctx, endpoint, nil, timeout, pkgconfigsetup.Datadog()) if err != nil { return "", fmt.Errorf("unable to fetch Tencent Metadata API, %s", err) } diff --git a/pkg/util/cloudproviders/tencent/tencent_test.go b/pkg/util/cloudproviders/tencent/tencent_test.go index e8f7f7cff39e3..b220a6527693d 100644 --- a/pkg/util/cloudproviders/tencent/tencent_test.go +++ b/pkg/util/cloudproviders/tencent/tencent_test.go @@ -15,14 +15,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetInstanceID(t *testing.T) { ctx := context.Background() - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) expected := "ins-nad6bga0" var lastRequest *http.Request @@ -42,9 +42,9 @@ func TestGetInstanceID(t *testing.T) { func TestGetHostAliases(t *testing.T) { ctx := context.Background() - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", 
holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) expected := "ins-nad6bga0" var lastRequest *http.Request @@ -64,9 +64,9 @@ func TestGetHostAliases(t *testing.T) { } func TestGetNTPHosts(t *testing.T) { - holdValue := config.Datadog().Get("cloud_provider_metadata") - defer config.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) - config.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) + holdValue := pkgconfigsetup.Datadog().Get("cloud_provider_metadata") + defer pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", holdValue) + pkgconfigsetup.Datadog().SetWithoutSource("cloud_provider_metadata", []string{"tencent"}) ctx := context.Background() expectedHosts := []string{"ntpupdate.tencentyun.com"} diff --git a/pkg/util/clusteragent/clcrunner.go b/pkg/util/clusteragent/clcrunner.go index aa8ab93c7e589..31b6bbb83cbbd 100644 --- a/pkg/util/clusteragent/clcrunner.go +++ b/pkg/util/clusteragent/clcrunner.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -58,7 +58,7 @@ func GetCLCRunnerClient() (CLCRunnerClientInterface, error) { func (c *CLCRunnerClient) init() { c.initErr = nil - authToken, err := security.GetClusterAgentAuthToken(config.Datadog()) + authToken, err := security.GetClusterAgentAuthToken(pkgconfigsetup.Datadog()) if err != nil { c.initErr = err return @@ -74,7 +74,7 @@ func (c *CLCRunnerClient) init() { c.clcRunnerAPIClient.Timeout = 2 * time.Second // Set http port used by the CLC Runners - c.clcRunnerPort = config.Datadog().GetInt("cluster_checks.clc_runners_port") + c.clcRunnerPort = 
pkgconfigsetup.Datadog().GetInt("cluster_checks.clc_runners_port") } // GetVersion fetches the version of the CLC Runner diff --git a/pkg/util/clusteragent/clcrunner_test.go b/pkg/util/clusteragent/clcrunner_test.go index 849e5ffdea8bf..dce6ad6286bac 100644 --- a/pkg/util/clusteragent/clcrunner_test.go +++ b/pkg/util/clusteragent/clcrunner_test.go @@ -22,9 +22,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -50,7 +50,7 @@ func newDummyCLCRunner() (*dummyCLCRunner, error) { "/api/v1/clcrunner/stats": `{"http_check:My Nginx Service:b0041608e66d20ba":{"AverageExecutionTime":241,"MetricSamples":3},"kube_apiserver_metrics:c5d2d20ccb4bb880":{"AverageExecutionTime":858,"MetricSamples":1562},"":{"AverageExecutionTime":100,"MetricSamples":10}}`, "/api/v1/clcrunner/workers": `{"Count":2,"Instances":{"worker_1":{"Utilization":0.1},"worker_2":{"Utilization":0.2}}}`, }, - token: config.Datadog().GetString("cluster_agent.auth_token"), + token: pkgconfigsetup.Datadog().GetString("cluster_agent.auth_token"), requests: make(chan *http.Request, 100), } return clcRunner, nil @@ -233,7 +233,7 @@ func TestCLCRunnerSuite(t *testing.T) { }) s := &clcRunnerSuite{conf: configmock.New(t)} - config.Datadog().SetConfigFile(f.Name()) + pkgconfigsetup.Datadog().SetConfigFile(f.Name()) s.authTokenPath = filepath.Join(fakeDir, clcRunnerAuthTokenFilename) _, err = os.Stat(s.authTokenPath) require.NotNil(t, err, fmt.Sprintf("%v", err)) diff --git a/pkg/util/clusteragent/clusteragent.go b/pkg/util/clusteragent/clusteragent.go index 46b2ddb98eb81..ab6d18cb04aa5 100644 --- 
a/pkg/util/clusteragent/clusteragent.go +++ b/pkg/util/clusteragent/clusteragent.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -125,14 +125,14 @@ func (c *DCAClient) init() error { return err } - authToken, err := security.GetClusterAgentAuthToken(config.Datadog()) + authToken, err := security.GetClusterAgentAuthToken(pkgconfigsetup.Datadog()) if err != nil { return err } c.clusterAgentAPIRequestHeaders = http.Header{} c.clusterAgentAPIRequestHeaders.Set(authorizationHeaderKey, fmt.Sprintf("Bearer %s", authToken)) - podIP := config.Datadog().GetString("clc_runner_host") + podIP := pkgconfigsetup.Datadog().GetString("clc_runner_host") c.clusterAgentAPIRequestHeaders.Set(RealIPHeader, podIP) if err := c.initHTTPClient(); err != nil { @@ -140,7 +140,7 @@ func (c *DCAClient) init() error { } // Run DCA connection refresh - c.startReconnectHandler(time.Duration(config.Datadog().GetInt64("cluster_agent.client_reconnect_period_seconds")) * time.Second) + c.startReconnectHandler(time.Duration(pkgconfigsetup.Datadog().GetInt64("cluster_agent.client_reconnect_period_seconds")) * time.Second) log.Infof("Successfully connected to the Datadog Cluster Agent %s", c.clusterAgentVersion.String()) return nil @@ -228,7 +228,7 @@ func GetClusterAgentEndpoint() (string, error) { const configDcaURL = "cluster_agent.url" const configDcaSvcName = "cluster_agent.kubernetes_service_name" - dcaURL := config.Datadog().GetString(configDcaURL) + dcaURL := pkgconfigsetup.Datadog().GetString(configDcaURL) if dcaURL != "" { if strings.HasPrefix(dcaURL, 
"http://") { return "", fmt.Errorf("cannot get cluster agent endpoint, not a https scheme: %s", dcaURL) @@ -250,7 +250,7 @@ func GetClusterAgentEndpoint() (string, error) { // Construct the URL with the Kubernetes service environment variables // *_SERVICE_HOST and *_SERVICE_PORT - dcaSvc := config.Datadog().GetString(configDcaSvcName) + dcaSvc := pkgconfigsetup.Datadog().GetString(configDcaSvcName) log.Debugf("Identified service for the Datadog Cluster Agent: %s", dcaSvc) if dcaSvc == "" { return "", fmt.Errorf("cannot get a cluster agent endpoint, both %s and %s are empty", configDcaURL, configDcaSvcName) diff --git a/pkg/util/common.go b/pkg/util/common.go index 49c5c20504a31..5403e6b25cffb 100644 --- a/pkg/util/common.go +++ b/pkg/util/common.go @@ -18,7 +18,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -176,13 +176,13 @@ func GetJSONSerializableMap(m interface{}) interface{} { // GetGoRoutinesDump returns the stack trace of every Go routine of a running Agent. 
func GetGoRoutinesDump() (string, error) { - ipcAddress, err := config.GetIPCAddress() + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) if err != nil { return "", err } pprofURL := fmt.Sprintf("http://%v:%s/debug/pprof/goroutine?debug=2", - ipcAddress, config.Datadog().GetString("expvar_port")) + ipcAddress, pkgconfigsetup.Datadog().GetString("expvar_port")) ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() client := http.Client{} diff --git a/pkg/util/containerd/containerd_util.go b/pkg/util/containerd/containerd_util.go index 502b237db28eb..9b80304c894b4 100644 --- a/pkg/util/containerd/containerd_util.go +++ b/pkg/util/containerd/containerd_util.go @@ -18,8 +18,8 @@ import ( "github.com/opencontainers/image-spec/identity" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dderrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" @@ -91,9 +91,9 @@ func NewContainerdUtil() (ContainerdItf, error) { // (workloadmeta, checks, etc.) might need to fetch info from different // namespaces at the same time. 
containerdUtil := &ContainerdUtil{ - queryTimeout: config.Datadog().GetDuration("cri_query_timeout") * time.Second, - connectionTimeout: config.Datadog().GetDuration("cri_connection_timeout") * time.Second, - socketPath: config.Datadog().GetString("cri_socket_path"), + queryTimeout: pkgconfigsetup.Datadog().GetDuration("cri_query_timeout") * time.Second, + connectionTimeout: pkgconfigsetup.Datadog().GetDuration("cri_connection_timeout") * time.Second, + socketPath: pkgconfigsetup.Datadog().GetString("cri_socket_path"), } if containerdUtil.socketPath == "" { log.Info("No socket path was specified, defaulting to /var/run/containerd/containerd.sock") diff --git a/pkg/util/containerd/namespaces.go b/pkg/util/containerd/namespaces.go index 65e7dab453402..2647ce65501bc 100644 --- a/pkg/util/containerd/namespaces.go +++ b/pkg/util/containerd/namespaces.go @@ -11,14 +11,14 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // NamespacesToWatch returns the namespaces to watch. If the // "containerd_namespace" option has been set, it returns the namespaces it contains. // Otherwise, it returns all of them. 
func NamespacesToWatch(ctx context.Context, containerdClient ContainerdItf) ([]string, error) { - if namespaces := config.Datadog().GetStringSlice("containerd_namespaces"); len(namespaces) > 0 { + if namespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces"); len(namespaces) > 0 { return namespaces, nil } @@ -27,7 +27,7 @@ func NamespacesToWatch(ctx context.Context, containerdClient ContainerdItf) ([]s return nil, err } - excludeNamespaces := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + excludeNamespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") if len(excludeNamespaces) == 0 { return namespaces, nil } @@ -55,8 +55,8 @@ func NamespacesToWatch(ctx context.Context, containerdClient ContainerdItf) ([]s // namespace that we need to watch is "ns1", this function returns // `topic=="/container/create",namespace=="ns1"`. func FiltersWithNamespaces(filters []string) []string { - namespaces := config.Datadog().GetStringSlice("containerd_namespaces") - excludeNamespaces := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + namespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces") + excludeNamespaces := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") if len(namespaces) == 0 && len(excludeNamespaces) == 0 { // Watch all namespaces. No need to add them to the filters. 
diff --git a/pkg/util/containerd/namespaces_test.go b/pkg/util/containerd/namespaces_test.go index 373e1903f9ebe..451e4282ee62d 100644 --- a/pkg/util/containerd/namespaces_test.go +++ b/pkg/util/containerd/namespaces_test.go @@ -12,7 +12,7 @@ import ( "errors" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containerd/fake" "github.com/stretchr/testify/assert" @@ -68,16 +68,16 @@ func TestNamespacesToWatch(t *testing.T) { }, } - originalContainerdNamespacesOpt := config.Datadog().GetStringSlice("containerd_namespaces") - originalExcludeNamespacesOpt := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + originalContainerdNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces") + originalExcludeNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") for _, test := range tests { t.Run(test.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) + pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) - config.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) + pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) namespaces, err := NamespacesToWatch(context.TODO(), test.client) @@ -154,16 +154,16 @@ func TestFiltersWithNamespaces(t *testing.T) { }, } - 
originalContainerdNamespacesOpt := config.Datadog().GetStringSlice("containerd_namespaces") - originalExcludeNamespacesOpt := config.Datadog().GetStringSlice("containerd_exclude_namespaces") + originalContainerdNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_namespaces") + originalExcludeNamespacesOpt := pkgconfigsetup.Datadog().GetStringSlice("containerd_exclude_namespaces") for _, test := range tests { t.Run(test.name, func(t *testing.T) { - config.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) + pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", test.containerdNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_namespaces", originalContainerdNamespacesOpt) - config.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) - defer config.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) + pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", test.excludeNamespaceVal) + defer pkgconfigsetup.Datadog().SetWithoutSource("containerd_exclude_namespaces", originalExcludeNamespacesOpt) result := FiltersWithNamespaces(test.inputFilters) assert.ElementsMatch(t, test.expectedFilters, result) diff --git a/pkg/util/containers/cri/util.go b/pkg/util/containers/cri/util.go index 5c4fb3769733d..d4f6d6b53d164 100644 --- a/pkg/util/containers/cri/util.go +++ b/pkg/util/containers/cri/util.go @@ -23,7 +23,7 @@ import ( criv1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2" "github.com/DataDog/datadog-agent/internal/third_party/kubernetes/pkg/kubelet/cri/remote/util" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -112,9 +112,9 @@ func 
(c *CRIUtil) init() error { func GetUtil() (*CRIUtil, error) { once.Do(func() { globalCRIUtil = &CRIUtil{ - queryTimeout: config.Datadog().GetDuration("cri_query_timeout") * time.Second, - connectionTimeout: config.Datadog().GetDuration("cri_connection_timeout") * time.Second, - socketPath: config.Datadog().GetString("cri_socket_path"), + queryTimeout: pkgconfigsetup.Datadog().GetDuration("cri_query_timeout") * time.Second, + connectionTimeout: pkgconfigsetup.Datadog().GetDuration("cri_connection_timeout") * time.Second, + socketPath: pkgconfigsetup.Datadog().GetString("cri_socket_path"), } globalCRIUtil.initRetry.SetupRetrier(&retry.Config{ //nolint:errcheck Name: "criutil", diff --git a/pkg/util/containers/metrics/docker/collector_linux.go b/pkg/util/containers/metrics/docker/collector_linux.go index b2cb0cb96b5f6..b82b78c737399 100644 --- a/pkg/util/containers/metrics/docker/collector_linux.go +++ b/pkg/util/containers/metrics/docker/collector_linux.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/api/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/system" @@ -77,7 +77,7 @@ func convertIOStats(ioStats *types.BlkioStats) *provider.ContainerIOStats { Devices: make(map[string]provider.DeviceIOStats), } - procPath := config.Datadog().GetString("container_proc_root") + procPath := pkgconfigsetup.Datadog().GetString("container_proc_root") deviceMapping, err := system.GetDiskDeviceMapping(procPath) if err != nil { log.Debugf("Error while getting disk mapping, no disk metric will be present, err: %v", err) diff --git a/pkg/util/containers/metrics/docker/collector_linux_test.go b/pkg/util/containers/metrics/docker/collector_linux_test.go index 1db2a10a40491..1588d66cd4821 100644 --- 
a/pkg/util/containers/metrics/docker/collector_linux_test.go +++ b/pkg/util/containers/metrics/docker/collector_linux_test.go @@ -11,7 +11,7 @@ import ( "os" "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/DataDog/datadog-agent/pkg/util/system" @@ -190,7 +190,7 @@ func Test_convertIOStats(t *testing.T) { assert.Nil(t, err) defer os.Remove(dir + "/diskstats") - config.Datadog().SetWithoutSource("container_proc_root", dir) + pkgconfigsetup.Datadog().SetWithoutSource("container_proc_root", dir) for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/util/containers/metrics/system/collector_linux.go b/pkg/util/containers/metrics/system/collector_linux.go index 9eec6d9846a2d..0c8eabdfd7f91 100644 --- a/pkg/util/containers/metrics/system/collector_linux.go +++ b/pkg/util/containers/metrics/system/collector_linux.go @@ -17,8 +17,8 @@ import ( "github.com/hashicorp/go-multierror" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -58,12 +58,12 @@ func newSystemCollector(cache *provider.Cache, wlm optional.Option[workloadmeta. 
var collectorMetadata provider.CollectorMetadata var cf cgroups.ReaderFilter - procPath := config.Datadog().GetString("container_proc_root") + procPath := pkgconfigsetup.Datadog().GetString("container_proc_root") if strings.HasPrefix(procPath, "/host") { hostPrefix = "/host" } - if useTrie := config.Datadog().GetBool("use_improved_cgroup_parser"); useTrie { + if useTrie := pkgconfigsetup.Datadog().GetBool("use_improved_cgroup_parser"); useTrie { var w workloadmeta.Component unwrapped, ok := wlm.Get() if ok { @@ -80,7 +80,7 @@ func newSystemCollector(cache *provider.Cache, wlm optional.Option[workloadmeta. cgroups.WithProcPath(procPath), cgroups.WithHostPrefix(hostPrefix), cgroups.WithReaderFilter(cf), - cgroups.WithPIDMapper(config.Datadog().GetString("container_pid_mapper")), + cgroups.WithPIDMapper(pkgconfigsetup.Datadog().GetString("container_pid_mapper")), ) if err != nil { // Cgroup provider is pretty static. Except not having required mounts, it should always work. diff --git a/pkg/util/core.go b/pkg/util/core.go index f26db5e2b934d..77649ea1d53f3 100644 --- a/pkg/util/core.go +++ b/pkg/util/core.go @@ -13,11 +13,11 @@ import ( "golang.org/x/sys/unix" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // SetupCoreDump enables core dumps and sets the core dump size limit based on configuration -func SetupCoreDump(cfg config.Reader) error { +func SetupCoreDump(cfg model.Reader) error { if cfg.GetBool("go_core_dump") { debug.SetTraceback("crash") diff --git a/pkg/util/core_windows.go b/pkg/util/core_windows.go index 483290c592d47..c11422c8aff2b 100644 --- a/pkg/util/core_windows.go +++ b/pkg/util/core_windows.go @@ -8,11 +8,11 @@ package util import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // SetupCoreDump enables core dumps and sets the core dump size limit based on configuration -func SetupCoreDump(cfg config.Reader) error { +func 
SetupCoreDump(cfg model.Reader) error { if cfg.GetBool("go_core_dump") { return fmt.Errorf("Not supported on Windows") } diff --git a/pkg/util/crashreport/crashreport.go b/pkg/util/crashreport/crashreport.go index b49529a835037..0bab9ca167051 100644 --- a/pkg/util/crashreport/crashreport.go +++ b/pkg/util/crashreport/crashreport.go @@ -13,7 +13,7 @@ import ( sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/system/wincrashdetect/probe" - dd_config "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" process_net "github.com/DataDog/datadog-agent/pkg/process/net" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" @@ -102,7 +102,7 @@ func (wcr *WinCrashReporter) CheckForCrash() (*probe.WinCrashStatus, error) { return nil, nil } sysProbeUtil, err := process_net.GetRemoteSystemProbeUtil( - dd_config.SystemProbe().GetString("system_probe_config.sysprobe_socket")) + pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")) if err != nil { return nil, wcr.handleStartupError(err) } diff --git a/pkg/util/docker/docker_util.go b/pkg/util/docker/docker_util.go index 7fd5c6f4f6fa4..29503d9ebef4f 100644 --- a/pkg/util/docker/docker_util.go +++ b/pkg/util/docker/docker_util.go @@ -25,7 +25,7 @@ import ( "github.com/docker/docker/client" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" dderrors "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -53,7 +53,7 @@ type DockerUtil struct { // init makes an empty DockerUtil bootstrap itself. // This is not exposed as public API but is called by the retrier embed. 
func (d *DockerUtil) init() error { - d.queryTimeout = config.Datadog().GetDuration("docker_query_timeout") * time.Second + d.queryTimeout = pkgconfigsetup.Datadog().GetDuration("docker_query_timeout") * time.Second // Major failure risk is here, do that first ctx, cancel := context.WithTimeout(context.Background(), d.queryTimeout) diff --git a/pkg/util/docker/event_stream.go b/pkg/util/docker/event_stream.go index 9d2f28def0d39..3fafe14fc28c5 100644 --- a/pkg/util/docker/event_stream.go +++ b/pkg/util/docker/event_stream.go @@ -17,7 +17,7 @@ import ( "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -157,7 +157,7 @@ func eventFilters() filters.Args { res.Add("event", string(containerEventAction)) } - if config.Datadog().GetBool("container_image.enabled") { + if pkgconfigsetup.Datadog().GetBool("container_image.enabled") { res.Add("type", string(events.ImageEventType)) for _, imageEventAction := range imageEventActions { res.Add("event", string(imageEventAction)) diff --git a/pkg/util/ec2/dmi.go b/pkg/util/ec2/dmi.go index a56c4d5f17595..8e8e99c49d36c 100644 --- a/pkg/util/ec2/dmi.go +++ b/pkg/util/ec2/dmi.go @@ -9,14 +9,15 @@ import ( "fmt" "strings" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/google/uuid" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/dmi" "github.com/DataDog/datadog-agent/pkg/util/fargate" - "github.com/google/uuid" ) func isBoardVendorEC2() bool { - if !config.Datadog().GetBool("ec2_use_dmi") { + if !pkgconfigsetup.Datadog().GetBool("ec2_use_dmi") { return false } return dmi.GetBoardVendor() == DMIBoardVendor @@ -32,7 +33,7 @@ func getInstanceIDFromDMI() (string, error) { return "", fmt.Errorf("host 
alias detection through DMI is disabled on Fargate") } - if !config.Datadog().GetBool("ec2_use_dmi") { + if !pkgconfigsetup.Datadog().GetBool("ec2_use_dmi") { return "", fmt.Errorf("'ec2_use_dmi' is disabled") } @@ -57,7 +58,7 @@ func getInstanceIDFromDMI() (string, error) { // Depending on the instance type either the DMI product UUID or the hypervisor UUID is available. In both case, if they // start with "ec2" we return true. func isEC2UUID() bool { - if !config.Datadog().GetBool("ec2_use_dmi") { + if !pkgconfigsetup.Datadog().GetBool("ec2_use_dmi") { return false } diff --git a/pkg/util/ec2/dmi_test.go b/pkg/util/ec2/dmi_test.go index e21cc6657778b..cdd9068d0bd7e 100644 --- a/pkg/util/ec2/dmi_test.go +++ b/pkg/util/ec2/dmi_test.go @@ -8,15 +8,16 @@ package ec2 import ( "testing" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/stretchr/testify/assert" + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/dmi" - "github.com/stretchr/testify/assert" ) func TestIsBoardVendorEC2(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) setupDMIForNotEC2(t) assert.False(t, isBoardVendorEC2()) @@ -25,13 +26,13 @@ func TestIsBoardVendorEC2(t *testing.T) { assert.True(t, isBoardVendorEC2()) configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) assert.False(t, isBoardVendorEC2()) } func TestGetInstanceIDFromDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) setupDMIForNotEC2(t) instanceID, err := getInstanceIDFromDMI() @@ -44,14 +45,14 @@ func TestGetInstanceIDFromDMI(t *testing.T) { assert.Equal(t, "i-myinstance", instanceID) configmock.New(t) - 
config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) _, err = getInstanceIDFromDMI() assert.Error(t, err) } func TestIsEC2UUID(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) // no UUID dmi.SetupMock(t, "", "", "", "") @@ -78,7 +79,7 @@ func TestIsEC2UUID(t *testing.T) { func TestIsEC2UUIDSwapEndian(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) // hypervisor dmi.SetupMock(t, "45E12AEC-DCD1-B213-94ED-012345ABCDEF", "", "", "") diff --git a/pkg/util/ec2/ec2.go b/pkg/util/ec2/ec2.go index d4c8025ced09f..05cef31e3488c 100644 --- a/pkg/util/ec2/ec2.go +++ b/pkg/util/ec2/ec2.go @@ -14,8 +14,8 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cachedfetch" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -192,7 +192,7 @@ func GetNTPHosts(ctx context.Context) []string { // GetClusterName returns the name of the cluster containing the current EC2 instance func GetClusterName(ctx context.Context) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } tags, err := fetchTagsFromCache(ctx) @@ -222,7 +222,7 @@ func extractClusterName(tags []string) (string, error) { // IsDefaultHostname returns whether the given hostname is a default one for EC2 func IsDefaultHostname(hostname string) bool { - return isDefaultHostname(hostname, 
config.Datadog().GetBool("ec2_use_windows_prefix_detection")) + return isDefaultHostname(hostname, pkgconfigsetup.Datadog().GetBool("ec2_use_windows_prefix_detection")) } // IsDefaultHostnameForIntake returns whether the given hostname is a default one for EC2 for the intake diff --git a/pkg/util/ec2/ec2_account_id.go b/pkg/util/ec2/ec2_account_id.go index 32e9cfb9463c3..4316462e00e2e 100644 --- a/pkg/util/ec2/ec2_account_id.go +++ b/pkg/util/ec2/ec2_account_id.go @@ -11,12 +11,12 @@ import ( "context" "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // GetAccountID returns the account ID of the current AWS instance func GetAccountID(ctx context.Context) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } diff --git a/pkg/util/ec2/ec2_tags.go b/pkg/util/ec2/ec2_tags.go index ecd05fa8cda7f..d60f93bf6256b 100644 --- a/pkg/util/ec2/ec2_tags.go +++ b/pkg/util/ec2/ec2_tags.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -31,7 +31,7 @@ var ( ) func isTagExcluded(tag string) bool { - if excludedTags := config.Datadog().GetStringSlice("exclude_ec2_tags"); excludedTags != nil { + if excludedTags := pkgconfigsetup.Datadog().GetStringSlice("exclude_ec2_tags"); excludedTags != nil { for _, excludedTag := range excludedTags { if tag == excludedTag { return true @@ -42,7 +42,7 @@ func isTagExcluded(tag string) bool { } func fetchEc2Tags(ctx context.Context) ([]string, error) { - if config.Datadog().GetBool("collect_ec2_tags_use_imds") 
{ + if pkgconfigsetup.Datadog().GetBool("collect_ec2_tags_use_imds") { // prefer to fetch tags from IMDS, falling back to the API tags, err := fetchEc2TagsFromIMDS(ctx) if err == nil { @@ -123,7 +123,7 @@ func getTagsWithCreds(ctx context.Context, instanceIdentity *EC2Identity, awsCre // We want to use 'ec2_metadata_timeout' here instead of current context. 'ctx' comes from the agent main and will // only be canceled if the agent is stopped. The default timeout for the AWS SDK is 1 minutes (20s timeout with // 3 retries). Since we call getTagsWithCreds twice in a row, it can be a 2 minutes latency. - ctx, cancel := context.WithTimeout(ctx, config.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond) + ctx, cancel := context.WithTimeout(ctx, pkgconfigsetup.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond) defer cancel() ec2Tags, err := connection.DescribeTags(ctx, @@ -155,7 +155,7 @@ func getTagsWithCreds(ctx context.Context, instanceIdentity *EC2Identity, awsCre var fetchTags = fetchEc2Tags func fetchTagsFromCache(ctx context.Context) ([]string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud provider is disabled by configuration") } diff --git a/pkg/util/ec2/ec2_tags_test.go b/pkg/util/ec2/ec2_tags_test.go index 5707990bb698e..cce8a5800df2c 100644 --- a/pkg/util/ec2/ec2_tags_test.go +++ b/pkg/util/ec2/ec2_tags_test.go @@ -19,8 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" ) @@ -38,7 +38,7 @@ func TestGetIAMRole(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) 
+ pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := getIAMRole(ctx) @@ -63,7 +63,7 @@ func TestGetSecurityCreds(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() cred, err := getSecurityCreds(ctx) @@ -83,7 +83,7 @@ func TestGetInstanceIdentity(t *testing.T) { })) defer ts.Close() instanceIdentityURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := GetInstanceIdentity(ctx) @@ -112,7 +112,7 @@ func TestFetchEc2TagsFromIMDS(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() confMock := configmock.New(t) @@ -133,7 +133,7 @@ func TestFetchEc2TagsFromIMDSError(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() _, err := fetchEc2TagsFromIMDS(ctx) diff --git a/pkg/util/ec2/ec2_test.go b/pkg/util/ec2/ec2_test.go index cbb660df50c8a..a37576ee1f1c0 100644 --- a/pkg/util/ec2/ec2_test.go +++ b/pkg/util/ec2/ec2_test.go @@ -18,14 +18,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/dmi" httputils "github.com/DataDog/datadog-agent/pkg/util/http" ) var ( - initialTimeout = 
time.Duration(config.Datadog().GetInt("ec2_metadata_timeout")) * time.Millisecond + initialTimeout = time.Duration(pkgconfigsetup.Datadog().GetInt("ec2_metadata_timeout")) * time.Millisecond initialMetadataURL = metadataURL initialTokenURL = tokenURL ) @@ -33,7 +33,7 @@ var ( const testIMDSToken = "AQAAAFKw7LyqwVmmBMkqXHpDBuDWw2GnfGswTHi2yiIOGvzD7OMaWw==" func resetPackageVars() { - config.Datadog().SetWithoutSource("ec2_metadata_timeout", initialTimeout) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", initialTimeout) metadataURL = initialMetadataURL tokenURL = initialTokenURL token = httputils.NewAPIToken(getToken) @@ -55,11 +55,11 @@ func setupDMIForNotEC2(t *testing.T) { func TestIsDefaultHostname(t *testing.T) { const key = "ec2_use_windows_prefix_detection" - prefixDetection := config.Datadog().GetBool(key) - defer config.Datadog().SetDefault(key, prefixDetection) + prefixDetection := pkgconfigsetup.Datadog().GetBool(key) + defer pkgconfigsetup.Datadog().SetDefault(key, prefixDetection) for _, prefix := range []bool{true, false} { - config.Datadog().SetDefault(key, prefix) + pkgconfigsetup.Datadog().SetDefault(key, prefix) assert.True(t, IsDefaultHostname("IP-FOO")) assert.True(t, IsDefaultHostname("domuarigato")) @@ -70,9 +70,9 @@ func TestIsDefaultHostname(t *testing.T) { func TestIsDefaultHostnameForIntake(t *testing.T) { const key = "ec2_use_windows_prefix_detection" - prefixDetection := config.Datadog().GetBool(key) - config.Datadog().SetDefault(key, true) - defer config.Datadog().SetDefault(key, prefixDetection) + prefixDetection := pkgconfigsetup.Datadog().GetBool(key) + pkgconfigsetup.Datadog().SetDefault(key, true) + defer pkgconfigsetup.Datadog().SetDefault(key, prefixDetection) assert.True(t, IsDefaultHostnameForIntake("IP-FOO")) assert.True(t, IsDefaultHostnameForIntake("domuarigato")) @@ -93,7 +93,7 @@ func TestGetInstanceID(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - 
config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() // API errors out, should return error @@ -168,9 +168,9 @@ func TestGetHostAliases(t *testing.T) { configmock.New(t) if tc.disableDMI { - config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) } else { - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { @@ -187,7 +187,7 @@ func TestGetHostAliases(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() ctx := context.Background() @@ -213,7 +213,7 @@ func TestGetHostname(t *testing.T) { })) defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() // API errors out, should return error @@ -315,7 +315,7 @@ func TestGetToken(t *testing.T) { defer ts.Close() tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() token, err := token.Get(ctx) @@ -328,7 +328,7 @@ func TestMetedataRequestWithToken(t *testing.T) { var requestForToken *http.Request var requestWithToken *http.Request var seq int - config.Datadog().SetDefault("ec2_prefer_imdsv2", true) + pkgconfigsetup.Datadog().SetDefault("ec2_prefer_imdsv2", true) ctx := context.Background() ipv4 := "198.51.100.1" @@ -372,7 +372,7 @@ func TestMetedataRequestWithToken(t *testing.T) { defer ts.Close() metadataURL = ts.URL tokenURL = ts.URL - 
config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() ips, err := GetPublicIPv4(ctx) @@ -383,7 +383,7 @@ func TestMetedataRequestWithToken(t *testing.T) { assert.Equal(t, "0", requestForToken.Header.Get("X-sequence")) assert.Equal(t, "1", requestWithToken.Header.Get("X-sequence")) - assert.Equal(t, fmt.Sprint(config.Datadog().GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")) + assert.Equal(t, fmt.Sprint(pkgconfigsetup.Datadog().GetInt("ec2_metadata_token_lifetime")), requestForToken.Header.Get("X-aws-ec2-metadata-token-ttl-seconds")) assert.Equal(t, http.MethodPut, requestForToken.Method) assert.Equal(t, "/", requestForToken.RequestURI) assert.Equal(t, testIMDSToken, requestWithToken.Header.Get("X-aws-ec2-metadata-token")) @@ -411,7 +411,7 @@ func TestMetedataRequestWithToken(t *testing.T) { func TestMetedataRequestWithoutToken(t *testing.T) { var requestWithoutToken *http.Request - config.Datadog().SetDefault("ec2_prefer_imdsv2", false) + pkgconfigsetup.Datadog().SetDefault("ec2_prefer_imdsv2", false) ipv4 := "198.51.100.1" @@ -438,7 +438,7 @@ func TestMetedataRequestWithoutToken(t *testing.T) { defer ts.Close() metadataURL = ts.URL tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() ips, err := GetPublicIPv4(context.Background()) @@ -464,7 +464,7 @@ func TestGetNTPHostsFromIMDS(t *testing.T) { func TestGetNTPHostsDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) setupDMIForEC2(t) defer resetPackageVars() @@ -476,7 +476,7 @@ func TestGetNTPHostsDMI(t *testing.T) { func TestGetNTPHostsEC2UUID(t *testing.T) { configmock.New(t) - 
config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) dmi.SetupMock(t, "ec2something", "", "", "") defer resetPackageVars() @@ -488,7 +488,7 @@ func TestGetNTPHostsEC2UUID(t *testing.T) { func TestGetNTPHostsDisabledDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", false) // DMI without EC2 UUID dmi.SetupMock(t, "something", "something", "i-myinstance", DMIBoardVendor) @@ -533,8 +533,8 @@ func TestMetadataSourceIMDS(t *testing.T) { tokenURL = ts.URL defer resetPackageVars() configmock.New(t) - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) - config.Datadog().SetWithoutSource("ec2_prefer_imdsv2", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prefer_imdsv2", true) assert.True(t, IsRunningOn(ctx)) assert.Equal(t, metadataSourceIMDSv2, currentMetadataSource) @@ -542,7 +542,7 @@ func TestMetadataSourceIMDS(t *testing.T) { // trying IMDSv1 hostnameFetcher.Reset() currentMetadataSource = metadataSourceNone - config.Datadog().SetWithoutSource("ec2_prefer_imdsv2", false) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prefer_imdsv2", false) assert.True(t, IsRunningOn(ctx)) assert.Equal(t, metadataSourceIMDSv1, currentMetadataSource) @@ -550,7 +550,7 @@ func TestMetadataSourceIMDS(t *testing.T) { func TestMetadataSourceUUID(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) ctx := context.Background() @@ -572,7 +572,7 @@ func TestMetadataSourceUUID(t *testing.T) { func TestMetadataSourceDMI(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) ctx := context.Background() @@ -586,7 +586,7 @@ func 
TestMetadataSourceDMI(t *testing.T) { func TestMetadataSourceDMIPreventFallback(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("ec2_use_dmi", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_use_dmi", true) ctx := context.Background() diff --git a/pkg/util/ec2/imds_helpers.go b/pkg/util/ec2/imds_helpers.go index afc2ef22fffbd..845d75b9c1471 100644 --- a/pkg/util/ec2/imds_helpers.go +++ b/pkg/util/ec2/imds_helpers.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -25,7 +25,7 @@ var ( ) func getToken(ctx context.Context) (string, time.Time, error) { - tokenLifetime := time.Duration(config.Datadog().GetInt("ec2_metadata_token_lifetime")) * time.Second + tokenLifetime := time.Duration(pkgconfigsetup.Datadog().GetInt("ec2_metadata_token_lifetime")) * time.Second // Set the local expiration date before requesting the metadata endpoint so the local expiration date will always // expire before the expiration date computed on the AWS side. The expiration date is set minus the renewal window // to ensure the token will be refreshed before it expires. 
@@ -37,7 +37,7 @@ func getToken(ctx context.Context) (string, time.Time, error) { "X-aws-ec2-metadata-token-ttl-seconds": fmt.Sprintf("%d", int(tokenLifetime.Seconds())), }, nil, - config.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond, config.Datadog()) + pkgconfigsetup.Datadog().GetDuration("ec2_metadata_timeout")*time.Millisecond, pkgconfigsetup.Datadog()) if err != nil { return "", time.Now(), err } @@ -50,7 +50,7 @@ func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, forceIMD return result, err } - maxLength := config.Datadog().GetInt("metadata_endpoints_max_hostname_size") + maxLength := pkgconfigsetup.Datadog().GetInt("metadata_endpoints_max_hostname_size") if len(result) > maxLength { return "", fmt.Errorf("%v gave a response with length > to %v", endpoint, maxLength) } @@ -58,7 +58,7 @@ func getMetadataItemWithMaxLength(ctx context.Context, endpoint string, forceIMD } func getMetadataItem(ctx context.Context, endpoint string, forceIMDSv2 bool) (string, error) { - if !config.IsCloudProviderEnabled(CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(CloudProviderName, pkgconfigsetup.Datadog()) { return "", fmt.Errorf("cloud provider is disabled by configuration") } @@ -67,7 +67,7 @@ func getMetadataItem(ctx context.Context, endpoint string, forceIMDSv2 bool) (st // UseIMDSv2 returns true if the agent should use IMDSv2 func UseIMDSv2(forceIMDSv2 bool) bool { - return config.Datadog().GetBool("ec2_prefer_imdsv2") || forceIMDSv2 + return pkgconfigsetup.Datadog().GetBool("ec2_prefer_imdsv2") || forceIMDSv2 } func doHTTPRequest(ctx context.Context, url string, forceIMDSv2 bool) (string, error) { @@ -87,7 +87,7 @@ func doHTTPRequest(ctx context.Context, url string, forceIMDSv2 bool) (string, e } } } - res, err := httputils.Get(ctx, url, headers, time.Duration(config.Datadog().GetInt("ec2_metadata_timeout"))*time.Millisecond, config.Datadog()) + res, err := httputils.Get(ctx, url, headers, 
time.Duration(pkgconfigsetup.Datadog().GetInt("ec2_metadata_timeout"))*time.Millisecond, pkgconfigsetup.Datadog()) // We don't want to register the source when we force imdsv2 if err == nil && !forceIMDSv2 { setCloudProviderSource(source) diff --git a/pkg/util/ec2/network_test.go b/pkg/util/ec2/network_test.go index 1e4ca0bc36b42..577fd9cc5da75 100644 --- a/pkg/util/ec2/network_test.go +++ b/pkg/util/ec2/network_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) func TestGetPublicIPv4(t *testing.T) { @@ -38,7 +38,7 @@ func TestGetPublicIPv4(t *testing.T) { defer ts.Close() metadataURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := GetPublicIPv4(ctx) @@ -70,7 +70,7 @@ func TestGetNetworkID(t *testing.T) { defer ts.Close() metadataURL = ts.URL tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() val, err := GetNetworkID(ctx) @@ -93,7 +93,7 @@ func TestGetInstanceIDNoMac(t *testing.T) { defer ts.Close() metadataURL = ts.URL tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() _, err := GetNetworkID(ctx) @@ -130,7 +130,7 @@ func TestGetInstanceIDMultipleVPC(t *testing.T) { defer ts.Close() metadataURL = ts.URL tokenURL = ts.URL - config.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_metadata_timeout", 1000) defer resetPackageVars() _, err := GetNetworkID(ctx) diff --git a/pkg/util/ecs/common/common.go b/pkg/util/ecs/common/common.go index 
ea0aada4925aa..db68f594aac66 100644 --- a/pkg/util/ecs/common/common.go +++ b/pkg/util/ecs/common/common.go @@ -9,7 +9,7 @@ package common import ( "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // CloudProviderName contains the inventory name of for ECS @@ -17,5 +17,5 @@ const CloudProviderName = "AWS" // MetadataTimeout defines timeout for ECS metadata endpoints func MetadataTimeout() time.Duration { - return config.Datadog().GetDuration("ecs_metadata_timeout") * time.Millisecond + return pkgconfigsetup.Datadog().GetDuration("ecs_metadata_timeout") * time.Millisecond } diff --git a/pkg/util/ecs/detection.go b/pkg/util/ecs/detection.go index 498623ca0bc36..a946f87e09a06 100644 --- a/pkg/util/ecs/detection.go +++ b/pkg/util/ecs/detection.go @@ -11,7 +11,7 @@ import ( "context" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/ecs/common" ecsmeta "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata" @@ -27,7 +27,7 @@ const ( // HasEC2ResourceTags returns whether the metadata endpoint in ECS exposes // resource tags. 
func HasEC2ResourceTags() bool { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return false } return queryCacheBool(hasEC2ResourceTagsCacheKey, func() (bool, time.Duration) { @@ -60,7 +60,7 @@ func HasFargateResourceTags(ctx context.Context) bool { } func queryCacheBool(cacheKey string, cacheMissEvalFunc func() (bool, time.Duration)) bool { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return false } if cachedValue, found := cache.Cache.Get(cacheKey); found { diff --git a/pkg/util/ecs/metadata/clients.go b/pkg/util/ecs/metadata/clients.go index 819ca629de774..027dfe6082c52 100644 --- a/pkg/util/ecs/metadata/clients.go +++ b/pkg/util/ecs/metadata/clients.go @@ -13,7 +13,7 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" @@ -50,7 +50,7 @@ type util struct { // endpoint, by detecting the endpoint address. Returns an error if it was not // possible to detect the endpoint address. func V1() (v1.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } @@ -73,7 +73,7 @@ func V1() (v1.Client, error) { // V2 returns a client for the ECS metadata API v2 that uses the default // endpoint address. 
func V2() (v2.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } @@ -99,7 +99,7 @@ func V2() (v2.Client, error) { // error if it was not possible to detect the endpoint address. // v4 metadata API is preferred over v3 if both are available. func V3orV4FromCurrentTask() (v3or4.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } @@ -123,7 +123,7 @@ func V3orV4FromCurrentTask() (v3or4.Client, error) { // the endpoint address from the task the executable is running in. Returns an // error if it was not possible to detect the endpoint address. func V4FromCurrentTask() (v3or4.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("Cloud Provider %s is disabled by configuration", common.CloudProviderName) } diff --git a/pkg/util/ecs/metadata/clients_nodocker.go b/pkg/util/ecs/metadata/clients_nodocker.go index a73cb38fd75b0..1ce3edcd3d7a7 100644 --- a/pkg/util/ecs/metadata/clients_nodocker.go +++ b/pkg/util/ecs/metadata/clients_nodocker.go @@ -10,7 +10,7 @@ package metadata import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/ecs/common" @@ -29,7 +29,7 @@ func V1() (*v1.Client, error) { // V2 returns a client for the ECS metadata API v2 that uses the default // endpoint address. 
func V2() (*v2.Client, error) { - if !config.IsCloudProviderEnabled(common.CloudProviderName) { + if !pkgconfigsetup.IsCloudProviderEnabled(common.CloudProviderName, pkgconfigsetup.Datadog()) { return nil, fmt.Errorf("cloud Provider %s is disabled by configuration", common.CloudProviderName) } diff --git a/pkg/util/ecs/metadata/detection.go b/pkg/util/ecs/metadata/detection.go index 58d5814d1c783..c3ac6b96c06ba 100644 --- a/pkg/util/ecs/metadata/detection.go +++ b/pkg/util/ecs/metadata/detection.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/system" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" v1 "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata/v1" @@ -41,8 +41,8 @@ const ( func detectAgentV1URL() (string, error) { urls := make([]string, 0, 3) - if len(config.Datadog().GetString("ecs_agent_url")) > 0 { - urls = append(urls, config.Datadog().GetString("ecs_agent_url")) + if len(pkgconfigsetup.Datadog().GetString("ecs_agent_url")) > 0 { + urls = append(urls, pkgconfigsetup.Datadog().GetString("ecs_agent_url")) } if env.IsContainerized() { @@ -54,7 +54,7 @@ func detectAgentV1URL() (string, error) { urls = append(urls, agentURLS...) 
} // Try the default gateway - gw, err := system.GetDefaultGateway(config.Datadog().GetString("proc_root")) + gw, err := system.GetDefaultGateway(pkgconfigsetup.Datadog().GetString("proc_root")) if err != nil { log.Debugf("Could not get docker default gateway: %s", err) } @@ -88,7 +88,7 @@ func getAgentV1ContainerURLs(ctx context.Context) ([]string, error) { if err != nil { return nil, err } - ecsConfig, err := du.Inspect(ctx, config.Datadog().GetString("ecs_agent_container_name"), false) + ecsConfig, err := du.Inspect(ctx, pkgconfigsetup.Datadog().GetString("ecs_agent_container_name"), false) if err != nil { return nil, err } diff --git a/pkg/util/ecs/metadata/detection_test.go b/pkg/util/ecs/metadata/detection_test.go index a88a5b815e9dd..69e48d9a8f30b 100644 --- a/pkg/util/ecs/metadata/detection_test.go +++ b/pkg/util/ecs/metadata/detection_test.go @@ -18,8 +18,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata/testutil" @@ -36,7 +36,7 @@ func TestLocateECSHTTP(t *testing.T) { ts := ecsinterface.Start() defer ts.Close() - config.Datadog().SetDefault("ecs_agent_url", ts.URL) + pkgconfigsetup.Datadog().SetDefault("ecs_agent_url", ts.URL) _, err = newAutodetectedClientV1() require.NoError(t, err) @@ -59,7 +59,7 @@ func TestLocateECSHTTPFail(t *testing.T) { ts := ecsinterface.Start() defer ts.Close() - config.Datadog().SetDefault("ecs_agent_url", ts.URL) + pkgconfigsetup.Datadog().SetDefault("ecs_agent_url", ts.URL) _, err = newAutodetectedClientV1() require.Error(t, err) @@ -74,11 +74,11 @@ func TestLocateECSHTTPFail(t *testing.T) { } func TestGetAgentV1ContainerURLs(t *testing.T) { - config.SetFeatures(t, 
env.Docker) + env.SetFeatures(t, env.Docker) ctx := context.Background() - config.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent-custom") - defer config.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent") + pkgconfigsetup.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent-custom") + defer pkgconfigsetup.Datadog().SetDefault("ecs_agent_container_name", "ecs-agent") // Setting mocked data in cache nets := make(map[string]*network.EndpointSettings) diff --git a/pkg/util/fargate/detection.go b/pkg/util/fargate/detection.go index 20a7d80aa0c12..56dd0e0e05267 100644 --- a/pkg/util/fargate/detection.go +++ b/pkg/util/fargate/detection.go @@ -8,8 +8,8 @@ package fargate import ( "errors" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // IsFargateInstance returns whether the Agent is running in Fargate. @@ -30,7 +30,7 @@ func GetOrchestrator() OrchestratorName { // GetEKSFargateNodename returns the node name in EKS Fargate func GetEKSFargateNodename() (string, error) { - if nodename := config.Datadog().GetString("kubernetes_kubelet_nodename"); nodename != "" { + if nodename := pkgconfigsetup.Datadog().GetString("kubernetes_kubelet_nodename"); nodename != "" { return nodename, nil } return "", errors.New("kubernetes_kubelet_nodename is not defined, make sure DD_KUBERNETES_KUBELET_NODENAME is set via the downward API") diff --git a/pkg/util/hostname/common.go b/pkg/util/hostname/common.go index 968316db8adcb..059076775949c 100644 --- a/pkg/util/hostname/common.go +++ b/pkg/util/hostname/common.go @@ -13,8 +13,8 @@ import ( "strings" "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" 
"github.com/DataDog/datadog-agent/pkg/util/cloudproviders/azure" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" "github.com/DataDog/datadog-agent/pkg/util/ec2" @@ -39,7 +39,7 @@ var ( type Data = hostnameinterface.Data func fromConfig(ctx context.Context, _ string) (string, error) { - configName := config.Datadog().GetString("hostname") + configName := pkgconfigsetup.Datadog().GetString("hostname") err := validate.ValidHostname(configName) if err != nil { return "", err @@ -50,7 +50,7 @@ func fromConfig(ctx context.Context, _ string) (string, error) { func fromHostnameFile(ctx context.Context, _ string) (string, error) { // Try `hostname_file` config option next - hostnameFilepath := config.Datadog().GetString("hostname_file") + hostnameFilepath := pkgconfigsetup.Datadog().GetString("hostname_file") if hostnameFilepath == "" { return "", fmt.Errorf("'hostname_file' configuration is not enabled") } @@ -91,7 +91,7 @@ func fromFQDN(ctx context.Context, _ string) (string, error) { return "", fmt.Errorf("FQDN hostname is not usable") } - if config.Datadog().GetBool("hostname_fqdn") { + if pkgconfigsetup.Datadog().GetBool("hostname_fqdn") { fqdn, err := fqdnHostname() if err == nil { return fqdn, nil @@ -127,7 +127,7 @@ func fromEC2(ctx context.Context, currentHostname string) (string, error) { // We use the instance id if we're on an ECS cluster or we're on EC2 // and the hostname is one of the default ones - prioritizeEC2Hostname := config.Datadog().GetBool("ec2_prioritize_instance_id_as_hostname") + prioritizeEC2Hostname := pkgconfigsetup.Datadog().GetBool("ec2_prioritize_instance_id_as_hostname") log.Debugf("Detected a default EC2 hostname: %v", ec2.IsDefaultHostname(currentHostname)) log.Debugf("ec2_prioritize_instance_id_as_hostname is set to %v", prioritizeEC2Hostname) diff --git a/pkg/util/hostname/common_test.go b/pkg/util/hostname/common_test.go index 8788ea698c393..920a2bf850cf1 100644 --- a/pkg/util/hostname/common_test.go +++ 
b/pkg/util/hostname/common_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/ec2" "github.com/DataDog/datadog-agent/pkg/util/fargate" ) @@ -24,7 +24,7 @@ import ( func TestFromConfig(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("hostname", "test-hostname") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "test-hostname") hostname, err := fromConfig(context.TODO(), "") require.NoError(t, err) @@ -33,7 +33,7 @@ func TestFromConfig(t *testing.T) { func TestFromConfigInvalid(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("hostname", "hostname_with_underscore") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "hostname_with_underscore") _, err := fromConfig(context.TODO(), "") assert.Error(t, err) @@ -50,7 +50,7 @@ func setupHostnameFile(t *testing.T, content string) { require.NoError(t, err, "Could not write to tmp file %s: %s", destFile.Name(), err) configmock.New(t) - config.Datadog().SetWithoutSource("hostname_file", destFile.Name()) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_file", destFile.Name()) destFile.Close() } @@ -73,7 +73,7 @@ func TestFromHostnameFileWhitespaceTrim(t *testing.T) { func TestFromHostnameFileNoFileName(t *testing.T) { configmock.New(t) - config.Datadog().SetWithoutSource("hostname_file", "") + pkgconfigsetup.Datadog().SetWithoutSource("hostname_file", "") _, err := fromHostnameFile(context.TODO(), "") assert.NotNil(t, err) @@ -113,12 +113,12 @@ func TestFromFQDN(t *testing.T) { fqdnHostname = func() (string, error) { return "fqdn-hostname", nil } configmock.New(t) - config.Datadog().SetWithoutSource("hostname_fqdn", false) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_fqdn", false) _, 
err := fromFQDN(context.TODO(), "") assert.Error(t, err) - config.Datadog().SetWithoutSource("hostname_fqdn", true) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_fqdn", true) hostname, err := fromFQDN(context.TODO(), "") assert.NoError(t, err) @@ -167,7 +167,7 @@ func TestFromEc2Prioritize(t *testing.T) { // to true we use the instance ID defer func() { ec2GetInstanceID = ec2.GetInstanceID }() configmock.New(t) - config.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) // make AWS provider return an error ec2GetInstanceID = func(context.Context) (string, error) { return "", fmt.Errorf("some error") } diff --git a/pkg/util/hostname/os_hostname_linux.go b/pkg/util/hostname/os_hostname_linux.go index 793c747eb7cdf..afb9075a46a24 100644 --- a/pkg/util/hostname/os_hostname_linux.go +++ b/pkg/util/hostname/os_hostname_linux.go @@ -10,7 +10,7 @@ package hostname import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/system" ) @@ -19,7 +19,7 @@ import ( // in a non-root UTS namespace because in that case, the OS hostname characterizes the // identity of the agent container and not the one of the nodes it is running on. 
func isOSHostnameUsable(_ context.Context) bool { - if config.Datadog().GetBool("hostname_trust_uts_namespace") { + if pkgconfigsetup.Datadog().GetBool("hostname_trust_uts_namespace") { return true } diff --git a/pkg/util/hostname/providers_test.go b/pkg/util/hostname/providers_test.go index e95605acb92ab..9060f9678a6d7 100644 --- a/pkg/util/hostname/providers_test.go +++ b/pkg/util/hostname/providers_test.go @@ -15,9 +15,9 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/azure" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" @@ -63,7 +63,7 @@ func setupHostnameTest(t *testing.T, tc testCase) { configmock.New(t) if tc.configHostname { - config.Datadog().SetWithoutSource("hostname", "hostname-from-configuration") + pkgconfigsetup.Datadog().SetWithoutSource("hostname", "hostname-from-configuration") } if tc.hostnameFile { setupHostnameFile(t, "hostname-from-file") @@ -89,7 +89,7 @@ func setupHostnameTest(t *testing.T, tc testCase) { if tc.FQDN || tc.FQDNEC2 { // making isOSHostnameUsable return true osHostnameUsable = func(context.Context) bool { return true } - config.Datadog().SetWithoutSource("hostname_fqdn", true) + pkgconfigsetup.Datadog().SetWithoutSource("hostname_fqdn", true) if !tc.FQDNEC2 { fqdnHostname = func() (string, error) { return "hostname-from-fqdn", nil } } else { @@ -118,7 +118,7 @@ func setupHostnameTest(t *testing.T, tc testCase) { } if tc.EC2Proritized { - config.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) + pkgconfigsetup.Datadog().SetWithoutSource("ec2_prioritize_instance_id_as_hostname", true) } } diff --git a/pkg/util/hostname/warnings.go 
b/pkg/util/hostname/warnings.go index 5efafbc831a61..d578d4d7b48c6 100644 --- a/pkg/util/hostname/warnings.go +++ b/pkg/util/hostname/warnings.go @@ -10,7 +10,7 @@ import ( "os" "runtime" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/ec2" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -26,7 +26,7 @@ func isHostnameCanonicalForIntake(ctx context.Context, hostname string) bool { } func warnIfNotCanonicalHostname(ctx context.Context, hostname string) { - if !isHostnameCanonicalForIntake(ctx, hostname) && !config.Datadog().GetBool("hostname_force_config_as_canonical") { + if !isHostnameCanonicalForIntake(ctx, hostname) && !pkgconfigsetup.Datadog().GetBool("hostname_force_config_as_canonical") { log.Warnf( "Hostname '%s' defined in configuration will not be used as the in-app hostname. "+ "For more information: https://dtdg.co/agent-hostname-force-config-as-canonical", @@ -49,7 +49,7 @@ func warnAboutFQDN(ctx context.Context, hostname string) { // We have a FQDN that does not match to the resolved hostname, and the configuration // field `hostname_fqdn` isn't set -> we display a warning message about // the future behavior - if !config.Datadog().GetBool("hostname_fqdn") && hostname == h && h != fqdn { + if !pkgconfigsetup.Datadog().GetBool("hostname_fqdn") && hostname == h && h != fqdn { if runtime.GOOS != "windows" { // REMOVEME: This should be removed when the default `hostname_fqdn` is set to true log.Warnf("DEPRECATION NOTICE: The agent resolved your hostname as '%s'. However in a future version, it will be resolved as '%s' by default. To enable the future behavior, please enable the `hostname_fqdn` flag in the configuration. 
For more information: https://dtdg.co/flag-hostname-fqdn", h, fqdn) diff --git a/pkg/util/installinfo/install_info.go b/pkg/util/installinfo/install_info.go index 0486c1324493e..e7443aff6c8d5 100644 --- a/pkg/util/installinfo/install_info.go +++ b/pkg/util/installinfo/install_info.go @@ -18,7 +18,8 @@ import ( "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" @@ -50,12 +51,12 @@ const maxVersionHistoryEntries = 60 // GetFilePath returns the path of the 'install_info' directory relative to the loaded coinfiguration file. The // 'install_info' directory contains information about how the agent was installed. -func GetFilePath(conf config.Reader) string { +func GetFilePath(conf model.Reader) string { return filepath.Join(configUtils.ConfFileDirectory(conf), "install_info") } // Get returns information about how the Agent was installed. -func Get(conf config.Reader) (*InstallInfo, error) { +func Get(conf model.Reader) (*InstallInfo, error) { return getFromPath(GetFilePath(conf)) } @@ -77,8 +78,8 @@ func getFromPath(path string) (*InstallInfo, error) { // LogVersionHistory loads version history file, append new entry if agent version is different than the last entry in the // JSON file, trim the file if too many entries then save the file. 
func LogVersionHistory() { - versionHistoryFilePath := filepath.Join(config.Datadog().GetString("run_path"), "version-history.json") - installInfoFilePath := GetFilePath(config.Datadog()) + versionHistoryFilePath := filepath.Join(pkgconfigsetup.Datadog().GetString("run_path"), "version-history.json") + installInfoFilePath := GetFilePath(pkgconfigsetup.Datadog()) logVersionHistoryToFile(versionHistoryFilePath, installInfoFilePath, version.AgentVersion, time.Now().UTC()) } diff --git a/pkg/util/kernel/find_headers.go b/pkg/util/kernel/find_headers.go index 8aa9888477528..245e582bad16b 100644 --- a/pkg/util/kernel/find_headers.go +++ b/pkg/util/kernel/find_headers.go @@ -22,7 +22,6 @@ import ( "strings" "sync" - model "github.com/DataDog/agent-payload/v5/process" "github.com/DataDog/datadog-go/v5/statsd" "github.com/DataDog/nikos/types" "golang.org/x/exp/maps" @@ -42,6 +41,22 @@ var versionCodeRegexp = regexp.MustCompile(`^#define[\t ]+LINUX_VERSION_CODE[\t var errReposDirInaccessible = errors.New("unable to access repos directory") +// Copied from https://github.com/DataDog/agent-payload/blob/master/process/connections.pb.go +// to avoid CGO dependency +var kernelHeaderFetchResultName = map[int]string{ + 0: "FetchNotAttempted", + 1: "CustomHeadersFound", + 2: "DefaultHeadersFound", + 3: "SysfsHeadersFound", + 4: "DownloadedHeadersFound", + 5: "DownloadSuccess", + 6: "HostVersionErr", + 7: "DownloadFailure", + 8: "ValidationFailure", + 9: "ReposDirAccessFailure", + 10: "HeadersNotFoundDownloadDisabled", +} + type headerFetchResult int const ( @@ -461,7 +476,7 @@ func submitTelemetry(result headerFetchResult, client statsd.ClientInterface) { khdTags := append(tags, fmt.Sprintf("result:%s", resultTag), - fmt.Sprintf("reason:%s", model.KernelHeaderFetchResult(result).String()), + fmt.Sprintf("reason:%s", kernelHeaderFetchResultName[int(result)]), ) if err := client.Count("datadog.system_probe.kernel_header_fetch.attempted", 1.0, khdTags, 1); err != nil && !errors.Is(err, 
statsd.ErrNoClient) { diff --git a/pkg/util/kubelet/hostname_test.go b/pkg/util/kubelet/hostname_test.go index e29c1d0f61d41..eb130f43e8e06 100644 --- a/pkg/util/kubelet/hostname_test.go +++ b/pkg/util/kubelet/hostname_test.go @@ -15,7 +15,6 @@ import ( "github.com/stretchr/testify/mock" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" @@ -33,7 +32,7 @@ func (m *kubeUtilMock) GetNodename(_ context.Context) (string, error) { } func TestHostnameProvider(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) ctx := context.Background() mockConfig := configmock.New(t) @@ -66,7 +65,7 @@ func TestHostnameProvider(t *testing.T) { } func TestHostnameProviderInvalid(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) ctx := context.Background() mockConfig := configmock.New(t) diff --git a/pkg/util/kubernetes/apiserver/apiserver.go b/pkg/util/kubernetes/apiserver/apiserver.go index 8a0668a76d4ff..a760626982de1 100644 --- a/pkg/util/kubernetes/apiserver/apiserver.go +++ b/pkg/util/kubernetes/apiserver/apiserver.go @@ -41,7 +41,7 @@ import ( apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -147,9 +147,9 @@ type APIClient struct { func initAPIClient() { globalAPIClient = &APIClient{ - defaultClientTimeout: time.Duration(config.Datadog().GetInt64("kubernetes_apiserver_client_timeout")) * time.Second, - 
defaultInformerTimeout: time.Duration(config.Datadog().GetInt64("kubernetes_apiserver_informer_client_timeout")) * time.Second, - defaultInformerResyncPeriod: time.Duration(config.Datadog().GetInt64("kubernetes_informers_resync_period")) * time.Second, + defaultClientTimeout: time.Duration(pkgconfigsetup.Datadog().GetInt64("kubernetes_apiserver_client_timeout")) * time.Second, + defaultInformerTimeout: time.Duration(pkgconfigsetup.Datadog().GetInt64("kubernetes_apiserver_informer_client_timeout")) * time.Second, + defaultInformerResyncPeriod: time.Duration(pkgconfigsetup.Datadog().GetInt64("kubernetes_informers_resync_period")) * time.Second, } globalAPIClient.initRetry.SetupRetrier(&retry.Config{ //nolint:errcheck Name: "apiserver", @@ -200,15 +200,15 @@ func WaitForAPIClient(ctx context.Context) (*APIClient, error) { func getClientConfig(timeout time.Duration) (*rest.Config, error) { var clientConfig *rest.Config var err error - cfgPath := config.Datadog().GetString("kubernetes_kubeconfig_path") + cfgPath := pkgconfigsetup.Datadog().GetString("kubernetes_kubeconfig_path") if cfgPath == "" { clientConfig, err = rest.InClusterConfig() - if !config.Datadog().GetBool("kubernetes_apiserver_tls_verify") { + if !pkgconfigsetup.Datadog().GetBool("kubernetes_apiserver_tls_verify") { clientConfig.TLSClientConfig.Insecure = true } - if customCAPath := config.Datadog().GetString("kubernetes_apiserver_ca_path"); customCAPath != "" { + if customCAPath := pkgconfigsetup.Datadog().GetString("kubernetes_apiserver_ca_path"); customCAPath != "" { clientConfig.TLSClientConfig.CAFile = customCAPath } @@ -225,7 +225,7 @@ func getClientConfig(timeout time.Duration) (*rest.Config, error) { } } - if config.Datadog().GetBool("kubernetes_apiserver_use_protobuf") { + if pkgconfigsetup.Datadog().GetBool("kubernetes_apiserver_use_protobuf") { clientConfig.ContentType = "application/vnd.kubernetes.protobuf" } @@ -367,20 +367,20 @@ func (c *APIClient) connect() error { // Creating informers 
c.InformerFactory = c.GetInformerWithOptions(nil) - if config.Datadog().GetBool("admission_controller.enabled") || - config.Datadog().GetBool("compliance_config.enabled") || - config.Datadog().GetBool("orchestrator_explorer.enabled") || - config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") || - config.Datadog().GetBool("external_metrics_provider.wpa_controller") || - config.Datadog().GetBool("cluster_checks.enabled") || - config.Datadog().GetBool("autoscaling.workload.enabled") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") || + pkgconfigsetup.Datadog().GetBool("compliance_config.enabled") || + pkgconfigsetup.Datadog().GetBool("orchestrator_explorer.enabled") || + pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") || + pkgconfigsetup.Datadog().GetBool("external_metrics_provider.wpa_controller") || + pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") || + pkgconfigsetup.Datadog().GetBool("autoscaling.workload.enabled") { c.DynamicInformerFactory = dynamicinformer.NewDynamicSharedInformerFactory(c.DynamicInformerCl, c.defaultInformerResyncPeriod) } - if config.Datadog().GetBool("admission_controller.enabled") { + if pkgconfigsetup.Datadog().GetBool("admission_controller.enabled") { nameFieldkey := "metadata.name" optionsForService := func(options *metav1.ListOptions) { - options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, config.Datadog().GetString("admission_controller.certificate.secret_name")).String() + options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, pkgconfigsetup.Datadog().GetString("admission_controller.certificate.secret_name")).String() } c.CertificateSecretInformerFactory = c.GetInformerWithOptions( nil, @@ -389,7 +389,7 @@ func (c *APIClient) connect() error { ) optionsForWebhook := func(options *metav1.ListOptions) { - options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, 
config.Datadog().GetString("admission_controller.webhook_name")).String() + options.FieldSelector = fields.OneTermEqualSelector(nameFieldkey, pkgconfigsetup.Datadog().GetString("admission_controller.webhook_name")).String() } c.WebhookConfigInformerFactory = c.GetInformerWithOptions( nil, @@ -417,7 +417,7 @@ type MetadataMapperBundle struct { func NewMetadataMapperBundle() *MetadataMapperBundle { return &MetadataMapperBundle{ Services: apiv1.NewNamespacesPodsStringsSet(), - mapOnIP: config.Datadog().GetBool("kubernetes_map_services_on_ip"), + mapOnIP: pkgconfigsetup.Datadog().GetBool("kubernetes_map_services_on_ip"), } } @@ -449,7 +449,7 @@ func (c *APIClient) GetTokenFromConfigmap(token string) (string, time.Time, erro namespace := common.GetResourcesNamespace() nowTs := time.Now() - configMapDCAToken := config.Datadog().GetString("cluster_agent.token_name") + configMapDCAToken := pkgconfigsetup.Datadog().GetString("cluster_agent.token_name") cmEvent, err := c.getOrCreateConfigMap(configMapDCAToken, namespace) if err != nil { // we do not process event if we can't interact with the CM. 
@@ -488,7 +488,7 @@ func (c *APIClient) GetTokenFromConfigmap(token string) (string, time.Time, erro // sets its collected timestamp in the ConfigMap `configmaptokendca` func (c *APIClient) UpdateTokenInConfigmap(token, tokenValue string, timestamp time.Time) error { namespace := common.GetResourcesNamespace() - configMapDCAToken := config.Datadog().GetString("cluster_agent.token_name") + configMapDCAToken := pkgconfigsetup.Datadog().GetString("cluster_agent.token_name") tokenConfigMap, err := c.getOrCreateConfigMap(configMapDCAToken, namespace) if err != nil { return err diff --git a/pkg/util/kubernetes/apiserver/common/common.go b/pkg/util/kubernetes/apiserver/common/common.go index e9999f8f97bdd..d2acbcc303c0e 100644 --- a/pkg/util/kubernetes/apiserver/common/common.go +++ b/pkg/util/kubernetes/apiserver/common/common.go @@ -18,7 +18,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/setup/constants" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -29,7 +30,7 @@ const ( // GetResourcesNamespace is used to fetch the namespace of the resources used by the Kubernetes check (e.g. Leader Election, Event collection). 
func GetResourcesNamespace() string { - namespace := config.Datadog().GetString("kube_resources_namespace") + namespace := pkgconfigsetup.Datadog().GetString("kube_resources_namespace") if namespace != "" { return namespace } @@ -63,7 +64,7 @@ func GetKubeSystemUID(coreClient corev1.CoreV1Interface) (string, error) { // It first checks if the CM exists, in which case it uses the ID it contains // It thus requires get, create, and update perms on configmaps in the cluster-agent's namespace func GetOrCreateClusterID(coreClient corev1.CoreV1Interface) (string, error) { - cacheClusterIDKey := cache.BuildAgentKey(config.ClusterIDCacheKey) + cacheClusterIDKey := cache.BuildAgentKey(constants.ClusterIDCacheKey) x, found := cache.Cache.Get(cacheClusterIDKey) if found { return x.(string), nil diff --git a/pkg/util/kubernetes/apiserver/controllers/controller_util.go b/pkg/util/kubernetes/apiserver/controllers/controller_util.go index ab277b7f040fb..adca9baaa6915 100644 --- a/pkg/util/kubernetes/apiserver/controllers/controller_util.go +++ b/pkg/util/kubernetes/apiserver/controllers/controller_util.go @@ -22,7 +22,7 @@ import ( datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/autoscalers" @@ -44,8 +44,8 @@ func newAutoscalersController(client kubernetes.Interface, h.toStore.data = make(map[string]custommetrics.ExternalMetricValue) - gcPeriodSeconds := config.Datadog().GetInt("hpa_watcher_gc_period") - refreshPeriod := config.Datadog().GetInt("external_metrics_provider.refresh_period") + gcPeriodSeconds := pkgconfigsetup.Datadog().GetInt("hpa_watcher_gc_period") + 
refreshPeriod := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.refresh_period") if gcPeriodSeconds <= 0 || refreshPeriod <= 0 { return nil, fmt.Errorf("tickers must be strictly positive in the autoscalersController"+ diff --git a/pkg/util/kubernetes/apiserver/controllers/controllers.go b/pkg/util/kubernetes/apiserver/controllers/controllers.go index 0b514a7515846..174e793165540 100644 --- a/pkg/util/kubernetes/apiserver/controllers/controllers.go +++ b/pkg/util/kubernetes/apiserver/controllers/controllers.go @@ -24,7 +24,7 @@ import ( datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -43,21 +43,21 @@ type controllerFuncs struct { var controllerCatalog = map[controllerName]controllerFuncs{ metadataControllerName: { - func() bool { return config.Datadog().GetBool("kubernetes_collect_metadata_tags") }, + func() bool { return pkgconfigsetup.Datadog().GetBool("kubernetes_collect_metadata_tags") }, startMetadataController, }, autoscalersControllerName: { func() bool { - return config.Datadog().GetBool("external_metrics_provider.enabled") && !config.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") + return pkgconfigsetup.Datadog().GetBool("external_metrics_provider.enabled") && !pkgconfigsetup.Datadog().GetBool("external_metrics_provider.use_datadogmetric_crd") }, startAutoscalersController, }, servicesControllerName: { - func() bool { return config.Datadog().GetBool("cluster_checks.enabled") }, + func() bool { return pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") }, registerServicesInformer, }, endpointsControllerName: { - func() bool { 
return config.Datadog().GetBool("cluster_checks.enabled") }, + func() bool { return pkgconfigsetup.Datadog().GetBool("cluster_checks.enabled") }, registerEndpointsInformer, }, } @@ -156,7 +156,7 @@ func startAutoscalersController(ctx *ControllerContext, c chan error) { return } - if config.Datadog().GetBool("external_metrics_provider.wpa_controller") { + if pkgconfigsetup.Datadog().GetBool("external_metrics_provider.wpa_controller") { go autoscalersController.runWPA(ctx.StopCh, ctx.DynamicClient, ctx.DynamicInformerFactory) } diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go index 37396c33c0e28..45ea62c6e2d26 100644 --- a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection.go @@ -16,9 +16,18 @@ import ( "sync" "time" + "golang.org/x/mod/semver" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/leaderelection" + rl "k8s.io/client-go/tools/leaderelection/resourcelock" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" configmaplock "github.com/DataDog/datadog-agent/internal/third_party/client-go/tools/leaderelection/resourcelock" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -26,14 +35,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/retry" - "golang.org/x/mod/semver" - kerrors 
"k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/leaderelection" - rl "k8s.io/client-go/tools/leaderelection/resourcelock" ) const ( @@ -75,9 +76,9 @@ type LeaderEngine struct { func newLeaderEngine(ctx context.Context) *LeaderEngine { return &LeaderEngine{ ctx: ctx, - LeaseName: config.Datadog().GetString("leader_lease_name"), + LeaseName: pkgconfigsetup.Datadog().GetString("leader_lease_name"), LeaderNamespace: common.GetResourcesNamespace(), - ServiceName: config.Datadog().GetString("cluster_agent.kubernetes_service_name"), + ServiceName: pkgconfigsetup.Datadog().GetString("cluster_agent.kubernetes_service_name"), leaderMetric: metrics.NewLeaderMetric(), subscribers: []chan struct{}{}, LeaseDuration: defaultLeaderLeaseDuration, @@ -139,7 +140,7 @@ func (le *LeaderEngine) init() error { } log.Debugf("Init LeaderEngine with HolderIdentity: %q", le.HolderIdentity) - leaseDuration := config.Datadog().GetInt("leader_lease_duration") + leaseDuration := pkgconfigsetup.Datadog().GetInt("leader_lease_duration") if leaseDuration > 0 { le.LeaseDuration = time.Duration(leaseDuration) * time.Second } else { @@ -307,7 +308,7 @@ func detectLeases(client discovery.DiscoveryInterface) (bool, error) { // CanUseLeases returns if leases can be used for leader election. If the resource is defined in the config // It uses it. Otherwise it uses the discovery client for leader election. 
func CanUseLeases(client discovery.DiscoveryInterface) (bool, error) { - resourceType := config.Datadog().GetString("leader_election_default_resource") + resourceType := pkgconfigsetup.Datadog().GetString("leader_election_default_resource") if resourceType == "lease" || resourceType == "leases" { return true, nil } else if resourceType == "configmap" || resourceType == "configmaps" { @@ -323,7 +324,7 @@ func CanUseLeases(client discovery.DiscoveryInterface) (bool, error) { func getLeaseLeaderElectionRecord(client coordinationv1.CoordinationV1Interface) (rl.LeaderElectionRecord, error) { var empty rl.LeaderElectionRecord - lease, err := client.Leases(common.GetResourcesNamespace()).Get(context.TODO(), config.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) + lease, err := client.Leases(common.GetResourcesNamespace()).Get(context.TODO(), pkgconfigsetup.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) if err != nil { return empty, err } @@ -334,7 +335,7 @@ func getLeaseLeaderElectionRecord(client coordinationv1.CoordinationV1Interface) func getConfigMapLeaderElectionRecord(client corev1.CoreV1Interface) (rl.LeaderElectionRecord, error) { var led rl.LeaderElectionRecord - leaderElectionCM, err := client.ConfigMaps(common.GetResourcesNamespace()).Get(context.TODO(), config.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) + leaderElectionCM, err := client.ConfigMaps(common.GetResourcesNamespace()).Get(context.TODO(), pkgconfigsetup.Datadog().GetString("leader_lease_name"), metav1.GetOptions{}) if err != nil { return led, err } diff --git a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go index 66e19775eecb9..d134c95f17eb2 100644 --- a/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go +++ b/pkg/util/kubernetes/apiserver/leaderelection/leaderelection_engine.go @@ -24,7 +24,7 @@ import ( "k8s.io/client-go/tools/record" 
configmaplock "github.com/DataDog/datadog-agent/internal/third_party/client-go/tools/leaderelection/resourcelock" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -192,7 +192,7 @@ func (le *LeaderEngine) newElection() (*ld.LeaderElector, error) { electionConfig := ld.LeaderElectionConfig{ // ReleaseOnCancel updates the leader election lock when the main context is canceled by setting the Lease Duration to 1s. // It allows the next DCA to initialize faster. However, it performs a network call on shutdown. - ReleaseOnCancel: config.Datadog().GetBool("leader_election_release_on_shutdown"), + ReleaseOnCancel: pkgconfigsetup.Datadog().GetBool("leader_election_release_on_shutdown"), Lock: leaderElectorInterface, LeaseDuration: le.LeaseDuration, RenewDeadline: le.LeaseDuration / 2, diff --git a/pkg/util/kubernetes/apiserver/util.go b/pkg/util/kubernetes/apiserver/util.go index 8ed06c75c4d07..1790de6682038 100644 --- a/pkg/util/kubernetes/apiserver/util.go +++ b/pkg/util/kubernetes/apiserver/util.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/watermarkpodautoscaler/api/v1alpha1" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -28,7 +28,7 @@ import ( // An extra timeout duration can be provided depending on the informer func SyncInformers(informers map[InformerName]cache.SharedInformer, extraWait time.Duration) error { var g errgroup.Group - timeoutConfig := config.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second + timeoutConfig := pkgconfigsetup.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second // syncTimeout can be used to wait for the kubernetes client-go cache to sync. 
// It cannot be retrieved at the package-level due to the package being imported before configs are loaded. syncTimeout := timeoutConfig + extraWait @@ -60,7 +60,7 @@ type syncInformerResult struct { func SyncInformersReturnErrors(informers map[InformerName]cache.SharedInformer, extraWait time.Duration) map[InformerName]error { resultChan := make(chan syncInformerResult) errors := make(map[InformerName]error, len(informers)) - timeoutConfig := config.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second + timeoutConfig := pkgconfigsetup.Datadog().GetDuration("kube_cache_sync_timeout_seconds") * time.Second // syncTimeout can be used to wait for the kubernetes client-go cache to sync. // It cannot be retrieved at the package-level due to the package being imported before configs are loaded. syncTimeout := timeoutConfig + extraWait diff --git a/pkg/util/kubernetes/autoscalers/datadogexternal.go b/pkg/util/kubernetes/autoscalers/datadogexternal.go index 7903dbd75a84e..11e44050650ed 100644 --- a/pkg/util/kubernetes/autoscalers/datadogexternal.go +++ b/pkg/util/kubernetes/autoscalers/datadogexternal.go @@ -19,7 +19,7 @@ import ( "gopkg.in/zorkian/go-datadog-api.v2" utilserror "k8s.io/apimachinery/pkg/util/errors" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/telemetry" le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -76,7 +76,7 @@ var ( func getMinRemainingRequestsTracker() *minTracker { once.Do(func() { - refreshPeriod := config.Datadog().GetInt("external_metrics_provider.refresh_period") + refreshPeriod := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.refresh_period") expiryDuration := 2 * refreshPeriod minRemainingRequestsTracker = newMinTracker(time.Duration(time.Duration(expiryDuration) * time.Second)) }) @@ -92,6 +92,14 @@ func 
isRateLimitError(err error) bool { return strings.Contains(err.Error(), "429 Too Many Requests") } +// isUnprocessableEntityError is a helper function that checks if the received error is an unprocessable entity error +func isUnprocessableEntityError(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "422 Unprocessable Entity") +} + // queryDatadogExternal converts the metric name and labels from the Ref format into a Datadog metric. // It returns the last value for a bucket of 5 minutes, func (p *Processor) queryDatadogExternal(ddQueries []string, timeWindow time.Duration) (map[string]Point, error) { @@ -108,6 +116,8 @@ func (p *Processor) queryDatadogExternal(ddQueries []string, timeWindow time.Dur if err != nil { if isRateLimitError(err) { ddRequests.Inc("rate_limit_error", le.JoinLeaderValue) + } else if isUnprocessableEntityError(err) { + ddRequests.Inc("unprocessable_entity_error", le.JoinLeaderValue) } else { ddRequests.Inc("error", le.JoinLeaderValue) } diff --git a/pkg/util/kubernetes/autoscalers/datadogexternal_test.go b/pkg/util/kubernetes/autoscalers/datadogexternal_test.go index 1f72345f02d08..b806cfee4487a 100644 --- a/pkg/util/kubernetes/autoscalers/datadogexternal_test.go +++ b/pkg/util/kubernetes/autoscalers/datadogexternal_test.go @@ -17,7 +17,7 @@ import ( "gopkg.in/zorkian/go-datadog-api.v2" datadogclientmock "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/mock" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -251,7 +251,7 @@ func TestDatadogExternalQuery(t *testing.T) { datadogClientComp := datadogclientmock.New(t).Comp datadogClientComp.SetQueryMetricsFunc(test.queryfunc) p := Processor{datadogClient: datadogClientComp} - points, err := p.queryDatadogExternal(test.metricName, 
time.Duration(config.Datadog().GetInt64("external_metrics_provider.bucket_size"))*time.Second) + points, err := p.queryDatadogExternal(test.metricName, time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.bucket_size"))*time.Second) if test.err != nil { require.EqualError(t, test.err, err.Error()) } @@ -303,3 +303,39 @@ func TestIsRateLimitError(t *testing.T) { }) } } + +func TestIsUnprocessableEntityError(t *testing.T) { + + tests := []struct { + name string + err error + isUnprocessableEntity bool + }{ + { + name: "nil error", + err: nil, + isUnprocessableEntity: false, + }, + { + name: "empty error", + err: errors.New(""), + isUnprocessableEntity: false, + }, + { + name: "unprocessable entity error", + err: errors.New("422 Unprocessable Entity"), + isUnprocessableEntity: true, + }, + { + name: "unprocessable entity error variant", + err: errors.New("API error 422 Unprocessable Entity: "), + isUnprocessableEntity: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require.Equal(t, isUnprocessableEntityError(test.err), test.isUnprocessableEntity) + }) + } +} diff --git a/pkg/util/kubernetes/autoscalers/processor.go b/pkg/util/kubernetes/autoscalers/processor.go index 3440c6ba01e2d..3ecb74adf1864 100644 --- a/pkg/util/kubernetes/autoscalers/processor.go +++ b/pkg/util/kubernetes/autoscalers/processor.go @@ -23,7 +23,7 @@ import ( datadogclientcomp "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -55,7 +55,7 @@ type queryResponse struct { // NewProcessor returns a new Processor func NewProcessor(datadogCl datadogclientcomp.Component) *Processor { - externalMaxAge := math.Max(config.Datadog().GetFloat64("external_metrics_provider.max_age"), 
3*config.Datadog().GetFloat64("external_metrics_provider.rollup")) + externalMaxAge := math.Max(pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.max_age"), 3*pkgconfigsetup.Datadog().GetFloat64("external_metrics_provider.rollup")) return &Processor{ externalMaxAge: time.Duration(externalMaxAge) * time.Second, datadogClient: datadogCl, @@ -108,24 +108,24 @@ func (p *Processor) ProcessWPAs(wpa *v1alpha1.WatermarkPodAutoscaler) map[string // GetDefaultMaxAge returns the configured default max age. func GetDefaultMaxAge() time.Duration { - return time.Duration(config.Datadog().GetInt64("external_metrics_provider.max_age")) * time.Second + return time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.max_age")) * time.Second } // GetDefaultTimeWindow returns the configured default time window func GetDefaultTimeWindow() time.Duration { - return time.Duration(config.Datadog().GetInt64("external_metrics_provider.bucket_size")) * time.Second + return time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.bucket_size")) * time.Second } // GetDefaultMaxTimeWindow returns the configured max time window func GetDefaultMaxTimeWindow() time.Duration { - return time.Duration(config.Datadog().GetInt64("external_metrics_provider.max_time_window")) * time.Second + return time.Duration(pkgconfigsetup.Datadog().GetInt64("external_metrics_provider.max_time_window")) * time.Second } // UpdateExternalMetrics does the validation and processing of the ExternalMetrics // TODO if a metric's ts in emList is too recent, no need to add it to the batchUpdate. 
func (p *Processor) UpdateExternalMetrics(emList map[string]custommetrics.ExternalMetricValue) (updated map[string]custommetrics.ExternalMetricValue) { - aggregator := config.Datadog().GetString("external_metrics.aggregator") - rollup := config.Datadog().GetInt("external_metrics_provider.rollup") + aggregator := pkgconfigsetup.Datadog().GetString("external_metrics.aggregator") + rollup := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.rollup") maxAge := int64(p.externalMaxAge.Seconds()) var err error updated = make(map[string]custommetrics.ExternalMetricValue) @@ -221,7 +221,7 @@ func isURLBeyondLimits(uriLength, numBuckets int) (bool, error) { return true, fmt.Errorf("Query is too long, could yield a server side error. Dropping") } - chunkSize := config.Datadog().GetInt("external_metrics_provider.chunk_size") + chunkSize := pkgconfigsetup.Datadog().GetInt("external_metrics_provider.chunk_size") return uriLength >= maxCharactersPerChunk || numBuckets >= chunkSize, nil } diff --git a/pkg/util/kubernetes/clustername/clustername.go b/pkg/util/kubernetes/clustername/clustername.go index d6b283aff3d7a..094dbf9542b84 100644 --- a/pkg/util/kubernetes/clustername/clustername.go +++ b/pkg/util/kubernetes/clustername/clustername.go @@ -14,8 +14,9 @@ import ( "strings" "sync" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/setup/constants" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/azure" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" @@ -74,7 +75,7 @@ func getClusterName(ctx context.Context, data *clusterNameData, hostname string) } if !data.initDone { - data.clusterName = config.Datadog().GetString("cluster_name") + data.clusterName = pkgconfigsetup.Datadog().GetString("cluster_name") if data.clusterName != "" { 
log.Infof("Got cluster name %s from config", data.clusterName) // the host alias "hostname-clustername" must not exceed 255 chars @@ -154,7 +155,7 @@ func GetClusterName(ctx context.Context, hostname string) string { // "enabled_rfc1123_compliant_cluster_name_tag" is set to "true" // this allow to limit the risk of breaking user that currently rely on previous `kube_cluster_name` tag value. func GetClusterNameTagValue(ctx context.Context, hostname string) string { - if config.Datadog().GetBool("enabled_rfc1123_compliant_cluster_name_tag") { + if pkgconfigsetup.Datadog().GetBool("enabled_rfc1123_compliant_cluster_name_tag") { return GetRFC1123CompliantClusterName(ctx, hostname) } return GetClusterName(ctx, hostname) @@ -187,7 +188,7 @@ func ResetClusterName() { // This variable should come from a configmap, created by the cluster-agent. // This function is meant for the node-agent to call (cluster-agent should call GetOrCreateClusterID) func GetClusterID() (string, error) { - cacheClusterIDKey := cache.BuildAgentKey(config.ClusterIDCacheKey) + cacheClusterIDKey := cache.BuildAgentKey(constants.ClusterIDCacheKey) if cachedClusterID, found := cache.Cache.Get(cacheClusterIDKey); found { return cachedClusterID.(string), nil } diff --git a/pkg/util/kubernetes/clustername/clustername_test.go b/pkg/util/kubernetes/clustername/clustername_test.go index f9c0ca366f8c9..674ab904ae23f 100644 --- a/pkg/util/kubernetes/clustername/clustername_test.go +++ b/pkg/util/kubernetes/clustername/clustername_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -19,7 +18,7 @@ import ( func TestGetClusterName(t *testing.T) { ctx := context.Background() mockConfig := configmock.New(t) - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) data := newClusterNameData() testClusterName := 
"laika" diff --git a/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go b/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go index 73558be8c801b..7e2470d590f11 100644 --- a/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go +++ b/pkg/util/kubernetes/hostinfo/cluster_name_from_node_label.go @@ -8,7 +8,7 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) const ( @@ -41,7 +41,7 @@ func (n *NodeInfo) GetNodeClusterNameLabel(ctx context.Context, clusterName stri var clusterNameLabelKeys []clusterNameLabelType // check if a node label has been added on the config - if customLabels := config.Datadog().GetString("kubernetes_node_label_as_cluster_name"); customLabels != "" { + if customLabels := pkgconfigsetup.Datadog().GetString("kubernetes_node_label_as_cluster_name"); customLabels != "" { clusterNameLabelKeys = append(clusterNameLabelKeys, clusterNameLabelType{key: customLabels, shouldOverride: true}) } else { // Use default configuration diff --git a/pkg/util/kubernetes/hostinfo/no_tags.go b/pkg/util/kubernetes/hostinfo/no_tags.go index f81cd46cbd673..a7648df88c274 100644 --- a/pkg/util/kubernetes/hostinfo/no_tags.go +++ b/pkg/util/kubernetes/hostinfo/no_tags.go @@ -10,14 +10,14 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" ) // KubeNodeTagsProvider allows computing node tags based on the user configurations for node labels and annotations as tags type KubeNodeTagsProvider struct{} // NewKubeNodeTagsProvider creates and returns a new kube node tags provider object -func NewKubeNodeTagsProvider(_ config.Reader) KubeNodeTagsProvider { +func NewKubeNodeTagsProvider(_ model.Reader) KubeNodeTagsProvider { return KubeNodeTagsProvider{} } diff --git a/pkg/util/kubernetes/hostinfo/node_annotations.go b/pkg/util/kubernetes/hostinfo/node_annotations.go 
index 04188fea28640..b65f217edf048 100644 --- a/pkg/util/kubernetes/hostinfo/node_annotations.go +++ b/pkg/util/kubernetes/hostinfo/node_annotations.go @@ -10,7 +10,7 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) @@ -27,7 +27,7 @@ func GetNodeAnnotations(ctx context.Context) (map[string]string, error) { return nil, err } - if config.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { cl, err := clusteragent.GetClusterAgentClient() if err != nil { return nil, err diff --git a/pkg/util/kubernetes/hostinfo/node_labels.go b/pkg/util/kubernetes/hostinfo/node_labels.go index 23f0ce819d435..52afb1a15e64c 100644 --- a/pkg/util/kubernetes/hostinfo/node_labels.go +++ b/pkg/util/kubernetes/hostinfo/node_labels.go @@ -10,7 +10,7 @@ package hostinfo import ( "context" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" ) @@ -49,7 +49,7 @@ func (n *NodeInfo) GetNodeLabels(ctx context.Context) (map[string]string, error) return nil, err } - if config.Datadog().GetBool("cluster_agent.enabled") { + if pkgconfigsetup.Datadog().GetBool("cluster_agent.enabled") { cl, err := n.getClusterAgentFunc() if err != nil { return nil, err diff --git a/pkg/util/kubernetes/hostinfo/tags.go b/pkg/util/kubernetes/hostinfo/tags.go index 6fc7e4314b722..fa2a8e47d1c89 100644 --- a/pkg/util/kubernetes/hostinfo/tags.go +++ b/pkg/util/kubernetes/hostinfo/tags.go @@ -13,7 +13,7 @@ import ( k8smetadata "github.com/DataDog/datadog-agent/comp/core/tagger/k8s_metadata" "github.com/DataDog/datadog-agent/comp/core/tagger/taglist" - 
"github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/pkg/config/model" configutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -25,7 +25,7 @@ type KubeNodeTagsProvider struct { } // NewKubeNodeTagsProvider creates and returns a new kube node tags provider object -func NewKubeNodeTagsProvider(conf config.Reader) KubeNodeTagsProvider { +func NewKubeNodeTagsProvider(conf model.Reader) KubeNodeTagsProvider { return KubeNodeTagsProvider{configutils.GetMetadataAsTags(conf)} } diff --git a/pkg/util/kubernetes/kubelet/json.go b/pkg/util/kubernetes/kubelet/json.go index 3a3b103be3501..caba0719737f3 100644 --- a/pkg/util/kubernetes/kubelet/json.go +++ b/pkg/util/kubernetes/kubelet/json.go @@ -13,7 +13,7 @@ import ( jsoniter "github.com/json-iterator/go" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) // jsoniterConfig mirrors jsoniter.ConfigFastest @@ -34,7 +34,7 @@ type podUnmarshaller struct { func newPodUnmarshaller() *podUnmarshaller { pu := &podUnmarshaller{ - podExpirationDuration: config.Datadog().GetDuration("kubernetes_pod_expiration_duration") * time.Second, + podExpirationDuration: pkgconfigsetup.Datadog().GetDuration("kubernetes_pod_expiration_duration") * time.Second, timeNowFunction: time.Now, } diff --git a/pkg/util/kubernetes/kubelet/kubelet.go b/pkg/util/kubernetes/kubelet/kubelet.go index 62fe74f9183af..b1622e27bf2ea 100644 --- a/pkg/util/kubernetes/kubelet/kubelet.go +++ b/pkg/util/kubernetes/kubelet/kubelet.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/containers" @@ -89,11 +89,11 @@ func (ku 
*KubeUtil) init() error { func NewKubeUtil() *KubeUtil { ku := &KubeUtil{ rawConnectionInfo: make(map[string]string), - podListCacheDuration: config.Datadog().GetDuration("kubelet_cache_pods_duration") * time.Second, + podListCacheDuration: pkgconfigsetup.Datadog().GetDuration("kubelet_cache_pods_duration") * time.Second, podUnmarshaller: newPodUnmarshaller(), } - waitOnMissingContainer := config.Datadog().GetDuration("kubelet_wait_on_missing_container") + waitOnMissingContainer := pkgconfigsetup.Datadog().GetDuration("kubelet_wait_on_missing_container") if waitOnMissingContainer > 0 { ku.waitOnMissingContainer = waitOnMissingContainer * time.Second } diff --git a/pkg/util/kubernetes/kubelet/kubelet_client.go b/pkg/util/kubernetes/kubelet/kubelet_client.go index fa3d3c3cda249..32f3745fe79be 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_client.go +++ b/pkg/util/kubernetes/kubelet/kubelet_client.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -164,16 +164,16 @@ func getKubeletClient(ctx context.Context) (*kubeletClient, error) { var err error kubeletTimeout := 30 * time.Second - kubeletProxyEnabled := config.Datadog().GetBool("eks_fargate") - kubeletHost := config.Datadog().GetString("kubernetes_kubelet_host") - kubeletHTTPSPort := config.Datadog().GetInt("kubernetes_https_kubelet_port") - kubeletHTTPPort := config.Datadog().GetInt("kubernetes_http_kubelet_port") - kubeletTLSVerify := config.Datadog().GetBool("kubelet_tls_verify") - kubeletCAPath := config.Datadog().GetString("kubelet_client_ca") - kubeletTokenPath := config.Datadog().GetString("kubelet_auth_token_path") - kubeletClientCertPath := config.Datadog().GetString("kubelet_client_crt") - kubeletClientKeyPath := 
config.Datadog().GetString("kubelet_client_key") - kubeletNodeName := config.Datadog().Get("kubernetes_kubelet_nodename") + kubeletProxyEnabled := pkgconfigsetup.Datadog().GetBool("eks_fargate") + kubeletHost := pkgconfigsetup.Datadog().GetString("kubernetes_kubelet_host") + kubeletHTTPSPort := pkgconfigsetup.Datadog().GetInt("kubernetes_https_kubelet_port") + kubeletHTTPPort := pkgconfigsetup.Datadog().GetInt("kubernetes_http_kubelet_port") + kubeletTLSVerify := pkgconfigsetup.Datadog().GetBool("kubelet_tls_verify") + kubeletCAPath := pkgconfigsetup.Datadog().GetString("kubelet_client_ca") + kubeletTokenPath := pkgconfigsetup.Datadog().GetString("kubelet_auth_token_path") + kubeletClientCertPath := pkgconfigsetup.Datadog().GetString("kubelet_client_crt") + kubeletClientKeyPath := pkgconfigsetup.Datadog().GetString("kubelet_client_key") + kubeletNodeName := pkgconfigsetup.Datadog().Get("kubernetes_kubelet_nodename") var kubeletPathPrefix string var kubeletToken string @@ -209,7 +209,7 @@ func getKubeletClient(ctx context.Context) (*kubeletClient, error) { } kubeletHTTPSPort = int(httpsPort) - if config.Datadog().Get("kubernetes_kubelet_nodename") != "" { + if pkgconfigsetup.Datadog().Get("kubernetes_kubelet_nodename") != "" { kubeletPathPrefix = fmt.Sprintf("/api/v1/nodes/%s/proxy", kubeletNodeName) apiServerIP := os.Getenv("KUBERNETES_SERVICE_HOST") diff --git a/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go b/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go index 88e49edb841c1..83d84de59cda6 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go +++ b/pkg/util/kubernetes/kubelet/kubelet_orchestrator_test.go @@ -15,8 +15,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup 
"github.com/DataDog/datadog-agent/pkg/util/log/setup" ) type KubeletOrchestratorTestSuite struct { @@ -85,14 +86,15 @@ func (suite *KubeletOrchestratorTestSuite) TestGetRawLocalPodList() { } func TestKubeletOrchestratorTestSuite(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "trace", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) suite.Run(t, new(KubeletOrchestratorTestSuite)) } diff --git a/pkg/util/kubernetes/kubelet/kubelet_test.go b/pkg/util/kubernetes/kubelet/kubelet_test.go index eacb28eaaff47..5ced2ecf98817 100644 --- a/pkg/util/kubernetes/kubelet/kubelet_test.go +++ b/pkg/util/kubernetes/kubelet/kubelet_test.go @@ -28,10 +28,11 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/util/log" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) const ( @@ -829,14 +830,15 @@ func (suite *KubeletTestSuite) TestPodListExpire() { } func TestKubeletTestSuite(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "trace", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) suite.Run(t, new(KubeletTestSuite)) } diff --git a/pkg/util/log/log_podman_util.go b/pkg/util/log/log_podman_util.go new file mode 100644 index 0000000000000..4327572820cff --- /dev/null +++ b/pkg/util/log/log_podman_util.go @@ -0,0 +1,27 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package log + +import ( + "strings" +) + +// The paths below are set in podman code and cannot be modified by the user. +// Ref: https://github.com/containers/podman/blob/7c38ee756592d95e718967fcd3983b81abd95e76/test/e2e/run_transient_test.go#L19-L45 +const ( + sqlDBSuffix string = "/storage/db.sql" + boltDBSuffix string = "/storage/libpod/bolt_state.db" +) + +// ExtractPodmanRootDirFromDBPath extracts the podman base path for the containers directory based on the user-provided `podman_db_path`. +func ExtractPodmanRootDirFromDBPath(podmanDBPath string) string { + if strings.HasSuffix(podmanDBPath, sqlDBSuffix) { + return strings.TrimSuffix(podmanDBPath, sqlDBSuffix) + } else if strings.HasSuffix(podmanDBPath, boltDBSuffix) { + return strings.TrimSuffix(podmanDBPath, boltDBSuffix) + } + return "" +} diff --git a/pkg/util/log/log_podman_util_test.go b/pkg/util/log/log_podman_util_test.go new file mode 100644 index 0000000000000..13e9a601b2d0d --- /dev/null +++ b/pkg/util/log/log_podman_util_test.go @@ -0,0 +1,33 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package log + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_ExtractPodmanRootDirFromDBPath(t *testing.T) { + testCases := []struct { + name string + input string + expected string + }{ + {"Rootless & BoltDB", "/data/containers_tomcat/storage/libpod/bolt_state.db", "/data/containers_tomcat"}, + {"Rootfull & BoltDB", "/var/lib/containers/storage/libpod/bolt_state.db", "/var/lib/containers"}, + {"Rootless & SQLite", "/home/ubuntu/.local/share/containers/storage/db.sql", "/home/ubuntu/.local/share/containers"}, + {"Rootfull & SQLite", "/var/lib/containers/storage/db.sql", "/var/lib/containers"}, + {"No matching suffix", "/foo/bar/baz", ""}, + } + + for _, testCase := range testCases { + output := ExtractPodmanRootDirFromDBPath(testCase.input) + assert.Equal(t, testCase.expected, output, fmt.Sprintf("%s: Expected %s but output is %s for input %s", testCase.name, testCase.expected, output, testCase.input)) + } + +} diff --git a/pkg/util/pdhutil/pdhcounter.go b/pkg/util/pdhutil/pdhcounter.go index 95ddc1b6fbb0b..a92e39fd21394 100644 --- a/pkg/util/pdhutil/pdhcounter.go +++ b/pkg/util/pdhutil/pdhcounter.go @@ -9,7 +9,7 @@ package pdhutil import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" "golang.org/x/sys/windows" @@ -118,7 +118,7 @@ func (counter *pdhCounter) ShouldInit() bool { // already initialized return false } - var initFailLimit = config.Datadog().GetInt("windows_counter_init_failure_limit") + var initFailLimit = pkgconfigsetup.Datadog().GetInt("windows_counter_init_failure_limit") if initFailLimit > 0 && counter.initFailCount >= initFailLimit { counter.initError = fmt.Errorf("counter exceeded the maximum number of failed initialization attempts. 
This error indicates that the Windows performance counter database may need to be rebuilt") // attempts exceeded @@ -134,7 +134,7 @@ func (counter *pdhCounter) SetInitError(err error) error { } counter.initFailCount++ - var initFailLimit = config.Datadog().GetInt("windows_counter_init_failure_limit") + var initFailLimit = pkgconfigsetup.Datadog().GetInt("windows_counter_init_failure_limit") if initFailLimit > 0 && counter.initFailCount >= initFailLimit { err = fmt.Errorf("%v. Counter exceeded the maximum number of failed initialization attempts", err) } else if initFailLimit > 0 { diff --git a/pkg/util/pdhutil/pdhhelper.go b/pkg/util/pdhutil/pdhhelper.go index 48a43f4487b58..370c320f6e818 100644 --- a/pkg/util/pdhutil/pdhhelper.go +++ b/pkg/util/pdhutil/pdhhelper.go @@ -18,7 +18,7 @@ import ( "go.uber.org/atomic" "golang.org/x/sys/windows" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -92,7 +92,7 @@ func refreshPdhObjectCache(forceRefresh bool) (didrefresh bool, err error) { var len uint32 //revive:enable:redefines-builtin-id - refreshInterval := config.Datadog().GetInt("windows_counter_refresh_interval") + refreshInterval := pkgconfigsetup.Datadog().GetInt("windows_counter_refresh_interval") if refreshInterval == 0 { // refresh disabled return false, nil diff --git a/pkg/util/static_tags.go b/pkg/util/static_tags.go index 5239f3371ab6f..f0591d0243f9d 100644 --- a/pkg/util/static_tags.go +++ b/pkg/util/static_tags.go @@ -9,8 +9,8 @@ import ( "context" "strings" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fargate" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" @@ -33,7 +33,7 @@ func 
GetStaticTagsSlice(ctx context.Context) []string { tags := []string{} // DD_TAGS / DD_EXTRA_TAGS - tags = append(tags, configUtils.GetConfiguredTags(config.Datadog(), false)...) + tags = append(tags, configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), false)...) // EKS Fargate specific tags if env.IsFeaturePresent(env.EKSFargate) { diff --git a/pkg/util/static_tags_test.go b/pkg/util/static_tags_test.go index 8ab0ab515f0aa..e9d9de3374ad8 100644 --- a/pkg/util/static_tags_test.go +++ b/pkg/util/static_tags_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -21,7 +20,7 @@ func TestStaticTags(t *testing.T) { mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "eksnode") defer mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "") - config.SetFeatures(t, env.EKSFargate) + env.SetFeatures(t, env.EKSFargate) t.Run("just tags", func(t *testing.T) { mockConfig.SetWithoutSource("tags", []string{"some:tag", "another:tag", "nocolon"}) @@ -63,7 +62,7 @@ func TestStaticTagsSlice(t *testing.T) { mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "eksnode") defer mockConfig.SetWithoutSource("kubernetes_kubelet_nodename", "") - config.SetFeatures(t, env.EKSFargate) + env.SetFeatures(t, env.EKSFargate) t.Run("just tags", func(t *testing.T) { mockConfig.SetWithoutSource("tags", []string{"some:tag", "another:tag", "nocolon"}) diff --git a/release.json b/release.json index 34d4e70efb1cd..c486ba9a22ca2 100644 --- a/release.json +++ b/release.json @@ -3,12 +3,12 @@ "current_milestone": "7.59.0", "last_stable": { "6": "6.53.0", - "7": "7.57.0" + "7": "7.57.1" }, "nightly": { "INTEGRATIONS_CORE_VERSION": "master", - "OMNIBUS_SOFTWARE_VERSION": "5d4f6995c19b604d7fc876446e4350ce52b235fb", - "OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14", + 
"OMNIBUS_SOFTWARE_VERSION": "375618d70253293d71b13f9385260aa3dedd7125", + "OMNIBUS_RUBY_VERSION": "db96ee7e2a9269ab864f618d7c447bc22e107fff", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", "MACOS_BUILD_VERSION": "master", @@ -26,8 +26,8 @@ }, "nightly-a7": { "INTEGRATIONS_CORE_VERSION": "master", - "OMNIBUS_SOFTWARE_VERSION": "5d4f6995c19b604d7fc876446e4350ce52b235fb", - "OMNIBUS_RUBY_VERSION": "f3fc847e03ba7081e266b2d333210ba129128a14", + "OMNIBUS_SOFTWARE_VERSION": "375618d70253293d71b13f9385260aa3dedd7125", + "OMNIBUS_RUBY_VERSION": "db96ee7e2a9269ab864f618d7c447bc22e107fff", "JMXFETCH_VERSION": "0.49.3", "JMXFETCH_HASH": "258085a94d529a6bdf914db36dd50faf6fde2cebc44b1f54a60eb209a5d8917c", "MACOS_BUILD_VERSION": "master", diff --git a/releasenotes/notes/add-run-in-core-agent-to-template-e6c2c3134d2fb17d.yaml b/releasenotes/notes/add-run-in-core-agent-to-template-e6c2c3134d2fb17d.yaml new file mode 100644 index 0000000000000..204176b172025 --- /dev/null +++ b/releasenotes/notes/add-run-in-core-agent-to-template-e6c2c3134d2fb17d.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + Add ability to run process/container collection on the core Agent (Linux only). This is controlled + by the `process_config.run_in_core_agent.enabled` option in datadog.yaml. 
diff --git a/releasenotes/notes/addsbomtoconfig-99a7cd52fa412336.yaml b/releasenotes/notes/addsbomtoconfig-99a7cd52fa412336.yaml new file mode 100644 index 0000000000000..835cceda2fd29 --- /dev/null +++ b/releasenotes/notes/addsbomtoconfig-99a7cd52fa412336.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes the default configuration template to include the Cloud Security Management configuration options. diff --git a/releasenotes/notes/auto-instrumentation-fix-memory-request-8391bb2d06ac6990.yaml b/releasenotes/notes/auto-instrumentation-fix-memory-request-8391bb2d06ac6990.yaml new file mode 100644 index 0000000000000..4acb4ef044224 --- /dev/null +++ b/releasenotes/notes/auto-instrumentation-fix-memory-request-8391bb2d06ac6990.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fix OOM error with cluster agent auto instrumentation by increasing default memory request from 20Mi to 100Mi. diff --git a/releasenotes/notes/bump-go-to-1.22.7-1002302b0ef8c246.yaml b/releasenotes/notes/bump-go-to-1.22.7-1002302b0ef8c246.yaml new file mode 100644 index 0000000000000..54e50ffbf243f --- /dev/null +++ b/releasenotes/notes/bump-go-to-1.22.7-1002302b0ef8c246.yaml @@ -0,0 +1,4 @@ +--- +enhancements: +- | + Agents are now built with Go ``1.22.7``. 
diff --git a/releasenotes/notes/create-trace-context-from-step-function-invocation-7b9d1c3536f7135b.yaml b/releasenotes/notes/create-trace-context-from-step-function-invocation-7b9d1c3536f7135b.yaml new file mode 100644 index 0000000000000..a869025dcd272 --- /dev/null +++ b/releasenotes/notes/create-trace-context-from-step-function-invocation-7b9d1c3536f7135b.yaml @@ -0,0 +1,9 @@ +--- +enhancements: + - | + While using the AWS Lambda Extension, when a Lambda Function is invoked by + a [properly instrumented][1] Step Function, the Lambda Function will create + its Trace and Parent IDs deterministically based on the Step Function's + execution context. + [1]: https://docs.datadoghq.com/serverless/step_functions/installation/?tab=custom "Install Serverless Monitoring for AWS Step Functions" + diff --git a/releasenotes/notes/dotnet-lib-inject-v3-d5fb50cd7eccb116.yaml b/releasenotes/notes/dotnet-lib-inject-v3-d5fb50cd7eccb116.yaml new file mode 100644 index 0000000000000..947c33e62ab89 --- /dev/null +++ b/releasenotes/notes/dotnet-lib-inject-v3-d5fb50cd7eccb116.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. 
+--- +enhancements: + - | + Updates default .NET library used for auto-instrumentation from v2 to v3 \ No newline at end of file diff --git a/releasenotes/notes/fix-container-check-memleak-d9ff028d5499197c.yaml b/releasenotes/notes/fix-container-check-memleak-d9ff028d5499197c.yaml new file mode 100644 index 0000000000000..1166a23c8be7e --- /dev/null +++ b/releasenotes/notes/fix-container-check-memleak-d9ff028d5499197c.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes memory leak caused by container check. diff --git a/releasenotes/notes/fix-forwarder-health-check-09eeefbe1a4e20d1.yaml b/releasenotes/notes/fix-forwarder-health-check-09eeefbe1a4e20d1.yaml new file mode 100644 index 0000000000000..65bfcbb319166 --- /dev/null +++ b/releasenotes/notes/fix-forwarder-health-check-09eeefbe1a4e20d1.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix the forwarder health check so that it reports unhealthy when the API key is invalid. 
diff --git a/releasenotes/notes/fix-panic-on-readonly-filesystems-1012ba761aa70aaa.yaml b/releasenotes/notes/fix-panic-on-readonly-filesystems-1012ba761aa70aaa.yaml new file mode 100644 index 0000000000000..fda72f5fb9852 --- /dev/null +++ b/releasenotes/notes/fix-panic-on-readonly-filesystems-1012ba761aa70aaa.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes a panic caused by running the Agent on readonly filesystems. The + Agent will now return integration launchers and handle memory gracefully. diff --git a/releasenotes/notes/network-path-latency-fix-575efe1aa26c250b.yaml b/releasenotes/notes/network-path-latency-fix-575efe1aa26c250b.yaml new file mode 100644 index 0000000000000..a710cbb370c15 --- /dev/null +++ b/releasenotes/notes/network-path-latency-fix-575efe1aa26c250b.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes an issue where TCP traceroute latency was not being calculated correctly. 
diff --git a/releasenotes/notes/openssl_3.3.2-f873f60a69cfe59c.yaml b/releasenotes/notes/openssl_3.3.2-f873f60a69cfe59c.yaml new file mode 100644 index 0000000000000..91e622eb1a257 --- /dev/null +++ b/releasenotes/notes/openssl_3.3.2-f873f60a69cfe59c.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +security: + - | + Update OpenSSL to 3.3.2 (on Linux & macOS) in order to mitigate CVE-2024-6119. diff --git a/releasenotes/notes/otlp-env-convention-b5c9da638d.yaml b/releasenotes/notes/otlp-env-convention-b5c9da638d.yaml new file mode 100644 index 0000000000000..be46b8baa5fa6 --- /dev/null +++ b/releasenotes/notes/otlp-env-convention-b5c9da638d.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. 
+--- +enhancements: + - | + The OTLP ingest endpoint now maps the new OTel semantic convention `deployment.environment.name` to `env` diff --git a/releasenotes/notes/podman_file_logs_path_rootless_support-55a1af8f41cf94c4.yaml b/releasenotes/notes/podman_file_logs_path_rootless_support-55a1af8f41cf94c4.yaml new file mode 100644 index 0000000000000..17fab73bf7b3d --- /dev/null +++ b/releasenotes/notes/podman_file_logs_path_rootless_support-55a1af8f41cf94c4.yaml @@ -0,0 +1,4 @@ +--- +enhancements: + - | + Adds support for file log collection from Podman rootless containers when ``logs_config.use_podman_logs`` is set to ``true`` and ``podman_db_path`` is configured. diff --git a/tasks/__init__.py b/tasks/__init__.py index 477dff8c510e9..9b061ac2cdbcd 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -38,6 +38,7 @@ modules, msi, new_e2e_tests, + notes, notify, omnibus, oracle, @@ -176,6 +177,7 @@ ns.add_collection(gitlab_helpers, "gitlab") ns.add_collection(package) ns.add_collection(pipeline) +ns.add_collection(notes) ns.add_collection(notify) ns.add_collection(oracle) ns.add_collection(otel_agent) diff --git a/tasks/agent.py b/tasks/agent.py index 8474176fc244f..0e6ff86f92137 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -454,6 +454,8 @@ def hacky_dev_image_build( ctx, base_image=None, target_image="agent", + process_agent=False, + trace_agent=False, push=False, signed_pull=False, ): @@ -492,6 +494,18 @@ def hacky_dev_image_build( f'perl -0777 -pe \'s|{extracted_python_dir}(/opt/datadog-agent/embedded/lib/python\\d+\\.\\d+/../..)|substr $1."\\0"x length$&,0,length$&|e or die "pattern not found"\' -i dev/lib/libdatadog-agent-three.so' ) + copy_extra_agents = "" + if process_agent: + from tasks.process_agent import build as process_agent_build + + process_agent_build(ctx, bundle=False) + copy_extra_agents += "COPY bin/process-agent/process-agent /opt/datadog-agent/embedded/bin/process-agent\n" + if trace_agent: + from tasks.trace_agent import build as 
trace_agent_build + + trace_agent_build(ctx) + copy_extra_agents += "COPY bin/trace-agent/trace-agent /opt/datadog-agent/embedded/bin/trace-agent\n" + with tempfile.NamedTemporaryFile(mode='w') as dockerfile: dockerfile.write( f'''FROM ubuntu:latest AS src @@ -519,6 +533,13 @@ def hacky_dev_image_build( RUN go install github.com/go-delve/delve/cmd/dlv@latest +FROM {base_image} AS bash_completion + +RUN apt-get update && \ + apt-get install -y gawk + +RUN awk -i inplace '!/^#/ {{uncomment=0}} uncomment {{gsub(/^#/, "")}} /# enable bash completion/ {{uncomment=1}} {{print}}' /etc/bash.bashrc + FROM {base_image} ENV DEBIAN_FRONTEND=noninteractive @@ -529,10 +550,12 @@ def hacky_dev_image_build( ENV DELVE_PAGER=less COPY --from=dlv /go/bin/dlv /usr/local/bin/dlv +COPY --from=bash_completion /etc/bash.bashrc /etc/bash.bashrc COPY --from=src /usr/src/datadog-agent {os.getcwd()} COPY --from=bin /opt/datadog-agent/bin/agent/agent /opt/datadog-agent/bin/agent/agent COPY --from=bin /opt/datadog-agent/embedded/lib/libdatadog-agent-rtloader.so.0.1.0 /opt/datadog-agent/embedded/lib/libdatadog-agent-rtloader.so.0.1.0 COPY --from=bin /opt/datadog-agent/embedded/lib/libdatadog-agent-three.so /opt/datadog-agent/embedded/lib/libdatadog-agent-three.so +{copy_extra_agents} RUN agent completion bash > /usr/share/bash-completion/completions/agent RUN process-agent completion bash > /usr/share/bash-completion/completions/process-agent RUN security-agent completion bash > /usr/share/bash-completion/completions/security-agent diff --git a/tasks/gitlab_helpers.py b/tasks/gitlab_helpers.py index 0e347c61997b1..dc2647c1282f7 100644 --- a/tasks/gitlab_helpers.py +++ b/tasks/gitlab_helpers.py @@ -8,10 +8,14 @@ import os import tempfile +import yaml from invoke import task +from invoke.exceptions import Exit +from tasks.kernel_matrix_testing.ci import get_kmt_dashboard_links from tasks.libs.ciproviders.gitlab_api import ( get_all_gitlab_ci_configurations, + get_full_gitlab_ci_configuration, 
get_gitlab_ci_configuration, get_gitlab_repo, print_gitlab_ci_configuration, @@ -23,6 +27,7 @@ get_test_link_to_job_on_main, ) from tasks.libs.common.color import Color, color_message +from tasks.libs.common.utils import experimental @task @@ -62,7 +67,7 @@ def generate_ci_visibility_links(_ctx, output: str | None): def create_gitlab_annotations_report(ci_job_id: str, ci_job_name: str): - return { + links = { "CI Visibility": [ { "external_link": { @@ -91,6 +96,12 @@ def create_gitlab_annotations_report(ci_job_id: str, ci_job_name: str): ] } + kmt_links = get_kmt_dashboard_links() + if kmt_links: + links["KMT Dashboard"] = kmt_links + + return links + def print_gitlab_object(get_object, ctx, ids, repo='DataDog/datadog-agent', jq: str | None = None, jq_colors=True): """ @@ -136,6 +147,80 @@ def get_job(repo, id): print_gitlab_object(get_job, ctx, ids, repo, jq, jq_colors) +@task +@experimental( + 'This task takes into account only explicit dependencies (job `needs` / `dependencies`), implicit dependencies (stages order) are ignored' +) +def gen_config_subset(ctx, jobs, dry_run=False, force=False): + """ + Will generate a full .gitlab-ci.yml containing only the jobs necessary to run the target jobs `jobs`. + That is, the resulting pipeline will have `jobs` as last jobs to run. + + Warning: This doesn't take implicit dependencies into account (stages order), only explicit dependencies (job `needs` / `dependencies`). + + - dry_run: Print only the new configuration without writing it to the .gitlab-ci.yml file. + - force: Force the update of the .gitlab-ci.yml file even if it has been modified. 
+ + Example: + $ inv gitlab.gen-config-subset tests_deb-arm64-py3 + $ inv gitlab.gen-config-subset tests_rpm-arm64-py3,tests_deb-arm64-py3 --dry-run + """ + + jobs_to_keep = ['cancel-prev-pipelines', 'github_rate_limit_info', 'setup_agent_version'] + attributes_to_keep = 'stages', 'variables', 'default', 'workflow' + + # .gitlab-ci.yml should not be modified + if not force and not dry_run and ctx.run('git status -s .gitlab-ci.yml', hide='stdout').stdout.strip(): + raise Exit(color_message('The .gitlab-ci.yml file should not be modified as it will be overwritten', Color.RED)) + + config = get_full_gitlab_ci_configuration(ctx, '.gitlab-ci.yml') + + jobs = [j for j in jobs.split(',') if j] + jobs_to_keep + required = set() + + def add_dependencies(job): + nonlocal required, config + + if job in required: + return + required.add(job) + + dependencies = [] + if 'needs' in config[job]: + dependencies = config[job]['needs'] + if 'dependencies' in config[job]: + dependencies = config[job]['dependencies'] + + for dep in dependencies: + if isinstance(dep, dict): + dep = dep['job'] + add_dependencies(dep) + + # Make a DFS to find all the jobs that are needed to run the target jobs + for job in jobs: + add_dependencies(job) + + new_config = {job: config[job] for job in required} + + # Remove extends + for job in new_config.values(): + job.pop('extends', None) + + # Keep gitlab config + for attr in attributes_to_keep: + new_config[attr] = config[attr] + + content = yaml.safe_dump(new_config) + + if dry_run: + print(content) + else: + with open('.gitlab-ci.yml', 'w') as f: + f.write(content) + + print(color_message('The .gitlab-ci.yml file has been updated', Color.GREEN)) + + @task def print_job_trace(_, job_id, repo='DataDog/datadog-agent'): """ diff --git a/tasks/go.py b/tasks/go.py index c90f0dcdb174c..8036a313d86e7 100644 --- a/tasks/go.py +++ b/tasks/go.py @@ -450,7 +450,7 @@ def tidy(ctx): @task def check_go_version(ctx): go_version_output = ctx.run('go version') - # 
result is like "go version go1.22.6 linux/amd64" + # result is like "go version go1.22.7 linux/amd64" running_go_version = go_version_output.stdout.split(' ')[2] with open(".go-version") as f: diff --git a/tasks/gotest.py b/tasks/gotest.py index ede25f85d4f05..54015e3f7c35b 100644 --- a/tasks/gotest.py +++ b/tasks/gotest.py @@ -34,7 +34,7 @@ from tasks.libs.common.junit_upload_core import enrich_junitxml, produce_junit_tar from tasks.libs.common.utils import clean_nested_paths, get_build_flags, gitlab_section from tasks.libs.releasing.json import _get_release_json_value -from tasks.modules import DEFAULT_MODULES, GoModule +from tasks.modules import DEFAULT_MODULES, GoModule, get_module_by_path from tasks.test_core import ModuleTestResult, process_input_args, process_module_results, test_core from tasks.testwasher import TestWasher from tasks.trace_agent import integration_tests as trace_integration_tests @@ -454,20 +454,14 @@ def get_modified_packages(ctx, build_tags=None, lint=False) -> list[GoModule]: go_mod_modified_modules = set() for modified_file in modified_go_files: - match_precision = 0 - best_module_path = None - - # Since several modules can match the path we take only the most precise one - for module_path in DEFAULT_MODULES: - if module_path in modified_file and len(module_path) > match_precision: - match_precision = len(module_path) - best_module_path = module_path + best_module_path = Path(get_go_module(modified_file)) # Check if the package is in the target list of the module we want to test targeted = False assert best_module_path, f"No module found for {modified_file}" - targets = DEFAULT_MODULES[best_module_path].lint_targets if lint else DEFAULT_MODULES[best_module_path].targets + module = get_module_by_path(best_module_path) + targets = module.lint_targets if lint else module.targets for target in targets: if os.path.normpath(os.path.join(best_module_path, target)) in modified_file: @@ -482,7 +476,7 @@ def get_modified_packages(ctx, 
build_tags=None, lint=False) -> list[GoModule]: # If we modify the go.mod or go.sum we run the tests for the whole module if modified_file.endswith(".mod") or modified_file.endswith(".sum"): - modules_to_test[best_module_path] = DEFAULT_MODULES[best_module_path] + modules_to_test[best_module_path] = get_module_by_path(best_module_path) go_mod_modified_modules.add(best_module_path) continue @@ -748,7 +742,7 @@ def format_packages(ctx: Context, impacted_packages: set[str], build_tags: list[ modules_to_test = {} for package in packages: - module_path = get_go_module(package).replace("./", "") + module_path = get_go_module(package) # Check if the module is in the target list of the modules we want to test if module_path not in DEFAULT_MODULES or not DEFAULT_MODULES[module_path].condition(): @@ -821,7 +815,7 @@ def get_go_module(path): while path != '/': go_mod_path = os.path.join(path, 'go.mod') if os.path.isfile(go_mod_path): - return path + return os.path.relpath(path) path = os.path.dirname(path) raise Exception(f"No go.mod file found for package at {path}") diff --git a/tasks/install_tasks.py b/tasks/install_tasks.py index 17371da7ab3fe..880fda4146e52 100644 --- a/tasks/install_tasks.py +++ b/tasks/install_tasks.py @@ -8,6 +8,7 @@ from invoke import Context, Exit, task from tasks.libs.ciproviders.github_api import GithubAPI +from tasks.libs.common.color import Color, color_message from tasks.libs.common.go import download_go_dependencies from tasks.libs.common.retry import run_command_with_retry from tasks.libs.common.utils import bin_name, environ, gitlab_section @@ -41,6 +42,7 @@ @task def download_tools(ctx): """Download all Go tools for testing.""" + print(color_message("This command is deprecated, please use `install-tools` instead", Color.ORANGE)) with environ({'GO111MODULE': 'on'}): download_go_dependencies(ctx, paths=list(TOOLS.keys())) diff --git a/tasks/installer.py b/tasks/installer.py index db665f97546ea..786cb0861eac7 100644 --- a/tasks/installer.py 
+++ b/tasks/installer.py @@ -3,11 +3,14 @@ """ import os +import shutil from invoke import task +from invoke.exceptions import Exit from tasks.build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags from tasks.libs.common.utils import REPO_PATH, bin_name, get_build_flags +from tasks.libs.releasing.version import get_version BIN_PATH = os.path.join(".", "bin", "installer") MAJOR_VERSION = '7' @@ -23,7 +26,8 @@ def build( build_include=None, build_exclude=None, go_mod="mod", - no_strip_binary=True, + no_strip_binary=False, + no_cgo=False, ): """ Build the updater. @@ -49,7 +53,53 @@ def build( build_type = "-a" if rebuild else "" go_build_tags = " ".join(build_tags) updater_bin = os.path.join(BIN_PATH, bin_name("installer")) + + if no_cgo: + env["CGO_ENABLED"] = "0" + cmd = f"go build -mod={go_mod} {race_opt} {build_type} -tags \"{go_build_tags}\" " cmd += f"-o {updater_bin} -gcflags=\"{gcflags}\" -ldflags=\"{ldflags} {strip_flags}\" {REPO_PATH}/cmd/installer" ctx.run(cmd, env=env) + + +@task +def push_artifact( + ctx, + artifact, + registry, + version="", + tag="latest", + arch="amd64", +): + ''' + Pushes an OCI artifact to a registry. 
+ example: + inv -e installer.push-artifact --artifact "datadog-installer" --registry "docker.io/myregistry" --tag "latest" + ''' + if version == "": + version = get_version(ctx, include_git=True, url_safe=True, major_version='7', include_pipeline_id=True) + + # structural pattern matching is only available in Python 3.10+, which currently fails the `vulture` check + if artifact == 'datadog-agent': + image_name = 'agent-package' + elif artifact == 'datadog-installer': + image_name = 'installer-package' + else: + print("Unexpected artifact") + raise Exit(code=1) + + if os.name == 'nt': + target_os = 'windows' + else: + print('Unexpected os') + raise Exit(code=1) + + datadog_package = shutil.which('datadog-package') + if datadog_package is None: + print('datadog-package could not be found in path') + raise Exit(code=1) + + ctx.run( + f'{datadog_package} push {registry}/{image_name}:{tag} omnibus/pkg/{artifact}-{version}-1-{target_os}-{arch}.oci.tar' + ) diff --git a/tasks/kernel_matrix_testing/README.md b/tasks/kernel_matrix_testing/README.md index 5916fc9fe3516..dcc971585d015 100644 --- a/tasks/kernel_matrix_testing/README.md +++ b/tasks/kernel_matrix_testing/README.md @@ -414,3 +414,36 @@ This will show several tables, skipping the cases where all jobs/tests passed to - For each component (security-agent or system-probe) and vmset (e.g., in system-probe we have `only_tracersuite` and `no_tracersuite` test sets) it will show the jobs that failed and why (e.g., if the job failed due to an infra or a test failure). - Again, for each component and vmset, it will show which tests failed in a table showing in which distros/archs they failed (tests and distros that did not have any failures will not be shown). - For each job that failed due to infra reasons, it will show a summary with quick detection of possible boot causes (e.g., it will show if the VM did not reach the login prompt, or if it didn't get an IP address, etc). 
+ +## Alien VMs +The KMT tasks provided here allow developers to run the system-probe build process, and test setup exactly as in the CI. As such it can be useful to use these tasks to package system-probe and target VMs outside the purview of KMT. For this we can provide a profile representing these "alien" vms, and the invoke tasks will +correctly package system-probe and share with the provided VMs as if they were launch by KMT. This can be useful when a developer wants to use these tasks with local VMs launch with VMware, parallels, etc, or remote VMs launch in ec2 or gcp. + +The format of the profile is a json list of objects representing a vm. For each VM the following information is required: +- ssh_key_path +- IP +- architecture +- name +- ssh_user + +An example of an alien VMs profile: +```json +[ + { + "ssh_key_path": "/home/user/.ssh/some-key.id_rsa", + "ip": "xxx.yyy.aaa.bbb", + "arch": "x86", + "name": "ubuntu-gcp", + "ssh_user": "ubuntu" + } +] +``` + +To target these alien profiles use the `--alien-vms` flag to provide the path to this profile file. 
+``` +inv -e kmt.build --alien-vms=/tmp/alien.profile +``` + +``` +inv -e kmt.test --packages=./pkg/ebpf --run=TestLockRanges/Hashmap --alien-vms=./alien.profile +``` diff --git a/tasks/kernel_matrix_testing/ci.py b/tasks/kernel_matrix_testing/ci.py index a3cb7c3fc9b39..f6e83fcb6e571 100644 --- a/tasks/kernel_matrix_testing/ci.py +++ b/tasks/kernel_matrix_testing/ci.py @@ -1,10 +1,12 @@ from __future__ import annotations +import datetime import io import json import os import re import tarfile +import urllib.parse import xml.etree.ElementTree as ET from typing import TYPE_CHECKING, overload @@ -250,3 +252,47 @@ def get_all_jobs_for_pipeline(pipeline_id: int | str) -> tuple[list[KMTSetupEnvJ break return setup_jobs, test_jobs + + +def get_kmt_dashboard_links() -> None | list: + stage = os.environ.get("CI_JOB_STAGE") + pipeline = os.environ.get("CI_PIPELINE_ID") + branch = os.environ.get("CI_COMMIT_REF_NAME") + pipeline_start = os.environ.get("CI_PIPELINE_CREATED_AT") + + # Check we're running in Gitlab CI + if pipeline_start is None or branch is None or pipeline is None or stage is None: + return None + + # Check this is a KMT job + if "kernel_matrix_testing" not in stage: + return None + + try: + pipeline_start_date = datetime.datetime.fromisoformat(pipeline_start) + except Exception: + print(f"Error: Could not parse pipeline start date {pipeline_start}") + return None + + dashboard_end = pipeline_start_date + datetime.timedelta(hours=4) + + query_args = { + "fromUser": "false", + "refresh_mode": "paused", + "tpl_var_ci.pipeline.id[0]": pipeline, + "tpl_var_git-branch[0]": branch, + "from_ts": int(pipeline_start_date.timestamp()) * 1000, + "to_ts": int(dashboard_end.timestamp()) * 1000, + "live": "false", + } + + url = f"https://app.datadoghq.com/dashboard/zs9-uia-gsg?{urllib.parse.urlencode(query_args)}" + + return [ + { + "external_link": { + "label": "KMT: Pipeline dashboard", + "url": url, + } + } + ] diff --git a/tasks/kernel_matrix_testing/compiler.py 
b/tasks/kernel_matrix_testing/compiler.py index ff62f77ba1581..cc093a0cdc795 100644 --- a/tasks/kernel_matrix_testing/compiler.py +++ b/tasks/kernel_matrix_testing/compiler.py @@ -118,7 +118,7 @@ def exec(self, cmd: str, user="compiler", verbose=True, run_dir: PathOrStr | Non self.ensure_running() # Set FORCE_COLOR=1 so that termcolor works in the container - self.ctx.run( + return self.ctx.run( f"docker exec -u {user} -i -e FORCE_COLOR=1 {self.name} bash -c \"{cmd}\"", hide=(not verbose), warn=allow_fail, @@ -221,7 +221,7 @@ def prepare_for_cross_compile(self): # Extract into a .tar file and then use tar to extract the contents to avoid issues # with dpkg-deb not respecting symlinks. self.exec(f"dpkg-deb --fsys-tarfile {header_package_path} > {header_package_path}.tar", user="root") - self.exec(f"tar -h -xvf {header_package_path}.tar -C /", user="root") + self.exec(f"tar -h -xf {header_package_path}.tar -C /", user="root") # Install the corresponding arch compilers self.exec(f"apt update && apt install -y gcc-{target.gcc_arch.replace('_', '-')}-linux-gnu", user="root") diff --git a/tasks/kernel_matrix_testing/infra.py b/tasks/kernel_matrix_testing/infra.py index 3bb2c948c06a5..7a14f14211fe0 100644 --- a/tasks/kernel_matrix_testing/infra.py +++ b/tasks/kernel_matrix_testing/infra.py @@ -4,7 +4,7 @@ import json import os from pathlib import Path -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, TypedDict from invoke.context import Context @@ -100,7 +100,9 @@ def __init__( tag: str, vmset_tags: list[str], ssh_key_path: str | None, + arch: KMTArchNameOrLocal | None, instance: HostInstance, + user: str = "root", ): self.ip = ip self.name = domain_id @@ -108,6 +110,8 @@ def __init__( self.vmset_tags = vmset_tags self.ssh_key = ssh_key_path self.instance = instance + self.arch = arch + self.user = user def run_cmd(self, ctx: Context, cmd: str, allow_fail=False, verbose=False, timeout_sec=None): if timeout_sec is not None: @@ -115,15 +119,19 @@ def 
run_cmd(self, ctx: Context, cmd: str, allow_fail=False, verbose=False, timeo else: extra_opts = SSH_MULTIPLEX_OPTIONS - run = f"ssh {ssh_options_command(extra_opts)} -o IdentitiesOnly=yes -i {self.ssh_key} root@{self.ip} {{proxy_cmd}} '{cmd}'" + cmd = f"sudo bash -c \"{cmd}\"" if self.user != "root" else cmd + run = f"ssh {ssh_options_command(extra_opts)} -o IdentitiesOnly=yes -i {self.ssh_key} {self.user}@{self.ip} {{proxy_cmd}} '{cmd}'" return self.instance.runner.run_cmd(ctx, self.instance, run, allow_fail, verbose) - def _get_rsync_base(self, exclude: PathOrStr | None) -> str: + def _get_rsync_base(self, exclude: PathOrStr | None, verbose=False) -> str: exclude_arg = "" if exclude is not None: exclude_arg = f"--exclude '{exclude}'" - return f"rsync -e \"ssh {ssh_options_command({'IdentitiesOnly': 'yes'} | SSH_MULTIPLEX_OPTIONS)} {{proxy_cmd}} -i {self.ssh_key}\" -p -rt --exclude='.git*' {exclude_arg} --filter=':- .gitignore'" + verbose_arg = "-vP" if verbose else "" + sudo = "--rsync-path=\"sudo rsync\"" if self.user != "root" else "" + + return f"rsync {sudo} {verbose_arg} -e \"ssh {ssh_options_command({'IdentitiesOnly': 'yes'} | SSH_MULTIPLEX_OPTIONS)} {{proxy_cmd}} -i {self.ssh_key}\" -p -rt --exclude='.git*' {exclude_arg} --filter=':- .gitignore'" def copy( self, @@ -136,10 +144,14 @@ def copy( # Always ensure that the parent directory exists, rsync creates the rest self.run_cmd(ctx, f"mkdir -p {os.path.dirname(target)}", verbose=verbose) - run = self._get_rsync_base(exclude) + f" {source} root@{self.ip}:{target}" + info(f"[+] Copying (HOST: {source}) => (VM: {target})...") + + run = ( + self._get_rsync_base(exclude, verbose=ctx.config.run["echo"]) + f" {source} {self.user}@{self.ip}:{target}" + ) res = self.instance.runner.run_cmd(ctx, self.instance, run, False, verbose) if res: - info(f"[+] (HOST: {source}) => (VM: {target})") + info(f"[+] Copied (HOST: {source}) => (VM: {target})") return res @@ -151,7 +163,9 @@ def download( exclude: PathOrStr | None = 
None, verbose: bool = False, ): - run = self._get_rsync_base(exclude) + f" root@{self.ip}:{source} {target}" + run = ( + self._get_rsync_base(exclude, verbose=ctx.config.run["echo"]) + f" {self.user}@{self.ip}:{source} {target}" + ) res = self.instance.runner.run_cmd(ctx, self.instance, run, False, verbose) if res: info(f"[+] (VM: {source}) => (HOST: {target})") @@ -200,7 +214,7 @@ def build_infrastructure(stack: str, ssh_key_obj: SSHKey | None = None): # location in the local machine. instance.add_microvm( LibvirtDomain( - vm["ip"], vm["id"], vm["tag"], vm["vmset-tags"], os.fspath(get_kmt_os().ddvm_rsa), instance + vm["ip"], vm["id"], vm["tag"], vm["vmset-tags"], os.fspath(get_kmt_os().ddvm_rsa), arch, instance ) ) @@ -209,6 +223,43 @@ def build_infrastructure(stack: str, ssh_key_obj: SSHKey | None = None): return infra +class AlienVMInfo(TypedDict): + ip: str + ssh_key_path: str + name: str + arch: str + + +AlienInfrastructure = list[AlienVMInfo] + + +def build_alien_infrastructure(alien_vms: Path) -> dict[KMTArchNameOrLocal, HostInstance]: + with open(alien_vms) as f: + profile: AlienInfrastructure = json.load(f) + + # lets pretend all VMs are present locally even if they are not, because we just + # want to bypass the ssh proxying stuff when running commands and copying things + instance = HostInstance("local", "local", None) + for vm in profile: + ssh_user = "root" + if "ssh_user" in vm: + ssh_user = vm["ssh_user"] + instance.add_microvm( + LibvirtDomain( + vm["ip"], + "", + "", + [], + vm["ssh_key_path"], + vm["arch"], + instance, + ssh_user, + ) + ) + + return {"local": instance} + + def get_ssh_key_name(pubkey: Path) -> str | None: parts = pubkey.read_text().split() if len(parts) != 3: diff --git a/tasks/kernel_matrix_testing/vmconfig.py b/tasks/kernel_matrix_testing/vmconfig.py index ff4c4ead46b2a..9f24f44dc2d32 100644 --- a/tasks/kernel_matrix_testing/vmconfig.py +++ b/tasks/kernel_matrix_testing/vmconfig.py @@ -658,7 +658,7 @@ def gen_config_for_stack( 
## get all possible (recipe, version, arch) combinations we can support. vmconfig_file = f"{get_kmt_os().stacks_dir}/{stack}/{VMCONFIG}" - if os.path.exists(vmconfig_file): + if os.path.exists(vmconfig_file) and not new: raise Exit( "Editing configuration is current not supported. Destroy the stack first to change the configuration." ) diff --git a/tasks/kmt.py b/tasks/kmt.py index 486ce76fd1976..685e41e0370a6 100644 --- a/tasks/kmt.py +++ b/tasks/kmt.py @@ -30,6 +30,7 @@ SSH_OPTIONS, HostInstance, LibvirtDomain, + build_alien_infrastructure, build_infrastructure, ensure_key_in_ec2, get_ssh_agent_key_names, @@ -213,6 +214,7 @@ def gen_config_from_ci_pipeline( failed_packages: set[str] = set() failed_tests: set[str] = set() + successful_tests: set[str] = set() for test_job in test_jobs: if test_job.status == "failed" and job.component == vmconfig_template: vm_arch = test_job.arch @@ -222,14 +224,49 @@ def gen_config_from_ci_pipeline( results = test_job.get_test_results() for test, result in results.items(): if result is False: - package, test = test.split(":") + package, test = test.split(":", maxsplit=1) failed_tests.add(test) failed_packages.add(package) + elif result is True: # It can also be None if the test was skipped + successful_tests.add(test) vm_name = f"{vm_arch}-{test_job.distro}-distro" info(f"[+] Adding {vm_name} from failed job {test_job.name}") vms.add(vm_name) + # Simplify the failed tests so that we show only the parent tests with all failures below + # and not all child tests that failed + # Not at all the most efficient way to do this, but it works for the amount of data we have + # and is simple enough + successful_tests = successful_tests.difference(failed_tests) + coalesced_failed_tests: set[str] = set() + non_coalesced_failed_tests: set[str] = set() + for test in sorted(failed_tests): # Sort to have parent tests first + is_included = False + + # Check if this test is already included in some parent test + for already_coalesced in 
coalesced_failed_tests: + if test.startswith(already_coalesced): + is_included = True + break + else: + # If not, check if there is a subtest that succeeded. If there is not, + # we assume all children tests of this one failed and we can coalesce them + # into a single one + for succesful_test in successful_tests: + if succesful_test.startswith(test): + # There was a subtest of this one that succeeded, we cannot coalesce + # Add it to the non-coalesced list so that it's not checked as a parent + # and its children will be checked again + non_coalesced_failed_tests.add(test) + is_included = True + break + + if not is_included: + coalesced_failed_tests.add(test) + + failed_tests = non_coalesced_failed_tests | {f"{t}/.*" for t in coalesced_failed_tests} + if len(vms) == 0: raise Exit(f"No failed jobs found in pipeline {pipeline}") @@ -240,7 +277,7 @@ def gen_config_from_ci_pipeline( ctx, stack, ",".join(vms), "", init_stack, vcpu, memory, new, ci, arch, output_file, vmconfig_template, yes=yes ) info("[+] You can run the following command to execute only packages with failed tests") - print(f"inv kmt.test --packages=\"{' '.join(failed_packages)}\" --run='^{'|'.join(failed_tests)}$'") + print(f"inv kmt.test --packages=\"{','.join(failed_packages)}\" --run='^{'|'.join(failed_tests)}$'") @task @@ -374,11 +411,8 @@ def config_ssh_key(ctx: Context): ssh_keys = [] for f in ssh_key_files: - key_comment = get_ssh_key_name(f.with_suffix(".pub")) - if key_comment is None: - warn(f"[x] {f} does not have a valid key name, cannot be used") - else: - ssh_keys.append({'path': os.fspath(f), 'name': key_comment, 'aws_key_name': ''}) + key_name = get_ssh_key_name(f.with_suffix(".pub")) or f.name + ssh_keys.append({'path': os.fspath(f), 'name': key_name, 'aws_key_name': ''}) keys_str = "\n".join([f" - [{i + 1}] {key['name']} (path: {key['path']})" for i, key in enumerate(ssh_keys)]) result = ask(f"[?] 
Found these valid key files:\n{keys_str}\nChoose one of these files (1-{len(ssh_keys)}): ") @@ -389,6 +423,10 @@ def config_ssh_key(ctx: Context): except IndexError as e: # out of range raise Exit(f"Invalid choice {result}, must be a number between 1 and {len(ssh_keys)} (inclusive)") from e + info("[+] KMT needs this SSH key to be loaded in AWS so that it can be used to access the instances") + info( + "[+] If you haven't loaded it yet, go to https://dtdg.co/aws-sso-prod -> DataDog Sandbox -> EC2 -> Network & Security -> Key Pairs" + ) aws_key_name = ask( f"Enter the key name configured in AWS for this key (leave blank to set the same as the local key name '{ssh_key['name']}'): " ) @@ -458,7 +496,7 @@ def filter_target_domains(vms: str, infra: dict[KMTArchNameOrLocal, HostInstance def get_archs_in_domains(domains: Iterable[LibvirtDomain]) -> set[Arch]: archs: set[Arch] = set() for d in domains: - archs.add(Arch.from_str(d.instance.arch)) + archs.add(Arch.from_str(d.arch)) return archs @@ -530,6 +568,19 @@ def ninja_build_dependencies(ctx: Context, nw: NinjaWriter, kmt_paths: KMTPaths, inputs=[os.path.abspath(f)], ) + vm_metrics_files = glob("test/new-e2e/system-probe/vm-metrics/*.go") + nw.build( + rule="gobin", + pool="gobuild", + outputs=[os.path.join(kmt_paths.dependencies, "vm-metrics")], + implicit=vm_metrics_files, + variables={ + "go": go_path, + "chdir": "cd test/new-e2e/system-probe/vm-metrics", + "env": env_str, + }, + ) + test_json_files = glob("test/new-e2e/system-probe/test-json-review/*.go") nw.build( rule="gobin", @@ -607,10 +658,8 @@ def filter(x: Path): @task def kmt_secagent_prepare( ctx: Context, - vms: str | None = None, stack: str | None = None, arch: Arch | str = "local", - ssh_key: str | None = None, packages: str | None = None, verbose: bool = True, ci: bool = True, @@ -660,6 +709,7 @@ def prepare( ctx: Context, component: Component, vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, arch: str | Arch = "local", 
ssh_key: str | None = None, @@ -668,20 +718,39 @@ def prepare( ci=False, compile_only=False, ): - if not ci: - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" - else: - stack = "ci" - arch_obj = Arch.from_str(arch) if arch_obj.kmt_arch not in KMT_SUPPORTED_ARCHS: raise Exit( f"Architecture {arch} (inferred {arch_obj}) is not supported. Supported architectures are amd64 and arm64" ) + if ci: + domains = None + stack = "ci" + return _prepare(ctx, stack, component, arch_obj, packages, verbose, ci, compile_only) + + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) + domains = get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) + assert len(domains) > 0, err_msg + + _prepare(ctx, stack, component, arch, packages, verbose, ci, compile_only, domains=domains) + + +def _prepare( + ctx: Context, + stack: str, + component: Component, + arch_obj: Arch, + packages=None, + verbose=True, + ci=False, + compile_only=False, + domains: list[LibvirtDomain] | None = None, +): if not ci: cc = get_compiler(ctx) @@ -697,7 +766,7 @@ def prepare( info(f"[+] Compiling artifacts for {arch_obj}, component = {component}") if component == "security-agent": if ci: - kmt_secagent_prepare(ctx, vms, stack, arch_obj, ssh_key, packages, verbose, ci) + kmt_secagent_prepare(ctx, stack, arch_obj, packages, verbose, ci) else: cc.exec( f"git config --global --add safe.directory {CONTAINER_AGENT_PATH} && inv {inv_echo} kmt.kmt-secagent-prepare --stack={stack} {pkgs} --arch={arch_obj.name}", @@ -752,14 +821,7 @@ def prepare( if ci or compile_only: return - if vms is None or vms == "": - raise Exit("No vms specified to sync with") - - ssh_key_obj = 
try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra, arch_obj) - - info(f"[+] Preparing VMs {vms} in stack {stack} for {arch}") + info(f"[+] Preparing VMs in stack {stack} for {arch_obj}") target_instances: list[HostInstance] = [] for d in domains: @@ -811,12 +873,12 @@ def build_run_config(run: str | None, packages: list[str]): return c -def build_target_packages(filter_packages): - all_packages = go_package_dirs(TEST_PACKAGES_LIST, [NPM_TAG, BPF_TAG]) - if filter_packages == []: +def build_target_packages(filter_packages: list[str], build_tags: list[str]): + all_packages = go_package_dirs(TEST_PACKAGES_LIST, build_tags) + if not filter_packages: return all_packages - filter_packages = [os.path.relpath(p) for p in go_package_dirs(filter_packages, [NPM_TAG, BPF_TAG])] + filter_packages = [os.path.relpath(p) for p in go_package_dirs(filter_packages, build_tags)] return [pkg for pkg in all_packages if os.path.relpath(pkg) in filter_packages] @@ -826,9 +888,8 @@ def build_object_files(ctx, fp, arch: Arch): ctx.run(f"ninja -d explain -f {fp}") -def compute_package_dependencies(ctx: Context, packages: list[str]) -> dict[str, set[str]]: +def compute_package_dependencies(ctx: Context, packages: list[str], build_tags: list[str]) -> dict[str, set[str]]: dd_pkg_name = "github.com/DataDog/datadog-agent/" - build_tags = get_sysprobe_buildtags(False, False) pkg_deps: dict[str, set[str]] = defaultdict(set) packages_list = " ".join(packages) @@ -862,7 +923,6 @@ def kmt_sysprobe_prepare( ctx: Context, arch: str | Arch, stack: str | None = None, - kernel_release: str | None = None, packages=None, extra_arguments: str | None = None, ci: bool = False, @@ -895,8 +955,9 @@ def kmt_sysprobe_prepare( build_object_files(ctx, f"{kmt_paths.arch_dir}/kmt-object-files.ninja", arch) info("[+] Computing Go dependencies for test packages...") - target_packages = build_target_packages(filter_pkgs) - pkg_deps = 
compute_package_dependencies(ctx, target_packages) + build_tags = get_sysprobe_buildtags(False, False) + target_packages = build_target_packages(filter_pkgs, build_tags) + pkg_deps = compute_package_dependencies(ctx, target_packages, build_tags) info("[+] Generating build instructions..") with open(nf_path, 'w') as ninja_file: @@ -915,6 +976,7 @@ def kmt_sysprobe_prepare( ninja_build_dependencies(ctx, nw, kmt_paths, go_path, arch) ninja_copy_ebpf_files(nw, "system-probe", kmt_paths, arch) + build_tags = get_sysprobe_buildtags(False, False) for pkg in target_packages: pkg_name = os.path.relpath(pkg, os.getcwd()) target_path = os.path.join(kmt_paths.sysprobe_tests, pkg_name) @@ -922,7 +984,7 @@ def kmt_sysprobe_prepare( variables = { "env": env_str, "go": go_path, - "build_tags": get_sysprobe_buildtags(False, False), + "build_tags": build_tags, } timeout = get_test_timeout(os.path.relpath(pkg, os.getcwd())) if timeout: @@ -954,9 +1016,9 @@ def kmt_sysprobe_prepare( rule="copyfiles", ) - # handle testutils and testdata seperately since they are + # handle testutils and testdata separately since they are # shared across packages - target_pkgs = build_target_packages([]) + target_pkgs = build_target_packages([], build_tags) for pkg in target_pkgs: target_path = os.path.join(kmt_paths.sysprobe_tests, os.path.relpath(pkg, os.getcwd())) @@ -1035,6 +1097,33 @@ def images_matching_ci(_: Context, domains: list[LibvirtDomain]): return len(not_matches) == 0 +def get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) -> list[LibvirtDomain]: + def _get_infrastructure(ctx, stack, ssh_key, vms, alien_vms): + if alien_vms: + alien_vms_path = Path(alien_vms) + if not alien_vms_path.exists(): + raise Exit(f"No alien VMs profile found @ {alien_vms_path}") + return build_alien_infrastructure(alien_vms_path) + + ssh_key_obj = try_get_ssh_key(ctx, ssh_key) + return build_infrastructure(stack, ssh_key_obj) + + if vms is None and alien_vms is None: + vms = 
",".join(stacks.get_all_vms_in_stack(stack)) + info(f"[+] running tests on all vms in stack {stack}: vms={vms}") + + infra = _get_infrastructure(ctx, stack, ssh_key, vms, alien_vms) + if alien_vms is not None: + return infra["local"].microvms + + domains = filter_target_domains(vms, infra, arch_obj) + if not images_matching_ci(ctx, domains): + if ask("Some VMs do not match version in CI. Continue anyway [y/N]") != "y": + raise Exit("[-] Aborting due to version mismatch") + + return domains + + @task( help={ "vms": "Comma seperated list of vms to target when running tests. If None, run against all vms", @@ -1055,6 +1144,7 @@ def test( ctx: Context, component: str = "system-probe", vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, packages=None, run: str | None = None, @@ -1067,39 +1157,30 @@ def test( test_extra_arguments=None, test_extra_env=None, ): - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" - - if vms is None: - vms = ",".join(stacks.get_all_vms_in_stack(stack)) - info(f"[+] Running tests on all VMs in stack {stack}: vms={vms}") - - ssh_key_obj = try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra) + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) + domains = get_target_domains(ctx, stack, ssh_key, None, vms, alien_vms) used_archs = get_archs_in_domains(domains) - if not images_matching_ci(ctx, domains): - if ask("Some VMs do not match version in CI. Continue anyway [y/N]") != "y": - return + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" - assert len(domains) > 0, f"no vms found from list {vms}. 
Run `inv -e kmt.status` to see all VMs in current stack" + assert len(domains) > 0, err_msg info("[+] Detected architectures in target VMs: " + ", ".join(map(str, used_archs))) if not quick: for arch in used_archs: info(f"[+] Preparing {component} for {arch}") - prepare(ctx, component, stack=stack, vms=vms, packages=packages, ssh_key=ssh_key, arch=arch) + _prepare(ctx, stack, component, arch, packages=packages, verbose=verbose, domains=domains) if run is not None and packages is None: raise Exit("Package must be provided when specifying test") pkgs = [] if packages is not None: - pkgs = [os.path.relpath(p) for p in go_package_dirs(packages.split(","), [NPM_TAG, BPF_TAG])] + pkgs = [os.path.relpath(os.path.realpath(p)) for p in go_package_dirs(packages.split(","), [NPM_TAG, BPF_TAG])] if run is not None and len(pkgs) > 1: raise Exit("Only a single package can be specified when running specific tests") @@ -1162,6 +1243,22 @@ def build_layout(ctx, domains, layout: str, verbose: bool): d.run_cmd(ctx, cmd, verbose) +def get_kmt_or_alien_stack(ctx, stack, vms, alien_vms): + assert not (vms is not None and alien_vms is not None), "target VMs can be either KMT VMs or alien VMs, not both" + + if alien_vms is not None and vms is None: + stack = check_and_get_stack("alien-stack") + if not stacks.stack_exists(stack): + stacks.create_stack(ctx, stack) + return stack + + stack = check_and_get_stack(stack) + assert stacks.stack_exists( + stack + ), f"Stack {stack} does not exist. 
Please create with 'inv kmt.create-stack --stack='" + return stack + + @task( help={ "vms": "Comma seperated list of vms to target when running tests", @@ -1176,6 +1273,7 @@ def build_layout(ctx, domains, layout: str, verbose: bool): def build( ctx: Context, vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, ssh_key: str | None = None, verbose=True, @@ -1185,10 +1283,7 @@ def build( compile_only=False, override_agent=False, ): - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) if arch is None: arch = "local" @@ -1212,26 +1307,30 @@ def build( if compile_only: return - if vms is None: - vms = ",".join(stacks.get_all_vms_in_stack(stack)) - assert os.path.exists(layout), f"File {layout} does not exist" - ssh_key_obj = try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra, arch_obj) + domains = get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" - if not images_matching_ci(ctx, domains): - if ask("Some VMs do not match version in CI. Continue anyway [y/N]") != "y": - return + assert len(domains) > 0, err_msg - assert len(domains) > 0, f"no vms found from list {vms}. 
Run `inv -e kmt.status` to see all VMs in current stack" + llc_path = paths.tools / "llc-bpf" + clang_path = paths.tools / "clang-bpf" + setup_runtime_clang(ctx, arch_obj, paths.tools) build_layout(ctx, domains, layout, verbose) for d in domains: + # Copy embedded tools, make them + embedded_remote_path = Path("/opt/datadog-agent/embedded/bin") + d.copy(ctx, llc_path, embedded_remote_path / llc_path.name, verbose=verbose) + d.copy(ctx, clang_path, embedded_remote_path / clang_path.name, verbose=verbose) + if override_agent: d.run_cmd(ctx, f"[ -f /opt/datadog-agent/embedded/bin/{component} ]", verbose=False) - d.copy(ctx, f"./bin/{component}/{component}", "/opt/datadog-agent/embedded/bin/{component}") + d.copy(ctx, f"./bin/{component}/{component}", f"/opt/datadog-agent/embedded/bin/{component}") else: d.copy(ctx, f"./bin/{component}", "/root/") @@ -1285,7 +1384,7 @@ def ssh_config( # Ensure correct permissions of the ddvm_rsa file if we're using # it to connect to VMs. This attribute change doesn't seem to be tracked # in git correctly - ctx.run(f"chmod 600 {ddvm_rsa}") + ctx.run(f"chmod 600 {ddvm_rsa}", echo=False) for stack_dir in stacks_dir.iterdir(): if not stack_dir.is_dir(): @@ -1848,6 +1947,7 @@ def show_last_test_results(ctx: Context, stack: str | None = None): vm_list: list[str] = [] total_by_vm: dict[str, tuple[int, int, int, int]] = defaultdict(lambda: (0, 0, 0, 0)) sum_failures = 0 + sum_tests = 0 for vm_folder in paths.test_results.iterdir(): if not vm_folder.is_dir(): @@ -1883,6 +1983,7 @@ def show_last_test_results(ctx: Context, stack: str | None = None): for testresults in tests.values(): if len(testresults) == 1: result = next(iter(testresults)) + sum_tests += 1 if result == "failed": failures += 1 sum_failures += 1 @@ -1913,8 +2014,16 @@ def _color_result(result: tuple[int, int, int, int]) -> str: table.append(["Total"] + [_color_result(total_by_vm[vm]) for vm in vm_list]) - print(tabulate(table, headers=["Package"] + vm_list)) - print("\nLegend: 
Successes/Successes on retry/Failures/Skipped") + print(tabulate(table, headers=["Package"] + vm_list) + "\n") + + if sum_tests == 0: + warn("WARN: No test runs") + elif sum_failures > 0: + error("ERROR: Found failed tests") + else: + info("SUCCESS: All tests passed") + + print("Legend: Successes/Successes on retry/Failures/Skipped") if sum_failures: sys.exit(1) @@ -2059,6 +2168,7 @@ def install_ddagent( ctx: Context, api_key: str, vms: str | None = None, + alien_vms: str | None = None, stack: str | None = None, ssh_key: str | None = None, verbose=True, @@ -2067,24 +2177,20 @@ def install_ddagent( datadog_yaml: str | None = None, layout: str | None = None, ): - stack = check_and_get_stack(stack) - assert stacks.stack_exists( - stack - ), f"Stack {stack} does not exist. Please create with 'inv kmt.create-stack --stack='" + stack = get_kmt_or_alien_stack(ctx, stack, vms, alien_vms) if arch is None: arch = "local" arch_obj = Arch.from_str(arch) - if vms is None: - vms = ",".join(stacks.get_all_vms_in_stack(stack)) - - ssh_key_obj = try_get_ssh_key(ctx, ssh_key) - infra = build_infrastructure(stack, ssh_key_obj) - domains = filter_target_domains(vms, infra, arch_obj) + domains = get_target_domains(ctx, stack, ssh_key, arch_obj, vms, alien_vms) + if alien_vms is not None: + err_msg = f"no alient VMs discovered from provided profile {alien_vms}." + else: + err_msg = f"no vms found from list {vms}. Run `inv -e kmt.status` to see all VMs in current stack" - assert len(domains) > 0, f"no vms found from list {vms}. 
Run `inv -e kmt.status` to see all VMs in current stack" + assert len(domains) > 0, err_msg if version is not None: check_version(version) diff --git a/tasks/libs/ciproviders/gitlab_api.py b/tasks/libs/ciproviders/gitlab_api.py index 8c940db6479af..19454a2dae773 100644 --- a/tasks/libs/ciproviders/gitlab_api.py +++ b/tasks/libs/ciproviders/gitlab_api.py @@ -294,7 +294,7 @@ def str_note() -> list[str]: if only_summary: if not cli: - res.append(':warning: Diff too large to display on Github') + res.append(':warning: Diff too large to display on Github.') else: if self.modified: wrap = len(self.modified) > max_detailed_jobs @@ -382,7 +382,7 @@ def display(self, cli: bool = True, job_url: str = None, **kwargs) -> str: return '' if len(self.diffs) == 1: - return self.diffs[0].diff.display(cli, **kwargs) + return self.diffs[0].diff.display(cli, job_url=job_url, **kwargs) def str_entry(diff: MultiGitlabCIDiff.MultiDiff) -> str: if cli: @@ -764,7 +764,7 @@ def generate_gitlab_full_configuration( # Override some variables with a dedicated context if context: - full_configuration['variables'] = full_configuration.get('variables', {}).update(context) + full_configuration.get('variables', {}).update(context) if compare_to: for value in full_configuration.values(): if ( @@ -896,6 +896,15 @@ def get_preset_contexts(required_tests): ("CI_PIPELINE_SOURCE", ["pipeline"]), # ["trigger", "pipeline", "schedule"] ("DDR_WORKFLOW_ID", ["true"]), ] + integrations_core_contexts = [ + ("RELEASE_VERSION_6", ["nightly"]), + ("RELEASE_VERSION_7", ["nightly-a7"]), + ("BUCKET_BRANCH", ["dev"]), + ("DEPLOY_AGENT", ["false"]), + ("INTEGRATIONS_CORE_VERSION", ["foo/bar"]), + ("RUN_KITCHEN_TESTS", ["false"]), + ("RUN_E2E_TESTS", ["off"]), + ] all_contexts = [] for test in required_tests: if test in ["all", "main"]: @@ -906,6 +915,8 @@ def get_preset_contexts(required_tests): generate_contexts(mq_contexts, [], all_contexts) if test in ["all", "conductor"]: generate_contexts(conductor_contexts, [], 
all_contexts) + if test in ["all", "integrations"]: + generate_contexts(integrations_core_contexts, [], all_contexts) return all_contexts @@ -983,7 +994,7 @@ def gitlab_configuration_is_modified(ctx): print(f"Found a gitlab configuration file: {new_file}") else: in_config = False - if in_config and line.startswith("@@"): + if in_config and line.startswith("@@") and os.path.exists(new_file): lines = changed_lines.match(line) start = int(lines.group(1)) with open(new_file) as f: diff --git a/tasks/libs/common/go.py b/tasks/libs/common/go.py index 6ee7f6bf0834d..61b5ecdb34784 100644 --- a/tasks/libs/common/go.py +++ b/tasks/libs/common/go.py @@ -11,8 +11,10 @@ def download_go_dependencies(ctx: Context, paths: list[str], verbose: bool = False, max_retry: int = 3): print("downloading dependencies") - with timed("go mod download"): + with timed("go mod download && go mod tidy"): verbosity = ' -x' if verbose else '' for path in paths: with ctx.cd(path): - run_command_with_retry(ctx, f"go mod download{verbosity}", max_retry=max_retry) + run_command_with_retry( + ctx, f"go mod download{verbosity} && go mod tidy{verbosity}", max_retry=max_retry + ) diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py index 8b2e04244bb52..fe5d4710113c3 100644 --- a/tasks/libs/common/utils.py +++ b/tasks/libs/common/utils.py @@ -725,3 +725,23 @@ def download_to_tempfile(url, checksum=None): os.close(fd) if os.path.exists(tmp_path): os.remove(tmp_path) + + +def experimental(message): + """ + Marks this task as experimental and prints the message. + + Note: This decorator must be placed after the `task` decorator. 
+ """ + + def decorator(f): + @wraps(f) + def wrapper(*args, **kwargs): + fname = f.__name__ + print(color_message(f"Warning: {fname} is experimental: {message}", Color.ORANGE), file=sys.stderr) + + return f(*args, **kwargs) + + return wrapper + + return decorator diff --git a/tasks/libs/releasing/qa.py b/tasks/libs/releasing/qa.py new file mode 100644 index 0000000000000..9483b2f05c45e --- /dev/null +++ b/tasks/libs/releasing/qa.py @@ -0,0 +1,23 @@ +import os + +from tasks.libs.ciproviders.github_api import GithubAPI + + +def setup_ddqa(ctx): + """ + Setup the environment for ddqa + """ + config_file = ctx.run("ddqa config show", hide=True).stdout.strip() + with open(config_file, "w") as config, open("tools/agent_QA/ddqa_template_config.toml") as template: + config.write(template.read()) + ctx.run(f"ddqa config set repo.datadog-agent.path {os.getcwd()}", hide=True) + gh = GithubAPI() + ctx.run("ddqa config set github.user github-actions[bot]", hide=True) + ctx.run(f"ddqa config set github.token {gh._auth.token}", hide=True) + ctx.run(f"ddqa config set jira.email {os.getenv('ATLASSIAN_USERNAME')}", hide=True) + ctx.run(f"ddqa config set jira.token {os.getenv('ATLASSIAN_PASSWORD')}", hide=True) + ctx.run("ddqa --auto sync", hide=True) + + +def get_labels(version): + return f"-l {version} -l {version.qa_label()} -l ddqa" diff --git a/tasks/libs/releasing/version.py b/tasks/libs/releasing/version.py index 4709d3a97acb6..8c781fb2e134f 100644 --- a/tasks/libs/releasing/version.py +++ b/tasks/libs/releasing/version.py @@ -265,10 +265,12 @@ def get_version( agent_version_cache_file_exist = os.path.exists(AGENT_VERSION_CACHE_NAME) if not agent_version_cache_file_exist: if pipeline_id and pipeline_id.isdigit() and project_name == REPO_NAME: - ctx.run( + result = ctx.run( f"aws s3 cp s3://dd-ci-artefacts-build-stable/datadog-agent/{pipeline_id}/{AGENT_VERSION_CACHE_NAME} .", hide="stdout", ) + if "unable to locate credentials" in result.stderr.casefold(): + raise 
Exit("Permanent error: unable to locate credentials, retry the job", 42) agent_version_cache_file_exist = True if agent_version_cache_file_exist: @@ -327,10 +329,12 @@ def get_version_numeric_only(ctx, major_version='7'): if pipeline_id and pipeline_id.isdigit() and project_name == REPO_NAME: try: if not os.path.exists(AGENT_VERSION_CACHE_NAME): - ctx.run( + result = ctx.run( f"aws s3 cp s3://dd-ci-artefacts-build-stable/datadog-agent/{pipeline_id}/{AGENT_VERSION_CACHE_NAME} .", hide="stdout", ) + if "unable to locate credentials" in result.stderr.casefold(): + raise Exit("Permanent error: unable to locate credentials, retry the job", 42) with open(AGENT_VERSION_CACHE_NAME) as file: cache_data = json.load(file) diff --git a/tasks/libs/types/arch.py b/tasks/libs/types/arch.py index 7f5408bf0f9ef..241e9d65e087a 100644 --- a/tasks/libs/types/arch.py +++ b/tasks/libs/types/arch.py @@ -136,7 +136,7 @@ def local() -> Arch: kmt_arch="x86_64", windows_arch="x64", ci_arch="x64", - spellings={"amd64", "x86_64", "x64", "x86-64"}, + spellings={"amd64", "x86_64", "x64", "x86-64", "x86"}, ) ALL_ARCHS = [ARCH_AMD64, ARCH_ARM64] diff --git a/tasks/libs/types/copyright.py b/tasks/libs/types/copyright.py index 301db649f32b0..3d19e49ba4603 100755 --- a/tasks/libs/types/copyright.py +++ b/tasks/libs/types/copyright.py @@ -34,6 +34,7 @@ '/pkg/remoteconfig/state/products/apmsampling/.*_gen(_test){,1}.go', '/pkg/security/security_profile/dump/activity_dump_easyjson.go', '/pkg/security/probe/actions_easyjson.go', + '/pkg/security/probe/actions_linux_easyjson.go', '/pkg/security/probe/custom_events_easyjson.go', '/pkg/security/serializers/serializers_easyjson.go', '/pkg/security/serializers/serializers_linux_easyjson.go', diff --git a/tasks/libs/types/version.py b/tasks/libs/types/version.py index 24ae7eef887a0..3e85d0c3b7fc5 100644 --- a/tasks/libs/types/version.py +++ b/tasks/libs/types/version.py @@ -105,5 +105,17 @@ def next_version(self, bump_major=False, bump_minor=False, 
bump_patch=False, rc= return new_version + def previous_rc_version(self): + if self.patch is None or self.rc is None or self.rc == 0: + raise RuntimeError("Cannot determine the previous version of incomplete or non-rc version") + previous = self.clone() + if previous.rc == 1: + previous.devel = True + previous.rc -= 1 + return previous + + def qa_label(self): + return f"{self._safe_value('major')}.{self._safe_value('minor')}.{self._safe_value('patch')}-qa" + def tag_pattern(self): return f"{self._safe_value('major')}.{self._safe_value('minor')}.{self._safe_value('patch')}*" diff --git a/tasks/linter.py b/tasks/linter.py index 1d32a7e38ea43..10aa5af3b9ef6 100644 --- a/tasks/linter.py +++ b/tasks/linter.py @@ -383,7 +383,7 @@ def __repr__(self): def list_get_parameter_calls(file): aws_ssm_call = re.compile(r"^.+ssm get-parameter.+--name +(?P[^ ]+).*$") # remove the first letter of the script name because '\f' is badly interpreted for windows paths - wrapper_call = re.compile(r"^.+etch_secret.(sh|ps1)[\"]? +(?P[^ )]+).*$") + wrapper_call = re.compile(r"^.+etch_secret.(sh|ps1)[\"]? 
(-parameterName )?+(?P[^ )]+).*$") calls = [] with open(file) as f: try: @@ -613,7 +613,9 @@ def contains_valid_change_rule(rule): tests_without_change_path = defaultdict(list) tests_without_change_path_allowed = defaultdict(list) for test, filepath in tests: - if not any(contains_valid_change_rule(rule) for rule in config[test]['rules'] if isinstance(rule, dict)): + if "rules" in config[test] and not any( + contains_valid_change_rule(rule) for rule in config[test]['rules'] if isinstance(rule, dict) + ): if test in tests_without_change_path_allow_list: tests_without_change_path_allowed[filepath].append(test) else: diff --git a/tasks/modules.py b/tasks/modules.py index ebed764f57eaf..276f8106a753d 100644 --- a/tasks/modules.py +++ b/tasks/modules.py @@ -163,8 +163,6 @@ def dependency_path(self, agent_version): "comp/otelcol/collector-contrib/impl": GoModule( "comp/otelcol/collector-contrib/impl", independent=True, used_by_otel=True ), - "comp/otelcol/configstore/def": GoModule("comp/otelcol/configstore/def", independent=True, used_by_otel=True), - "comp/otelcol/configstore/impl": GoModule("comp/otelcol/configstore/impl", independent=True, used_by_otel=True), "comp/otelcol/converter/def": GoModule("comp/otelcol/converter/def", independent=True, used_by_otel=True), "comp/otelcol/converter/impl": GoModule("comp/otelcol/converter/impl", independent=True, used_by_otel=True), "comp/otelcol/ddflareextension/def": GoModule( @@ -189,6 +187,9 @@ def dependency_path(self, agent_version): "comp/otelcol/otlp/components/metricsclient": GoModule( "comp/otelcol/otlp/components/metricsclient", independent=True, used_by_otel=True ), + "comp/otelcol/otlp/components/processor/infraattributesprocessor": GoModule( + "comp/otelcol/otlp/components/processor/infraattributesprocessor", independent=True, used_by_otel=True + ), "comp/otelcol/otlp/components/statsprocessor": GoModule( "comp/otelcol/otlp/components/statsprocessor", independent=True, used_by_otel=True ), @@ -217,6 +218,7 @@ def 
dependency_path(self, agent_version): "pkg/config/model": GoModule("pkg/config/model", independent=True, used_by_otel=True), "pkg/config/remote": GoModule("pkg/config/remote", independent=True), "pkg/config/setup": GoModule("pkg/config/setup", independent=True, used_by_otel=True), + "pkg/config/structure": GoModule("pkg/config/structure", independent=True, used_by_otel=True), "pkg/config/utils": GoModule("pkg/config/utils", independent=True, used_by_otel=True), "pkg/errors": GoModule("pkg/errors", independent=True), "pkg/gohai": GoModule("pkg/gohai", independent=True, importable=False), @@ -281,9 +283,6 @@ def dependency_path(self, agent_version): "pkg/util/uuid": GoModule("pkg/util/uuid", independent=True), "pkg/util/winutil": GoModule("pkg/util/winutil", independent=True, used_by_otel=True), "pkg/version": GoModule("pkg/version", independent=True, used_by_otel=True), - "test/e2e/containers/otlp_sender": GoModule( - "test/e2e/containers/otlp_sender", condition=lambda: False, should_tag=False - ), "test/fakeintake": GoModule("test/fakeintake", independent=True), "test/new-e2e": GoModule( "test/new-e2e", @@ -503,3 +502,14 @@ def validate_used_by_otel(ctx: Context): message += "Please label them as \"used_by_otel\" in the DEFAULT_MODULES list." raise Exit(message) + + +def get_module_by_path(path: Path) -> GoModule | None: + """ + Return the GoModule object corresponding to the given path. + """ + for module in DEFAULT_MODULES.values(): + if Path(module.path) == path: + return module + + return None diff --git a/tasks/msi.py b/tasks/msi.py index ea838f27817c5..714646bec9540 100644 --- a/tasks/msi.py +++ b/tasks/msi.py @@ -181,7 +181,7 @@ def _build( # back to the mount. 
try: ctx.run( - f'robocopy {SOURCE_ROOT_DIR} {BUILD_SOURCE_DIR} /MIR /XF cabcache packages embedded2.COMPRESSED embedded3.COMPRESSED', + f'robocopy {SOURCE_ROOT_DIR} {BUILD_SOURCE_DIR} /MIR /XF *.COMPRESSED *.g.wxs *.msi *.exe /XD bin obj .vs cab cabcache packages', hide=True, ) except UnexpectedExit as e: diff --git a/tasks/omnibus.py b/tasks/omnibus.py index 9caaf29941353..b903ae4aab172 100644 --- a/tasks/omnibus.py +++ b/tasks/omnibus.py @@ -90,6 +90,7 @@ def get_omnibus_env( go_mod_cache=None, flavor=AgentFlavor.base, pip_config_file="pip.conf", + custom_config_dir=None, ): env = load_release_versions(ctx, release_version) @@ -133,20 +134,23 @@ def get_omnibus_env( env['SYSTEM_PROBE_BIN'] = system_probe_bin env['AGENT_FLAVOR'] = flavor.name + if custom_config_dir: + env["OUTPUT_CONFIG_DIR"] = custom_config_dir + # We need to override the workers variable in omnibus build when running on Kubernetes runners, # otherwise, ohai detect the number of CPU on the host and run the make jobs with all the CPU. 
kubernetes_cpu_request = os.environ.get('KUBERNETES_CPU_REQUEST') if kubernetes_cpu_request: env['OMNIBUS_WORKERS_OVERRIDE'] = str(int(kubernetes_cpu_request) + 1) - # Forward the DEPLOY_AGENT variable so that we can use a higher compression level for deployed artifacts - deploy_agent = os.environ.get('DEPLOY_AGENT') - if deploy_agent: - env['DEPLOY_AGENT'] = deploy_agent - if 'PACKAGE_ARCH' in os.environ: - env['PACKAGE_ARCH'] = os.environ['PACKAGE_ARCH'] - if 'INSTALL_DIR' in os.environ: - print('Forwarding INSTALL_DIR') - env['INSTALL_DIR'] = os.environ['INSTALL_DIR'] + env_to_forward = [ + # Forward the DEPLOY_AGENT variable so that we can use a higher compression level for deployed artifacts + 'DEPLOY_AGENT', + 'PACKAGE_ARCH', + 'INSTALL_DIR', + ] + for key in env_to_forward: + if key in os.environ: + env[key] = os.environ[key] return env @@ -177,6 +181,7 @@ def build( pip_config_file="pip.conf", host_distribution=None, install_directory=None, + config_directory=None, target_project=None, ): """ @@ -208,6 +213,7 @@ def build( go_mod_cache=go_mod_cache, flavor=flavor, pip_config_file=pip_config_file, + custom_config_dir=config_directory, ) if not target_project: @@ -246,7 +252,7 @@ def build( # For instance if git_cache_dir is set to "/git/cache/dir" and install_dir is # set to /a/b/c, the cache git repository will be located in # /git/cache/dir/a/b/c/.git - if install_directory is None: + if not install_directory: install_directory = install_dir_for_project(target_project) # Is the path starts with a /, it's considered the new root for the joined path # which effectively drops whatever was in omnibus_cache_dir @@ -379,3 +385,41 @@ def manifest( omnibus_s3_cache=False, log_level=log_level, ) + + +@task +def rpath_edit(ctx, install_path, target_rpath_dd_folder, platform="linux"): + # Collect mime types for all files inside the Agent installation + files = ctx.run(rf"find {install_path} -type f -exec file --mime-type \{{\}} \+", hide=True).stdout + for line in 
files.splitlines(): + if not line: + continue + file, file_type = line.split(":") + file_type = file_type.strip() + + if platform == "linux": + if file_type not in ["application/x-executable", "inode/symlink", "application/x-sharedlib"]: + continue + binary_rpath = ctx.run(f'objdump -x {file} | grep "RPATH"', warn=True, hide=True).stdout + else: + if file_type != "application/x-mach-binary": + continue + binary_rpath = ctx.run(f'otool -l {file} | grep -A 2 "RPATH"', warn=True, hide=True).stdout + + if install_path in binary_rpath: + new_rpath = os.path.relpath(target_rpath_dd_folder, os.path.dirname(file)) + if platform == "linux": + ctx.run(f"patchelf --force-rpath --set-rpath \\$ORIGIN/{new_rpath}/embedded/lib {file}") + else: + # The macOS agent binary has 18 RPATH definition, replacing the first one should be enough + # but just in case we're replacing them all. + # We're also avoiding unnecessary `install_name_tool` call as much as possible. + number_of_rpaths = binary_rpath.count('\n') // 3 + for _ in range(number_of_rpaths): + exit_code = ctx.run( + f"install_name_tool -rpath {install_path}/embedded/lib @loader_path/{new_rpath}/embedded/lib {file}", + warn=True, + hide=True, + ).exited + if exit_code != 0: + break diff --git a/tasks/pipeline.py b/tasks/pipeline.py index 1d300a7f38ee2..97fec5231f87f 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -582,10 +582,10 @@ def changelog(ctx, new_commit_sha): if messages: slack_message += ( "\n".join(messages) + "\n:wave: Authors, please check the " - " for issues" + " for issues" ) else: slack_message += empty_changelog_msg @@ -593,11 +593,13 @@ def changelog(ctx, new_commit_sha): print(f"Posting message to slack: \n {slack_message}") send_slack_message("system-probe-ops", slack_message) print(f"Writing new commit sha: {new_commit_sha} to SSM") - ctx.run( + res = ctx.run( f"aws ssm put-parameter --name ci.datadog-agent.gitlab_changelog_commit_sha --value {new_commit_sha} " "--type \"SecureString\" --region 
us-east-1 --overwrite", hide=True, ) + if "unable to locate credentials" in res.stderr.casefold(): + raise Exit("Permanent error: unable to locate credentials, retry the job", code=42) @task @@ -1027,7 +1029,7 @@ def compare_to_itself(ctx): ctx.run("git config --global user.email 'github-app[bot]@users.noreply.github.com'", hide=True) # The branch must exist in gitlab to be able to "compare_to" # Push an empty commit to prevent linking this pipeline to the actual PR - ctx.run("git commit -m 'Compare to itself' --allow-empty", hide=True) + ctx.run("git commit -m 'Initial push of the compare/to branch' --allow-empty", hide=True) ctx.run(f"git push origin {new_branch}") from tasks.libs.releasing.json import load_release_json @@ -1040,7 +1042,7 @@ def compare_to_itself(ctx): with open(file, 'w') as f: f.write(content.replace(f'compare_to: {release_json["base_branch"]}', f'compare_to: {new_branch}')) - ctx.run("git commit -am 'Compare to itself'", hide=True) + ctx.run("git commit -am 'Commit to compare to itself'", hide=True) ctx.run(f"git push origin {new_branch}", hide=True) max_attempts = 6 compare_to_pipeline = None diff --git a/tasks/release.py b/tasks/release.py index 8668e9f3a8b50..52dbc4fde4be9 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -1034,3 +1034,18 @@ def check_for_changes(ctx, release_branch, warning_mode=False): ) # Send a value for the create_rc_pr.yml workflow print(changes) + + +@task +def create_qa_cards(ctx, tag): + """ + Automate the call to ddqa + """ + from tasks.libs.releasing.qa import get_labels, setup_ddqa + + version = _create_version_from_match(RC_VERSION_RE.match(tag)) + if not version.rc: + print(f"{tag} is not a release candidate, skipping") + return + setup_ddqa(ctx) + ctx.run(f"ddqa --auto create {version.previous_rc_version()} {tag} {get_labels(version)}") diff --git a/tasks/setup.py b/tasks/setup.py index f1ffa8d689718..0426543a16010 100644 --- a/tasks/setup.py +++ b/tasks/setup.py @@ -43,7 +43,6 @@ def setup(ctx, 
vscode=False): check_python_version, check_go_version, update_python_dependencies, - download_go_tools, install_go_tools, install_protoc, enable_pre_commit, @@ -257,19 +256,3 @@ def install_protoc(ctx) -> SetupResult: status = Status.FAIL return SetupResult("Install protoc", status, message) - - -def download_go_tools(ctx) -> SetupResult: - print(color_message("Downloading go tools...", Color.BLUE)) - status = Status.OK - message = "" - - try: - from tasks import download_tools - - download_tools(ctx) - except Exception as e: - message = f'Download Go tools failed: {e}' - status = Status.FAIL - - return SetupResult("Download Go tools", status, message) diff --git a/tasks/system_probe.py b/tasks/system_probe.py index a0e218e222fbd..e54131bbd2fe0 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -333,7 +333,7 @@ def ninja_test_ebpf_programs(nw: NinjaWriter, build_dir): ebpf_c_dir = os.path.join(ebpf_bpf_dir, "testdata", "c") test_flags = "-g -DDEBUG=1" - test_programs = ["logdebug-test", "error_telemetry"] + test_programs = ["logdebug-test", "error_telemetry", "uprobe_attacher-test"] for prog in test_programs: infile = os.path.join(ebpf_c_dir, f"{prog}.c") @@ -845,7 +845,6 @@ def go_package_dirs(packages, build_tags): This handles the ellipsis notation (eg. ./pkg/ebpf/...) 
""" - target_packages = [] format_arg = '{{ .Dir }}' buildtags_arg = ",".join(build_tags) packages_arg = " ".join(packages) diff --git a/tasks/unit_tests/junit_tests.py b/tasks/unit_tests/junit_tests.py index 00a95dc49e48d..37876198a4668 100644 --- a/tasks/unit_tests/junit_tests.py +++ b/tasks/unit_tests/junit_tests.py @@ -55,7 +55,7 @@ def test_without_split(self): def test_with_split(self): xml_file = Path("./tasks/unit_tests/testdata/secret.tar.gz/-go-src-datadog-agent-junit-out-base.xml") owners = read_owners(".github/CODEOWNERS") - self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 28) + self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 27) class TestGroupPerTag(unittest.TestCase): @@ -144,4 +144,4 @@ def test_e2e(self, mock_popen, mock_gitlab): mock_gitlab.return_value = mock_project junit.junit_upload_from_tgz("tasks/unit_tests/testdata/testjunit-tests_deb-x64-py3.tgz") mock_popen.assert_called() - self.assertEqual(mock_popen.call_count, 30) + self.assertEqual(mock_popen.call_count, 29) diff --git a/tasks/unit_tests/linter_tests.py b/tasks/unit_tests/linter_tests.py index b6a2cd3a5f283..f3e3d0c51d0ce 100644 --- a/tasks/unit_tests/linter_tests.py +++ b/tasks/unit_tests/linter_tests.py @@ -32,7 +32,7 @@ def test_without_wrapper_no_env(self): def test_without_wrapper_with_env(self): with open(self.test_file, "w") as f: f.write( - " - export DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2 --with-decryption --query Parameter.Value --out text" + " - DD_API_KEY=$(aws ssm get-parameter --region us-east-1 --name $API_KEY_ORG2 --with-decryption --query Parameter.Value --out text || exit $?; export DD_API_KEY" ) matched = linter.list_get_parameter_calls(self.test_file)[0] self.assertFalse(matched.with_wrapper) @@ -41,7 +41,7 @@ def test_without_wrapper_with_env(self): def test_with_wrapper_no_env(self): with open(self.test_file, "w") as f: f.write( - "export 
DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh test.datadog-agent.datadog_api_key_org2)" + "DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh test.datadog-agent.datadog_api_key_org2) || exit $?; export DD_API_KEY" ) matched = linter.list_get_parameter_calls(self.test_file)[0] self.assertTrue(matched.with_wrapper) @@ -49,17 +49,19 @@ def test_with_wrapper_no_env(self): def test_with_wrapper_with_env(self): with open(self.test_file, "w") as f: - f.write("export DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $APP_KEY_ORG2)") + f.write( + "DD_APP_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $APP_KEY_ORG2) || exit $?; export DD_APP_KEY" + ) matched = linter.list_get_parameter_calls(self.test_file) self.assertListEqual([], matched) def test_multi_match_windows(self): with open(self.test_file, "w") as f: f.write( - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch_secret.ps1" test.datadog-agent.datadog_api_key_org2 $tmpfile)\n' - 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch secret.ps1" "$Env:MISSING_UNDERSCORE" $tmpfile)\n' - '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" "bad.name" "$tmpfile")\n' - 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" "$Env:TEST" $tmpfile)\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch_secret.ps1" -parameterName test.datadog-agent.datadog_api_key_org2 -tempFile $tmpfile)\n' + 'DD_API_KEY=$(& "$CI_PROJECT_DIR\tools \\ci\fetch secret.ps1" -parameterName "$Env:MISSING_UNDERSCORE" -tempFile $tmpfile)\n' + '`DD_APP_KEY=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" -parameterName "bad.name" -tempFile "$tmpfile")\n' + 'DD_APP=$(& "$CI_PROJECT_DIR\tools\\ci\fetch_secret.ps1" -parameterName "$Env:TEST" -tempFile $tmpfile)\n' ) matched = linter.list_get_parameter_calls(self.test_file) self.assertEqual(2, len(matched)) diff --git a/tasks/unit_tests/pipeline_tests.py b/tasks/unit_tests/pipeline_tests.py index a418bc1fbb67d..366fc3b54bb92 100644 --- a/tasks/unit_tests/pipeline_tests.py +++ 
b/tasks/unit_tests/pipeline_tests.py @@ -106,9 +106,9 @@ class TestCompareToItself(unittest.TestCase): "git remote set-url origin https://x-access-token:zidane@github.com/DataDog/datadog-agent.git": Result(), "git config --global user.name 'github-actions[bot]'": Result(), "git config --global user.email 'github-app[bot]@users.noreply.github.com'": Result(), - "git commit -m 'Compare to itself' --allow-empty": Result(), + "git commit -m 'Initial push of the compare/to branch' --allow-empty": Result(), "git push origin compare/Football/900284400": Result(), - "git commit -am 'Compare to itself'": Result(), + "git commit -am 'Commit to compare to itself'": Result(), "git checkout Football": Result(), "git branch -D compare/Football/900284400": Result(), "git push origin :compare/Football/900284400": Result(), diff --git a/tasks/unit_tests/version_tests.py b/tasks/unit_tests/version_tests.py index 9cc343cdbb2c7..2f6255c68310c 100644 --- a/tasks/unit_tests/version_tests.py +++ b/tasks/unit_tests/version_tests.py @@ -210,6 +210,48 @@ def test_next_version_promote_rc(self): self.assertEqual(new_version, expected_version) +class TestPreviousRCVersion(unittest.TestCase): + def test_non_rc(self): + version = Version(major=1, minor=1) + with self.assertRaises(RuntimeError): + version.previous_rc_version() + + def test_rc_1_no_patch(self): + version = Version(major=1, minor=1, rc=1) + with self.assertRaises(RuntimeError): + version.previous_rc_version() + + def test_rc_1(self): + version = Version(major=1, minor=1, patch=1, rc=1) + previous = str(version.previous_rc_version()) + self.assertEqual(previous, "1.1.1-devel") + + def test_rc_42(self): + version = Version(major=1, minor=1, patch=1, rc=42) + previous = str(version.previous_rc_version()) + self.assertEqual(previous, "1.1.1-rc.41") + + +class TestQALabel(unittest.TestCase): + expected = "1.2.0-qa" + + def test_minor_major(self): + v = Version(1, 2) + self.assertEqual(v.qa_label(), self.expected) + + def 
test_minor_major_patch(self): + v = Version(1, 2, patch=0) + self.assertEqual(v.qa_label(), self.expected) + + def test_minor_major_patch_devel(self): + v = Version(1, 2, devel=True) + self.assertEqual(v.qa_label(), self.expected) + + def test_minor_major_patch_rc(self): + v = Version(1, 2, rc=1) + self.assertEqual(v.qa_label(), self.expected) + + class TestQueryVersion(unittest.TestCase): @patch.dict(os.environ, {"BUCKET_BRANCH": "dev"}, clear=True) def test_on_dev_bucket(self): diff --git a/tasks/vscode.py b/tasks/vscode.py index e528d188e1c79..e792327343084 100644 --- a/tasks/vscode.py +++ b/tasks/vscode.py @@ -44,7 +44,7 @@ def setup(ctx, force=False): print(color_message("* Setting up tasks", Color.BOLD)) setup_tasks(ctx, force) print(color_message("* Setting up tests", Color.BOLD)) - setup_tasks(ctx, force) + setup_tests(ctx, force) print(color_message("* Setting up settings", Color.BOLD)) setup_settings(ctx, force) print(color_message("* Setting up launch settings", Color.BOLD)) diff --git a/tasks/winbuildscripts/Generate-OCIPackage.ps1 b/tasks/winbuildscripts/Generate-OCIPackage.ps1 index 274778d4a16b8..ee48f4badc9fe 100644 --- a/tasks/winbuildscripts/Generate-OCIPackage.ps1 +++ b/tasks/winbuildscripts/Generate-OCIPackage.ps1 @@ -1,8 +1,7 @@ Param( - [Parameter(Mandatory=$true,Position=0)] - [ValidateSet("datadog-agent", "datadog-installer")] - [String] - $package + [Parameter(Mandatory=$true)] + [string] $package, + [string] $version ) $omnibusOutput = "$($Env:REPO_ROOT)\omnibus\pkg\" @@ -11,10 +10,15 @@ if (-not (Test-Path C:\tools\datadog-package.exe)) { Write-Host "Downloading datadog-package.exe" (New-Object System.Net.WebClient).DownloadFile("https://dd-agent-omnibus.s3.amazonaws.com/datadog-package.exe", "C:\\tools\\datadog-package.exe") } -$rawAgentVersion = "{0}-1" -f (inv agent.version --url-safe --major-version 7) -Write-Host "Detected agent version ${rawAgentVersion}" +if ([string]::IsNullOrWhitespace($version)) { + $version = "{0}-1" -f (inv 
agent.version --url-safe --major-version 7) + Write-Host "Detected agent version ${version}" +} +if (-not $version.EndsWith("-1")) { + $version += "-1" +} -$packageName = "${package}-${rawAgentVersion}-windows-amd64.oci.tar" +$packageName = "${package}-${version}-windows-amd64.oci.tar" if (Test-Path $omnibusOutput\$packageName) { Remove-Item $omnibusOutput\$packageName @@ -23,9 +27,9 @@ if (Test-Path $omnibusOutput\$packageName) { # datadog-package takes a folder as input and will package everything in that, so copy the msi to its own folder Remove-Item -Recurse -Force C:\oci-pkg -ErrorAction SilentlyContinue New-Item -ItemType Directory C:\oci-pkg -Copy-Item (Get-ChildItem $omnibusOutput\${package}-${rawAgentVersion}-x86_64.msi).FullName -Destination C:\oci-pkg\${package}-${rawAgentVersion}-x86_64.msi +Copy-Item (Get-ChildItem $omnibusOutput\${package}-${version}-x86_64.msi).FullName -Destination C:\oci-pkg\${package}-${version}-x86_64.msi # The argument --archive-path ".\omnibus\pkg\datadog-agent-${version}.tar.gz" is currently broken and has no effects -& C:\tools\datadog-package.exe create --package $package --os windows --arch amd64 --archive --version $rawAgentVersion C:\oci-pkg +& C:\tools\datadog-package.exe create --package $package --os windows --arch amd64 --archive --version $version C:\oci-pkg -Move-Item ${package}-${rawAgentVersion}-windows-amd64.tar $omnibusOutput\$packageName +Move-Item ${package}-${version}-windows-amd64.tar $omnibusOutput\$packageName diff --git a/tasks/winbuildscripts/buildinstaller.bat b/tasks/winbuildscripts/buildinstaller.bat index a9d7b3378ea05..bd41ccad5dc2a 100644 --- a/tasks/winbuildscripts/buildinstaller.bat +++ b/tasks/winbuildscripts/buildinstaller.bat @@ -18,6 +18,8 @@ set OMNIBUS_BUILD=omnibus.build @rem It's not strictly needed, as we will only invoke the .cmd for the Datadog Installer in the invoke task build-installer, but it's a good practice to be consistent. 
set OMNIBUS_TARGET=installer set OMNIBUS_ARGS=%OMNIBUS_ARGS% --target-project %OMNIBUS_TARGET% +@rem Have to use arcane syntax to store AGENT_VERSION, see https://ss64.com/nt/for_cmd.html +FOR /F "tokens=*" %%g IN ('inv agent.version --url-safe --major-version 7') do (SET AGENT_VERSION=%%g) if DEFINED GOMODCACHE set OMNIBUS_ARGS=%OMNIBUS_ARGS% --go-mod-cache %GOMODCACHE% if DEFINED USE_S3_CACHING set OMNIBUS_ARGS=%OMNIBUS_ARGS% %USE_S3_CACHING% @@ -36,19 +38,18 @@ pip3 install -r requirements.txt inv -e %OMNIBUS_BUILD% %OMNIBUS_ARGS% --skip-deps --release-version %RELEASE_VERSION% || exit /b 1 inv -e msi.build-installer || exit /b 2 -Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 datadog-installer" +Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 -package 'datadog-installer'" REM show output package directories (for debugging) dir \omnibus-ruby\pkg\ - +dir C:\opt\datadog-installer\ dir %REPO_ROOT%\omnibus\pkg\ REM copy resulting packages to expected location for collection by gitlab. if not exist c:\mnt\omnibus\pkg\ mkdir c:\mnt\omnibus\pkg\ || exit /b 5 copy %REPO_ROOT%\omnibus\pkg\* c:\mnt\omnibus\pkg\ || exit /b 6 - -REM show output binary directories (for debugging) -dir C:\opt\datadog-installer\ +REM Save the installer.exe for bootstrapping +copy C:\opt\datadog-installer\datadog-installer.exe c:\mnt\omnibus\pkg\datadog-installer-%AGENT_VERSION%-1-x86_64.exe || exit /b 7 goto :EOF diff --git a/tasks/winbuildscripts/dobuild.bat b/tasks/winbuildscripts/dobuild.bat index 852903f987b50..6af97b5a3d3bf 100644 --- a/tasks/winbuildscripts/dobuild.bat +++ b/tasks/winbuildscripts/dobuild.bat @@ -58,7 +58,7 @@ if "%OMNIBUS_TARGET%" == "main" ( REM Build the OCI package for the Agent 7 only. 
if %MAJOR_VERSION% == 7 ( - Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 datadog-agent" + Powershell -C "./tasks/winbuildscripts/Generate-OCIPackage.ps1 -package 'datadog-agent'" ) popd diff --git a/tasks/winbuildscripts/extract-modcache.bat b/tasks/winbuildscripts/extract-modcache.bat index 6d516b4139311..562d6d5647257 100644 --- a/tasks/winbuildscripts/extract-modcache.bat +++ b/tasks/winbuildscripts/extract-modcache.bat @@ -33,7 +33,7 @@ if exist %MODCACHE_XZ_FILE% ( REM This shouldn't have any negative impact: since modules are REM stored per version and hash, files that get replaced will REM get replaced by the same files - Powershell -C "7z x %MODCACHE_TAR_FILE% -o%GOMODCACHE% -aoa -bt" + Powershell -C "7z x %MODCACHE_TAR_FILE% -o%GOMODCACHE%\cache -aoa -bt" @echo Modcache extracted ) else ( @echo %MODCACHE_XZ_FILE% not found, dependencies will be downloaded diff --git a/tasks/winbuildscripts/unittests.ps1 b/tasks/winbuildscripts/unittests.ps1 index ad28ec0540ad6..8d09afd956ae5 100644 --- a/tasks/winbuildscripts/unittests.ps1 +++ b/tasks/winbuildscripts/unittests.ps1 @@ -63,7 +63,7 @@ $ErrorActionPreference = "Continue" $tmpfile = [System.IO.Path]::GetTempFileName() # 1. 
Upload coverage reports to Codecov -& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:CODECOV_TOKEN" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:CODECOV_TOKEN" -tempFile "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } @@ -75,12 +75,12 @@ $Env:CODECOV_TOKEN=$(cat "$tmpfile") Get-ChildItem -Path "$UT_BUILD_ROOT" -Filter "junit-out-*.xml" -Recurse | ForEach-Object { Copy-Item -Path $_.FullName -Destination C:\mnt } -& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:API_KEY_ORG2" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:API_KEY_ORG2" -tempFile "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } $Env:DATADOG_API_KEY=$(cat "$tmpfile") -& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" "$Env:GITLAB_TOKEN" "$tmpfile" +& "$UT_BUILD_ROOT\tools\ci\fetch_secret.ps1" -parameterName "$Env:GITLAB_TOKEN" -tempFile "$tmpfile" If ($LASTEXITCODE -ne "0") { exit $LASTEXITCODE } diff --git a/test/benchmarks/util.go b/test/benchmarks/util.go index 7cfdfacc958d6..898ef5daad46b 100644 --- a/test/benchmarks/util.go +++ b/test/benchmarks/util.go @@ -10,7 +10,8 @@ import ( "math/rand" "time" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" @@ -31,7 +32,7 @@ func TimeNowNano() float64 { // InitLogging inits default logger func InitLogging(level string) error { - err := config.SetupLogger(config.LoggerName("test"), level, "", "", false, true, false) + err := pkglogsetup.SetupLogger(pkglogsetup.LoggerName("test"), level, "", "", false, true, false, pkgconfigsetup.Datadog()) if err != nil { return fmt.Errorf("Unable to initiate logger: %s", err) } diff --git a/test/e2e/README.md b/test/e2e/README.md deleted file mode 100644 index 5bf55766a9b76..0000000000000 --- a/test/e2e/README.md +++ /dev/null 
@@ -1,122 +0,0 @@ -# End to End testing - -# ToC -- [How it works](#how-it-works) - * [Setup instance](#setup-instance) - * [Run instance](#run-instance) - * [Command line](#command-line) - * [AWS development](#aws-development) - * [Locally](#locally) -- [Argo workflow](#argo-workflow) - * [argo assertion](#argo-assertion) - * [argo container](#argo-container) -- [Upgrade](#upgrade---bump) - -# How it works - -There are 3 main directories: -- [argo-workflows](./argo-workflows) - Specification of the end to end testing - -- [containers](./containers) - Custom container images needed within the workflows - -- [scripts](./scripts) - - [`setup-instance`](./scripts/setup-instance) - Entrypoint and scripts dedicated for environments (locally, AWS dev, AWS gitlab) - - [`run-instance`](./scripts/run-instance) - Scripts executed in the argo-machine (locally, AWS instance) - -## `setup-instance` - - - -## `run-instance` - - - -## Command line - -### AWS development - -```bash -$ cd ${GOPATH}/src/github.com/DataDog/datadog-agent -$ aws-vault exec ${DEV} -- inv -e e2e-tests -t dev --agent-image datadog/agent-dev:master --dca-image datadog/cluster-agent-dev:master -``` - -### Locally (Linux only) - -```bash -$ inv -e e2e-tests -t local --agent-image datadog/agent-dev:master --dca-image datadog/cluster-agent-dev:master -``` - -# Argo workflow - -The argo documentation is available [here](https://argo-cd.readthedocs.io/en/stable/), there are a lot of examples [here](https://github.com/argoproj/argo/tree/master/examples) too. 
- -## Argo assertion - -To assert something in an argo workflow, you need to create a mongodb query: -```yaml -name: find-kubernetes-state-deployments -activeDeadlineSeconds: 200 -script: - image: mongo:3.6.3 - command: [mongo, "fake-datadog.default.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: "kubernetes_state.deployment.replicas_available", - tags: {$all: ["namespace:default", "deployment:fake-datadog"] }, - "points.0.1": { $eq: 1} }); - print("find: " + nb) - if (nb != 0) { - break; - } - prevNb = nb; - sleep(2000); - } -``` - -This is an infinite loop with a timeout set by `activeDeadlineSeconds: 200`. -The source is EOF to the command, equivalent to: -```bash -mongo "fake-datadog.default.svc.cluster.local/datadog" << EOF -while (1) -[...] -EOF -``` - -Try to maximise the usage of MongoDB query system without rewriting too much logic in JavaScript. - -See some examples [here](./containers/fake_datadog/README.md#find) - -To discover more MongoDB capabilities: -- [find](https://docs.mongodb.com/manual/tutorial/query-documents/) -- [aggregation](https://docs.mongodb.com/manual/aggregation/) - -## Argo container - -If you need to add a non existing public container in the workflow, create it in the [container directory](./containers). - -But, keep in mind this become an additional piece of software to maintain. - -# Upgrade - bump - -This section helps you to upgrade any part of the end to end testing. - -The current end to end testing pipeline relies on: -* [Argo](https://github.com/argoproj/argo) - -Upgrade Argo version by changing version in `test/e2e/scripts/run-instance/20-argo-download.sh` and setting new checksum value in `test/e2e/scripts/run-instance/argo.sha512sum` - -* [Kind](https://kind.sigs.k8s.io/) - -Upgrade Kind version by changing version in `test/e2e/scripts/run-instance/10-setup-kind.sh`. -By default Kind will use the latest stable Kubernetes known at the time of Kind release. 
- -* [Fedora CoreOS](https://getfedora.org/en/coreos?stream=stable) - -You don't need to update CoreOS version as the setup script (`test/e2e/scripts/setup-instance/00-entrypoint-[dev|gitlab].sh`) always uses the latest `stable` version by default. - -If needed, use the [ignition-linter](https://coreos.com/validate/) to validate any changes. diff --git a/test/e2e/argo-workflows/cspm-workflow.yaml b/test/e2e/argo-workflows/cspm-workflow.yaml deleted file mode 100644 index c124dbf807d74..0000000000000 --- a/test/e2e/argo-workflows/cspm-workflow.yaml +++ /dev/null @@ -1,121 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: argo-datadog-agent- -spec: - entrypoint: main - onExit: exit-handler - arguments: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-agent-site - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - volumes: - - name: datadog-agent-volume - hostPath: - path: /host/datadog-agent - - name: host-root-proc - hostPath: - path: /proc - templates: - - name: main - inputs: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-agent-site - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - steps: - - - name: start-fake-datadog - templateRef: - name: fake-datadog - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-dsd-hostname - templateRef: - name: dsd-hostname - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-datadog-agent - templateRef: - name: datadog-agent - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - 
- name: agent-image-repository - value: "{{inputs.parameters.datadog-agent-image-repository}}" - - name: agent-image-tag - value: "{{inputs.parameters.datadog-agent-image-tag}}" - - name: cluster-agent-image-repository - value: "{{inputs.parameters.datadog-cluster-agent-image-repository}}" - - name: cluster-agent-image-tag - value: "{{inputs.parameters.datadog-cluster-agent-image-tag}}" - - name: site - value: "{{inputs.parameters.datadog-agent-site}}" - - name: dd-url - value: "" - - name: ci_commit_short_sha - value: "{{inputs.parameters.ci_commit_short_sha}}" - - name: ci_pipeline_id - value: "{{inputs.parameters.ci_pipeline_id}}" - - name: ci_job_id - value: "{{inputs.parameters.ci_job_id}}" - - name: remote_configuration_enabled - value: "false" - - name: networkmonitoring_enabled - value: "false" - - - - name: wait-datadog-agent - templateRef: - name: datadog-agent - template: wait - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: test-cspm-e2e - templateRef: - name: datadog-agent - template: test-cspm-e2e - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: site - value: "{{inputs.parameters.datadog-agent-site}}" - - - name: exit-handler - steps: - - - name: diagnose - template: diagnose - - - name: diagnose - steps: - - - name: diagnose-datadog-agent - templateRef: - name: datadog-agent - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" diff --git a/test/e2e/argo-workflows/default-workflow.yaml b/test/e2e/argo-workflows/default-workflow.yaml deleted file mode 100644 index 9c9f54e30d89c..0000000000000 --- a/test/e2e/argo-workflows/default-workflow.yaml +++ /dev/null @@ -1,352 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: argo-datadog-agent- -spec: - entrypoint: main - onExit: exit-handler - arguments: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag 
- - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - volumes: - - name: datadog-agent-volume - hostPath: - path: /host/datadog-agent - templates: - - name: main - inputs: - parameters: - - name: datadog-agent-image-repository - - name: datadog-agent-image-tag - - name: datadog-cluster-agent-image-repository - - name: datadog-cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - steps: - - - name: start-fake-datadog - templateRef: - name: fake-datadog - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-redis - templateRef: - name: redis - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-cpu-stress - templateRef: - name: cpu-stress - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-dsd-hostname - templateRef: - name: dsd-hostname - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-logs-hello-world - templateRef: - name: logs-hello-world - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: start-nginx - templateRef: - name: nginx - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: fake-dd-reset - templateRef: - name: fake-datadog - template: reset - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-datadog-agent - templateRef: - name: datadog-agent - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: agent-image-repository - value: "{{inputs.parameters.datadog-agent-image-repository}}" - - name: agent-image-tag - value: 
"{{inputs.parameters.datadog-agent-image-tag}}" - - name: dd-url - value: "http://fake-datadog.{{workflow.namespace}}.svc.cluster.local" - - name: site - value: "" - - name: cluster-agent-image-repository - value: "{{inputs.parameters.datadog-cluster-agent-image-repository}}" - - name: cluster-agent-image-tag - value: "{{inputs.parameters.datadog-cluster-agent-image-tag}}" - - name: ci_commit_short_sha - value: "{{inputs.parameters.ci_commit_short_sha}}" - - name: ci_pipeline_id - value: "{{inputs.parameters.ci_pipeline_id}}" - - name: ci_job_id - value: "{{inputs.parameters.ci_job_id}}" - - name: remote_configuration_enabled - value: "false" - - name: networkmonitoring_enabled - value: "false" - - - - name: wait-datadog-agent - templateRef: - name: datadog-agent - template: wait - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: start-busybox - templateRef: - name: busybox - template: create - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: test-datadog-agent - templateRef: - name: datadog-agent - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-redis - templateRef: - name: redis - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-cpu - templateRef: - name: cpu-stress - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-dsd - templateRef: - name: dsd-hostname - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-nginx - templateRef: - name: nginx - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: test-busybox - templateRef: - name: busybox - template: test - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: stop-redis - 
templateRef: - name: redis - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-nginx - templateRef: - name: nginx - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - - name: no-more-redis - templateRef: - name: redis - template: no-more-metrics - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: no-more-nginx - templateRef: - name: nginx - template: no-more-metrics - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: exit-handler - steps: - - - name: delete - template: delete - when: "{{workflow.status}} == Succeeded" - - - name: diagnose - template: diagnose - when: "{{workflow.status}} != Succeeded" - - - name: delete - steps: - - - name: stop-datadog-agent - templateRef: - name: datadog-agent - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-redis - templateRef: - name: redis - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-cpu-stress - templateRef: - name: cpu-stress - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-dsd-hostname - templateRef: - name: dsd-hostname - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-logs-hello-world - templateRef: - name: logs-hello-world - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-nginx - templateRef: - name: nginx - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: stop-fake-datadog - templateRef: - name: fake-datadog - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: 
stop-busybox - templateRef: - name: busybox - template: delete - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - - name: diagnose - steps: - - - name: diagnose-datadog-agent - templateRef: - name: datadog-agent - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-fake-datadog - templateRef: - name: fake-datadog - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-nginx - templateRef: - name: nginx - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" - - name: diagnose-busybox - templateRef: - name: busybox - template: diagnose - arguments: - parameters: - - name: namespace - value: "{{workflow.namespace}}" diff --git a/test/e2e/argo-workflows/templates/cpu-stress.yaml b/test/e2e/argo-workflows/templates/cpu-stress.yaml deleted file mode 100644 index e210d9aa0eaf2..0000000000000 --- a/test/e2e/argo-workflows/templates/cpu-stress.yaml +++ /dev/null @@ -1,175 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: cpu-stress -spec: - templates: - - name: create - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: cpu-stress - namespace: {{inputs.parameters.namespace}} - spec: - replicas: 1 - selector: - matchLabels: - app: cpu-stress - template: - metadata: - labels: - app: cpu-stress - spec: - containers: - - name: cpu-stress - image: datadog/docker-library:progrium_stress - args: - - "--cpu" - - "2" - resources: - requests: - memory: "64Mi" - cpu: "1" - limits: - memory: "64Mi" - cpu: "1" - - - name: delete - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: cpu-stress - namespace: {{inputs.parameters.namespace}} - - - name: 
find-metrics-cpu-container-runtime - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: "cri.cpu.usage", - tags: { $all: ["kube_deployment:cpu-stress", "kube_container_name:cpu-stress"] }, - "points.0.1": { $gt: 950000000, $lt: 1010000000 } }).count(); - print("find: " + nb) - if (nb != 0) { - print("cpu value in target range") - break; - } - sleep(2000); - } - - - name: find-metrics-cpu-kubelet - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: "kubernetes.cpu.usage.total", - tags: { $all: ["kube_deployment:cpu-stress", "kube_container_name:cpu-stress"] }, - "points.0.1": { $gt: 800000000, $lt: 1200000000 } }).count(); - print("find: " + nb) - if (nb != 0) { - print("cpu value in target range") - break; - } - sleep(2000); - } - - - name: find-metrics-cpu-system - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - sleep(2000); - - // Determine the hostname the cpu-stress pod is running on - var point = db.series.find({ - metric: "kubernetes.cpu.usage.total", - tags: {$all: ["kube_deployment:cpu-stress", "kube_container_name:cpu-stress"]} - }).limit(1).sort({$natural:-1})[0]; - if (!point) { - print("cannot get hostname for pod"); - continue; - } - hostname = point.host; - - // Get the number of CPUs on that host - var point = db.series.find({ - metric: "kubernetes_state.node.cpu_capacity", - host: hostname - }).limit(1).sort({$natural:-1})[0]; - if (!point) { - print("cannot get cpu 
capacity for host " + hostname); - continue; - } - cpucount = point.points[0][1]; - print("cpu count: " + cpucount) - - // Get the user CPU usage, make sure it's above 39% non-normalized - var point = db.series.find({ - metric: "system.cpu.user", - host: hostname - }).limit(1).sort({$natural:-1})[0]; - if (!point) { - print("no system.cpu.usage metric reported for host " + hostname) - continue; - } - print("raw value: " + point.points[0][1]) - value = point.points[0][1] * cpucount; - print("cpu value: " + value) - if (value > 95) { - print("cpu value in target range"); - break; - } - } - - - name: test - inputs: - parameters: - - name: namespace - steps: - - - name: find-metrics-cpu-container-runtime - template: find-metrics-cpu-container-runtime - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-cpu-kubelet - template: find-metrics-cpu-kubelet - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-cpu-system - template: find-metrics-cpu-system - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" diff --git a/test/e2e/argo-workflows/templates/datadog-agent.yaml b/test/e2e/argo-workflows/templates/datadog-agent.yaml deleted file mode 100644 index 2c040444f3104..0000000000000 --- a/test/e2e/argo-workflows/templates/datadog-agent.yaml +++ /dev/null @@ -1,666 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: datadog-agent -spec: - templates: - - name: create - inputs: - parameters: - - name: namespace - - name: dd-url - - name: site - - name: agent-image-repository - - name: agent-image-tag - - name: cluster-agent-image-repository - - name: cluster-agent-image-tag - - name: ci_commit_short_sha - - name: ci_pipeline_id - - name: ci_job_id - - name: remote_configuration_enabled - - name: networkmonitoring_enabled - script: - image: alpine/k8s:1.27.1 - envFrom: - - secretRef: - name: 
dd-keys - command: [sh] - source: | - set -euo pipefail - - cat > /tmp/values.yaml <& /dev/null - sleep 0.01 - done ) & - - until [[ "$(kubectl --namespace {{inputs.parameters.namespace}} get hpa nginxext -o jsonpath='{.status.currentReplicas}')" -gt 1 ]]; do - kubectl --namespace {{inputs.parameters.namespace}} describe hpa nginxext - sleep 1 - done - - - name: test - inputs: - parameters: - - name: namespace - dag: - tasks: - - name: find-kube-state-metrics - template: find-kube-state-metrics - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-nginx - template: find-metrics-nginx - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: validate-hpa - template: validate-hpa - dependencies: - - find-metrics-nginx - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: run-hpa - template: run-hpa - dependencies: - - validate-hpa - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: no-more-metrics - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - var prevNb = -1; - while (1) { - var nb = db.series.find({ - metric: {$regex: "nginx*"} - }).count(); - - print("prev-find: " + prevNb) - print("find: " + nb) - if (nb == prevNb) { - break; - } - prevNb = nb; - sleep(30000); - } - - - name: describe-hpa - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: alpine/k8s:1.27.1 - command: [sh] - source: | - set -euo pipefail - - kubectl --namespace {{inputs.parameters.namespace}} describe hpa nginxext - - - name: diagnose - inputs: - parameters: - - name: namespace - steps: - - - name: describe-hpa - template: describe-hpa - arguments: - parameters: - - name: namespace - value: 
"{{inputs.parameters.namespace}}" diff --git a/test/e2e/argo-workflows/templates/redis.yaml b/test/e2e/argo-workflows/templates/redis.yaml deleted file mode 100644 index df3cbf79ea615..0000000000000 --- a/test/e2e/argo-workflows/templates/redis.yaml +++ /dev/null @@ -1,366 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: WorkflowTemplate -metadata: - name: redis -spec: - templates: - - name: create-service - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: v1 - kind: Service - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - spec: - ports: - - port: 6379 - protocol: TCP - targetPort: 6379 - name: redis - selector: - app: redis - type: ClusterIP - - - name: create-deployment - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - spec: - selector: - matchLabels: - app: redis - replicas: 1 - template: - metadata: - labels: - app: redis - annotations: - ad.datadoghq.com/redis.check_names: '["redisdb"]' - ad.datadoghq.com/redis.init_configs: '[{}]' - ad.datadoghq.com/redis.instances: '[{"host": "%%host%%", "port": "%%port%%"}]' - spec: - initContainers: - - name: useless - image: busybox:latest - command: - - /bin/true - resources: - requests: - memory: "32Mi" - cpu: "25m" - limits: - memory: "64Mi" - cpu: "50m" - containers: - - name: redis - image: redis - ports: - - name: redis - containerPort: 6379 - resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" - - - name: create-deployment-unready - inputs: - parameters: - - name: namespace - resource: - action: apply - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis-unready - namespace: {{inputs.parameters.namespace}} - spec: - replicas: 1 - selector: - matchLabels: - app: redis - template: - metadata: - labels: - app: redis - annotations: 
- ad.datadoghq.com/tolerate-unready: "true" - spec: - containers: - - name: redis-unready - image: redis - ports: - - name: redis - containerPort: 6379 - resources: - requests: - memory: "64Mi" - cpu: "50m" - limits: - memory: "128Mi" - cpu: "100m" - readinessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 1 - periodSeconds: 1 - - - name: delete-service - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: v1 - kind: Service - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - - - name: delete-deployment - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis - namespace: {{inputs.parameters.namespace}} - - - name: delete-deployment-unready - inputs: - parameters: - - name: namespace - resource: - action: delete - manifest: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: redis-unready - namespace: {{inputs.parameters.namespace}} - - - name: create - inputs: - parameters: - - name: namespace - steps: - - - name: service - template: create-service - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment - template: create-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment-unready - template: create-deployment-unready - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: delete - inputs: - parameters: - - name: namespace - steps: - - - name: service - template: delete-service - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment - template: delete-deployment - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: deployment-unready - template: delete-deployment-unready - arguments: - parameters: - - name: namespace 
- value: "{{inputs.parameters.namespace}}" - - - name: find-kube-state-metrics - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - // This step is intended to test end-to-end scraping of prometheus metrics - // by asserting the value of a few simple metrics collected from the - // kubernetes_state integration. - - while (1) { - var nb = db.series.find({ - metric: "kubernetes_state.deployment.replicas_available", - tags: { $all: ["kube_namespace:{{inputs.parameters.namespace}}", "kube_deployment:redis"] }, - "points.0.1": { $eq: 1 } }).count(); - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: find-metrics-redis - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"} - }).count(); - - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: find-metrics-redis-unready - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"}, - tags: {$all: ["kube_deployment:redis-unready", "kube_container_name:redis-unready"]} - }).count(); - - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: find-metrics-redis-tagged - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"}, - 
tags: "kube_service:redis" - }).count(); - print("find: " + nb) - if (nb != 0) { - break; - } - sleep(2000); - } - - - name: test - inputs: - parameters: - - name: namespace - steps: - - - name: find-kube-state-metrics - template: find-kube-state-metrics - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-redis - template: find-metrics-redis - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-redis-unready - template: find-metrics-redis-unready - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - name: find-metrics-redis-tagged - template: find-metrics-redis-tagged - arguments: - parameters: - - name: namespace - value: "{{inputs.parameters.namespace}}" - - - name: no-more-metrics - inputs: - parameters: - - name: namespace - activeDeadlineSeconds: 300 - script: - image: mongo:4.4.1 - command: [mongo, "fake-datadog.{{inputs.parameters.namespace}}.svc.cluster.local/datadog"] - source: | - var prevNb = -1; - while (1) { - var nb = db.series.find({ - metric: {$regex: "redis*"} - }).count(); - - print("prev-find: " + prevNb) - print("find: " + nb) - if (nb == prevNb) { - break; - } - prevNb = nb; - sleep(30000); - } - var prevNb = -1 - while (1) { - var nb = db.check_run.find({check: "datadog.agent.check_status", - tags: "check:redisdb", - status: {$ne: 0}}).count(); - - print("prev-find: " + prevNb) - print("find: " + nb) - if (nb == prevNb) { - break; - } - prevNb = nb; - sleep(30000); - } diff --git a/test/e2e/containers/dsd_sender/Dockerfile b/test/e2e/containers/dsd_sender/Dockerfile deleted file mode 100644 index 1b6a5ae33c311..0000000000000 --- a/test/e2e/containers/dsd_sender/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM datadog/docker-library:python_2_7-alpine3_6 - -RUN pip install datadog - -COPY sender.py /sender.py - -CMD [ "python", "/sender.py" ] diff --git 
a/test/e2e/containers/dsd_sender/Makefile b/test/e2e/containers/dsd_sender/Makefile deleted file mode 100644 index bfdc5e51e0272..0000000000000 --- a/test/e2e/containers/dsd_sender/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -TAG?=latest - -default: build push - -build: - docker build --force-rm -t datadog/docker-library:e2e-dsd-sender_$(TAG) . - -push: - docker push datadog/docker-library:e2e-dsd-sender_$(TAG) diff --git a/test/e2e/containers/dsd_sender/sender.py b/test/e2e/containers/dsd_sender/sender.py deleted file mode 100644 index a589f38a6fa7b..0000000000000 --- a/test/e2e/containers/dsd_sender/sender.py +++ /dev/null @@ -1,23 +0,0 @@ -import time - -import datadog - -client = datadog.dogstatsd.base.DogStatsd(socket_path="/var/run/dogstatsd/dsd.socket") - -while True: - # Nominal case, dsd will inject its hostname - client.gauge('dsd.hostname.e2e', 1, tags=["case:nominal"]) - client.service_check('dsd.hostname.e2e', 0, tags=["case:nominal"]) - client.event('dsd.hostname.e2e', 'text', tags=["case:nominal"]) - - # Force the hostname value - client.gauge('dsd.hostname.e2e', 1, tags=["case:forced", "host:forced"]) - client.service_check('dsd.hostname.e2e', 0, tags=["case:forced"], hostname="forced") - client.event('dsd.hostname.e2e', 'text', tags=["case:forced"], hostname="forced") - - # Force an empty hostname - client.gauge('dsd.hostname.e2e', 1, tags=["case:empty", "host:"]) - client.service_check('dsd.hostname.e2e', 0, tags=["case:empty", "host:"]) - client.event('dsd.hostname.e2e', 'text', tags=["case:empty", "host:"]) - - time.sleep(10) diff --git a/test/e2e/containers/fake_datadog/Dockerfile b/test/e2e/containers/fake_datadog/Dockerfile deleted file mode 100644 index 451b008e217c8..0000000000000 --- a/test/e2e/containers/fake_datadog/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.9-alpine - -COPY app /opt/fake_datadog - -RUN apk update && \ - apk add python3-dev gcc g++ musl-dev libstdc++ && \ - pip install -r /opt/fake_datadog/requirements.txt && \ 
- apk del python3-dev gcc g++ musl-dev && \ - rm -rf /var/cache/apk/* - -VOLUME /opt/fake_datadog/recorded - -ENV prometheus_multiproc_dir "/var/lib/prometheus" - -CMD ["gunicorn", "--bind", "0.0.0.0:80", "--pythonpath", "/opt/fake_datadog", "api:app"] diff --git a/test/e2e/containers/fake_datadog/Makefile b/test/e2e/containers/fake_datadog/Makefile deleted file mode 100644 index 27bcd71329f18..0000000000000 --- a/test/e2e/containers/fake_datadog/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -TAG?=$(shell date '+%Y%m%d') - -.PHONY: venv pip build default push multiarch - -default: pip - -venv: - virtualenv venv -p python3 - -pip: venv - venv/bin/pip install -r app/requirements.txt - -build: - docker build --force-rm -t datadog/fake-datadog:$(TAG) . - -multiarch: - docker buildx build --platform linux/amd64,linux/arm64 -t datadog/fake-datadog:$(TAG) . --push - -push: - docker push datadog/fake-datadog:$(TAG) diff --git a/test/e2e/containers/fake_datadog/README.md b/test/e2e/containers/fake_datadog/README.md deleted file mode 100644 index efaf81bc96624..0000000000000 --- a/test/e2e/containers/fake_datadog/README.md +++ /dev/null @@ -1,240 +0,0 @@ -# fake_datadog - - -Expose the needed API to make the agent submit payloads. - - -#### API - -Prefer using mongo. - -Get all series: -```bash -curl ${SERVICE_IP}/records/series | jq . -``` - -Get all check_run: -```bash -curl ${SERVICE_IP}/records/check_run | jq . -``` - -Get all intake: -```bash -curl ${SERVICE_IP}/records/intake | jq . 
-``` - -#### MongoDB - -Explore: -```bash -docker run --rm -it --net=host mongo mongo ${SERVICE_IP}/datadog -``` -```bash -apt-get install -yqq mongodb-clients && mongo ${SERVICE_IP}/datadog -``` -```bash -> show collections -check_run -intake -series - -``` - -#### Find - -Find a metric: -```text -> db.series.findOne() - -{ - "_id" : ObjectId("5ab3e567cd9a72000912abad"), - "metric" : "datadog.agent.running", - "points" : [ - [ - 1521739111, - 1 - ] - ], - "tags" : null, - "host" : "haf", - "type" : "gauge", - "interval" : 0, - "source_type_name" : "System" -} -``` - -Find a metric by metric name: -```text -db.series.findOne({metric: "kubernetes.network.tx_errors"}) - -{ - "_id" : ObjectId("5ab4cca8c914b50008c10615"), - "metric" : "kubernetes.network.tx_errors", - "points" : [ - [ - 1521798304, - 0 - ] - ], - "tags" : [ - "kube_deployment:workflow-controller", - "kube_namespace:kube-system", - "kube_replica_set:workflow-controller-58bbf49865", - "pod_name:workflow-controller-58bbf49865-55xdz" - ], - "host" : "v1704", - "type" : "gauge", - "interval" : 0, - "source_type_name" : "System" -} -``` - -Advanced find: -```js -db.series.find({ - metric: "kubernetes.cpu.usage.total", - tags: { $all: ["kube_namespace:kube-system", "pod_name:kube-controller-manager"] } -}, {_id: 0}) // .count() -``` - -#### Aggregation pipeline - -Aggregate all tags for a metric: -```js -db.series.aggregate([ - { $match: { metric: "kubernetes.cpu.usage.total"} }, - { $project: {tags: 1} }, - { $unwind: "$tags" }, - { $group: {_id: "allTags", tags: {$addToSet: "$tags" } } } -]) -``` - -Aggregate all tags for a metric regex: -```js -db.series.aggregate([ - { $match: { metric: {$regex: "kubernetes*"} } }, - { $project: {tags: 1} }, - { $unwind: "$tags" }, - { $group: {_id: "allTags", tags: {$addToSet: "$tags" } } } -]) -``` - -Aggregate all tags for each metric matched by a regex: -```js -db.series.aggregate([ - { $match: { metric: {$regex: "kubernetes*"} } }, - { $project: { metric: 1, tags: 1 
} }, - { $unwind: "$tags" }, - { $group: {_id: "$metric", tags: {$addToSet: "$tags" } } } -]) -``` - -Aggregate all metrics from a tag: -```js -db.series.aggregate([ - { $match: { tags: "kube_deployment:fake-app-datadog"} }, - { $group: { _id: "kube_deployment:fake-app-datadog", metrics: { $addToSet: "$metric" } } } -]) -``` - -Aggregate all metrics from tags ($or || $and): -```js -db.series.aggregate([ - { $match: { $or: [ - {tags: "kube_deployment:fake-app-datadog"}, - {tags: "kube_service:fake-app-datadog"} - ] } }, - { $group: { _id: "metricsToTags", metrics: { $addToSet: "$metric" } } } -]) -``` - -Aggregate a metric and a tag as timeseries: -```js -db.series.aggregate([ - { $match: { tags: "kube_deployment:dd", metric: "kubernetes.cpu.usage.total"} }, - { $unwind: "$points" }, - { $project: { - _id: { $arrayElemAt: [ "$points", 0 ] }, - value: { $arrayElemAt: [ "$points", 1 ] }, - tags: "$tags" - } - }, - { $sort: { _id: 1 } } -]) -``` - -Count tag occurrences on a given metric: -```js -db.series.aggregate([ - { $match: { metric: "kubernetes.filesystem.usage", tags: { $all: ["pod_name:fake-app-datadog-7cfb79db4d-dd4jr"] } } }, - { $project: {tags: 1} }, - { $unwind: "$tags" }, - { $group: {_id: "$tags", count: { $sum: 1 } } }, - { $sort: {count: -1} } -]) -``` - -#### Use standalone - -This tool can be used as a debug proxy to inspect agent payloads. Here is how to do it for Kubernetes. - -##### K8S -- run the following from within this folder: - -```console -docker build -t fake-datadog:latest . -docker tag fake-datadog:latest -docker push -# replace in fake-datadog.yaml before running the next command -kubectl apply -f fake-datadog.yaml -``` - -- edit your Datadog Agent Daemonset to use the service deployed above as the Datadog API. Be aware that each agent has its own intake - configuring `DD_DD_URL` doesn't cover the logs agent for example. - -```yaml -... - env: - ... 
- - name: DD_DD_URL - # if you deployed the service & deployment in a separate namespace, add `..svc.cluster.local - value: "http://fake-datadog" -``` - -##### Docker - -1. Create a `agent-docker-compose-extra.yaml` file to override url and V2 series environment variables - -```yaml -services: - agent: # use your agent service name here - environment: - DD_DD_URL: "http://fake-datadog" - DD_USE_V2_API_SERIES: false -``` - -- `agent` is the docker service name used for Datadog Agent. Rename it if you are using another service id. -- `DD_DD_URL` overrides the URL for metric submission -- `DD_USE_V2_API_SERIES` force using v1 APIs - -2. Run `docker compose up` passing datadog agent compose, agent extra compose and fake datadog compose - -```bash -docker compose up -f "${PATH_TO_AGENT_COMPOSE}.yaml" -f "fake-datadog.yaml" -f "agent-docker-compose-extra.yaml" -``` - -3. Query `datadog` on `mongo` service, reachable from host at `localhost:27017` and from another container at `mongo:27017` - -##### VM - -1. Create `fake-datadog` compose - -```bash -docker compose up -f "fake-datadog.yaml" -``` - -2. 
Configure the agent to send requests to `fake-datadog` using `V1` endpoint passing following environment variables - -```txt -DD_DD_URL="http://fake-datadog" -DD_USE_V2_API_SERIES=false -``` diff --git a/test/e2e/containers/fake_datadog/app/api.py b/test/e2e/containers/fake_datadog/app/api.py deleted file mode 100644 index 37445ac6d0ae1..0000000000000 --- a/test/e2e/containers/fake_datadog/app/api.py +++ /dev/null @@ -1,334 +0,0 @@ -import json -import logging -import os -import sys -import zlib -from os import path - -import monitoring -import pymongo -from flask import Flask, Response, jsonify, request - -app = application = Flask("datadoghq") -monitoring.monitor_flask(app) -handler = logging.StreamHandler(sys.stderr) -app.logger.addHandler(handler) -app.logger.setLevel("INFO") - -record_dir = path.join(path.dirname(path.abspath(__file__)), "recorded") - - -def get_collection(name: str): - c = pymongo.MongoClient("127.0.0.1", 27017, connectTimeoutMS=5000) - db = c.get_database("datadog") - return db.get_collection(name) - - -payload_names = [ - "check_run", - "series", - "intake", - "logs", -] - - -def reset_records(): - for elt in payload_names: - to_remove = path.join(record_dir, elt) - if path.isfile(to_remove): - app.logger.warning("rm %s", to_remove) - os.remove(to_remove) - - try: - get_collection(elt).drop() - - except Exception as e: - app.logger.error(e) - - -def record_and_loads(filename: str, content_type: str, content_encoding: str, content: str): - """ - :param filename: - :param content_type: - :param content_encoding: - :param content: - :return: list or dict - """ - if content_type != "application/json": - app.logger.error("Unsupported content-type: %s", content_type) - raise TypeError(content_type) - - if content_encoding == "deflate": - content = zlib.decompress(content) - - content = content.decode() - content = f"{content}\n" if content[-1] != "\n" else content - with open(path.join(record_dir, filename), "a") as f: - f.write(content) - - 
return json.loads(content) - - -def patch_data(data, patch_key, patch_leaf): - if isinstance(data, dict): - return {patch_key(k): patch_data(v, patch_key, patch_leaf) for k, v in iter(data.items())} - elif isinstance(data, list): - return [patch_data(i, patch_key, patch_leaf) for i in data] - else: - return patch_leaf(data) - - -def fix_data(data): - return patch_data( - data, - # Whereas dot (.) and dollar ($) are valid characters inside a JSON dict key, - # they are not allowed as keys in a MongoDB BSON object. - # The official MongoDB documentation suggests to replace them with their - # unicode full width equivalent: - # https://docs.mongodb.com/v2.6/faq/developers/#dollar-sign-operator-escaping - patch_key=lambda x: x.translate(str.maketrans('.$', '\uff0e\uff04')), - # Values that cannot fit in a 64 bits integer must be represented as a float. - patch_leaf=lambda x: float(x) if isinstance(x, int) and x > 2**63 - 1 else x, - ) - - -def insert_series(data: dict): - coll = get_collection("series") - coll.insert_many(data["series"]) - - -def insert_intake(data: dict): - coll = get_collection("intake") - coll.insert_one(data) - - -def insert_check_run(data: list): - coll = get_collection("check_run") - coll.insert_many(data) - - -def insert_logs(data: list): - coll = get_collection("logs") - coll.insert_many(data) - - -def get_series_from_query(q: dict): - app.logger.info("Query is %s", q["query"]) - query = q["query"].replace("avg:", "") - first_open_brace, first_close_brace = query.index("{"), query.index("}") - - metric_name = query[:first_open_brace] - from_ts, to_ts = int(q["from"]), int(q["to"]) - - # tags - all_tags = query[first_open_brace + 1 : first_close_brace] - all_tags = all_tags.split(",") if all_tags else [] - - # group by - # TODO - last_open_brace, last_close_brace = query.rindex("{"), query.rindex("}") - group_by = query[last_open_brace + 1 : last_close_brace].split(",") # noqa: F841 - - match_conditions = [ - {"metric": metric_name}, - 
{"points.0.0": {"$gt": from_ts}}, - {"points.0.0": {"$lt": to_ts}}, - ] - if all_tags: - match_conditions.append({'tags': {"$all": all_tags}}) - - c = get_collection("series") - aggregate = [ - {"$match": {"$and": match_conditions}}, - {"$unwind": "$points"}, - {"$group": {"_id": "$metric", "points": {"$push": "$points"}}}, - {"$sort": {"points.0": 1}}, - ] - app.logger.info("Mongodb aggregate is %s", aggregate) - cur = c.aggregate(aggregate) - points_list = [] - for elt in cur: - for p in elt["points"]: - p[0] *= 1000 - points_list.append(p) - - result = { - "status": "ok", - "res_type": "time_series", - "series": [ - { - "metric": metric_name, - "attributes": {}, - "display_name": metric_name, - "unit": None, - "pointlist": points_list, - "end": points_list[-1][0] if points_list else 0.0, - "interval": 600, - "start": points_list[0][0] if points_list else 0.0, - "length": len(points_list), - "aggr": None, - "scope": "host:vagrant-ubuntu-trusty-64", # TODO - "expression": query, - } - ], - "from_date": from_ts, - "group_by": ["host"], - "to_date": to_ts, - "query": q["query"], - "message": "", - } - return result - - -@app.route("/api/v1/validate", methods=["GET"]) -def validate(): - return Response(status=200) - - -@app.route("/api/v1/query", methods=["GET"]) -def metrics_query(): - """ - Honor a query like documented here: - https://docs.datadoghq.com/api/?lang=bash#query-time-series-points - :return: - """ - if "query" not in request.args or "from" not in request.args or "to" not in request.args: - return Response(status=400) - - return jsonify(get_series_from_query(request.args)) - - -@app.route("/api/v1/series", methods=["POST"]) -def series(): - data = record_and_loads( - filename="series", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_series(data) - return Response(status=200) - - -@app.route("/api/v1/check_run", methods=["POST"]) -def check_run(): - data = 
record_and_loads( - filename="check_run", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_check_run(data) - return Response(status=200) - - -@app.route("/intake/", methods=["POST"]) -def intake(): - data = record_and_loads( - filename="intake", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_intake(data) - return Response(status=200) - - -@app.route("/v1/input/", methods=["POST"]) -def logs(): - data = record_and_loads( - filename="logs", - content_type=request.content_type, - content_encoding=request.content_encoding, - content=request.data, - ) - data = fix_data(data) - insert_logs(data) - return Response(status=200) - - -@app.route("/api/v2/orch", methods=["POST"]) -def orchestrator(): - # TODO - return Response(status=200) - - -@app.before_request -def logging(): - # use only if you need to check headers - # mind where the logs of this container go since headers contain an API key - # app.logger.info( - # "path: %s, method: %s, content-type: %s, content-encoding: %s, content-length: %s, headers: %s", - # request.path, request.method, request.content_type, request.content_encoding, request.content_length, request.headers) - app.logger.info( - "path: %s, method: %s, content-type: %s, content-encoding: %s, content-length: %s", - request.path, - request.method, - request.content_type, - request.content_encoding, - request.content_length, - ) - - -def stat_records(): - j = {} - for elt in payload_names: - try: - p = path.join(record_dir, elt) - st = os.stat(p) - lines = 0 - with open(p) as f: - for _ in f: - lines += 1 - j[elt] = {"size": st.st_size, "lines": lines} - - except FileNotFoundError: - j[elt] = {"size": -1, "lines": -1} - return j - - -@app.route("/_/records") -def available_records(): - return jsonify(stat_records()) - - -@app.route("/_/records/") -def 
get_records(name): - if name not in payload_names: - return Response(status=404) - - if path.isfile(path.join(record_dir, name)) is False: - return Response(status=503) - - payloads = [] - with open(path.join(record_dir, name)) as f: - for line in f: - payloads.append(json.loads(line)) - return json.dumps(payloads), 200 - - -@application.route('/', methods=['GET']) -def api_mapper(): - rules = [k.rule for k in application.url_map.iter_rules()] - rules = list(set(rules)) - rules.sort() - return jsonify(rules) - - -@application.route('/_/reset', methods=['POST']) -def reset(): - reset_records() - return jsonify(stat_records()) - - -@application.errorhandler(404) -def not_found(_): - app.logger.warning("404 %s %s", request.path, request.method) - return Response("404", status=404, mimetype="text/plain") - - -if __name__ == '__main__': - app.run(host="0.0.0.0", debug=True, port=5000) diff --git a/test/e2e/containers/fake_datadog/app/monitoring.py b/test/e2e/containers/fake_datadog/app/monitoring.py deleted file mode 100644 index 15ae0a1a9e3c8..0000000000000 --- a/test/e2e/containers/fake_datadog/app/monitoring.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -import sys -import time - -from flask import Flask, Response, g, request -from prometheus_client import CONTENT_TYPE_LATEST, CollectorRegistry, Counter, Histogram, generate_latest, multiprocess - - -def extract_exception_name(exc_info=None): - """ - Function to get the exception name and module - :param exc_info: - :return: - """ - if not exc_info: - exc_info = sys.exc_info() - return f'{exc_info[0].__module__}.{exc_info[0].__name__}' - - -def monitor_flask(app: Flask): - """ - Add components to monitor each route with prometheus - The monitoring is available at /metrics - :param app: Flask application - :return: - """ - prometheus_state_dir = os.getenv('prometheus_multiproc_dir', "") - if "gunicorn" not in os.getenv("SERVER_SOFTWARE", "") and prometheus_state_dir == "": - return - - if 
os.path.isdir(prometheus_state_dir) is False: - os.mkdir(prometheus_state_dir) - - metrics = CollectorRegistry() - - def collect(): - registry = CollectorRegistry() - multiprocess.MultiProcessCollector(registry) - data = generate_latest(registry) - return Response(data, mimetype=CONTENT_TYPE_LATEST) - - app.add_url_rule('/metrics', 'metrics', collect) - - additional_kwargs = {'registry': metrics} - request_latency = Histogram( - 'requests_duration_seconds', 'Backend API request latency', ['method', 'path'], **additional_kwargs - ) - status_count = Counter( - 'responses_total', 'Backend API response count', ['method', 'path', 'status_code'], **additional_kwargs - ) - exception_latency = Histogram( - 'exceptions_duration_seconds', - 'Backend API top-level exception latency', - ['method', 'path', 'type'], - **additional_kwargs, - ) - - @app.before_request - def start_measure(): - g._start_time = time.time() - - @app.after_request - def count_status(response: Response): - status_count.labels(request.method, request.url_rule, response.status_code).inc() - request_latency.labels(request.method, request.url_rule).observe(time.time() - g._start_time) - return response - - # Override log_exception to increment the exception counter - def log_exception(exc_info): - class_name = extract_exception_name(exc_info) - exception_latency.labels(request.method, request.url_rule, class_name).observe(time.time() - g._start_time) - app.logger.error('Exception on %s [%s]', request.path, request.method, exc_info=exc_info) - - app.log_exception = log_exception diff --git a/test/e2e/containers/fake_datadog/app/requirements.txt b/test/e2e/containers/fake_datadog/app/requirements.txt deleted file mode 100644 index 146792ede7f30..0000000000000 --- a/test/e2e/containers/fake_datadog/app/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -Flask==2.1.2 -gunicorn==20.1.0 -pymongo==4.1.1 -prometheus-client==0.14.1 diff --git a/test/e2e/containers/fake_datadog/docker-compose.yaml 
b/test/e2e/containers/fake_datadog/docker-compose.yaml deleted file mode 100644 index eb4ff70532c8d..0000000000000 --- a/test/e2e/containers/fake_datadog/docker-compose.yaml +++ /dev/null @@ -1,13 +0,0 @@ -version: "3.9" -services: - fake-datadog: - image: "datadog/fake-datadog:20220621" - ports: - - "8080:80" - - "27017:27017" - container_name: fake-datadog - mongo: - image: "mongo:5.0" - container_name: mongo - network_mode: "service:fake-datadog" - diff --git a/test/e2e/containers/fake_datadog/fake-datadog.yaml b/test/e2e/containers/fake_datadog/fake-datadog.yaml deleted file mode 100644 index ceeceda9b3b60..0000000000000 --- a/test/e2e/containers/fake_datadog/fake-datadog.yaml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: fake-datadog -spec: - ports: - - port: 80 - protocol: TCP - targetPort: 80 - name: api - - port: 27017 - protocol: TCP - targetPort: 27017 - name: mongo - selector: - app: fake-datadog - type: ClusterIP - ---- - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: fake-datadog -spec: - replicas: 1 - selector: - matchLabels: - app: fake-datadog - strategy: - type: RollingUpdate - rollingUpdate: - maxSurge: 1 - maxUnavailable: 0 - template: - metadata: - labels: - app: fake-datadog - spec: - containers: - - name: api - image: - imagePullPolicy: Always - - name: mongo - image: mongo:3.6.3 - diff --git a/test/e2e/containers/otlp_sender/Dockerfile b/test/e2e/containers/otlp_sender/Dockerfile deleted file mode 100644 index 5613d30c6c642..0000000000000 --- a/test/e2e/containers/otlp_sender/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM alpine:latest as prep -RUN apk --update add ca-certificates - -FROM scratch -ARG USER_UID=10001 -USER ${USER_UID} -COPY --from=prep /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt -COPY otlpsender / -EXPOSE 4317 55680 55679 -ENTRYPOINT ["/otlpsender"] -CMD ["--config", "/etc/otel/config.yaml"] diff --git a/test/e2e/containers/otlp_sender/Makefile 
b/test/e2e/containers/otlp_sender/Makefile deleted file mode 100644 index 880e85dfca46c..0000000000000 --- a/test/e2e/containers/otlp_sender/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -TAG?=latest - -default: push - -otlpsender: - GOOS=linux GOARCH=amd64 go build -o $@ ./cmd/sender - -docker-build: otlpsender - docker build --force-rm -t datadog/docker-library:e2e-otlp-sender_$(TAG) . - -push: docker-build - docker push datadog/docker-library:e2e-otlp-sender_$(TAG) diff --git a/test/e2e/containers/otlp_sender/cmd/sender/main.go b/test/e2e/containers/otlp_sender/cmd/sender/main.go deleted file mode 100644 index b30813cc7f789..0000000000000 --- a/test/e2e/containers/otlp_sender/cmd/sender/main.go +++ /dev/null @@ -1,79 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2021-present Datadog, Inc. - -// Program otlp_sender sends telemetry data defined in a given file -package main - -import ( - "log" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/debugexporter" - "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/exporter/otlphttpexporter" - "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/otelcol" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" - "go.uber.org/multierr" - - "github.com/DataDog/datadog-agent/tests/e2e/containers/otlp_sender/internal/filereceiver" -) - -func components() ( - otelcol.Factories, - error, -) { - var errs error - - extensions, err := extension.MakeFactoryMap() - errs = multierr.Append(errs, err) - - receivers, err := receiver.MakeFactoryMap( - filereceiver.NewFactory(), - ) - errs = multierr.Append(errs, err) - - exporters, err := exporter.MakeFactoryMap( - otlpexporter.NewFactory(), - 
otlphttpexporter.NewFactory(), - debugexporter.NewFactory(), - ) - errs = multierr.Append(errs, err) - - processors, err := processor.MakeFactoryMap() - errs = multierr.Append(errs, err) - - factories := otelcol.Factories{ - Extensions: extensions, - Receivers: receivers, - Processors: processors, - Exporters: exporters, - } - - return factories, errs -} - -func main() { - factories, err := components() - if err != nil { - log.Fatalf("failed to build components: %v", err) - } - - cmd := otelcol.NewCommand(otelcol.CollectorSettings{ - BuildInfo: component.BuildInfo{ - Command: "otlpsender", - Description: "OpenTelemetry test sender", - Version: "latest", - }, - Factories: func() (otelcol.Factories, error) { - return factories, nil - }, - }) - if err := cmd.Execute(); err != nil { - log.Fatalf("collector server run finished with error: %v", err) - } -} diff --git a/test/e2e/containers/otlp_sender/go.mod b/test/e2e/containers/otlp_sender/go.mod deleted file mode 100644 index 3ae057624be2d..0000000000000 --- a/test/e2e/containers/otlp_sender/go.mod +++ /dev/null @@ -1,109 +0,0 @@ -module github.com/DataDog/datadog-agent/tests/e2e/containers/otlp_sender - -go 1.22.0 - -require ( - go.opentelemetry.io/collector/component v0.104.0 - go.opentelemetry.io/collector/consumer v0.104.0 - go.opentelemetry.io/collector/exporter v0.104.0 - go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 - go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 - go.opentelemetry.io/collector/extension v0.104.0 - go.opentelemetry.io/collector/otelcol v0.104.0 - go.opentelemetry.io/collector/pdata v1.11.0 - go.opentelemetry.io/collector/processor v0.104.0 - go.opentelemetry.io/collector/receiver v0.104.0 - go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.27.0 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // 
indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/go-version v1.7.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect - github.com/knadh/koanf/maps v0.1.1 // indirect - github.com/knadh/koanf/providers/confmap v0.1.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mostynb/go-grpc-compression v1.2.3 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.54.0 // indirect - github.com/prometheus/procfs v0.15.0 // indirect - github.com/rs/cors v1.10.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.5 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect - github.com/yusufpapurcu/wmi 
v1.2.4 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.104.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.104.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.11.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.104.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.104.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.104.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.11.0 // indirect - go.opentelemetry.io/collector/config/configretry v1.11.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect - go.opentelemetry.io/collector/config/configtls v0.104.0 // indirect - go.opentelemetry.io/collector/config/internal v0.104.0 // indirect - go.opentelemetry.io/collector/confmap v0.104.0 // indirect - go.opentelemetry.io/collector/connector v0.104.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.104.0 // indirect - go.opentelemetry.io/collector/featuregate v1.11.0 // indirect - go.opentelemetry.io/collector/semconv v0.104.0 // indirect - go.opentelemetry.io/collector/service v0.104.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel v1.27.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.27.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect - go.opentelemetry.io/otel/trace v1.27.0 // indirect - go.opentelemetry.io/proto/otlp v1.2.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/test/e2e/containers/otlp_sender/go.sum b/test/e2e/containers/otlp_sender/go.sum deleted file mode 100644 index 5f88f0d5eafab..0000000000000 --- a/test/e2e/containers/otlp_sender/go.sum +++ /dev/null @@ -1,344 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 
h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.4 
h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= -github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= -github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= -github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= -github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= -github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= -github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= -github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= -github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= -github.com/rogpeppe/go-internal v1.12.0 
h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.5 h1:gGsArG5K6vmsh5hcFOHaPm87UD003CaDMkAOweSQjhM= -github.com/shirou/gopsutil/v4 v4.24.5/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify 
v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= -github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.104.0 h1:R3zjM4O3K3+ttzsjPV75P80xalxRbwYTURlK0ys7uyo= -go.opentelemetry.io/collector v0.104.0/go.mod h1:Tm6F3na9ajnOm6I5goU9dURKxq1fSBK1yA94nvUix3k= -go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE= -go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw= -go.opentelemetry.io/collector/config/configauth v0.104.0 h1:ULtjugImijpKuLgGVt0E0HwiZT7+uDUEtMquh1ODB24= -go.opentelemetry.io/collector/config/configauth v0.104.0/go.mod h1:Til+nLLrQwwhgmfcGTX4ZRcNuMhdaWhBW1jH9DLTabQ= -go.opentelemetry.io/collector/config/configcompression v1.11.0 h1:oTwbcLh7mWHSDUIZXkRJVdNAMoBGS39XF68goTMOQq8= -go.opentelemetry.io/collector/config/configcompression v1.11.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/configgrpc v0.104.0 h1:E3RtqryQPOm/trJmhlJZj6cCqJNKgv9fOEQvSEpzsFM= -go.opentelemetry.io/collector/config/configgrpc v0.104.0/go.mod h1:tu3ifnJ5pv+4rZcaqNWfvVLjNKb8icSPoClN3THN8PU= 
-go.opentelemetry.io/collector/config/confighttp v0.104.0 h1:KSY0FSHSjuPyrR6iA2g5oFTozYFpYcy0ssJny8gTNTQ= -go.opentelemetry.io/collector/config/confighttp v0.104.0/go.mod h1:YgSXwuMYHANzzv+IBjHXaBMG/4G2mrseIpICHj+LB3U= -go.opentelemetry.io/collector/config/confignet v0.104.0 h1:i7AOTJf4EQox3SEt1YtQFQR+BwXr3v5D9x3Ai9/ovy8= -go.opentelemetry.io/collector/config/confignet v0.104.0/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= -go.opentelemetry.io/collector/config/configopaque v1.11.0 h1:Pt06PXWVmRaiSX63mzwT8Z9SV/hOc6VHNZbfZ10YY4o= -go.opentelemetry.io/collector/config/configopaque v1.11.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configretry v1.11.0 h1:UdEDD0ThxPU7+n2EiKJxVTvDCGygXu9hTfT6LOQv9DY= -go.opentelemetry.io/collector/config/configretry v1.11.0/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E= -go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v0.104.0 h1:bMmLz2+r+REpO7cDOR+srOJHfitqTZfSZCffDpKfwWk= -go.opentelemetry.io/collector/config/configtls v0.104.0/go.mod h1:e33o7TWcKfe4ToLFyGISEPGMgp6ezf3yHRGY4gs9nKk= -go.opentelemetry.io/collector/config/internal v0.104.0 h1:h3OkxTfXWWrHRyPEGMpJb4fH+54puSBuzm6GQbuEZ2o= -go.opentelemetry.io/collector/config/internal v0.104.0/go.mod h1:KjH43jsAUFyZPeTOz7GrPORMQCK13wRMCyQpWk99gMo= -go.opentelemetry.io/collector/confmap v0.104.0 h1:d3yuwX+CHpoyCh0iMv3rqb/vwAekjSm4ZDL6UK1nZSA= -go.opentelemetry.io/collector/confmap v0.104.0/go.mod h1:F8Lue+tPPn2oldXcfqI75PPMJoyzgUsKVtM/uHZLA4w= -go.opentelemetry.io/collector/connector v0.104.0 h1:Y82ytwZZ+EruWafEebO0dgWMH+TdkcSONEqZ5bm9JYA= -go.opentelemetry.io/collector/connector v0.104.0/go.mod h1:78SEHel3B3taFnSBg/syW4OV9aU1Ec9KjgbgHf/L8JA= -go.opentelemetry.io/collector/consumer v0.104.0 
h1:Z1ZjapFp5mUcbkGEL96ljpqLIUMhRgQQpYKkDRtxy+4= -go.opentelemetry.io/collector/consumer v0.104.0/go.mod h1:60zcIb0W9GW0z9uJCv6NmjpSbCfBOeRUyrtEwqK6Hzo= -go.opentelemetry.io/collector/exporter v0.104.0 h1:C2HmnfBa05IQ2T+p9T7K7gXVxjrBLd+JxEtAWo7JNbg= -go.opentelemetry.io/collector/exporter v0.104.0/go.mod h1:Rx0oB0E4Ccg1JuAnEWwhtrq1ygRBkfx4mco1DpR3WaQ= -go.opentelemetry.io/collector/exporter/debugexporter v0.104.0 h1:1Z63H/xxv6IzMP7GPmI6v/lQAqZwYZCVC0rWYcYOomw= -go.opentelemetry.io/collector/exporter/debugexporter v0.104.0/go.mod h1:NHVzTM0Z/bomgR7SAe3ysx4CZzh2UJ3TXWSCnaOB1Wo= -go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0 h1:EFOdhnc2yGhqou0Tud1HsM7fsgWo/H3tdQhYYytDprQ= -go.opentelemetry.io/collector/exporter/otlpexporter v0.104.0/go.mod h1:fAF7Q3Xh0OkxYWUycdrNNDXkyz3nhHIRKDkez0aQ6zg= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0 h1:JkNCOj7DdyJhcYIaRqtS/X+YtAPRjE4pcruyY6LoM7c= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.104.0/go.mod h1:6rs4Xugs7tIC3IFbAC+fj56zLiVc7osXC5UTjk/Mkw4= -go.opentelemetry.io/collector/extension v0.104.0 h1:bftkgFMKya/QIwK+bOxEAPVs/TvTez+s1mlaiUznJkA= -go.opentelemetry.io/collector/extension v0.104.0/go.mod h1:x7K0KyM1JGrtLbafEbRoVp0VpGBHpyx9hu87bsja6S4= -go.opentelemetry.io/collector/extension/auth v0.104.0 h1:SelhccGCrqLThPlkbv6lbAowHsjgOTAWcAPz085IEC4= -go.opentelemetry.io/collector/extension/auth v0.104.0/go.mod h1:s3/C7LTSfa91QK0JPMTRIvH/gCv+a4DGiiNeTAX9OhI= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0 h1:rJ9Sw6DR27s6bW7lWBjJhjth5CXpltAHBKIgUFgVwFs= -go.opentelemetry.io/collector/extension/zpagesextension v0.104.0/go.mod h1:85Exj8r237PIvaXL1a/S0KeVNnm3kQNpVXtu0O2Zk5k= -go.opentelemetry.io/collector/featuregate v1.11.0 h1:Z7puIymKoQRm3oNM/NH8reWc2zRPz2PNaJvuokh0lQY= -go.opentelemetry.io/collector/featuregate v1.11.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/otelcol v0.104.0 h1:RnMx7RaSFmX4dq/l3wbXWwcUnFK7RU19AM/0FbMr0Ig= 
-go.opentelemetry.io/collector/otelcol v0.104.0/go.mod h1:hWFRiHIKT3zbUx6SRevusPRa6mfm+70bPG5CK0glqSU= -go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE= -go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y= -go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA= -go.opentelemetry.io/collector/pdata/testdata v0.104.0 h1:BKTZ7hIyAX5DMPecrXkVB2e86HwWtJyOlXn/5vSVXNw= -go.opentelemetry.io/collector/pdata/testdata v0.104.0/go.mod h1:3SnYKu8gLfxURJMWS/cFEUFs+jEKS6jvfqKXnOZsdkQ= -go.opentelemetry.io/collector/processor v0.104.0 h1:KSvMDu4DWmK1/k2z2rOzMtTvAa00jnTabtPEK9WOSYI= -go.opentelemetry.io/collector/processor v0.104.0/go.mod h1:qU2/xCCYdvVORkN6aq0H/WUWkvo505VGYg2eOwPvaTg= -go.opentelemetry.io/collector/receiver v0.104.0 h1:URL1ExkYYd+qbndm7CdGvI2mxzsv/pNfmwJ+1QSQ9/o= -go.opentelemetry.io/collector/receiver v0.104.0/go.mod h1:+enTCZQLf6dRRANWvykXEzrlRw2JDppXJtoYWd/Dd54= -go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k= -go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/collector/service v0.104.0 h1:DTpkoX4C6qiA3v3cfB2cHv/cH705o5JI9J3P77SFUrE= -go.opentelemetry.io/collector/service v0.104.0/go.mod h1:eq68zgpqRDYaVp60NeRu973J0rA5vZJkezfw/EzxLXc= -go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs= -go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= -go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= -go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E= -go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs= -go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= -go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= -go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= -go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 
-go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod 
h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/test/e2e/containers/otlp_sender/internal/filereceiver/factory.go b/test/e2e/containers/otlp_sender/internal/filereceiver/factory.go deleted file mode 100644 index 29864597b910a..0000000000000 --- a/test/e2e/containers/otlp_sender/internal/filereceiver/factory.go +++ /dev/null @@ -1,157 +0,0 @@ -// Unless 
explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2021-present Datadog, Inc. - -// Package filereceiver implements a receiver that reads OTLP metrics from a given file. -package filereceiver - -import ( - "bufio" - "context" - "errors" - "fmt" - "log" - "os" - "time" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/pdata/pmetric" - collectorreceiver "go.opentelemetry.io/collector/receiver" - - "go.uber.org/zap" -) - -const typeStr = "file" - -// NewFactory creates a new OTLP receiver factory. -func NewFactory() collectorreceiver.Factory { - cfgType, _ := component.NewType(typeStr) - return collectorreceiver.NewFactory( - cfgType, - createDefaultConfig, - collectorreceiver.WithMetrics(createMetricsReceiver, component.StabilityLevelAlpha), - ) -} - -// Config of filereceiver. -type Config struct { - collectorreceiver.Settings `mapstructure:",squash"` - // Path of metrics data. - Path string `mapstructure:"path"` - // LoopConfig is the loop configuration. - Loop LoopConfig `mapstructure:"loop"` -} - -// LoopConfig is the loop configuration. -type LoopConfig struct { - // Enabled states whether the feature is enabled. - Enabled bool `mapstructure:"enabled"` - // Period defines the loop period. - Period time.Duration `mapstructure:"period"` -} - -// Validate configuration of receiver. 
-func (cfg *Config) Validate() error { - if cfg.Path == "" { - return errors.New("path can't be empty") - } - return nil -} - -func createDefaultConfig() component.Config { - cfgType, _ := component.NewType(typeStr) - return &Config{ - Settings: collectorreceiver.Settings{ - ID: component.NewID(cfgType), - }, - Loop: LoopConfig{Enabled: false, Period: 10 * time.Second}, - } -} - -var _ collectorreceiver.Metrics = (*receiver)(nil) - -type receiver struct { - config *Config - logger *zap.Logger - unmarshaler pmetric.Unmarshaler - nextConsumer consumer.Metrics - stopCh chan struct{} -} - -func (r *receiver) Start(_ context.Context, host component.Host) error { - if r.config.Loop.Enabled { - r.logger.Info("Running in a loop") - go r.unmarshalLoop(host) - } else { - r.logger.Info("Running just once") - go r.unmarshalAndSend(host) - } - return nil -} - -func (r *receiver) unmarshalAndSend(_ component.Host) { - file, err := os.Open(r.config.Path) - if err != nil { - log.Fatal(fmt.Errorf("failed to open %q: %w", r.config.Path, err)) - return - } - - r.logger.Info("Sending metrics batch") - scanner := bufio.NewScanner(file) - for scanner.Scan() { - metrics, err := r.unmarshaler.UnmarshalMetrics(scanner.Bytes()) - if err != nil { - log.Fatal(fmt.Errorf("failed to unmarshal %q: %w", r.config.Path, err)) - return - } - - err = r.nextConsumer.ConsumeMetrics(context.Background(), metrics) - if err != nil { - log.Fatal(fmt.Errorf("failed to send %q: %w", r.config.Path, err)) - return - } - } - - if err := scanner.Err(); err != nil { - log.Fatal(fmt.Errorf("failed to scan %q: %w", r.config.Path, err)) - return - } - - if err := file.Close(); err != nil { - log.Fatal(fmt.Errorf("failed to close %q: %w", r.config.Path, err)) - return - } -} - -func (r *receiver) unmarshalLoop(host component.Host) { - for { - r.unmarshalAndSend(host) - select { - case <-time.After(r.config.Loop.Period): - case <-r.stopCh: - return - } - } -} - -func (r *receiver) Shutdown(context.Context) error { - 
close(r.stopCh) - return nil -} - -func createMetricsReceiver( - _ context.Context, - set collectorreceiver.Settings, - cfg component.Config, - consumer consumer.Metrics, -) (collectorreceiver.Metrics, error) { - return &receiver{ - config: cfg.(*Config), - logger: set.Logger, - unmarshaler: &pmetric.JSONUnmarshaler{}, - nextConsumer: consumer, - stopCh: make(chan struct{}), - }, nil -} diff --git a/test/e2e/cws-tests/README.md b/test/e2e/cws-tests/README.md deleted file mode 100644 index 5008231122df1..0000000000000 --- a/test/e2e/cws-tests/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# CSPM e2e tests - -## Docker flavors - -To run docker flavoured tests, local only, please run: - -For CSPM: -```sh -DD_API_KEY= \ -DD_APP_KEY= \ -DD_SITE=datadoghq.com \ -DD_AGENT_IMAGE=datadog/agent-dev:master \ -python3 tests/test_e2e_cspm_docker.py -``` - -Please change `DD_AGENT_IMAGE` to a branch specific tag if you need to test a specific branch. diff --git a/test/e2e/cws-tests/requirements.txt b/test/e2e/cws-tests/requirements.txt deleted file mode 100644 index cc9857c383b50..0000000000000 --- a/test/e2e/cws-tests/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -kubernetes==30.1.0 -datadog-api-client==2.27.0 -pyaml==24.7.0 -docker==7.1.0 -retry==0.9.2 -emoji==2.12.1 -requests==2.32.3 -jsonschema==4.23.0 \ No newline at end of file diff --git a/test/e2e/cws-tests/tests/lib/common/app.py b/test/e2e/cws-tests/tests/lib/common/app.py deleted file mode 100644 index 37a8dd8b5829d..0000000000000 --- a/test/e2e/cws-tests/tests/lib/common/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import time - -from datadog_api_client.v1 import ApiClient, Configuration -from datadog_api_client.v1.api.metrics_api import MetricsApi -from retry.api import retry_call - - -class App: - def __init__(self): - self.v1_api_client = ApiClient(Configuration()) - - def query_metric(self, name, **kw): - api_instance = MetricsApi(self.v1_api_client) - - tags = [] - for key, value in kw.items(): - 
tags.append(f"{key}:{value}") - if len(tags) == 0: - tags.append("*") - - response = api_instance.query_metrics(int(time.time()) - 30, int(time.time()), f"{name}{{{','.join(tags)}}}") - return response - - def wait_for_metric(self, name, tries=30, delay=10, **kw): - def expect_metric(): - metric = self.query_metric(name, **kw) - if len(metric.get("series")) == 0: - raise LookupError(f"no value found in {metric}") - return metric - - return retry_call(expect_metric, tries=tries, delay=delay) diff --git a/test/e2e/cws-tests/tests/lib/config.py b/test/e2e/cws-tests/tests/lib/config.py deleted file mode 100644 index 6051bee608045..0000000000000 --- a/test/e2e/cws-tests/tests/lib/config.py +++ /dev/null @@ -1,45 +0,0 @@ -import tempfile - -import yaml - - -def gen_system_probe_config(npm_enabled=False, rc_enabled=False, log_level="INFO", log_patterns=None): - fp = tempfile.NamedTemporaryFile(prefix="e2e-system-probe-", mode="w", delete=False) - - if not log_patterns: - log_patterns = [] - - data = { - "system_probe_config": {"log_level": log_level}, - "network_config": {"enabled": npm_enabled}, - "runtime_security_config": { - "log_patterns": log_patterns, - "network": {"enabled": True}, - "remote_configuration": {"enabled": rc_enabled, "refresh_interval": "5s"}, - }, - } - - yaml.dump(data, fp) - fp.close() - - return fp.name - - -def gen_datadog_agent_config(hostname="myhost", log_level="INFO", tags=None, rc_enabled=False, rc_key=None): - fp = tempfile.NamedTemporaryFile(prefix="e2e-datadog-agent-", mode="w", delete=False) - - if not tags: - tags = [] - - data = { - "log_level": log_level, - "hostname": hostname, - "tags": tags, - "security_agent.remote_workloadmeta": True, - "remote_configuration": {"enabled": rc_enabled, "refresh_interval": "5s", "key": rc_key}, - } - - yaml.dump(data, fp) - fp.close() - - return fp.name diff --git a/test/e2e/cws-tests/tests/lib/const.py b/test/e2e/cws-tests/tests/lib/const.py deleted file mode 100644 index 
7dc3c90a1d31c..0000000000000 --- a/test/e2e/cws-tests/tests/lib/const.py +++ /dev/null @@ -1,3 +0,0 @@ -SECURITY_START_LOG = "Successfully connected to the runtime security module" -SYS_PROBE_START_LOG = "runtime security started" -SEC_AGENT_PATH = "/opt/datadog-agent/embedded/bin/security-agent" diff --git a/test/e2e/cws-tests/tests/lib/cspm/api.py b/test/e2e/cws-tests/tests/lib/cspm/api.py deleted file mode 100644 index a139c99ab9a3b..0000000000000 --- a/test/e2e/cws-tests/tests/lib/cspm/api.py +++ /dev/null @@ -1,56 +0,0 @@ -import os - -import lib.common.app as common -import requests -from retry.api import retry_call - - -def aggregate_logs(query, track): - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - app_key = os.environ["DD_APP_KEY"] - - url = f"https://api.{site}/api/v2/logs/analytics/aggregate?type={track}" - body = { - "compute": [{"aggregation": "count", "type": "total"}], - "filter": { - "from": "now-3m", - "to": "now", - "query": query, - }, - } - - r = requests.post( - url, - headers={"DD-API-KEY": api_key, "DD-APPLICATION-KEY": app_key}, - json=body, - ) - api_response = r.json() - if not api_response["data"] or not api_response["data"]["buckets"]: - raise LookupError(query) - - count = api_response["data"]["buckets"][0]["computes"]["c0"] - if count == 0: - raise LookupError(query) - - return api_response - - -def fetch_app_findings(query): - return aggregate_logs(query, track="cpfinding") - - -def fetch_app_compliance_event(query): - return aggregate_logs(query, track="compliance") - - -def wait_for_findings(query, tries=30, delay=5): - return retry_call(fetch_app_findings, fargs=[query], tries=tries, delay=delay) - - -def wait_for_compliance_event(query, tries=30, delay=5): - return retry_call(fetch_app_compliance_event, fargs=[query], tries=tries, delay=delay) - - -class App(common.App): - pass diff --git a/test/e2e/cws-tests/tests/lib/cspm/finding.py b/test/e2e/cws-tests/tests/lib/cspm/finding.py deleted file mode 100644 
index e3a7839cc3f52..0000000000000 --- a/test/e2e/cws-tests/tests/lib/cspm/finding.py +++ /dev/null @@ -1,27 +0,0 @@ -import json - - -def extract_findings(lines): - if not lines: - return [] - - res_lines = ["["] - for line in lines: - if line == "}": - res_lines.append("},") - else: - res_lines.append(line) - res_lines.pop() - res_lines.extend(["}", "]"]) - return json.loads("".join(res_lines)) - - -def is_subset(subset, superset): - if isinstance(subset, dict): - return all(key in superset and is_subset(val, superset[key]) for key, val in subset.items()) - - if isinstance(subset, list) or isinstance(subset, set): - return all(any(is_subset(subitem, superitem) for superitem in superset) for subitem in subset) - - # assume that subset is a plain value if none of the above match - return subset == superset diff --git a/test/e2e/cws-tests/tests/lib/docker.py b/test/e2e/cws-tests/tests/lib/docker.py deleted file mode 100644 index 40047900f968e..0000000000000 --- a/test/e2e/cws-tests/tests/lib/docker.py +++ /dev/null @@ -1,154 +0,0 @@ -import os -import tarfile -import tempfile - -import docker -from retry.api import retry_call - -from lib.const import SEC_AGENT_PATH -from lib.log import LogGetter - - -def is_container_running(container): - container.reload() - if container.status != "running": - raise Exception - - -class DockerHelper(LogGetter): - def __init__(self): - self.client = docker.from_env() - - self.agent_container = None - - def start_cspm_agent(self, image, datadog_agent_config=None): - volumes = [ - "/var/run/docker.sock:/var/run/docker.sock:ro", - "/proc/:/host/proc/:ro", - "/sys/fs/cgroup/:/host/sys/fs/cgroup:ro", - "/etc/passwd:/etc/passwd:ro", - "/etc/os-release:/host/etc/os-release:ro", - "/:/host/root:ro", - ] - - if datadog_agent_config: - volumes.append(f"{datadog_agent_config}:/etc/datadog-agent/datadog.yaml") - - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - - self.agent_container = self.client.containers.run( - image, - 
environment=[ - "DD_COMPLIANCE_CONFIG_ENABLED=true", - "HOST_ROOT=/host/root", - f"DD_SITE={site}", - f"DD_API_KEY={api_key}", - ], - volumes=volumes, - detach=True, - ) - - return self.agent_container - - def start_cws_agent(self, image, datadog_agent_config=None, system_probe_config=None): - volumes = [ - "/var/run/docker.sock:/var/run/docker.sock:ro", - "/proc/:/host/proc/:ro", - "/sys/fs/cgroup/:/host/sys/fs/cgroup:ro", - "/etc/passwd:/etc/passwd:ro", - "/etc/group:/etc/group:ro", - "/:/host/root:ro", - "/sys/kernel/debug:/sys/kernel/debug", - "/etc/os-release:/etc/os-release", - ] - - if datadog_agent_config: - volumes.append(f"{datadog_agent_config}:/etc/datadog-agent/datadog.yaml") - - if system_probe_config: - volumes.append(f"{system_probe_config}:/etc/datadog-agent/system-probe.yaml") - - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - - self.agent_container = self.client.containers.run( - image, - cap_add=["SYS_ADMIN", "SYS_RESOURCE", "SYS_PTRACE", "NET_ADMIN", "IPC_LOCK"], - security_opt=["apparmor:unconfined"], - environment=[ - "DD_RUNTIME_SECURITY_CONFIG_ENABLED=true", - "DD_SYSTEM_PROBE_ENABLED=true", - "HOST_ROOT=/host/root", - f"DD_SITE={site}", - f"DD_API_KEY={api_key}", - ], - volumes=volumes, - detach=True, - ) - - return self.agent_container - - def download_policies(self): - command = SEC_AGENT_PATH + " runtime policy download" - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - app_key = os.environ["DD_APP_KEY"] - return self.agent_container.exec_run( - command, - stderr=False, - stdout=True, - stream=False, - environment=[ - f"DD_SITE={site}", - f"DD_API_KEY={api_key}", - f"DD_APP_KEY={app_key}", - ], - ) - - def push_policies(self, policies): - temppolicy = tempfile.NamedTemporaryFile(prefix="e2e-policy-", mode="w", delete=False) - temppolicy.write(policies) - temppolicy.close() - temppolicy_path = temppolicy.name - self.cp_file(temppolicy_path, "/etc/datadog-agent/runtime-security.d/default.policy") - 
os.remove(temppolicy_path) - - def cp_file(self, src, dst): - tar = tarfile.open(src + '.tar', mode='w') - try: - tar.add(src) - finally: - tar.close() - data = open(src + '.tar', 'rb').read() - self.agent_container.put_archive("/tmp", data) - self.agent_container.exec_run("mv /tmp/" + src + " " + dst) - - def reload_policies(self): - self.agent_container.exec_run(SEC_AGENT_PATH + " runtime policy reload") - - def wait_agent_container(self, tries=10, delay=5): - return retry_call(is_container_running, fargs=[self.agent_container], tries=tries, delay=delay) - - def get_log(self, agent_name): - log_prefix = None - if agent_name == "security-agent": - log_prefix = "SECURITY" - elif agent_name == "system-probe": - log_prefix = "SYS-PROBE" - else: - raise LookupError(agent_name) - - log = self.agent_container.logs(since=1).decode("utf-8") - - result = [line for line in log.splitlines() if log_prefix in line] - if result: - return result - raise LookupError(agent_name) - - def close(self): - if self.agent_container: - self.agent_container.stop() - self.agent_container.remove() - - self.client.close() diff --git a/test/e2e/cws-tests/tests/lib/kubernetes.py b/test/e2e/cws-tests/tests/lib/kubernetes.py deleted file mode 100644 index 6d5e4267150e2..0000000000000 --- a/test/e2e/cws-tests/tests/lib/kubernetes.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import tarfile -import tempfile - -from kubernetes import client, config -from kubernetes.stream import stream - -from lib.const import SEC_AGENT_PATH -from lib.log import LogGetter - - -class KubernetesHelper(LogGetter): - def __init__(self, namespace, in_cluster=False): - if in_cluster: - config.load_incluster_config() - else: - config.load_kube_config() - - self.api_client = client.CoreV1Api() - - self.namespace = namespace - self.pod_name = None - - def select_pod_name(self, label_selector): - resp = self.api_client.list_namespaced_pod(namespace=self.namespace, label_selector=label_selector) - for i in resp.items: - 
self.pod_name = i.metadata.name - return - raise LookupError(label_selector) - - def get_log(self, agent_name): - log = self.api_client.read_namespaced_pod_log( - name=self.pod_name, namespace=self.namespace, container=agent_name, follow=False, tail_lines=10000 - ) - - return log.splitlines() - - def exec_command(self, container, command=None): - if not command: - command = [] - - return stream( - self.api_client.connect_post_namespaced_pod_exec, - name=self.pod_name, - namespace=self.namespace, - container=container, - command=command, - stderr=False, - stdin=False, - stdout=True, - tty=False, - ) - - def reload_policies(self): - command = [SEC_AGENT_PATH, 'runtime', 'policy', 'reload'] - self.exec_command("security-agent", command=command) - - def download_policies(self): - site = os.environ["DD_SITE"] - api_key = os.environ["DD_API_KEY"] - app_key = os.environ["DD_APP_KEY"] - command = [ - "/bin/bash", - "-c", - "export DD_SITE=" - + site - + " ; export DD_API_KEY=" - + api_key - + " ; export DD_APP_KEY=" - + app_key - + " ; " - + SEC_AGENT_PATH - + " runtime policy download", - ] - return self.exec_command("security-agent", command=command) - - def push_policies(self, policies): - temppolicy = tempfile.NamedTemporaryFile(prefix="e2e-policy-", mode="w", delete=False) - temppolicy.write(policies) - temppolicy.close() - temppolicy_path = temppolicy.name - self.exec_command("security-agent", command=["mkdir", "-p", "/tmp/runtime-security.d"]) - self.cp_to_agent("security-agent", temppolicy_path, "/tmp/runtime-security.d/downloaded.policy") - os.remove(temppolicy_path) - - def cp_to_agent(self, agent_name, src_file, dst_file): - command = ['tar', 'xvf', '-', '-C', '/tmp'] - resp = stream( - self.api_client.connect_post_namespaced_pod_exec, - name=self.pod_name, - namespace=self.namespace, - container=agent_name, - command=command, - stderr=True, - stdin=True, - stdout=True, - tty=False, - _preload_content=False, - ) - - with tempfile.TemporaryFile() as tar_buffer: - 
with tarfile.open(fileobj=tar_buffer, mode='w') as tar: - tar.add(src_file) - - tar_buffer.seek(0) - commands = [] - commands.append(tar_buffer.read()) - - while resp.is_open(): - resp.update(timeout=1) - if commands: - c = commands.pop(0) - resp.write_stdin(c) - else: - break - resp.close() - - dirname = os.path.dirname(dst_file) - command = ['mkdir', '-p', dirname] - self.exec_command(agent_name, command=command) - - command = ['mv', f'/tmp/{src_file}', dst_file] - self.exec_command(agent_name, command=command) diff --git a/test/e2e/cws-tests/tests/lib/log.py b/test/e2e/cws-tests/tests/lib/log.py deleted file mode 100644 index 5bbf5c1d3349e..0000000000000 --- a/test/e2e/cws-tests/tests/lib/log.py +++ /dev/null @@ -1,21 +0,0 @@ -from abc import ABC, abstractmethod - -from retry.api import retry_call - - -class LogGetter(ABC): - @abstractmethod - def get_log(self, _agent_name): - raise NotImplementedError() - - -def _wait_agent_log(agent_name, log_getter, pattern): - lines = log_getter.get_log(agent_name) - for line in lines: - if pattern in line: - return - raise LookupError(f"{agent_name} | {pattern}") - - -def wait_agent_log(agent_name, log_getter, pattern, tries=10, delay=5): - return retry_call(_wait_agent_log, fargs=[agent_name, log_getter, pattern], tries=tries, delay=delay) diff --git a/test/e2e/cws-tests/tests/lib/stepper.py b/test/e2e/cws-tests/tests/lib/stepper.py deleted file mode 100644 index fbfec4312c060..0000000000000 --- a/test/e2e/cws-tests/tests/lib/stepper.py +++ /dev/null @@ -1,18 +0,0 @@ -import emoji - - -class Step: - def __init__(self, msg="", emoji=""): - self.msg = msg - self.emoji = emoji - - def __enter__(self): - _emoji = emoji.emojize(self.emoji) - print(f"{_emoji} {self.msg}... 
", end="", flush=True) - return self - - def __exit__(self, exc_type, _exc_val, _exc_tb): - if exc_type is None: - print(emoji.emojize(":check_mark:"), flush=True) - else: - print(emoji.emojize(":cross_mark:"), flush=True) diff --git a/test/e2e/cws-tests/tests/test_e2e_cspm.py b/test/e2e/cws-tests/tests/test_e2e_cspm.py deleted file mode 100644 index 57b0ac3f3ae44..0000000000000 --- a/test/e2e/cws-tests/tests/test_e2e_cspm.py +++ /dev/null @@ -1,34 +0,0 @@ -from lib.cspm.finding import is_subset - - -def expect_findings(test_case, findings, expected_findings): - findings_by_rule = {} - for agent_rule_id, rule_findings in findings.items(): - findings_by_rule.setdefault(agent_rule_id, []).extend(rule_findings) - for finding in rule_findings: - print(f"finding {agent_rule_id} {finding}") - - for rule_id, expected_rule_findings in expected_findings.items(): - for expected_rule_finding in expected_rule_findings: - test_case.assertIn(rule_id, findings_by_rule) - found = False - rule_findings = findings_by_rule.get(rule_id, []) - for finding in rule_findings: - if is_subset(expected_rule_finding, finding): - found = True - break - - test_case.assertTrue(found, f"unexpected finding {finding} for rule {rule_id}") - del findings_by_rule[rule_id] - - for rule_id, rule_findings in findings_by_rule.items(): - for finding in rule_findings: - result = finding["result"] - print(f"finding {rule_id} {result}") - - for rule_id, rule_findings in findings_by_rule.items(): - for finding in rule_findings: - result = finding["result"] - test_case.assertNotIn( - result, ("failed", "error"), f"finding for rule {rule_id} not expected to be in failed or error state" - ) diff --git a/test/e2e/cws-tests/tests/test_e2e_cspm_docker.py b/test/e2e/cws-tests/tests/test_e2e_cspm_docker.py deleted file mode 100644 index bbc365807542c..0000000000000 --- a/test/e2e/cws-tests/tests/test_e2e_cspm_docker.py +++ /dev/null @@ -1,150 +0,0 @@ -import json -import os -import socket -import time -import unittest 
-import uuid -import warnings - -from lib.config import gen_datadog_agent_config -from lib.cspm.api import App -from lib.docker import DockerHelper -from lib.stepper import Step -from test_e2e_cspm import expect_findings - - -class TestE2EDocker(unittest.TestCase): - def setUp(self): - warnings.simplefilter("ignore", category=ResourceWarning) - warnings.simplefilter("ignore", category=UserWarning) - - self.docker_helper = DockerHelper() - self.app = App() - - def tearDown(self): - self.docker_helper.close() - - def test_privileged_container(self): - print("") - - test_id = str(uuid.uuid4())[:4] - with Step(msg="create privileged container", emoji=":construction:"): - pc = self.docker_helper.client.containers.run( - "ubuntu:latest", - command="sleep 7200", - detach=True, - remove=True, - privileged=True, - ) - self.container_id = pc.id - - with Step(msg="check agent start", emoji=":man_running:"): - image = os.getenv("DD_AGENT_IMAGE") - hostname = f"host_{test_id}" - self.datadog_agent_config = gen_datadog_agent_config( - hostname=hostname, log_level="DEBUG", tags=["tag1", "tag2"] - ) - - self.container = self.docker_helper.start_cspm_agent( - image, - datadog_agent_config=self.datadog_agent_config, - ) - self.assertIsNotNone(self.container, msg="unable to start container") - - self.docker_helper.wait_agent_container() - - with Step(msg="check agent events", emoji=":check_mark_button:"): - self.container.exec_run("security-agent compliance check --dump-reports /tmp/reports.json --report") - _, output = self.container.exec_run("cat /tmp/reports.json") - print(output) - findings = json.loads(output) - - expected_findings = { - "cis-docker-1.2.0-5.4": [ - { - "agent_rule_id": "cis-docker-1.2.0-5.4", - "agent_framework_id": "cis-docker", - "result": "failed", - "resource_type": "docker_container", - "data": { - "container.id": self.container_id, - }, - } - ], - "cis-docker-1.2.0-1.2.1": [{"result": "failed"}], - "cis-docker-1.2.0-1.2.3": [{"result": "error"}], - 
"cis-docker-1.2.0-1.2.4": [{"result": "error"}], - "cis-docker-1.2.0-1.2.5": [{"result": "error"}], - "cis-docker-1.2.0-1.2.6": [{"result": "error"}], - "cis-docker-1.2.0-1.2.7": [{"result": "error"}], - "cis-docker-1.2.0-1.2.8": [{"result": "error"}], - "cis-docker-1.2.0-1.2.9": [{"result": "error"}], - "cis-docker-1.2.0-1.2.10": [{"result": "error"}], - "cis-docker-1.2.0-1.2.11": [{"result": "error"}], - "cis-docker-1.2.0-1.2.12": [{"result": "error"}], - "cis-docker-1.2.0-2.2": [{"result": "failed"}], - "cis-docker-1.2.0-2.3": [{"result": "failed"}], - "cis-docker-1.2.0-2.4": [{"result": "failed"}], - "cis-docker-1.2.0-2.6": [{"result": "failed"}], - "cis-docker-1.2.0-3.10": [{"result": "error"}], - "cis-docker-1.2.0-3.11": [{"result": "error"}], - "cis-docker-1.2.0-3.12": [{"result": "error"}], - "cis-docker-1.2.0-3.13": [{"result": "error"}], - "cis-docker-1.2.0-3.14": [{"result": "error"}], - "cis-docker-1.2.0-3.15": [{"result": "error"}], - "cis-docker-1.2.0-3.16": [{"result": "error"}], - "cis-docker-1.2.0-3.17": [{"result": "error"}], - "cis-docker-1.2.0-3.18": [{"result": "error"}], - "cis-docker-1.2.0-3.19": [{"result": "error"}], - "cis-docker-1.2.0-3.20": [{"result": "error"}], - "cis-docker-1.2.0-3.21": [{"result": "error"}], - "cis-docker-1.2.0-3.22": [{"result": "error"}], - "cis-docker-1.2.0-3.7": [{"result": "error"}], - "cis-docker-1.2.0-3.8": [{"result": "error"}], - "cis-docker-1.2.0-3.9": [{"result": "error"}], - "cis-docker-1.2.0-4.1": [{"result": "failed"}], - "cis-docker-1.2.0-4.6": [{"result": "failed"}], - "cis-docker-1.2.0-5.1": [{"result": "failed"}], - "cis-docker-1.2.0-5.10": [{"result": "failed"}], - "cis-docker-1.2.0-5.11": [{"result": "failed"}], - "cis-docker-1.2.0-5.12": [{"result": "failed"}], - "cis-docker-1.2.0-5.14": [{"result": "failed"}], - "cis-docker-1.2.0-5.2": [{"result": "error"}], - "cis-docker-1.2.0-5.25": [{"result": "failed"}], - "cis-docker-1.2.0-5.26": [{"result": "failed"}], - "cis-docker-1.2.0-5.28": 
[{"result": "failed"}], - "cis-docker-1.2.0-5.31": [{"result": "failed"}], - "cis-docker-1.2.0-5.7": [{"result": "failed"}], - } - - expect_findings(self, findings, expected_findings) - - with Step(msg="wait for intake (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.running metric", emoji="\N{BEER MUG}"): # fmt: off - self.app.wait_for_metric("datadog.security_agent.compliance.running", host=socket.gethostname()) - - ## Disabled while no CSPM API is available - # with Step(msg="check app compliance event", emoji=":SOON_arrow:"): - # wait_for_compliance_event(f"resource_id:*{self.container_id}") - - with Step(msg="wait for finding generation (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.containers_running metric", emoji="\N{BEER MUG}"): # fmt: off - self.app.wait_for_metric( - "datadog.security_agent.compliance.containers_running", container_id=self.container_id - ) - - ## Disabled while no CSPM API is available - # with Step(msg="check app finding", emoji=":chart_increasing_with_yen:"): - # wait_for_findings(f"@resource_type:docker_container @container_id:{self.container_id}") - - -def main(): - unittest.main() - - -if __name__ == "__main__": - main() diff --git a/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py b/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py deleted file mode 100644 index ef0871c049f30..0000000000000 --- a/test/e2e/cws-tests/tests/test_e2e_cspm_kubernetes.py +++ /dev/null @@ -1,224 +0,0 @@ -import argparse -import sys -import time -import unittest -import warnings - -import emoji -from lib.cspm.api import App -from lib.kubernetes import KubernetesHelper -from lib.stepper import Step -from test_e2e_cspm import expect_findings - - -class TestE2EKubernetes(unittest.TestCase): - namespace = "default" - in_cluster = False - expectedFindingsMasterEtcdNode = { - "cis-kubernetes-1.5.1-1.1.12": [ - { - "result": 
"failed", - } - ], - "cis-kubernetes-1.5.1-1.2.16": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.19": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.21": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.22": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.23": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.24": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.25": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.26": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.33": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.2.6": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.3.2": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.3.3": [ - { - "result": "passed", - } - ], - "cis-kubernetes-1.5.1-1.3.4": [ - { - "result": "passed", - } - ], - "cis-kubernetes-1.5.1-1.3.5": [ - { - "result": "passed", - } - ], - "cis-kubernetes-1.5.1-1.3.6": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-1.3.7": [ - { - "result": "passed", - } - ], - "cis-kubernetes-1.5.1-1.4.1": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-3.2.1": [ - { - "result": "failed", - } - ], - } - expectedFindingsWorkerNode = { - "cis-kubernetes-1.5.1-4.2.1": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.3": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.4": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.5": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.6": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.10": [ - { - "result": "failed", - } - ], - "cis-kubernetes-1.5.1-4.2.12": [ - { - "result": "failed", - } - ], - } - hostname = "k8s-e2e-tests-control-plane" - - def setUp(self): - warnings.simplefilter("ignore", category=ResourceWarning) - warnings.simplefilter("ignore", category=UserWarning) - 
warnings.simplefilter("ignore", category=DeprecationWarning) - - self.kubernetes_helper = KubernetesHelper(namespace=self.namespace, in_cluster=self.in_cluster) - self.resource_id = "k8s-e2e-tests-control-plane_kubernetes_*_node" - self.app = App() - - def test_k8s(self): - print("") - - agent_name = "security-agent" - - with Step(msg="select pod", emoji=":man_running:"): - self.kubernetes_helper.select_pod_name("app.kubernetes.io/component=agent") - - with Step(msg="check agent events", emoji=":check_mark_button:"): - self.kubernetes_helper.exec_command( - agent_name, ["security-agent", "compliance", "check", "--dump-reports", "/tmp/reports", "--report"] - ) - output = self.kubernetes_helper.exec_command(agent_name, ["bash", "-c", "cat /tmp/reports"]) - print(output) - # if the output is JSON, it automatically calls json.loads on it. Yeah, I know... I've felt the same too - findings = eval(output) - expected_findings = dict( - **TestE2EKubernetes.expectedFindingsMasterEtcdNode, **TestE2EKubernetes.expectedFindingsWorkerNode - ) - expect_findings(self, findings, expected_findings) - - with Step(msg="wait for intake (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.running metric", emoji="\N{beer mug}"): # fmt: off - self.app.wait_for_metric("datadog.security_agent.compliance.running", host=TestE2EKubernetes.hostname) - - ## Disabled while no CSPM API is available - # with Step(msg="check app compliance event", emoji=":SOON_arrow:"): - # wait_for_compliance_event(f"resource_id:{self.resource_id}") - - with Step(msg="wait for finding generation (~1m)", emoji=":alarm_clock:"): - time.sleep(1 * 60) - - with Step(msg="wait for datadog.security_agent.compliance.containers_running metric", emoji="\N{beer mug}"): # fmt: off - self.app.wait_for_metric( - "datadog.security_agent.compliance.containers_running", host=TestE2EKubernetes.hostname - ) - - ## Disabled while no CSPM API is available - # with 
Step(msg="check app findings", emoji=":chart_increasing_with_yen:"): - # wait_for_findings(f"@resource_type:kubernetes_*_node @resource:{self.resource_id}") - - print(emoji.emojize(":heart_on_fire:"), flush=True) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--namespace", default="default") - parser.add_argument("--in-cluster", action="store_true") - parser.add_argument("unittest_args", nargs="*") - args = parser.parse_args() - - # setup some specific tests - TestE2EKubernetes.namespace = args.namespace - TestE2EKubernetes.in_cluster = args.in_cluster - - unit_argv = [sys.argv[0]] + args.unittest_args - unittest.main(argv=unit_argv) - - -if __name__ == "__main__": - main() diff --git a/test/e2e/docs/run-instance.svg b/test/e2e/docs/run-instance.svg deleted file mode 100644 index 082cdb69d0b99..0000000000000 --- a/test/e2e/docs/run-instance.svg +++ /dev/null @@ -1,220 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
-
-
-
- - - - -
- -
-
-
-
- - - - -
- -
-
-
-
- - - - -
- -
-
-
-
- - - - -
- -
-
-
-
- - - - -
- wait completion -
-
-
-
- - - - -
- -
-
-
-
-
- - - - - - -
Run Instance
-
-
-
-
- - - - - -
kind create cluster
-
-
-
-
- - - - - -
kind cluster ready
-
-
-
-
- - - - - -
argo download
-
-
-
-
- - - - - -
argo setup
-
-
-
-
- - - - - -
argo submit
-
-
-
-
- - - - - -
argo get results
-
-
-
-
- - - - - -
exit with code
-
-
-
-
-
-
-
-
\ No newline at end of file diff --git a/test/e2e/docs/sequence.md b/test/e2e/docs/sequence.md deleted file mode 100644 index 7cbab0d4037e4..0000000000000 --- a/test/e2e/docs/sequence.md +++ /dev/null @@ -1,41 +0,0 @@ -# Generate sequence - -## Update process - -1. Copy paste the content of each sequence in the [online tool](https://github.com/mermaidjs/mermaid-live-editor). -2. Download the image generated -3. move it to replace the old one - -### Online data - -[setup-instance](../scripts/setup-instance): - -```text -graph TD -A{setup-instance} -->B(AWS specification) -B --> C[ignition] -C --> D(sshAuthorizedKeys) -D -->B -B --> E[ec2] -E --> F(request-spot-instances) -F --> G(describe-spot-instance-requests) -G -->|Instance created| H(create-tags) -H -->|instance and spot requests| I(describe-instances) -I -->|Get PrivateIpAddress| J(cancel-spot-instance-requests) -J --> K[ssh] -K --> L(git clone and checkout) -L --> M{run-instance} -``` - - -[run-instance](../scripts/run-instance) -```text -graph TD -A{Run Instance} -->B[kind create cluster] -B --> C[kind cluster ready] -C --> D[argo download] -D --> E[argo setup] -E --> F[argo submit] -F -->|wait completion| G[argo get results] -G --> H{exit with code} -``` diff --git a/test/e2e/docs/setup-instance.svg b/test/e2e/docs/setup-instance.svg deleted file mode 100644 index cc69bf8b9d108..0000000000000 --- a/test/e2e/docs/setup-instance.svg +++ /dev/null @@ -1,350 +0,0 @@ -
Instance created
instance and spot requests
Get PrivateIpAddress
setup-instance
AWS specification
ignition
sshAuthorizedKeys
ec2
request-spot-instances
describe-spot-instance-requests
create-tags
describe-instances
cancel-spot-instance-requests
ssh
git clone and checkout
run-instance
\ No newline at end of file diff --git a/test/e2e/scripts/generate-parameters.sh b/test/e2e/scripts/generate-parameters.sh deleted file mode 100755 index 5dffa47feabbf..0000000000000 --- a/test/e2e/scripts/generate-parameters.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/bin/bash - -##### A script to generate a unique namespace ##### -##### and a parameters file for a workflow ##### - - -##### Exit on error ##### -set -e - -##### Source utility functions ##### -source utils.sh - -##### Functions ##### - -usage() -{ - echo 'Usage: ./generate-parameters.sh [[-w workflow -g workflow_group] | [-h]] -Example: ./generate-parameters.sh -g workflow_group -w workflow -Flags: --w, --workflow workflow name --g, --workflow-group workflow group name --o, --output-file generated yaml file name (default parameters.yaml) --d, --workflows-dir the directory where workflows are defined (default ../argo-workflows)' -} - -validate_input() -{ - # Validate workflow name characters - if ! [[ $WORKFLOW =~ ^[0-9a-zA-Z-]+$ ]]; then - echo "Error: Invalid workflow name format: $WORKFLOW" - exit 1 - fi - - # Validate workflow group name characters - if ! [[ $WORKFLOW_GROUP =~ ^[0-9a-zA-Z._-]+$ ]]; then - echo "Error: Invalid workflow group name format: $WORKFLOW_GROUP" - exit 1 - fi -} - -# Usage: generate_parameters -generate_parameters() -{ - # Merging parameters - echo 'Info: Merging parameters...' - YK_MERGE_COMMAND='yq merge --overwrite --allow-empty' - DEFAULT_GLOBAL_PARAM="$WORKFLOWS_DIR/defaults/parameters.yaml" - DEFAULT_GROUP_PARAM="$WORKFLOWS_DIR/$WORKFLOW_GROUP/defaults/parameters.yaml" - WORKFLOW_PARAM="$WORKFLOWS_DIR/$WORKFLOW_GROUP/$WORKFLOW/parameters.yaml" - TMP_YAML_PATH="$1.tmp.yaml" - $YK_MERGE_COMMAND "$DEFAULT_GLOBAL_PARAM" "$DEFAULT_GROUP_PARAM" "$WORKFLOW_PARAM" > "$TMP_YAML_PATH" - - # Rendering namespace - echo 'Info: Parameters merged, rendering namespace and saving file...' 
- NAMESPACE_TEMPLATE_VAR="{{ namespace }}" - sed -e "s/$NAMESPACE_TEMPLATE_VAR/$1/g" "$TMP_YAML_PATH" > "$OUTPUT_YAML_FILE" - echo "Info: Generated parameters, yaml file saved: $OUTPUT_YAML_FILE" - - # Cleanup temp file - rm "$TMP_YAML_PATH" -} - - -##### Main ##### - -WORKFLOW="" -WORKFLOW_GROUP="" -NAMESPACE="" -OUTPUT_YAML_FILE="parameters.yaml" -WORKFLOWS_DIR="../argo-workflows" - -if [ "$1" == "" ]; then - usage - exit 1 -fi - -while [ "$1" != "" ]; do - case $1 in - -w | --workflow ) shift - WORKFLOW=$1 - ;; - -g | --workflow-group ) shift - WORKFLOW_GROUP=$1 - ;; - -o | --output-file ) shift - OUTPUT_YAML_FILE=$1 - ;; - -d | --workflows-dir ) shift - WORKFLOWS_DIR=$1 - ;; - -h | --help ) usage - exit - ;; - * ) usage - exit 1 - esac - shift -done - -# Only proceed when `yq` is installed -check_yq_installed - -# Validate the parameters -validate_input - -# Generate namespace -generate_namespace "$WORKFLOW_GROUP" "$WORKFLOW" - -# Generate the parameters file -generate_parameters "$NAMESPACE" diff --git a/test/e2e/scripts/run-instance/.gitignore b/test/e2e/scripts/run-instance/.gitignore deleted file mode 100644 index 9e2dbce48e383..0000000000000 --- a/test/e2e/scripts/run-instance/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -argo -argo.gz diff --git a/test/e2e/scripts/run-instance/10-setup-kind.sh b/test/e2e/scripts/run-instance/10-setup-kind.sh deleted file mode 100755 index bcb879510b035..0000000000000 --- a/test/e2e/scripts/run-instance/10-setup-kind.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -arch="" -case $(uname -m) in - x86_64) arch="amd64" ;; - aarch64) arch="arm64" ;; - *) - echo "Unsupported architecture" - exit 1 - ;; -esac - -download_and_install_kubectl() { - curl --retry 5 --fail --retry-all-errors -LO "https://dl.k8s.io/release/$(curl --retry 5 --fail --retry-all-errors -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$arch/kubectl" - sudo install kubectl /usr/local/bin/kubectl -} - -printf '=%.0s' {0..79} ; 
echo - -if [[ $(uname) == "Darwin" ]] -then - echo "Kind setup should not be run on Darwin" - exit 1 -fi - - -# if kubctl is not here, download it -if [[ ! -f ./kubectl ]]; then - download_and_install_kubectl -else - # else, download the SHA256 of the wanted version - curl --retry 5 --fail --retry-all-errors -LO "https://dl.k8s.io/release/$(curl --retry 5 --fail --retry-all-errors -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$arch/kubectl.sha256" - # And if it differs, force the download again - if ! echo "$(/dev/null 2>&1; then - # skip the usermod step if needless - sudo usermod -a -G docker core -fi - -echo "Kind setup finished" diff --git a/test/e2e/scripts/run-instance/11-setup-kind-cluster.sh b/test/e2e/scripts/run-instance/11-setup-kind-cluster.sh deleted file mode 100755 index 812a37c58943b..0000000000000 --- a/test/e2e/scripts/run-instance/11-setup-kind-cluster.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -is_cluster_running=$(kind get clusters|grep k8s-e2e-tests||echo none) -if [ "$is_cluster_running" == "k8s-e2e-tests" ]; then - echo "Cleanup: deleting cluster k8s-e2e-tests" - kind delete cluster --name k8s-e2e-tests -fi - -echo "Setup kind cluster: k8s-e2e-tests" -SCRIPT_DIR=$(dirname "$(readlink -f "$0")") -kind create cluster --name k8s-e2e-tests --wait 10m --config "$SCRIPT_DIR/kind-cluster.yaml" diff --git a/test/e2e/scripts/run-instance/20-argo-download.sh b/test/e2e/scripts/run-instance/20-argo-download.sh deleted file mode 100755 index a29702de2fd27..0000000000000 --- a/test/e2e/scripts/run-instance/20-argo-download.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" - -set -e - -arch="" -case $(uname -m) in - x86_64) arch="amd64" ;; - aarch64) arch="arm64" ;; - *) - echo "Unsupported architecture" - exit 1 - ;; -esac - -# if argo is not here, or if the SHA doesnt match, (re)download it -if [[ ! 
-f ./argo.gz ]] || ! sha256sum -c "argo.$arch.sha256sum" ; then - curl -Lf "https://github.com/argoproj/argo-workflows/releases/download/v3.4.3/argo-linux-$arch.gz" -o argo.gz - # before gunziping it, check its SHA - if ! sha256sum -c "argo.$arch.sha256sum"; then - echo "SHA256 of argo.gz differs, exiting." - exit 1 - fi -fi -if [[ ! -f ./argo. ]]; then - gunzip -kf argo.gz -fi -chmod +x ./argo -./argo version diff --git a/test/e2e/scripts/run-instance/21-argo-setup.sh b/test/e2e/scripts/run-instance/21-argo-setup.sh deleted file mode 100755 index 1dae3970954a4..0000000000000 --- a/test/e2e/scripts/run-instance/21-argo-setup.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" - -for i in {0..60} -do - kubectl get hpa,svc,ep,ds,deploy,job,po --all-namespaces -o wide && break - sleep 5 -done - -set -e - -kubectl create namespace argo -kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v3.4.3/install.yaml - -# TODO use a more restrictive SA -kubectl apply -f - << EOF -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: argo-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: ServiceAccount - name: default - namespace: default -EOF - -set +e - -for i in {0..60} -do - ./argo list && exit 0 - kubectl get hpa,svc,ep,ds,deploy,job,po --all-namespaces -o wide - sleep 5 -done - -exit 1 diff --git a/test/e2e/scripts/run-instance/22-argo-submit.sh b/test/e2e/scripts/run-instance/22-argo-submit.sh deleted file mode 100755 index 9c59119372ee5..0000000000000 --- a/test/e2e/scripts/run-instance/22-argo-submit.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -# ${DATADOG_AGENT_IMAGE} and ${DATADOG_CLUSTER_AGENT_IMAGE} are provided by the CI -if [[ -z ${DATADOG_AGENT_IMAGE:+x} ]] || [[ -z ${DATADOG_CLUSTER_AGENT_IMAGE:+x} ]]; then 
- echo "DATADOG_AGENT_IMAGE and DATADOG_CLUSTER_AGENT_IMAGE environment variables need to be set" >&2 - exit 2 -fi - -ARGO_WORKFLOW=${ARGO_WORKFLOW:-''} - -echo "DATADOG_AGENT_IMAGE=${DATADOG_AGENT_IMAGE}" -echo "DATADOG_CLUSTER_AGENT_IMAGE=${DATADOG_CLUSTER_AGENT_IMAGE}" -echo "ARGO_WORKFLOW=${ARGO_WORKFLOW}" - -cd "$(dirname "$0")" - -if [[ -n ${DOCKER_REGISTRY_URL+x} ]] && [[ -n ${DOCKER_REGISTRY_LOGIN+x} ]] && [[ -n ${DOCKER_REGISTRY_PWD+x} ]]; then - oldstate=$(shopt -po xtrace ||:); set +x # Do not log credentials - kubectl create secret docker-registry docker-registry --docker-server="$DOCKER_REGISTRY_URL" --docker-username="$DOCKER_REGISTRY_LOGIN" --docker-password="$DOCKER_REGISTRY_PWD" - eval "$oldstate" -fi - -argo_submit_cws_cspm() { - DATADOG_AGENT_SITE=${DATADOG_AGENT_SITE:-""} - - oldstate=$(shopt -po xtrace ||:); set +x # Do not log credentials - - if [[ -z ${DATADOG_AGENT_API_KEY:+x} ]] || [[ -z ${DATADOG_AGENT_APP_KEY:+x} ]]; then - echo "DATADOG_AGENT_API_KEY, DATADOG_AGENT_APP_KEY environment variables need to be set" >&2 - exit 2 - fi - - kubectl create secret generic dd-keys \ - --from-literal=DD_API_KEY="${DATADOG_AGENT_API_KEY}" \ - --from-literal=DD_APP_KEY="${DATADOG_AGENT_APP_KEY}" \ - --from-literal=DD_DDDEV_API_KEY="${DD_API_KEY}" - - eval "$oldstate" - - ./argo template create ../../argo-workflows/templates/*.yaml - ./argo submit ../../argo-workflows/$1 --wait \ - --parameter datadog-agent-image-repository="${DATADOG_AGENT_IMAGE%:*}" \ - --parameter datadog-agent-image-tag="${DATADOG_AGENT_IMAGE#*:}" \ - --parameter datadog-cluster-agent-image-repository="${DATADOG_CLUSTER_AGENT_IMAGE%:*}" \ - --parameter datadog-cluster-agent-image-tag="${DATADOG_CLUSTER_AGENT_IMAGE#*:}" \ - --parameter datadog-agent-site="${DATADOG_AGENT_SITE#*:}" \ - --parameter ci_commit_short_sha="${CI_COMMIT_SHORT_SHA:-unknown}" \ - --parameter ci_pipeline_id="${CI_PIPELINE_ID:-unknown}" \ - --parameter ci_job_id="${CI_JOB_ID:-unknown}" || : -} - -case 
"$ARGO_WORKFLOW" in - "cspm") - argo_submit_cws_cspm cspm-workflow.yaml - ;; - *) - kubectl create secret generic dd-keys \ - --from-literal=DD_API_KEY=123er \ - --from-literal=DD_APP_KEY=123er1 \ - --from-literal=DD_DDDEV_API_KEY="${DD_API_KEY}" - - ./argo template create ../../argo-workflows/templates/*.yaml - ./argo submit "../../argo-workflows/${ARGO_WORKFLOW}-workflow.yaml" --wait \ - --parameter datadog-agent-image-repository="${DATADOG_AGENT_IMAGE%:*}" \ - --parameter datadog-agent-image-tag="${DATADOG_AGENT_IMAGE#*:}" \ - --parameter datadog-cluster-agent-image-repository="${DATADOG_CLUSTER_AGENT_IMAGE%:*}" \ - --parameter datadog-cluster-agent-image-tag="${DATADOG_CLUSTER_AGENT_IMAGE#*:}" \ - --parameter ci_commit_short_sha="${CI_COMMIT_SHORT_SHA:-unknown}" \ - --parameter ci_pipeline_id="${CI_PIPELINE_ID:-unknown}" \ - --parameter ci_job_id="${CI_JOB_ID:-unknown}" || : - ;; -esac - -# we are waiting for the end of the workflow but we don't care about its return code -exit 0 diff --git a/test/e2e/scripts/run-instance/23-argo-get.sh b/test/e2e/scripts/run-instance/23-argo-get.sh deleted file mode 100755 index 1c74939c56a94..0000000000000 --- a/test/e2e/scripts/run-instance/23-argo-get.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -ARGO_WORKFLOW=${ARGO_WORKFLOW:-''} - -cd "$(dirname "$0")" - -# Wait for any Running workflow -until [[ "$(./argo list --running -o name)" == "No workflows found" ]]; do - sleep 10 -done - -if [[ "$(./argo list -o name)" == "No workflows found" ]]; then - echo "No workflow found" - exit 1 -fi - -if ! 
locale -k LC_CTYPE | grep -qi 'charmap="utf-\+8"'; then - no_utf8_opt='--no-utf8' -fi - -for workflow in $(./argo list --status Succeeded -o name | grep -v 'No workflows found'); do - # CSPM always gets logs - if [ "$ARGO_WORKFLOW" = "cspm" ]; then - ./argo logs "$workflow" - fi - - ./argo get ${no_utf8_opt:-} "$workflow" -done - -EXIT_CODE=0 -for workflow in $(./argo list --status Failed -o name | grep -v 'No workflows found'); do - ./argo logs "$workflow" - ./argo get ${no_utf8_opt:-} "$workflow" - EXIT_CODE=2 -done - -# Make the Argo UI available from the user -kubectl --namespace argo patch service/argo-server --type json --patch $'[{"op": "replace", "path": "/spec/type", "value": "NodePort"}, {"op": "replace", "path": "/spec/ports", "value": [{"port": 2746, "nodePort": 30001, "targetPort": 2746}]}]' - -# In case of failure, let's keep the VM for 1 day instead of 2 hours for investigation -if [[ $EXIT_CODE != 0 ]]; then - sudo sed -i 's/^OnBootSec=.*/OnBootSec=86400/' /etc/systemd/system/terminate.timer - sudo systemctl daemon-reload - sudo systemctl restart terminate.timer -fi - -TIME_LEFT=$(systemctl status terminate.timer | awk '$1 == "Trigger:" {print gensub(/ *Trigger: (.*)/, "\\1", 1)}') -LOCAL_IP=$(curl -s http://169.254.169.254/2020-10-27/meta-data/local-ipv4) -BEGIN_TS=$(./argo list -o json | jq -r '.[] | .metadata.creationTimestamp' | while read -r ts; do date -d "$ts" +%s; done | sort -n | head -n 1) - -printf "\033[1mThe Argo UI will remain available at \033[1;34mhttps://%s\033[0m until \033[1;33m%s\033[0m.\n" "$LOCAL_IP" "$TIME_LEFT" -printf "\033[1mAll the logs of this job can be found at \033[1;34mhttps://dddev.datadoghq.com/logs?query=app%%3Aagent-e2e-tests%%20ci_commit_short_sha%%3A%s%%20ci_pipeline_id%%3A%s%%20ci_job_id%%3A%s&index=dd-agent-ci-e2e&from_ts=%d000&to_ts=%d000&live=false\033[0m.\n" "${CI_COMMIT_SHORT_SHA:-unknown}" "${CI_PIPELINE_ID:-unknown}" "${CI_JOB_ID:-unknown}" "$BEGIN_TS" "$(date +%s)" - -exit ${EXIT_CODE} diff --git 
a/test/e2e/scripts/run-instance/24-argo-to-ci-setup.sh b/test/e2e/scripts/run-instance/24-argo-to-ci-setup.sh deleted file mode 100755 index a971c9005a4ce..0000000000000 --- a/test/e2e/scripts/run-instance/24-argo-to-ci-setup.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -euo pipefail - -cd "$(dirname "$0")" - -docker build -t argo-to-junit-helper:local ./argo-to-junit \ No newline at end of file diff --git a/test/e2e/scripts/run-instance/25-argo-to-ci.sh b/test/e2e/scripts/run-instance/25-argo-to-ci.sh deleted file mode 100755 index 84512ab6a388a..0000000000000 --- a/test/e2e/scripts/run-instance/25-argo-to-ci.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -euo pipefail - -cd "$(dirname "$0")" - -if ! locale -k LC_CTYPE | grep -qi 'charmap="utf-\+8"'; then - no_utf8_opt='--no-utf8' -fi - -mkdir data - -for workflow in $(./argo list -o name | grep -v 'No workflows found'); do - JSON_CRD_FILE=data/$workflow.json - JUNIT_XML_FILE=data/$workflow-junit.xml - ./argo get ${no_utf8_opt:-} "$workflow" --output json > $JSON_CRD_FILE - docker run -v $PWD/data:/data:z argo-to-junit-helper:local /$JSON_CRD_FILE /$JUNIT_XML_FILE - DATADOG_API_KEY=$DD_API_KEY datadog-ci junit upload --service agent-e2e-tests $JUNIT_XML_FILE -done diff --git a/test/e2e/scripts/run-instance/argo-to-junit/Dockerfile b/test/e2e/scripts/run-instance/argo-to-junit/Dockerfile deleted file mode 100644 index 7f380f2a55eee..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM gcr.io/google-containers/python:3.5.1-alpine - -COPY requirements.txt argo_to_junit.py entrypoint.sh / -RUN pip3 install -r requirements.txt - -ENTRYPOINT ["/entrypoint.sh"] diff --git a/test/e2e/scripts/run-instance/argo-to-junit/argo_to_junit.py b/test/e2e/scripts/run-instance/argo-to-junit/argo_to_junit.py deleted file mode 100755 index 7e1d75c94517b..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/argo_to_junit.py +++ /dev/null @@ -1,67 +0,0 @@ 
-#!/usr/bin/env python3 - -import json -from argparse import ArgumentParser -from datetime import datetime - -from junit_xml import TestCase, TestSuite - - -def _str_to_datetime(date_str): - return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ') - - -def _generate_test_suites(root_name, argo_nodes): - """ - Groups argo nodes by parents, generate the test cases - and yields the corresponding test suites. - """ - for node_id, node_status in argo_nodes.items(): - if node_status.get("type") in ["StepGroup", "DAG"]: - test_cases = [] - tc = TestCase(node_status.get("displayName", node_id)) - children = node_status.get("children", []) - for child_id in children: - child_status = argo_nodes.get(child_id, None) - if not child_status or child_status.get("type") != "Pod": - continue - children.extend(child_status.get("children", [])) - end = _str_to_datetime(child_status.get("finishedAt")) - start = _str_to_datetime(child_status.get("startedAt")) - job_duration = (end - start).total_seconds() - tc = TestCase(child_status.get("displayName"), elapsed_sec=job_duration) - if child_status.get("phase") == "Failed": - tc.add_failure_info(child_status.get("message")) - test_cases.append(tc) - if len(test_cases) == 0: - continue - parent_name = argo_nodes.get(node_status.get("boundaryID")).get("displayName") - # Some steps are tied directly to the root workflow (i.e the parent is argo-datadog-agent-*) - # Thus, we use a deterministic format to generate the test suite name in that case. 
- ts_name = parent_name if parent_name != root_name else "root" + "/" + node_status.get("displayName") - yield TestSuite(ts_name, test_cases) - - -def main(): - parser = ArgumentParser() - parser.add_argument("-i", "--input-file", help="File containing the Argo CRD in JSON", required=True) - parser.add_argument("-o", "--output-file", default="junit.xml", help="The junit xml file") - args = parser.parse_args() - - with open(args.input_file) as f: - crd = json.loads(f.read()) - crd_name = crd.get("metadata", {}).get("name") - nodes = crd.get("status", {}).get("nodes") - if not crd_name or not nodes: - print(json.dumps(crd)) - raise Exception("Incompatible CRD") - - test_suites = [] - for ts in _generate_test_suites(crd_name, nodes): - test_suites.append(ts) - with open(args.output_file, "w") as f: - TestSuite.to_file(f, test_suites) - - -if __name__ == "__main__": - main() diff --git a/test/e2e/scripts/run-instance/argo-to-junit/entrypoint.sh b/test/e2e/scripts/run-instance/argo-to-junit/entrypoint.sh deleted file mode 100755 index 72f1650ada344..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/entrypoint.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -set -e - -if [ "$#" -ne 2 ]; then - /argo_to_junit.py --help - exit 1 -fi - -/argo_to_junit.py --input-file $1 --output-file $2 diff --git a/test/e2e/scripts/run-instance/argo-to-junit/requirements.txt b/test/e2e/scripts/run-instance/argo-to-junit/requirements.txt deleted file mode 100644 index 37ea29569761f..0000000000000 --- a/test/e2e/scripts/run-instance/argo-to-junit/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -junit-xml==1.9 diff --git a/test/e2e/scripts/run-instance/argo.amd64.sha256sum b/test/e2e/scripts/run-instance/argo.amd64.sha256sum deleted file mode 100644 index de69f0053c844..0000000000000 --- a/test/e2e/scripts/run-instance/argo.amd64.sha256sum +++ /dev/null @@ -1 +0,0 @@ -834a1cc0972a8810dfc39469b176d4dead17b0bc29968974da52d89b59357ac2 argo.gz \ No newline at end of file diff --git 
a/test/e2e/scripts/run-instance/argo.arm64.sha256sum b/test/e2e/scripts/run-instance/argo.arm64.sha256sum deleted file mode 100644 index ed3d0797a7dfd..0000000000000 --- a/test/e2e/scripts/run-instance/argo.arm64.sha256sum +++ /dev/null @@ -1 +0,0 @@ -e54086fd80f2e5de1c4ea9e7b935565b4404233ea4c96264055a7e16e85c376c argo.gz \ No newline at end of file diff --git a/test/e2e/scripts/run-instance/kind-cluster.yaml b/test/e2e/scripts/run-instance/kind-cluster.yaml deleted file mode 100644 index 4a8aed1991464..0000000000000 --- a/test/e2e/scripts/run-instance/kind-cluster.yaml +++ /dev/null @@ -1,18 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -nodes: -- role: control-plane - extraMounts: - - containerPath: /var/lib/kubelet/config.json - hostPath: /root/.docker/config.json - - containerPath: /host/datadog-agent - hostPath: /home/core/datadog-agent - - containerPath: /host/proc - hostPath: /proc - extraPortMappings: - - containerPort: 30001 - hostPort: 443 -containerdConfigPatches: - - |- - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://mirror.gcr.io", "https://registry-1.docker.io"] diff --git a/test/e2e/scripts/setup-instance/.gitignore b/test/e2e/scripts/setup-instance/.gitignore deleted file mode 100644 index 680a4486820dd..0000000000000 --- a/test/e2e/scripts/setup-instance/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -fcct-* -kind -kubectl -kubectl.sha256 -fedora.gpg -butane-* diff --git a/test/e2e/scripts/setup-instance/00-entrypoint-dev.sh b/test/e2e/scripts/setup-instance/00-entrypoint-dev.sh deleted file mode 100755 index bf1c60c084704..0000000000000 --- a/test/e2e/scripts/setup-instance/00-entrypoint-dev.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -BASE64_FLAGS="-w 0" -# OSX with 2 types of base64 binary in PATH ... 
-if [[ $(uname) == "Darwin" ]] -then - echo "Currently running over Darwin" - # shellcheck disable=SC2086 - echo "osx base64" | base64 ${BASE64_FLAGS} || { - echo "current base64 binary does not support ${BASE64_FLAGS}" - BASE64_FLAGS="" - } -fi - -set -e - -cd "$(dirname "$0")" - -git clean -fdx . - -# Generate ssh-key and ignition files -./01-ignition.sh -# shellcheck disable=SC2086 -IGNITION_BASE64=$(base64 ${BASE64_FLAGS} ignition.json) - -REGION="${REGION:-us-east-1}" -UPDATE_STREAM="${UPDATE_STREAM:-stable}" -AMI="$(curl "https://builds.coreos.fedoraproject.org/streams/${UPDATE_STREAM}.json" | jq -r ".architectures.x86_64.images.aws.regions.\"$REGION\".image")" - -tee specification.json << EOF -{ - "ImageId": "${AMI}", - "InstanceType": "t3.2xlarge", - "Monitoring": { - "Enabled": false - }, - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "DeleteOnTermination": true, - "VolumeSize": 50, - "VolumeType": "gp2" - } - } - ], - "UserData": "${IGNITION_BASE64}", - - "SubnetId": "subnet-b89e00e2", - "SecurityGroupIds": ["sg-7fedd80a"] -} -EOF - -export CI_COMMIT_SHORT_SHA=${CI_COMMIT_SHORT_SHA:-$(git describe --tags --always --dirty --match 7.\*)} - -exec ./02-ec2.sh diff --git a/test/e2e/scripts/setup-instance/00-entrypoint-gitlab.sh b/test/e2e/scripts/setup-instance/00-entrypoint-gitlab.sh deleted file mode 100755 index 7279be3fabb27..0000000000000 --- a/test/e2e/scripts/setup-instance/00-entrypoint-gitlab.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" - -git clean -fdx . 
- -# Generate ssh-key and ignition files -./01-ignition.sh -IGNITION_BASE64=$(base64 -w 0 ignition.json) - -REGION="${REGION:-us-east-1}" -UPDATE_STREAM="${UPDATE_STREAM:-stable}" -if [ -z "${AMI+x}" ]; then - AMI="$(curl "https://builds.coreos.fedoraproject.org/streams/${UPDATE_STREAM}.json" | jq -r ".architectures.x86_64.images.aws.regions.\"$REGION\".image")" -fi -ARGO_WORKFLOW=${ARGO_WORKFLOW:-''} - -# TODO remove the IamInstanceProfile -tee specification.json << EOF -{ - "ImageId": "${AMI}", - "InstanceType": "t3.2xlarge", - "Monitoring": { - "Enabled": false - }, - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvda", - "Ebs": { - "DeleteOnTermination": true, - "VolumeSize": 50, - "VolumeType": "gp2" - } - } - ], - "UserData": "${IGNITION_BASE64}", - - "SubnetId": "subnet-05d7c6b1b5cfea811", - "IamInstanceProfile": { - "Name": "ci-datadog-agent-e2e-runner" - }, - "SecurityGroupIds": ["sg-019917348cb0eb7e7"] -} -EOF - -echo "Running inside a gitlab pipeline," -echo "using DATADOG_AGENT_IMAGE=${DATADOG_AGENT_IMAGE}" -echo "using DATADOG_CLUSTER_AGENT_IMAGE=${DATADOG_CLUSTER_AGENT_IMAGE}" -echo "using ARGO_WORKFLOW=${ARGO_WORKFLOW}" - -# Check if the image is hosted on a docker registry and if it's available -echo "${DATADOG_AGENT_IMAGE} is hosted on a docker registry, checking if it's available" -IMAGE_REPOSITORY=${DATADOG_AGENT_IMAGE%:*} -IMAGE_TAG=${DATADOG_AGENT_IMAGE#*:} -if ! curl -Lfs --head "https://hub.docker.com/v2/repositories/${IMAGE_REPOSITORY}/tags/${IMAGE_TAG}" > /dev/null ; then - echo "The DATADOG_AGENT_IMAGE=${DATADOG_AGENT_IMAGE} is not available on DockerHub" - echo "Ensure that the manual jobs in dev_container_deploy has been run/rerun" - echo "*dev_branch* -> k8s-e2e-*-dev" - echo "*dev_master* -> k8s-e2e-*-main" - exit 2 -fi - -echo "${DATADOG_CLUSTER_AGENT_IMAGE} is hosted on a docker registry, checking if it's available" -IMAGE_REPOSITORY=${DATADOG_CLUSTER_AGENT_IMAGE%:*} -IMAGE_TAG=${DATADOG_CLUSTER_AGENT_IMAGE#*:} -if ! 
curl -Lfs --head "https://hub.docker.com/v2/repositories/${IMAGE_REPOSITORY}/tags/${IMAGE_TAG}" > /dev/null ; then - echo "The DATADOG_CLUSTER_AGENT_IMAGE=${DATADOG_CLUSTER_AGENT_IMAGE} is not available on DockerHub" - echo "Ensure that the manual jobs in dev_container_deploy has been run/rerun" - echo "*dev_branch* -> k8s-e2e-*-dev" - echo "*dev_master* -> k8s-e2e-*-main" - exit 2 -fi - -exec ./02-ec2.sh diff --git a/test/e2e/scripts/setup-instance/00-entrypoint-local.sh b/test/e2e/scripts/setup-instance/00-entrypoint-local.sh deleted file mode 100755 index 6bf51fd15970b..0000000000000 --- a/test/e2e/scripts/setup-instance/00-entrypoint-local.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -printf '=%.0s' {0..79} ; echo -set -e -cd "$(dirname "$0")" - -../run-instance/10-setup-kind.sh -../run-instance/11-setup-kind-cluster.sh -../run-instance/20-argo-download.sh -../run-instance/21-argo-setup.sh -../run-instance/22-argo-submit.sh -../run-instance/23-argo-get.sh diff --git a/test/e2e/scripts/setup-instance/01-ignition.sh b/test/e2e/scripts/setup-instance/01-ignition.sh deleted file mode 100755 index 870d85ddeb4b7..0000000000000 --- a/test/e2e/scripts/setup-instance/01-ignition.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -printf '=%.0s' {0..79} ; echo - -cd "$(dirname "$0")" -ssh-keygen -b 4096 -t rsa -C "datadog" -N "" -f "id_rsa" -SSH_RSA=$(cat id_rsa.pub) - -arch=$(uname -m) -if [ "$arch" = "arm64" ]; -then - arch="aarch64" -fi - -case "$(uname)" in - Linux) butane="butane-$arch-unknown-linux-gnu";; - Darwin) butane="butane-$arch-apple-darwin";; -esac - -curl -O "https://fedoraproject.org/fedora.gpg" -curl -LOC - "https://github.com/coreos/butane/releases/download/v0.20.0/${butane}" -curl -LO "https://github.com/coreos/butane/releases/download/v0.20.0/${butane}.asc" - -gpgv --keyring ./fedora.gpg "${butane}.asc" "$butane" -chmod +x "$butane" - -"./$butane" --pretty --strict < -generate_namespace() -{ - # Generate unique 
namespace from workflow_group and workflow - # namespace format: --- - echo 'Info: Generating namespace...' - PREFIX=$1-$2 - # `_` and `.` are not allowed in namespace names, replace them with `-` - PREFIX=${PREFIX//[_.]/-} - CHECK_SUM=$(echo -n "$PREFIX" | md5sum | cut -c1-15) - NAMESPACE=$PREFIX-$CHECK_SUM - if ! [[ $NAMESPACE =~ ^[0-9a-zA-Z-]+$ ]]; then - echo "Error: Invalid namespace format: $NAMESPACE" - exit 1 - fi - echo "Info: Generated namespace: $NAMESPACE" -} - -# Usage: check_yq_installed -check_yq_installed() -{ - if ! [ -x "$(command -v yq)" ]; then - echo 'Error: yq is not installed.' - exit 1 - fi -} \ No newline at end of file diff --git a/test/fakeintake/Dockerfile b/test/fakeintake/Dockerfile index 4e2b2eb9ab7d7..aa77ba5a45231 100644 --- a/test/fakeintake/Dockerfile +++ b/test/fakeintake/Dockerfile @@ -2,7 +2,7 @@ # syntax=docker/dockerfile:1 ## Build -FROM golang:1.22.6-alpine3.20 AS build +FROM golang:1.22.7-alpine3.20 AS build # need gcc to build with CGO_ENABLED=1 # need musl-dev to get stdlib.h diff --git a/test/fakeintake/client/client.go b/test/fakeintake/client/client.go index 54df7e45a9df8..d31e22c992628 100644 --- a/test/fakeintake/client/client.go +++ b/test/fakeintake/client/client.go @@ -784,7 +784,11 @@ func (c *Client) get(route string) ([]byte, error) { defer tmpResp.Body.Close() if tmpResp.StatusCode != http.StatusOK { - return fmt.Errorf("expected %d got %d", http.StatusOK, tmpResp.StatusCode) + var errStr string + if errBody, _ := io.ReadAll(tmpResp.Body); len(errBody) > 0 { + errStr = string(errBody) + } + return fmt.Errorf("expected %d got %d: %s", http.StatusOK, tmpResp.StatusCode, errStr) } // If strictFakeintakeIDCheck is enabled, we check that the fakeintake ID is the same as the one we expect // If the fakeintake ID is not set yet we set the one we get from the first request diff --git a/test/integration/config_providers/etcd/etcd_provider_test.go b/test/integration/config_providers/etcd/etcd_provider_test.go index 
228495b66e077..1d481a49695e9 100644 --- a/test/integration/config_providers/etcd/etcd_provider_test.go +++ b/test/integration/config_providers/etcd/etcd_provider_test.go @@ -17,8 +17,8 @@ import ( etcd_client "go.etcd.io/etcd/client/v2" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" - "github.com/DataDog/datadog-agent/pkg/config" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -142,7 +142,7 @@ func (suite *EtcdTestSuite) toggleEtcdAuth(enable bool) { func (suite *EtcdTestSuite) TestWorkingConnectionAnon() { ctx := context.Background() - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.etcdURL, TemplateDir: "/foo", } @@ -163,7 +163,7 @@ func (suite *EtcdTestSuite) TestWorkingConnectionAnon() { func (suite *EtcdTestSuite) TestBadConnection() { ctx := context.Background() - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: "http://127.0.0.1:1337", TemplateDir: "/foo", } @@ -178,7 +178,7 @@ func (suite *EtcdTestSuite) TestBadConnection() { func (suite *EtcdTestSuite) TestWorkingAuth() { ctx := context.Background() suite.toggleEtcdAuth(true) - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.etcdURL, TemplateDir: "/foo", Username: etcdUser, @@ -195,7 +195,7 @@ func (suite *EtcdTestSuite) TestWorkingAuth() { func (suite *EtcdTestSuite) TestBadAuth() { ctx := context.Background() suite.toggleEtcdAuth(true) - config := config.ConfigurationProviders{ + config := pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.etcdURL, TemplateDir: "/foo", Username: etcdUser, diff --git a/test/integration/config_providers/zookeeper/zookeeper_provider_test.go b/test/integration/config_providers/zookeeper/zookeeper_provider_test.go index 
fd230819aff43..b9e3aab142edc 100644 --- a/test/integration/config_providers/zookeeper/zookeeper_provider_test.go +++ b/test/integration/config_providers/zookeeper/zookeeper_provider_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -69,7 +69,7 @@ type ZkTestSuite struct { containerName string zkVersion string zkURL string - providerConfig config.ConfigurationProviders + providerConfig pkgconfigsetup.ConfigurationProviders compose *utils.ComposeConf } @@ -110,7 +110,7 @@ func (suite *ZkTestSuite) TearDownSuite() { // put configuration back in a known state before each test func (suite *ZkTestSuite) SetupTest() { - suite.providerConfig = config.ConfigurationProviders{ + suite.providerConfig = pkgconfigsetup.ConfigurationProviders{ TemplateURL: suite.zkURL, TemplateDir: "/datadog/check_configs", } diff --git a/test/integration/corechecks/docker/main_test.go b/test/integration/corechecks/docker/main_test.go index 1289154aed688..5d2d8a73ebff5 100644 --- a/test/integration/corechecks/docker/main_test.go +++ b/test/integration/corechecks/docker/main_test.go @@ -30,9 +30,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/containers/docker" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -70,14 +71,15 @@ var ( func TestMain(m 
*testing.M) { flag.Parse() - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) retryTicker := time.NewTicker(time.Duration(*retryDelay) * time.Second) @@ -116,12 +118,12 @@ type testDeps struct { // Called before for first test run: compose up func setup() (workloadmeta.Component, error) { // Setup global conf - config.Datadog().SetConfigType("yaml") - err := config.Datadog().ReadConfig(strings.NewReader(datadogCfgString)) + pkgconfigsetup.Datadog().SetConfigType("yaml") + err := pkgconfigsetup.Datadog().ReadConfig(strings.NewReader(datadogCfgString)) if err != nil { return nil, err } - config.SetFeaturesNoCleanup(env.Docker) + env.SetFeaturesNoCleanup(env.Docker) // Note: workloadmeta will be started by fx with the App var deps testDeps diff --git a/test/integration/dogstatsd/origin_detection.go b/test/integration/dogstatsd/origin_detection.go index 0b2e87e5d6fa3..31b99916edfd3 100644 --- a/test/integration/dogstatsd/origin_detection.go +++ b/test/integration/dogstatsd/origin_detection.go @@ -27,7 +27,6 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap/pidmapimpl" - coreConfig "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/optional" @@ -43,7 +42,7 @@ const ( // we can't just `netcat` to the socket, that's why we run a custom python // script that will stay up after sending packets. 
func testUDSOriginDetection(t *testing.T, network string) { - coreConfig.SetFeatures(t, env.Docker) + env.SetFeatures(t, env.Docker) cfg := map[string]any{} diff --git a/test/integration/dogstatsd/origin_detection_test.go b/test/integration/dogstatsd/origin_detection_test.go index 3216f7c9ad438..1f158aea9612e 100644 --- a/test/integration/dogstatsd/origin_detection_test.go +++ b/test/integration/dogstatsd/origin_detection_test.go @@ -8,32 +8,35 @@ package dogstatsd import ( "testing" - "github.com/DataDog/datadog-agent/pkg/config" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" ) func TestUDSOriginDetectionDatagram(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) testUDSOriginDetection(t, "unixgram") } func TestUDSOriginDetectionStream(t *testing.T) { - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) testUDSOriginDetection(t, "unix") diff --git a/test/integration/listeners/docker/docker_listener_test.go b/test/integration/listeners/docker/docker_listener_test.go index 739cdea88ff58..33049fe225c3b 100644 --- a/test/integration/listeners/docker/docker_listener_test.go +++ b/test/integration/listeners/docker/docker_listener_test.go @@ -31,11 +31,12 @@ import ( wmcatalog "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/catalog" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/containers" 
"github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" "github.com/DataDog/datadog-agent/pkg/util/optional" "github.com/DataDog/datadog-agent/test/integration/utils" ) @@ -63,14 +64,15 @@ type deps struct { func (suite *DockerListenerTestSuite) SetupSuite() { containers.ResetSharedFilter() - config.SetupLogger( - config.LoggerName("test"), + pkglogsetup.SetupLogger( + pkglogsetup.LoggerName("test"), "debug", "", "", false, true, false, + pkgconfigsetup.Datadog(), ) overrides := map[string]interface{}{ @@ -106,7 +108,7 @@ func (suite *DockerListenerTestSuite) TearDownSuite() { } func (suite *DockerListenerTestSuite) SetupTest() { - dl, err := listeners.NewContainerListener(&config.Listeners{}, optional.NewOption(suite.wmeta), suite.telemetryStore) + dl, err := listeners.NewContainerListener(&pkgconfigsetup.Listeners{}, optional.NewOption(suite.wmeta), suite.telemetryStore) if err != nil { panic(err) } diff --git a/test/integration/serverless/snapshots/error-csharp b/test/integration/serverless/snapshots/error-csharp index b96bb73eee4ce..eedb59e5c8b87 100644 --- a/test/integration/serverless/snapshots/error-csharp +++ b/test/integration/serverless/snapshots/error-csharp @@ -1152,6 +1152,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + 
"tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-csharp", + "functionname:integration-tests-extension-XXXXXX-error-csharp", + "memorysize:1024", + "region:eu-west-1", + 
"resource:integration-tests-extension-XXXXXX-error-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-java b/test/integration/serverless/snapshots/error-java index 754e7f0733ffd..61b596cc46b25 100644 --- a/test/integration/serverless/snapshots/error-java +++ b/test/integration/serverless/snapshots/error-java @@ -1152,6 +1152,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + 
"version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-java", + "functionname:integration-tests-extension-XXXXXX-error-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-node b/test/integration/serverless/snapshots/error-node index 97b02cd4fd0dd..2061aaf660e7e 100644 --- a/test/integration/serverless/snapshots/error-node +++ b/test/integration/serverless/snapshots/error-node @@ -1156,6 +1156,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + 
"aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + 
"dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-node", + "functionname:integration-tests-extension-XXXXXX-error-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/error-proxy b/test/integration/serverless/snapshots/error-proxy index 58e810c64e07a..00fe9b830d005 100644 --- a/test/integration/serverless/snapshots/error-proxy +++ b/test/integration/serverless/snapshots/error-proxy @@ -1152,6 +1152,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + 
"env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-proxy", + "functionname:integration-tests-extension-XXXXXX-error-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git 
a/test/integration/serverless/snapshots/error-python b/test/integration/serverless/snapshots/error-python index e7bd220d86bf5..fdb7620cf1e98 100644 --- a/test/integration/serverless/snapshots/error-python +++ b/test/integration/serverless/snapshots/error-python @@ -1158,6 +1158,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + 
"env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-error-python", + "functionname:integration-tests-extension-XXXXXX-error-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-error-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-csharp b/test/integration/serverless/snapshots/metric-csharp index 87d214181a82a..92fb2db40cac3 100644 --- a/test/integration/serverless/snapshots/metric-csharp +++ b/test/integration/serverless/snapshots/metric-csharp @@ -1104,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + 
"functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + 
"env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-csharp", + "functionname:integration-tests-extension-XXXXXX-metric-csharp", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-csharp", + "runtime:dotnet6", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-go b/test/integration/serverless/snapshots/metric-go index 950cacf5bd7b6..4abcdeff4f064 100644 --- a/test/integration/serverless/snapshots/metric-go +++ b/test/integration/serverless/snapshots/metric-go @@ -1104,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + 
"memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-go", + "functionname:integration-tests-extension-XXXXXX-metric-go", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-go", + "runtime:provided.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-java b/test/integration/serverless/snapshots/metric-java index 168e525ffc408..2f7655ad42374 100644 --- a/test/integration/serverless/snapshots/metric-java +++ 
b/test/integration/serverless/snapshots/metric-java @@ -1104,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + 
"region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-java", + "functionname:integration-tests-extension-XXXXXX-metric-java", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-java", + "runtime:java8.al2", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-node b/test/integration/serverless/snapshots/metric-node index c74d07d228aea..3576559a1be30 100644 --- a/test/integration/serverless/snapshots/metric-node +++ b/test/integration/serverless/snapshots/metric-node @@ -1104,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + 
"tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-node", + "functionname:integration-tests-extension-XXXXXX-metric-node", + "memorysize:1024", + "region:eu-west-1", + 
"resource:integration-tests-extension-XXXXXX-metric-node", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-proxy b/test/integration/serverless/snapshots/metric-proxy index 9156964619d76..4948ad17b6646 100644 --- a/test/integration/serverless/snapshots/metric-proxy +++ b/test/integration/serverless/snapshots/metric-proxy @@ -1104,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + 
"version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-proxy", + "functionname:integration-tests-extension-XXXXXX-metric-proxy", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-proxy", + "runtime:nodejs18.x", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/serverless/snapshots/metric-python b/test/integration/serverless/snapshots/metric-python index 65d8ec77aa3f6..b630d6bc3fa25 100644 --- a/test/integration/serverless/snapshots/metric-python +++ b/test/integration/serverless/snapshots/metric-python @@ -1104,6 +1104,102 @@ "version:integration-tests-version" ] }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + 
"architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_max", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] 
+ }, + { + "distributions": null, + "dogsketches": [], + "metric": "aws.lambda.enhanced.threads_use", + "tags": [ + "account_id:############", + "architecture:XXX", + "aws_account:############", + "dd_extension_version:123", + "env:integration-tests-env", + "function_arn:arn:aws:lambda:eu-west-1:############:function:integration-tests-extension-XXXXXX-metric-python", + "functionname:integration-tests-extension-XXXXXX-metric-python", + "memorysize:1024", + "region:eu-west-1", + "resource:integration-tests-extension-XXXXXX-metric-python", + "runtime:python3.8", + "service:integration-tests-service", + "taga:valuea", + "tagb:valueb", + "tagc:valuec", + "tagd:valued", + "version:integration-tests-version" + ] + }, { "distributions": null, "dogsketches": [], diff --git a/test/integration/util/kube_apiserver/apiserver_test.go b/test/integration/util/kube_apiserver/apiserver_test.go index 6cdf7cee2b3e3..784fdf6072467 100644 --- a/test/integration/util/kube_apiserver/apiserver_test.go +++ b/test/integration/util/kube_apiserver/apiserver_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" @@ -45,7 +44,7 @@ func TestSuiteKube(t *testing.T) { s := &testSuite{} // Env detection - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) // Start compose stack compose, err := initAPIServerCompose() diff --git a/test/integration/util/kubelet/insecurekubelet_test.go b/test/integration/util/kubelet/insecurekubelet_test.go index 2206846f31c5a..776f7a771e7c0 100644 --- a/test/integration/util/kubelet/insecurekubelet_test.go +++ b/test/integration/util/kubelet/insecurekubelet_test.go @@ -16,7 +16,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - 
"github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" @@ -102,7 +101,7 @@ func (suite *InsecureTestSuite) TestInsecureHTTPS() { } func TestInsecureKubeletSuite(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) compose, err := initInsecureKubelet() require.Nil(t, err) diff --git a/test/integration/util/kubelet/securekubelet_test.go b/test/integration/util/kubelet/securekubelet_test.go index 58c016510f3d7..90124ac5e4282 100644 --- a/test/integration/util/kubelet/securekubelet_test.go +++ b/test/integration/util/kubelet/securekubelet_test.go @@ -17,7 +17,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/DataDog/datadog-agent/pkg/config" "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" @@ -141,7 +140,7 @@ func (suite *SecureTestSuite) TestTLSWithCACertificate() { } func TestSecureKubeletSuite(t *testing.T) { - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) compose, certsConfig, err := initSecureKubelet() defer os.Remove(certsConfig.CertFilePath) diff --git a/test/integration/util/leaderelection/leaderelection_test.go b/test/integration/util/leaderelection/leaderelection_test.go index 292e133165a73..317a40e04edea 100644 --- a/test/integration/util/leaderelection/leaderelection_test.go +++ b/test/integration/util/leaderelection/leaderelection_test.go @@ -21,7 +21,6 @@ import ( "testing" "time" - "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" log "github.com/cihub/seelog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -31,7 +30,8 @@ import ( "k8s.io/client-go/tools/clientcmd" rl 
"k8s.io/client-go/tools/leaderelection/resourcelock" - "github.com/DataDog/datadog-agent/pkg/config" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" + "github.com/DataDog/datadog-agent/pkg/config/env" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" @@ -111,7 +111,7 @@ func TestSuiteAPIServer(t *testing.T) { }() mockConfig := configmock.New(t) - config.SetFeatures(t, env.Kubernetes) + env.SetFeatures(t, env.Kubernetes) mockConfig.SetWithoutSource("leader_election_default_resource", tt.leaderElectionDefaultResource) // Start compose stack diff --git a/test/new-e2e/examples/gke_test.go b/test/new-e2e/examples/gke_test.go new file mode 100644 index 0000000000000..5f01481914652 --- /dev/null +++ b/test/new-e2e/examples/gke_test.go @@ -0,0 +1,48 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+package examples + +import ( + "context" + gcpkubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/gcp/kubernetes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" + "testing" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" +) + +type gkeSuite struct { + e2e.BaseSuite[environments.Kubernetes] +} + +func TestGKESuite(t *testing.T) { + e2e.Run(t, &gkeSuite{}, e2e.WithProvisioner(gcpkubernetes.GKEProvisioner())) +} + +func (v *gkeSuite) TestGKE() { + v.T().Log("Running GKE test") + res, _ := v.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(context.TODO(), v1.ListOptions{}) + var clusterAgent corev1.Pod + containsClusterAgent := false + for _, pod := range res.Items { + if strings.Contains(pod.Name, "cluster-agent") { + containsClusterAgent = true + clusterAgent = pod + break + } + } + assert.True(v.T(), containsClusterAgent, "Cluster Agent not found") + + stdout, stderr, err := v.Env().KubernetesCluster.KubernetesClient. 
+ PodExec("datadog", clusterAgent.Name, "cluster-agent", []string{"ls"}) + require.NoError(v.T(), err) + assert.Empty(v.T(), stderr) + assert.NotEmpty(v.T(), stdout) +} diff --git a/test/new-e2e/examples/vm_with_file_operations_test.go b/test/new-e2e/examples/vm_with_file_operations_test.go index 7c4a7e7fa54f7..02f8592388bd6 100644 --- a/test/new-e2e/examples/vm_with_file_operations_test.go +++ b/test/new-e2e/examples/vm_with_file_operations_test.go @@ -9,11 +9,16 @@ import ( "io/fs" "testing" + "golang.org/x/crypto/ssh" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + + "github.com/stretchr/testify/assert" ) type vmSuiteWithFileOperations struct { @@ -22,10 +27,85 @@ type vmSuiteWithFileOperations struct { // TestVMSuiteWithFileOperations runs tests for the VM interface to ensure its implementation is correct. func TestVMSuiteWithFileOperations(t *testing.T) { - suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))} + suiteParams := []e2e.SuiteOption{e2e.WithProvisioner(awshost.ProvisionerNoAgentNoFakeIntake(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))} e2e.Run(t, &vmSuiteWithFileOperations{}, suiteParams...) 
} +func assertExitCodeEqual(t *testing.T, err error, expected int, msgAndArgs ...interface{}) { + t.Helper() + var exitErr *ssh.ExitError + assert.ErrorAs(t, err, &exitErr) + assert.Equal(t, expected, exitErr.ExitStatus(), msgAndArgs) +} + +// TestCommandResults tests that commands return output or errors in the expected way +func (v *vmSuiteWithFileOperations) TestCommandResults() { + vm := v.Env().RemoteHost + + // successful command should return the output + out, err := vm.Execute("echo hello") + v.Assert().NoError(err) + v.Assert().Contains(out, "hello") + + // invalid commands should return an error + _, err = vm.Execute("not-a-command") + v.Assert().Error(err, "invalid command should return an error") + + // specific exit code should be returned + _, err = vm.Execute("exit 2") + v.Assert().Error(err, "non-zero exit code should return an error") + assertExitCodeEqual(v.T(), err, 2, "specific exit code should be returned") + + if vm.OSFamily == os.WindowsFamily { + v.testWindowsCommandResults() + } +} + +func (v *vmSuiteWithFileOperations) testWindowsCommandResults() { + vm := v.Env().RemoteHost + + // invalid commands should return an error + _, err := vm.Execute("not-a-command") + v.Assert().Error(err, "invalid command should return an error") + assertExitCodeEqual(v.T(), err, 1, "generic poewrshell error should return exit code 1") + + // native commands should return the exit status + _, err = vm.Execute("cmd.exe /c exit 2") + v.Assert().Error(err, "native command failure should return an error") + assertExitCodeEqual(v.T(), err, 2, "specific exit code should be returned") + + // a failing native command should continue to execute the rest of the command + // and the result should be from the lsat command + out, err := vm.Execute("cmd.exe /c exit 2; echo hello") + v.Assert().NoError(err, "result should come from the last command") + v.Assert().Contains(out, "hello", "native command failure should continue to execute the rest of the command") + + // Execute 
should auto-set $ErrorActionPreference to 'Stop', so + // a failing PowerShell cmdlet should fail immediately and not + // execute the rest of the command, so the output should not contain "hello" + out, err = vm.Execute(`Write-Error 'error'; echo he""llo`) + v.Assert().Error(err, "Execute should add ErrorActionPreference='Stop' to stop command execution on error") + v.Assert().NotContains(err.Error(), "hello") + v.Assert().NotContains(out, "hello") + assertExitCodeEqual(v.T(), err, 1, "failing PowerShell cmdlet should return exit code 1") + + // Execute should auto-set $ErrorActionPreference to 'Stop', so subcommands return an error + _, err = vm.Execute(`(Get-Service -Name 'not-a-service').Status`) + v.Assert().Error(err, "Execute should add ErrorActionPreference='Stop' to stop subcommand execution on error") + assertExitCodeEqual(v.T(), err, 1, "failing PowerShell cmdlet should return exit code 1") + // Sanity check default 'Continue' behavior does not return an error + _, err = vm.Execute(`$ErrorActionPreference='Continue'; (Get-Service -Name 'not-a-service').Status`) + v.Assert().NoError(err, "explicit ErrorActionPreference='Continue' should ignore subcommand error") + + // env vars should not leak between commands + _, err = vm.Execute(`$env:MYVAR1 = 'banana'`, client.WithEnvVariables(map[string]string{"MYVAR2": "orange"})) + v.Assert().NoError(err, "setting env vars should not return an error") + out, err = vm.Execute(`echo $env:MYVAR1; echo $env:MYVAR2`) + v.Assert().NoError(err) + v.Assert().NotContains(out, "banana", "env vars should not leak between commands") + v.Assert().NotContains(out, "orange", "env vars should not leak between commands") +} + func (v *vmSuiteWithFileOperations) TestFileOperations() { vm := v.Env().RemoteHost testFilePath := "test" diff --git a/test/new-e2e/go.mod b/test/new-e2e/go.mod index bbe8063d8489a..22abb63473005 100644 --- a/test/new-e2e/go.mod +++ b/test/new-e2e/go.mod @@ -9,6 +9,7 @@ go 1.22.0 replace ( 
github.com/DataDog/datadog-agent/comp/netflow/payload => ../../comp/netflow/payload github.com/DataDog/datadog-agent/pkg/proto => ../../pkg/proto + github.com/DataDog/datadog-agent/pkg/trace => ../../pkg/trace github.com/DataDog/datadog-agent/pkg/util/optional => ../../pkg/util/optional github.com/DataDog/datadog-agent/pkg/util/pointer => ../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../pkg/util/scrubber @@ -32,7 +33,7 @@ require ( // `TEST_INFRA_DEFINITIONS_BUILDIMAGES` matches the commit sha in the module version // Example: github.com/DataDog/test-infra-definitions v0.0.0-YYYYMMDDHHmmSS-0123456789AB // => TEST_INFRA_DEFINITIONS_BUILDIMAGES: 0123456789AB - github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299 + github.com/DataDog/test-infra-definitions v0.0.0-20240925140237-283b257025df github.com/aws/aws-sdk-go-v2 v1.30.5 github.com/aws/aws-sdk-go-v2/config v1.27.19 github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.2 @@ -104,7 +105,7 @@ require ( github.com/aws/smithy-go v1.20.4 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 github.com/chai2010/gettext-go v1.0.2 // indirect github.com/charmbracelet/bubbles v0.18.0 // indirect github.com/charmbracelet/bubbletea v0.25.0 // indirect @@ -261,7 +262,12 @@ require ( sigs.k8s.io/yaml v1.3.0 // indirect ) -require github.com/hairyhenderson/go-codeowners v0.5.0 +require ( + github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 + github.com/DataDog/datadog-go/v5 v5.5.0 + github.com/digitalocean/go-libvirt v0.0.0-20240812180835-9c6c0a310c6c + github.com/hairyhenderson/go-codeowners v0.5.0 +) require ( github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.60.0 // indirect diff --git a/test/new-e2e/go.sum b/test/new-e2e/go.sum index e31485a46f4bb..afef90f5a3f79 100644 --- a/test/new-e2e/go.sum 
+++ b/test/new-e2e/go.sum @@ -12,10 +12,12 @@ github.com/DataDog/datadog-api-client-go v1.16.0 h1:5jOZv1m98criCvYTa3qpW8Hzv301 github.com/DataDog/datadog-api-client-go v1.16.0/go.mod h1:PgrP2ABuJWL3Auw2iEkemAJ/r72ghG4DQQmb5sgnKW4= github.com/DataDog/datadog-api-client-go/v2 v2.27.0 h1:AGZj41frjnjMufQHQbJH2fzmifOs20wpmVDtIBCv33E= github.com/DataDog/datadog-api-client-go/v2 v2.27.0/go.mod h1:QKOu6vscsh87fMY1lHfLEmNSunyXImj8BUaUWJXOehc= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a h1:m9REhmyaWD5YJ0P53ygRHxKKo+KM+nw+zz0hEdKztMo= github.com/DataDog/mmh3 v0.0.0-20200805151601-30884ca2197a/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299 h1:lMzRshj0zEnNId74hiUsXSClnB0qKmQlC3VQ9kC6p+0= -github.com/DataDog/test-infra-definitions v0.0.0-20240910143843-ce6a4aad9299/go.mod h1:orHExiPWWT9f68UJZ92oIVX1OcTNlKvtbX7b6HM9e0Q= +github.com/DataDog/test-infra-definitions v0.0.0-20240925140237-283b257025df h1:heqQjrd6Qd1jautyOOZPPpRgnM7lDIgrW2BN0qHXru0= +github.com/DataDog/test-infra-definitions v0.0.0-20240925140237-283b257025df/go.mod h1:AOMxahok2mhJI2Isl9hFQjG69HLVVDuytmFhDMZYx90= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= @@ -26,6 +28,7 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod 
h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= @@ -105,8 +108,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0= @@ -137,6 +140,8 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/digitalocean/go-libvirt v0.0.0-20240812180835-9c6c0a310c6c h1:1y+eZhZOMDP86ErYQ7P7ebAvyhpr+HZhR5K6BlOkWoo= +github.com/digitalocean/go-libvirt v0.0.0-20240812180835-9c6c0a310c6c/go.mod 
h1:vhj0tZhS07ugaMVppAreQmBVHcqLwl5YR2DRu5/uJbY= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= @@ -211,6 +216,7 @@ github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -317,8 +323,6 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -384,15 +388,15 @@ github.com/pkg/term v1.1.0 
h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk= github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= +github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= +github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= github.com/pulumi/appdash 
v0.0.0-20231130102222-75f619a67231 h1:vkHw5I/plNdTr435cARxCW6q9gc0S/Yxz7Mkd38pOb0= github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231/go.mod h1:murToZ2N9hNJzewjHBgfFdXhZKjY3z5cYC1VXk+lbFE= github.com/pulumi/esc v0.9.1 h1:HH5eEv8sgyxSpY5a8yePyqFXzA8cvBvapfH8457+mIs= @@ -489,8 +493,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVK github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= @@ -503,6 +507,7 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod 
h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= @@ -548,6 +553,7 @@ golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -563,6 +569,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= @@ -580,6 +587,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= @@ -595,7 +603,9 @@ golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -644,6 +654,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= diff --git a/test/new-e2e/pkg/environments/aws/host/host.go b/test/new-e2e/pkg/environments/aws/host/host.go index 7652d9b846576..4668905ac83a8 100644 --- a/test/new-e2e/pkg/environments/aws/host/host.go +++ b/test/new-e2e/pkg/environments/aws/host/host.go @@ -261,7 +261,8 @@ func Run(ctx *pulumi.Context, env *environments.Host, runParams RunParams) error // todo: add agent once updater installs agent on bootstrap env.Agent = nil } else if params.agentOptions != nil { - agent, err := agent.NewHostAgent(&awsEnv, host, params.agentOptions...) + agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())})) + agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...) if err != nil { return err } diff --git a/test/new-e2e/pkg/environments/aws/host/windows/host.go b/test/new-e2e/pkg/environments/aws/host/windows/host.go index 7fcb44f05da53..1fd5885a88c4d 100644 --- a/test/new-e2e/pkg/environments/aws/host/windows/host.go +++ b/test/new-e2e/pkg/environments/aws/host/windows/host.go @@ -8,7 +8,7 @@ package winawshost import ( "fmt" - installer "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components/datadog-installer" + "github.com/DataDog/test-infra-definitions/components/activedirectory" "github.com/DataDog/test-infra-definitions/components/datadog/agent" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" @@ -18,6 +18,8 @@ import ( "github.com/DataDog/test-infra-definitions/scenarios/aws/fakeintake" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + installer "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components/datadog-installer" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" 
"github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclientparams" @@ -202,7 +204,8 @@ func Run(ctx *pulumi.Context, env *environments.WindowsHost, params *Provisioner } if params.agentOptions != nil { - agent, err := agent.NewHostAgent(&awsEnv, host, params.agentOptions...) + agentOptions := append(params.agentOptions, agentparams.WithTags([]string{fmt.Sprintf("stackid:%s", ctx.Stack())})) + agent, err := agent.NewHostAgent(&awsEnv, host, agentOptions...) if err != nil { return err } diff --git a/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go b/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go new file mode 100644 index 0000000000000..10c315fb7b254 --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/kubernetes/gke.go @@ -0,0 +1,94 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package gcpkubernetes contains the provisioner for Google Kubernetes Engine (GKE) +package gcpkubernetes + +import ( + "github.com/DataDog/test-infra-definitions/resources/gcp" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/gke" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" + + "github.com/DataDog/test-infra-definitions/components/datadog/agent/helm" + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" +) + +const ( + provisionerBaseID = "gcp-gke" +) + +// GKEProvisioner creates a new provisioner for GKE on GCP +func GKEProvisioner(opts ...ProvisionerOption) e2e.TypedProvisioner[environments.Kubernetes] { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard to debug issues. + params := newProvisionerParams() + _ = optional.ApplyOptions(params, opts) + + provisioner := e2e.NewTypedPulumiProvisioner(provisionerBaseID+params.name, func(ctx *pulumi.Context, env *environments.Kubernetes) error { + // We ALWAYS need to make a deep copy of `params`, as the provisioner can be called multiple times. + // and it's easy to forget about it, leading to hard to debug issues. 
+ params := newProvisionerParams() + _ = optional.ApplyOptions(params, opts) + + return GKERunFunc(ctx, env, params) + }, params.extraConfigParams) + + return provisioner +} + +// GKERunFunc is the run function for GKE provisioner +func GKERunFunc(ctx *pulumi.Context, env *environments.Kubernetes, params *ProvisionerParams) error { + gcpEnv, err := gcp.NewEnvironment(ctx) + if err != nil { + return err + } + + // Create the cluster + cluster, err := gke.NewGKECluster(gcpEnv, params.gkeOptions...) + if err != nil { + return err + } + err = cluster.Export(ctx, &env.KubernetesCluster.ClusterOutput) + if err != nil { + return err + } + + agentOptions := params.agentOptions + + // Deploy a fakeintake + if params.fakeintakeOptions != nil { + fakeIntake, err := fakeintake.NewVMInstance(gcpEnv, params.fakeintakeOptions...) + if err != nil { + return err + } + err = fakeIntake.Export(ctx, &env.FakeIntake.FakeintakeOutput) + if err != nil { + return err + } + agentOptions = append(agentOptions, kubernetesagentparams.WithFakeintake(fakeIntake)) + + } else { + env.FakeIntake = nil + } + + if params.agentOptions != nil { + agent, err := helm.NewKubernetesAgent(&gcpEnv, params.name, cluster.KubeProvider, agentOptions...) + if err != nil { + return err + } + err = agent.Export(ctx, &env.Agent.KubernetesAgentOutput) + if err != nil { + return err + } + } else { + env.Agent = nil + } + return nil +} diff --git a/test/new-e2e/pkg/environments/gcp/kubernetes/params.go b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go new file mode 100644 index 0000000000000..d42a5dac75f9e --- /dev/null +++ b/test/new-e2e/pkg/environments/gcp/kubernetes/params.go @@ -0,0 +1,100 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package gcpkubernetes contains the provisioner for Google Kubernetes Engine (GKE) +package gcpkubernetes + +import ( + "fmt" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/gke" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/optional" + + "github.com/DataDog/test-infra-definitions/common/config" + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + kubeComp "github.com/DataDog/test-infra-definitions/components/kubernetes" + "github.com/DataDog/test-infra-definitions/scenarios/gcp/fakeintake" + + "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes" +) + +// ProvisionerParams contains all the parameters needed to create the environment +type ProvisionerParams struct { + name string + fakeintakeOptions []fakeintake.Option + agentOptions []kubernetesagentparams.Option + gkeOptions []gke.Option + workloadAppFuncs []WorkloadAppFunc + extraConfigParams runner.ConfigMap +} + +func newProvisionerParams(opts ...ProvisionerOption) *ProvisionerParams { + params := &ProvisionerParams{ + name: "gke", + fakeintakeOptions: []fakeintake.Option{}, + agentOptions: []kubernetesagentparams.Option{}, + workloadAppFuncs: []WorkloadAppFunc{}, + } + err := optional.ApplyOptions(params, opts) + if err != nil { + panic(fmt.Sprintf("failed to apply options: %v", err)) + } + return params +} + +// ProvisionerOption is a function that modifies the ProvisionerParams +type ProvisionerOption func(*ProvisionerParams) error + +// WithName sets the name of the provisioner +func WithName(name string) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.name = name + return nil + } +} + +// WithAgentOptions adds options to the agent +func WithAgentOptions(opts ...kubernetesagentparams.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.agentOptions = opts + return nil + } +} + +// WithFakeIntakeOptions adds 
options to the fake intake +func WithFakeIntakeOptions(opts ...fakeintake.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.fakeintakeOptions = opts + return nil + } +} + +// WithGKEOptions adds options to the cluster +func WithGKEOptions(opts ...gke.Option) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.gkeOptions = opts + return nil + } +} + +// WithExtraConfigParams adds extra config parameters to the environment +func WithExtraConfigParams(configMap runner.ConfigMap) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.extraConfigParams = configMap + return nil + } +} + +// WorkloadAppFunc is a function that deploys a workload app to a kube provider +type WorkloadAppFunc func(e config.Env, kubeProvider *kubernetes.Provider) (*kubeComp.Workload, error) + +// WithWorkloadApp adds a workload app to the environment +func WithWorkloadApp(appFunc WorkloadAppFunc) ProvisionerOption { + return func(params *ProvisionerParams) error { + params.workloadAppFuncs = append(params.workloadAppFuncs, appFunc) + return nil + } +} diff --git a/test/new-e2e/pkg/utils/e2e/client/host.go b/test/new-e2e/pkg/utils/e2e/client/host.go index 31a14f32aaf64..16d597805dd1a 100644 --- a/test/new-e2e/pkg/utils/e2e/client/host.go +++ b/test/new-e2e/pkg/utils/e2e/client/host.go @@ -40,7 +40,7 @@ const ( sshMaxRetries = 20 ) -type buildCommandFn func(host *Host, command string, envVars EnvVar) string +type buildCommandFn func(command string, envVars EnvVar) string type convertPathSeparatorFn func(string) string @@ -122,13 +122,13 @@ func (h *Host) Execute(command string, options ...ExecuteOption) (string, error) if err != nil { return "", err } - command = h.buildCommand(h, command, params.EnvVariables) + command = h.buildCommand(command, params.EnvVariables) return h.executeAndReconnectOnError(command) } func (h *Host) executeAndReconnectOnError(command string) (string, error) { scrubbedCommand := 
h.scrubber.ScrubLine(command) // scrub the command in case it contains secrets - h.context.T().Logf("Executing command `%s`", scrubbedCommand) + h.context.T().Logf("%s - %s - Executing command `%s`", time.Now().Format("02-01-2006 15:04:05"), h.context.T().Name(), scrubbedCommand) stdout, err := execute(h.client, command) if err != nil && strings.Contains(err.Error(), "failed to create session:") { err = h.Reconnect() @@ -487,12 +487,13 @@ func buildCommandFactory(osFamily oscomp.Family) buildCommandFn { return buildCommandOnLinuxAndMacOS } -func buildCommandOnWindows(h *Host, command string, envVar EnvVar) string { +func buildCommandOnWindows(command string, envVar EnvVar) string { cmd := "" - // Set $ErrorActionPreference to 'Stop' to cause PowerShell to stop on an erorr instead + // Set $ErrorActionPreference to 'Stop' to cause PowerShell to stop on an error instead // of the default 'Continue' behavior. - // This also ensures that Execute() will return an error when the command fails. + // This also ensures that Execute() will return an error when a command fails. + // Note that this only applies to PowerShell commands, not to external commands or native binaries. // // For example, if the command is (Get-Service -Name ddnpm).Status and the service does not exist, // then by default the command will print an error but the exit code will be 0 and Execute() will not return an error. 
@@ -506,26 +507,31 @@ func buildCommandOnWindows(h *Host, command string, envVar EnvVar) string { // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_preference_variables#erroractionpreference cmd += "$ErrorActionPreference='Stop'; " - envVarSave := map[string]string{} for envName, envValue := range envVar { - previousEnvVar, err := h.executeAndReconnectOnError(fmt.Sprintf("$env:%s", envName)) - if err != nil || previousEnvVar == "" { - previousEnvVar = "null" - } - envVarSave[envName] = previousEnvVar - cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envValue) } - cmd += fmt.Sprintf("%s; ", command) - - for envName := range envVar { - cmd += fmt.Sprintf("$env:%s='%s'; ", envName, envVarSave[envName]) - } + // By default, powershell will just exit with 0 or 1, so we call exit to preserve + // the exit code of the command provided by the caller. + // The caller's command may not modify LASTEXITCODE, so manually reset it first, + // then only call exit if the command provided by the caller fails. + // + // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_automatic_variables?#lastexitcode + // https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_powershell_exe?#-command + cmd += fmt.Sprintf("$LASTEXITCODE=0; %s; if (-not $?) { exit $LASTEXITCODE }", command) + // NOTE: Do not add more commands after the command provided by the caller. + // + // `$ErrorActionPreference`='Stop' only applies to PowerShell commands, not to + // external commands or native binaries, thus later commands will still be executed. + // Additional commands will overwrite the exit code of the command provided by + // the caller which may cause errors to be missed/ignored. + // If it becomes necessary to run more commands after the command provided by the + // caller, we will need to find a way to ensure that the exit code of the command + // provided by the caller is preserved. 
return cmd } -func buildCommandOnLinuxAndMacOS(_ *Host, command string, envVar EnvVar) string { +func buildCommandOnLinuxAndMacOS(command string, envVar EnvVar) string { cmd := "" for envName, envValue := range envVar { cmd += fmt.Sprintf("%s='%s' ", envName, envValue) diff --git a/test/new-e2e/pkg/utils/infra/retriable_errors.go b/test/new-e2e/pkg/utils/infra/retriable_errors.go index b8d5c27b53195..7d28f17006460 100644 --- a/test/new-e2e/pkg/utils/infra/retriable_errors.go +++ b/test/new-e2e/pkg/utils/infra/retriable_errors.go @@ -24,22 +24,30 @@ func getKnownErrors() []knownError { // Add here errors that are known to be flakes and that should be retried return []knownError{ { - errorMessage: "i/o timeout", + errorMessage: `i\/o timeout`, retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-1 - errorMessage: "failed attempts: dial tcp :22: connect: connection refused", + errorMessage: `failed attempts: dial tcp :22: connect: connection refused`, retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-295 - errorMessage: "Resource provider reported that the resource did not exist while updating", + errorMessage: `Resource provider reported that the resource did not exist while updating`, retryType: ReCreate, }, { // https://datadoghq.atlassian.net/browse/ADXT-558 - errorMessage: "Process exited with status 2: running \" sudo cloud-init status --wait\"", + errorMessage: `Process exited with status 2: running " sudo cloud-init status --wait"`, + retryType: ReCreate, + }, + { + errorMessage: `waiting for ECS Service .+fakeintake-ecs.+ create: timeout while waiting for state to become 'tfSTABLE'`, + retryType: ReCreate, + }, + { + errorMessage: `error while waiting for fakeintake`, retryType: ReCreate, }, } diff --git a/test/new-e2e/pkg/utils/infra/stack_manager.go b/test/new-e2e/pkg/utils/infra/stack_manager.go index 2a580fd651d7d..15d9e44c2139b 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager.go +++ 
b/test/new-e2e/pkg/utils/infra/stack_manager.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "os" + "regexp" "runtime" "strings" "sync" @@ -56,16 +57,16 @@ var ( initStackManager sync.Once ) -// RetryStrategy is a function that given the current error and the number of retries, returns the type of retry to perform and a list of options to modify the configuration -type RetryStrategy func(error, int) (RetryType, []GetStackOption) +// RetryStrategyFromFn is a function that given the current error and the number of retries, returns the type of retry to perform and a list of options to modify the configuration +type RetryStrategyFromFn func(error, int) (RetryType, []GetStackOption) // StackManager handles type StackManager struct { stacks *safeStackMap knownErrors []knownError - // RetryStrategy defines how to handle retries. By default points to StackManager.getRetryStrategyFrom but can be overridden - RetryStrategy RetryStrategy + // GetRetryStrategyFrom defines how to handle retries. By default points to StackManager.getRetryStrategyFrom but can be overridden + GetRetryStrategyFrom RetryStrategyFromFn } type safeStackMap struct { @@ -120,7 +121,7 @@ func newStackManager() (*StackManager, error) { stacks: newSafeStackMap(), knownErrors: getKnownErrors(), } - sm.RetryStrategy = sm.getRetryStrategyFrom + sm.GetRetryStrategyFrom = sm.getRetryStrategyFrom return sm, nil } @@ -523,7 +524,7 @@ func (sm *StackManager) getStack(ctx context.Context, name string, deployFunc pu } } - retryStrategy, changedOpts := sm.RetryStrategy(upError, upCount) + retryStrategy, changedOpts := sm.GetRetryStrategyFrom(upError, upCount) sendEventToDatadog(params.DatadogEventSender, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", name), upError.Error(), []string{"operation:up", "result:fail", fmt.Sprintf("retry:%s", retryStrategy), fmt.Sprintf("stack:%s", stack.Name()), fmt.Sprintf("retries:%d", upCount)}) switch retryStrategy { @@ -619,7 +620,11 @@ func (sm *StackManager) 
getRetryStrategyFrom(err error, upCount int) (RetryType, } for _, knownError := range sm.knownErrors { - if strings.Contains(err.Error(), knownError.errorMessage) { + isMatch, err := regexp.MatchString(knownError.errorMessage, err.Error()) + if err != nil { + fmt.Printf("Error matching regex %s: %v\n", knownError.errorMessage, err) + } + if isMatch { return knownError.retryType, nil } } diff --git a/test/new-e2e/pkg/utils/infra/stack_manager_test.go b/test/new-e2e/pkg/utils/infra/stack_manager_test.go index c10aaba87ee4e..56de15ce98455 100644 --- a/test/new-e2e/pkg/utils/infra/stack_manager_test.go +++ b/test/new-e2e/pkg/utils/infra/stack_manager_test.go @@ -8,13 +8,15 @@ package infra import ( "context" + "errors" "fmt" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/common" "io" "strings" "testing" "time" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/common" + "github.com/DataDog/datadog-api-client-go/v2/api/datadogV1" "github.com/pulumi/pulumi/sdk/v3/go/auto" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" @@ -225,6 +227,50 @@ func TestStackManager(t *testing.T) { assert.Contains(t, mockDatadogEventSender.events[1].Title, fmt.Sprintf("[E2E] Stack %s : error on Pulumi stack up", stackName)) assert.Contains(t, mockDatadogEventSender.events[2].Title, fmt.Sprintf("[E2E] Stack %s : success on Pulumi stack up", stackName)) }) + + t.Run("should-return-retry-strategy-on-retriable-errors", func(t *testing.T) { + t.Parallel() + + type testError struct { + name string + errMessage string + expectedRetryType RetryType + } + + testErrors := []testError{ + { + name: "timeout", + errMessage: "i/o timeout", + expectedRetryType: ReCreate, + }, + { + name: "connection-refused", + errMessage: "failed attempts: dial tcp :22: connect: connection refused", + expectedRetryType: ReCreate, + }, + { + name: "resource-not-exist", + errMessage: "Resource provider reported that the resource did not exist while updating", + expectedRetryType: ReCreate, + }, + { + name: 
"cloud-init-timeout", + errMessage: "Process exited with status 2: running \" sudo cloud-init status --wait\"", + expectedRetryType: ReCreate, + }, + { + name: "ecs-fakeintake-timeout", + errMessage: "waiting for ECS Service (arn:aws:ecs:us-east-1:669783387624:service/fakeintake-ecs/ci-633219896-4670-e2e-dockersuite-80f62edf7bcc6194-aws-fakeintake-dockervm-srv) create: timeout while waiting for state to become 'tfSTABLE' (last state: 'tfPENDING', timeout: 20m0s)", + expectedRetryType: ReCreate, + }, + } + + for _, te := range testErrors { + err := errors.New(te.errMessage) + retryType, _ := stackManager.getRetryStrategyFrom(err, 0) + assert.Equal(t, te.expectedRetryType, retryType, te.name) + } + }) } func filterRetryOnErrorLogs(logs []string) []string { diff --git a/test/new-e2e/system-probe/errors.go b/test/new-e2e/system-probe/errors.go index 91cb123602fe2..297f80bb0184a 100644 --- a/test/new-e2e/system-probe/errors.go +++ b/test/new-e2e/system-probe/errors.go @@ -47,6 +47,8 @@ const ( ec2StateChangeTimeoutError ioTimeout tcp22ConnectionRefused + ec2InstanceCreateTimeout + ddAgentRepoFailure ) type handledError struct { @@ -104,6 +106,18 @@ var handledErrorsLs = []handledError{ metric: "ssh-connection-refused", action: retryStack | emitMetric, }, + { + errorType: ec2InstanceCreateTimeout, + errorString: "creating EC2 Instance: operation error", + metric: "ec2-instance-create-timeout", + action: retryStack | emitMetric, + }, + { + errorType: ddAgentRepoFailure, + errorString: "Failed to update the sources after adding the Datadog repository.", + metric: "apt-dd-agent-repo-failure", + action: emitMetric, + }, } type retryHandler struct { @@ -218,9 +232,6 @@ type pulumiError struct { } var commandRegex = regexp.MustCompile(`^ command:remote:Command \(([^\)]+)\):$`) -var archRegex = regexp.MustCompile(`distro_(arm64|x86_64)`) -var vmCmdRegex = regexp.MustCompile(`-cmd-.+-ddvm-\d+-\d+-(.+)$`) -var vmNameRegex = regexp.MustCompile(`-([^-]+)-distro`) func 
parsePulumiDiagnostics(message string) *pulumiError { var perr pulumiError @@ -245,20 +256,7 @@ func parsePulumiDiagnostics(message string) *pulumiError { if commandMatch != nil { perr.command = commandMatch[1] - archMatch := archRegex.FindStringSubmatch(perr.command) - if archMatch != nil { - perr.arch = archMatch[1] - } - - vmCmdMatch := vmCmdRegex.FindStringSubmatch(perr.command) - if vmCmdMatch != nil { - perr.vmCommand = vmCmdMatch[1] - } - - vmNameMatch := vmNameRegex.FindStringSubmatch(perr.command) - if vmNameMatch != nil { - perr.vmName = vmNameMatch[1] - } + perr.arch, perr.vmCommand, perr.vmName = parsePulumiComand(perr.command) } } else { perr.errorMessage += strings.Trim(line, " ") + "\n" @@ -267,3 +265,26 @@ func parsePulumiDiagnostics(message string) *pulumiError { return nil } + +var archRegex = regexp.MustCompile(`distro_(arm64|x86_64)`) +var vmCmdRegex = regexp.MustCompile(`-cmd-.+-(?:ddvm-\d+-\d+|distro_(?:x86_64|arm64))-(.+)$`) +var vmNameRegex = regexp.MustCompile(`-(?:conn|cmd)-(?:arm64|x86_64)-([^-]+)-`) + +func parsePulumiComand(command string) (arch, vmCommand, vmName string) { + archMatch := archRegex.FindStringSubmatch(command) + if archMatch != nil { + arch = archMatch[1] + } + + vmCmdMatch := vmCmdRegex.FindStringSubmatch(command) + if vmCmdMatch != nil { + vmCommand = vmCmdMatch[1] + } + + vmNameMatch := vmNameRegex.FindStringSubmatch(command) + if vmNameMatch != nil { + vmName = vmNameMatch[1] + } + + return +} diff --git a/test/new-e2e/system-probe/errors_test.go b/test/new-e2e/system-probe/errors_test.go index eeee52d43fb4f..84ae28f4f9baf 100644 --- a/test/new-e2e/system-probe/errors_test.go +++ b/test/new-e2e/system-probe/errors_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -const output = ` +const outputLocalError = ` Updating (gjulian-guillermo.julian-e2e-report-all-errors-ddvm): pulumi:pulumi:Stack e2elocal-gjulian-guillermo.julian-e2e-report-all-errors-ddvm running @@ -94,12 +94,142 @@ Resources: 
Duration: 6s ` +const outputSSHFailed = ` + pulumi:pulumi:Stack e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877 **failed** 1 error +Diagnostics: + pulumi:pulumi:Stack (e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877): + error: update failed + command:remote:Command (remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb): + error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host) +Outputs: + kmt-stack: [secret] +Resources: + +-8 replaced + 349 unchanged +Duration: 7m35s +` + +const outputSSHFailedWithChangedOrder = ` +@ updating.... + + pulumi:pulumi:Stack e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877 creating (933s) error: update failed + + pulumi:pulumi:Stack e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877 **creating failed (933s)** 1 error +Diagnostics: + command:remote:Command (remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb): + error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host) + + pulumi:pulumi:Stack (e2eci-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877): + error: update failed + +Outputs: + kmt-stack: [secret] + +Resources: + + 357 created + +Duration: 15m34s +` + func TestParseDiagnostics(t *testing.T) { - result := parsePulumiDiagnostics(output) - require.NotNil(t, result) - require.Equal(t, "remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb", result.command) - require.Equal(t, "arm64", result.arch) - require.Equal(t, 
"mount-disk-dev-vdb", result.vmCommand) - require.Equal(t, "error: Process exited with status 127: running \" nocommand /mnt/docker && mount /dev/vdb /mnt/docker\":\nbash: line 1: nocommand: command not found\n", result.errorMessage) - require.Equal(t, "ubuntu_22.04", result.vmName) + cases := []struct { + caseName string + output string + result pulumiError + }{ + { + caseName: "LocalError", + output: outputLocalError, + result: pulumiError{ + command: "remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb", + arch: "arm64", + vmCommand: "mount-disk-dev-vdb", + errorMessage: "error: Process exited with status 127: running \" nocommand /mnt/docker && mount /dev/vdb /mnt/docker\":\nbash: line 1: nocommand: command not found\n", + vmName: "ubuntu_22.04", + }, + }, + { + caseName: "SSHFailed", + output: outputSSHFailed, + result: pulumiError{ + command: "remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb", + arch: "arm64", + vmCommand: "mount-disk-dev-vdb", + vmName: "fedora_37", + errorMessage: "error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host)\n", + }, + }, + { + caseName: "SSHFailedWithChangedOrder", + output: outputSSHFailedWithChangedOrder, + result: pulumiError{ + command: "remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb", + arch: "arm64", + vmCommand: "mount-disk-dev-vdb", + vmName: "fedora_37", + errorMessage: "error: proxy: after 60 failed attempts: ssh: rejected: connect failed (No route to host)\n", + }, + }, + } + + for _, c := range cases { + t.Run(c.caseName, func(tt *testing.T) { + 
result := parsePulumiDiagnostics(c.output) + require.NotNil(tt, result) + require.Equal(tt, c.result, *result) + }) + } +} + +func TestParsePulumiCommand(t *testing.T) { + cases := []struct { + caseName string + command string + arch string + vmCmd string + vmName string + }{ + { + caseName: "NoVMSet", + command: "remote-gjulian-guillermo.julian-e2e-report-all-errors-ddvm-arm64-conn-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-cmd-arm64-ubuntu_22.04-distro_arm64-ddvm-4-8192-mount-disk-dev-vdb", + arch: "arm64", + vmCmd: "mount-disk-dev-vdb", + vmName: "ubuntu_22.04", + }, + { + caseName: "CommandWithoutVM", + command: "remote-aws-ci-634872953-4670-kernel-matrix-testing-system-probe-x86-64-44043832-x86_64-cmd-only_usm-distro_x86_64-download-with-curl", + arch: "x86_64", + vmCmd: "download-with-curl", + vmName: "", + }, + { + caseName: "DomainCreationCommand", + command: "remote-aws-ci-632806887-4670-kernel-matrix-testing-system-probe-arm64-43913143-arm64-cmd-arm64-debian_12-distro_arm64-no_usm-ddvm-4-12288-create-nvram", + arch: "arm64", + vmCmd: "create-nvram", + vmName: "debian_12", + }, + { + caseName: "AlteredTagOrder", + command: "remote-ci-632806887-4670-kernel-matrix-testing-system-probe-arm64-43913143-arm64-conn-arm64-ubuntu_23.10-only_usm-distro_arm64-ddvm-4-12288-cmd-arm64-ubuntu_23.10-only_usm-distro_arm64-ddvm-4-12288-set-docker-data-root", + arch: "arm64", + vmCmd: "set-docker-data-root", + vmName: "ubuntu_23.10", + }, + { + caseName: "CommandWithVMSet", + command: "remote-ci-630160752-4670-kernel-matrix-testing-system-probe-arm64-43724877-arm64-conn-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-cmd-arm64-fedora_37-no_usm-distro_arm64-ddvm-4-12288-mount-disk-dev-vdb", + arch: "arm64", + vmCmd: "mount-disk-dev-vdb", + vmName: "fedora_37", + }, + } + + for _, c := range cases { + t.Run(c.caseName, func(tt *testing.T) { + arch, vmCmd, vmName := parsePulumiComand(c.command) + require.Equal(tt, c.arch, arch) + require.Equal(tt, c.vmCmd, vmCmd) + 
require.Equal(tt, c.vmName, vmName) + }) + } } diff --git a/test/new-e2e/system-probe/system-probe-test-env.go b/test/new-e2e/system-probe/system-probe-test-env.go index 516f47d76244a..2148e4d94427e 100644 --- a/test/new-e2e/system-probe/system-probe-test-env.go +++ b/test/new-e2e/system-probe/system-probe-test-env.go @@ -252,7 +252,7 @@ func NewTestEnv(name, x86InstanceType, armInstanceType string, opts *EnvOpts) (* infraEnv: opts.InfraEnv, } - stackManager.RetryStrategy = retryHandler.HandleError + stackManager.GetRetryStrategyFrom = retryHandler.HandleError pulumiStack, upResult, pulumiErr := stackManager.GetStackNoDeleteOnFailure( systemProbeTestEnv.context, systemProbeTestEnv.name, diff --git a/test/new-e2e/system-probe/test-runner/main.go b/test/new-e2e/system-probe/test-runner/main.go index c5bc806155db1..a7b21754bd50e 100644 --- a/test/new-e2e/system-probe/test-runner/main.go +++ b/test/new-e2e/system-probe/test-runner/main.go @@ -65,7 +65,7 @@ var timeouts = map[*regexp.Regexp]time.Duration{ regexp.MustCompile("pkg/network/tracer$"): 55 * time.Minute, regexp.MustCompile("pkg/network/usm$"): 55 * time.Minute, regexp.MustCompile("pkg/network/usm/tests$"): 20 * time.Minute, - regexp.MustCompile("pkg/security.*"): 30 * time.Minute, + regexp.MustCompile("pkg/security.*"): 45 * time.Minute, } func getTimeout(pkg string) time.Duration { diff --git a/test/new-e2e/system-probe/vm-metrics/vm-metrics.go b/test/new-e2e/system-probe/vm-metrics/vm-metrics.go new file mode 100644 index 0000000000000..9d2d0edae409a --- /dev/null +++ b/test/new-e2e/system-probe/vm-metrics/vm-metrics.go @@ -0,0 +1,283 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build !windows + +// Package systemprobe is sets up the remote testing environment for system-probe using the Kernel Matrix Testing framework +package main + +import ( + "flag" + "fmt" + "log" + "os" + "regexp" + "syscall" + "time" + + "github.com/DataDog/datadog-go/v5/statsd" + "github.com/digitalocean/go-libvirt" + "github.com/digitalocean/go-libvirt/socket/dialers" +) + +const kmtMicroVmsPrefix = "kmt.microvm." + +var distrosMatch = map[string]*regexp.Regexp{ + "ubuntu": regexp.MustCompile(`-(ubuntu_[\.,\d]{1,5}).*-`), + "fedora": regexp.MustCompile(`-(fedora_[\.,\d]{1,5}).*-`), + "debian": regexp.MustCompile(`-(debian_[\.,\d]{1,5}).*-`), + "centos": regexp.MustCompile(`-(centos_[\.,\d]{1,5}).*-`), + "amazon": regexp.MustCompile(`-(amazon_[\.,\d]{1,5}).*-`), + "rocky": regexp.MustCompile(`-(rocky_[\.,\d]{1,5}).*-`), + "oracle": regexp.MustCompile(`-(oracle_[\.,\d]{1,5}).*-`), + "opensuse": regexp.MustCompile(`-(opensuse_[\.,\d]{1,5}).*-`), + "suse": regexp.MustCompile(`-(suse_[\.,\d]{1,5}).*-`), +} + +var memStatTagToName = map[libvirt.DomainMemoryStatTags]string{ + libvirt.DomainMemoryStatSwapIn: "swap_in_bytes", // The total amount of data read from swap space (in kB). + libvirt.DomainMemoryStatSwapOut: "swap_out_bytes", // The total amount of memory written out to swap space (in kB). + libvirt.DomainMemoryStatMajorFault: "major_pagefault", // Page faults occur when a process makes a valid access to virtual memory that is not available. When servicing the page fault, if disk IO is required, it is considered a major fault. + libvirt.DomainMemoryStatAvailable: "memory_available_bytes", // The total amount of usable memory as seen by the domain. This value may be less than the amount of memory assigned to the domain if a balloon driver is in use or if the guest OS does not initialize all assigned pages. This value is expressed in kB. + libvirt.DomainMemoryStatActualBalloon: "memory_actual_balloon_bytes", // Current balloon value (in KB). 
+ libvirt.DomainMemoryStatRss: "memory_rss_bytes", // Resident Set Size of the process running the domain. This value is in kB + libvirt.DomainMemoryStatUsable: "memory_usable_bytes", // How much the balloon can be inflated without pushing the guest system to swap, corresponds to 'Available' in /proc/meminfo + libvirt.DomainMemoryStatUnused: "memory_unused_bytes", // The amount of memory left completely unused by the system. Memory that is available but used for reclaimable caches should NOT be reported as free. This value is expressed in kB. +} + +type libvirtInterface interface { + ConnectListAllDomains(int32, libvirt.ConnectListAllDomainsFlags) ([]libvirt.Domain, uint32, error) + DomainMemoryStats(libvirt.Domain, uint32, uint32) ([]libvirt.DomainMemoryStat, error) +} + +type libvirtExporter struct { + libvirt libvirtInterface + statsdClient statsd.ClientInterface +} + +func newLibvirtExporter(l libvirtInterface, client statsd.ClientInterface) *libvirtExporter { + return &libvirtExporter{ + libvirt: l, + statsdClient: client, + } +} + +func (l *libvirtExporter) collect() ([]*domainMetrics, error) { + return collectLibvirtMetrics(l.libvirt) +} + +func (l *libvirtExporter) submit(metrics []*domainMetrics) error { + for _, dm := range metrics { + for _, m := range dm.metrics { + if err := l.statsdClient.Gauge(kmtMicroVmsPrefix+m.name, float64(m.value), m.tags, 1); err != nil { + return fmt.Errorf("error sending metric: %w", err) + } + } + } + if err := l.statsdClient.Flush(); err != nil { + return fmt.Errorf("failed to flush client: %w", err) + } + + return nil +} + +type statsdMetric struct { + name string + value uint64 + tags []string +} + +type domainMetrics struct { + osID string + metrics []statsdMetric + + libvirtDomain libvirt.Domain +} + +func (d *domainMetrics) addMetric(name string, value uint64, tags []string) { + d.metrics = append(d.metrics, statsdMetric{ + name: name, + value: value, + tags: tags, + }) +} + +func kbToBytes(kb uint64) uint64 { + return 
kb * 1024 +} + +func (d *domainMetrics) collectDomainMemoryStatInfo(l libvirtInterface) error { + memStats, err := l.DomainMemoryStats(d.libvirtDomain, uint32(libvirt.DomainMemoryStatNr), 0) + if err != nil { + return fmt.Errorf("failed to get memory stats: %w", err) + } + + tags := []string{fmt.Sprintf("os:%s", d.osID)} + for _, stat := range memStats { + if statString, ok := memStatTagToName[libvirt.DomainMemoryStatTags(stat.Tag)]; ok { + if stat.Tag == int32(libvirt.DomainMemoryStatMajorFault) { + d.addMetric(statString, stat.Val, tags) + } else { + d.addMetric(statString, kbToBytes(stat.Val), tags) + } + } + } + + return nil +} + +func collectLibvirtMetrics(l libvirtInterface) ([]*domainMetrics, error) { + var dMetrics []*domainMetrics + + domains, _, err := l.ConnectListAllDomains(1, libvirt.ConnectListDomainsActive) + if err != nil { + return nil, fmt.Errorf("failed to list domains: %w", err) + } + + for _, d := range domains { + osID := parseOSInformation(d.Name) + if osID == "" { + continue + } + + dMetrics = append(dMetrics, &domainMetrics{ + osID: osID, + libvirtDomain: d, + }) + } + + for _, d := range dMetrics { + if err := d.collectDomainMemoryStatInfo(l); err != nil { + return nil, fmt.Errorf("failed to collect memory stats for domain %s: %w", d.osID, err) + } + } + + return dMetrics, nil +} + +func parseOSInformation(name string) string { + for _, distro := range distrosMatch { + if match := distro.FindStringSubmatch(name); match != nil { + return match[1] + } + } + + return "" +} + +type tagsList []string + +func (t *tagsList) String() string { + return fmt.Sprintf("%v", *t) +} + +func (t *tagsList) Set(value string) error { + *t = append(*t, value) + return nil +} + +// runAsDaemon function runs the vm-metrics collector as a daemon +// To daemonize a process this function: +// - forksExec the vm-metrics binary, allowing the parent to exit. +// this makes the new process the child of the init process. +// - setsid() on child process. 
Make the child the session leader +// and release it from the original controlling terminal. +// - Reset umask, so that files are created with the requested +// permissions +func runAsDaemon(daemonLogFile string) error { + if daemonLogFile == "" { + daemonLogFile = "/tmp/vm-metrics.log" + } + + if _, isDaemon := os.LookupEnv("DAEMON_COLLECTOR"); !isDaemon { + f, err := os.OpenFile(daemonLogFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("failed to open daemon log file: %w", err) + } + + if _, err := syscall.ForkExec(os.Args[0], os.Args, &syscall.ProcAttr{ + Dir: "/", + Env: append(os.Environ(), "DAEMON_COLLECTOR=1"), + Sys: &syscall.SysProcAttr{ + Setsid: true, + }, + Files: []uintptr{0, f.Fd(), f.Fd()}, // print message to the same pty + }); err != nil { + return fmt.Errorf("failed to fork/exec parent process: %w", err) + } + + os.Exit(0) + } + + // close stdin + stdin := os.NewFile(0, "stdin") + stdin.Close() + + // open /dev/null as stdin + if _, err := os.Open("/dev/null"); err != nil { + return fmt.Errorf("failed to open '/dev/null' as stdin: %w", err) + } + + // clear umask + syscall.Umask(0) + + return nil +} + +func main() { + var globalTags tagsList + + statsdPort := flag.String("statsd-port", "8125", "Statsd port") + statsdHost := flag.String("statsd-host", "127.0.0.1", "Statsd host") + collectionInterval := flag.Duration("interval", time.Second*20, "interval for collecting vm stats") + libvirtDaemonURI := flag.String("libvirt-uri", "", "libvirt daemon URI") + daemonize := flag.Bool("daemon", false, "run collector as a daemon") + daemonLogFile := flag.String("log-file", "", "log file daemon") + flag.Var(&globalTags, "tag", "global tags to set") + flag.Parse() + + if *daemonize { + if err := runAsDaemon(*daemonLogFile); err != nil { + log.Printf("failed to run collector as daemon: %v", err) + return + } + } + + log.Printf("VM metrics collector started") + + dialer := 
dialers.NewLocal(dialers.WithSocket(*libvirtDaemonURI), dialers.WithLocalTimeout((5 * time.Second))) + l := libvirt.NewWithDialer(dialer) + if err := l.ConnectToURI(libvirt.QEMUSystem); err != nil { + log.Fatalf("failed to connect to libvirt: %v", err) + } + defer func() { + if err := l.Disconnect(); err != nil { + log.Printf("failed to disconnect: %v", err) + } + }() + + log.Printf("launching statsd with global tags: %v", globalTags) + dogstatsdClient, err := statsd.New(fmt.Sprintf("%s:%s", *statsdHost, *statsdPort), statsd.WithTags(globalTags)) + if err != nil { + log.Fatal(err) + } + + lexporter := newLibvirtExporter(l, dogstatsdClient) + + for range time.Tick(*collectionInterval) { + metrics, err := lexporter.collect() + if err != nil { + log.Fatal(err) + } + + log.Println("Submitting metrics to statsd:") + for _, m := range metrics { + log.Printf(" %v", *m) + } + if err := lexporter.submit(metrics); err != nil { + log.Fatal(err) + } + } +} diff --git a/test/new-e2e/system-probe/vm-metrics/vm-metrics_test.go b/test/new-e2e/system-probe/vm-metrics/vm-metrics_test.go new file mode 100644 index 0000000000000..657859a3d68fa --- /dev/null +++ b/test/new-e2e/system-probe/vm-metrics/vm-metrics_test.go @@ -0,0 +1,139 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build !windows + +package main + +import ( + "strings" + "testing" + + "github.com/DataDog/datadog-agent/pkg/trace/teststatsd" + "github.com/digitalocean/go-libvirt" + "github.com/stretchr/testify/require" +) + +var memStats = map[libvirt.DomainMemoryStatTags]uint64{ + libvirt.DomainMemoryStatSwapIn: 10, + libvirt.DomainMemoryStatSwapOut: 20, + libvirt.DomainMemoryStatMajorFault: 30, + libvirt.DomainMemoryStatAvailable: 40, + libvirt.DomainMemoryStatActualBalloon: 50, + libvirt.DomainMemoryStatRss: 60, + libvirt.DomainMemoryStatUnused: 70, + libvirt.DomainMemoryStatUsable: 80, +} + +var nameToTag = map[string]libvirt.DomainMemoryStatTags{ + "swap_in_bytes": libvirt.DomainMemoryStatSwapIn, + "swap_out_bytes": libvirt.DomainMemoryStatSwapOut, + "major_pagefault": libvirt.DomainMemoryStatMajorFault, + "memory_available_bytes": libvirt.DomainMemoryStatAvailable, + "memory_actual_balloon_bytes": libvirt.DomainMemoryStatActualBalloon, + "memory_rss_bytes": libvirt.DomainMemoryStatRss, + "memory_usable_bytes": libvirt.DomainMemoryStatUsable, + "memory_unused_bytes": libvirt.DomainMemoryStatUnused, +} + +func TestParseOSInformation(t *testing.T) { + cases := map[string]string{ + "x86_64-fedora_37-distro_x86_64-no_usm-ddvm-4-12288": "fedora_37", + "x86_64-fedora_38-distro_x86_64-no_usm-ddvm-4-12288": "fedora_38", + "x86_64-amazon_4.14-distro_x86_64-no_usm-ddvm-4-12288": "amazon_4.14", + "x86_64-amazon_5.10-distro_x86_64-no_usm-ddvm-4-12288": "amazon_5.10", + "x86_64-amazon_5.4-distro_x86_64-no_usm-ddvm-4-12288": "amazon_5.4", + "x86_64-amazon_2023-distro_x86_64-no_usm-ddvm-4-12288": "amazon_2023", + "x86_64-centos_7.9-distro_x86_64-no_usm-ddvm-4-12288": "centos_7.9", + "x86_64-centos_8-distro_x86_64-no_usm-ddvm-4-12288": "centos_8", + "x86_64-ubuntu_24.04-all_tests-distro_x86_64-ddvm-4-12288": "ubuntu_24.04", + "arm64-ubuntu_23.10-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_23.10", + "arm64-ubuntu_22.04-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_22.04", + 
"arm64-ubuntu_20.04-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_20.04", + "arm64-ubuntu_18.04-distro_arm64-no_usm-ddvm-4-12288": "ubuntu_18.04", + "x86_64-ubuntu_16.04-distro_x86_64-no_usm-ddvm-4-12288": "ubuntu_16.04", + "x86_64-debian_9-distro_x86_64-no_usm-ddvm-4-12288": "debian_9", + "x86_64-debian_10-only_usm-distro_x86_64-ddvm-4-12288": "debian_10", + "x86_64-debian_11-only_usm-distro_x86_64-ddvm-4-12288": "debian_11", + "x86_64-debian_12-only_usm-distro_x86_64-ddvm-4-12288": "debian_12", + "x86_64-suse_12.5-all_tests-distro_x86_64-ddvm-4-12288": "suse_12.5", + "x86_64-opensuse_15.5-all_tests-distro_x86_64-ddvm-4-12288": "opensuse_15.5", + "x86_64-opensuse_15.3-all_tests-distro_x86_64-ddvm-4-12288": "opensuse_15.3", + "x86_64-rocky_9.3-all_tests-distro_x86_64-ddvm-4-12288": "rocky_9.3", + "x86_64-rocky_8.5-all_tests-distro_x86_64-ddvm-4-12288": "rocky_8.5", + "x86_64-oracle_9.3-all_tests-distro_x86_64-ddvm-4-12288": "oracle_9.3", + "x86_64-oracle_8.9-all_tests-distro_x86_64-ddvm-4-12288": "oracle_8.9", + } + + for id, os := range cases { + osID := parseOSInformation(id) + require.Equal(t, osID, os) + } +} + +type libvirtMock struct{} + +func (l *libvirtMock) ConnectListAllDomains(_ int32, _ libvirt.ConnectListAllDomainsFlags) ([]libvirt.Domain, uint32, error) { + return []libvirt.Domain{ + {Name: "x86_64-debian_12-only_usm-distro_x86_64-ddvm-4-12288"}, + {Name: "x86_64-ubuntu_16.04-distro_x86_64-no_usm-ddvm-4-12288"}, + }, 0, nil +} + +func (l *libvirtMock) DomainMemoryStats(_ libvirt.Domain, _ uint32, _ uint32) ([]libvirt.DomainMemoryStat, error) { + var stats []libvirt.DomainMemoryStat + for tag, val := range memStats { + stats = append(stats, libvirt.DomainMemoryStat{ + Tag: int32(tag), + Val: val, + }) + } + return stats, nil +} + +func bytesToKb(bytes uint64) uint64 { + return bytes / 1024 +} + +func TestLibvirtCollectMetrics(t *testing.T) { + lexporter := newLibvirtExporter(&libvirtMock{}, &teststatsd.Client{}) + + domainMetrics, err := 
lexporter.collect() + require.NoError(t, err) + + for _, dm := range domainMetrics { + for _, m := range dm.metrics { + tag, ok := nameToTag[m.name] + require.True(t, ok) + + if tag == libvirt.DomainMemoryStatMajorFault { + require.Equal(t, memStats[tag], m.value) + } else { + require.Equal(t, memStats[tag], bytesToKb(m.value)) + } + } + } +} +func TestLibvirtSubmitMetrics(t *testing.T) { + lexporter := newLibvirtExporter(&libvirtMock{}, &teststatsd.Client{}) + + domainMetrics, err := lexporter.collect() + require.NoError(t, err) + + err = lexporter.submit(domainMetrics) + require.NoError(t, err) + + for name, summary := range lexporter.statsdClient.(*teststatsd.Client).GetGaugeSummaries() { + statName := strings.TrimPrefix(name, kmtMicroVmsPrefix) + expectedVal := memStats[nameToTag[statName]] + if statName != "major_pagefault" { + expectedVal *= 1024 + } + + for _, call := range summary.Calls { + require.Equal(t, call.Value, float64(expectedVal)) + } + } +} diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go index 958f2988e0ea9..bbf6d8ccf524e 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/k8s-logs/file_tailing_test.go @@ -7,8 +7,8 @@ package k8sfiletailing import ( "context" + _ "embed" "fmt" - "os" "testing" "time" @@ -61,18 +61,23 @@ func (v *k8sSuite) TestSingleLogAndMetadata() { } _, err = v.Env().KubernetesCluster.Client().BatchV1().Jobs("default").Create(context.TODO(), jobSpcec, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not properly start job") + require.NoError(v.T(), err, "Could not properly start job") v.EventuallyWithT(func(c *assert.CollectT) { logsServiceNames, err := v.Env().FakeIntake.Client().GetLogServiceNames() assert.NoError(c, err, "Error starting job") + if err != nil { + return + } if assert.Contains(c, 
logsServiceNames, "ubuntu", "Ubuntu service not found") { filteredLogs, err := v.Env().FakeIntake.Client().FilterLogs("ubuntu") assert.NoError(c, err, "Error filtering logs") - if assert.NotEmpty(v.T(), filteredLogs, "Fake Intake returned no logs even though log service name exists") { + if err != nil { + return + } + if assert.NotEmpty(c, filteredLogs, "Fake Intake returned no logs even though log service name exists") { assert.Equal(c, testLogMessage, filteredLogs[0].Message, "Test log doesn't match") - // Check container metatdata assert.Equal(c, filteredLogs[0].Service, "ubuntu", "Could not find service") assert.NotNil(c, filteredLogs[0].HostName, "Hostname not found") @@ -83,12 +88,13 @@ func (v *k8sSuite) TestSingleLogAndMetadata() { }, 1*time.Minute, 10*time.Second) } +//go:embed long_line_log.txt +var longLineLog string + func (v *k8sSuite) TestLongLogLine() { err := v.Env().FakeIntake.Client().FlushServerAndResetAggregators() require.NoError(v.T(), err, "Could not reset the FakeIntake") var backOffLimit int32 = 4 - file, err := os.ReadFile("long_line_log.txt") - assert.NoError(v.T(), err, "Could not open long line file.") jobSpcec := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -102,7 +108,7 @@ func (v *k8sSuite) TestLongLogLine() { { Name: "long-line-job", Image: "ubuntu", - Command: []string{"echo", string(file)}, + Command: []string{"echo", longLineLog}, }, }, RestartPolicy: corev1.RestartPolicyNever, @@ -113,17 +119,23 @@ func (v *k8sSuite) TestLongLogLine() { } _, err = v.Env().KubernetesCluster.Client().BatchV1().Jobs("default").Create(context.TODO(), jobSpcec, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not properly start job") + require.NoError(v.T(), err, "Could not properly start job") v.EventuallyWithT(func(c *assert.CollectT) { logsServiceNames, err := v.Env().FakeIntake.Client().GetLogServiceNames() assert.NoError(c, err, "Error starting job") + if err != nil { + return + } if assert.Contains(c, logsServiceNames, "ubuntu", 
"Ubuntu service not found") { filteredLogs, err := v.Env().FakeIntake.Client().FilterLogs("ubuntu") assert.NoError(c, err, "Error filtering logs") - if assert.NotEmpty(v.T(), filteredLogs, "Fake Intake returned no logs even though log service name exists") { - assert.Equal(c, string(file), fmt.Sprintf("%s%s", filteredLogs[0].Message, "\n"), "Test log doesn't match") + if err != nil { + return + } + if assert.NotEmpty(c, filteredLogs, "Fake Intake returned no logs even though log service name exists") { + assert.Equal(c, longLineLog, fmt.Sprintf("%s%s", filteredLogs[0].Message, "\n"), "Test log doesn't match") } } @@ -142,7 +154,7 @@ func (v *k8sSuite) TestContainerExclude() { }, } _, err = v.Env().KubernetesCluster.Client().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not create namespace") + require.NoError(v.T(), err, "Could not create namespace") var backOffLimit int32 = 4 testLogMessage := "Test log message here" @@ -170,11 +182,14 @@ func (v *k8sSuite) TestContainerExclude() { } _, err = v.Env().KubernetesCluster.Client().BatchV1().Jobs(namespaceName).Create(context.TODO(), jobSpcec, metav1.CreateOptions{}) - assert.NoError(v.T(), err, "Could not properly start job") + require.NoError(v.T(), err, "Could not properly start job") v.EventuallyWithT(func(c *assert.CollectT) { logsServiceNames, err := v.Env().FakeIntake.Client().GetLogServiceNames() assert.NoError(c, err, "Error starting job") + if err != nil { + return + } assert.NotContains(c, logsServiceNames, "alpine", "Alpine service found after excluded") }, 1*time.Minute, 10*time.Second) } diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go index 65eeaa1d19089..f8950e85c532a 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go +++ 
b/test/new-e2e/tests/agent-metrics-logs/log-agent/utils/file_tailing_utils.go @@ -117,9 +117,8 @@ func CheckLogFilePresence(ls LogsTestSuite, logFileName string) { } // FetchAndFilterLogs fetches logs from the fake intake server and filters them by service and content. -func FetchAndFilterLogs(t *testing.T, fakeIntake *components.FakeIntake, service, content string) ([]*aggregator.Log, error) { +func FetchAndFilterLogs(fakeIntake *components.FakeIntake, service, content string) ([]*aggregator.Log, error) { client := fakeIntake.Client() - t.Helper() names, err := client.GetLogServiceNames() if err != nil { @@ -154,7 +153,8 @@ func CheckLogsExpected(t *testing.T, fakeIntake *components.FakeIntake, service, t.Helper() assert.EventuallyWithT(t, func(c *assert.CollectT) { - logs, err := FetchAndFilterLogs(t, fakeIntake, service, content) + logs, err := FetchAndFilterLogs(fakeIntake, service, content) + if assert.NoErrorf(c, err, "Error fetching logs: %s", err) { intakeLog := logsToString(logs) if assert.NotEmpty(c, logs, "Expected logs with content: '%s' not found. Instead, found: %s", content, intakeLog) { @@ -171,11 +171,11 @@ func CheckLogsExpected(t *testing.T, fakeIntake *components.FakeIntake, service, // CheckLogsNotExpected verifies the absence of unexpected logs. func CheckLogsNotExpected(t *testing.T, fakeIntake *components.FakeIntake, service, content string) { t.Helper() - + t.Logf("Checking for logs from service: '%s' with content: '%s' are not collected", service, content) assert.EventuallyWithT(t, func(c *assert.CollectT) { - logs, err := FetchAndFilterLogs(t, fakeIntake, service, content) - intakeLog := logsToString(logs) + logs, err := FetchAndFilterLogs(fakeIntake, service, content) if assert.NoErrorf(c, err, "Error fetching logs: %s", err) { + intakeLog := logsToString(logs) if assert.Empty(c, logs, "Unexpected logs with content: '%s' found. 
Instead, found: %s", content, intakeLog) { t.Logf("No logs from service: '%s' with content: '%s' collected as expected", service, content) } diff --git a/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go b/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go index 2f13e89793c07..c2becb766729a 100644 --- a/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go +++ b/test/new-e2e/tests/agent-metrics-logs/log-agent/windows-log/file-tailing/file_tailing_test.go @@ -14,13 +14,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + testos "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-metrics-logs/log-agent/utils" - "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - testos "github.com/DataDog/test-infra-definitions/components/os" - "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" ) // WindowsFakeintakeSuite defines a test suite for the log agent interacting with a virtual machine and fake intake. 
@@ -137,17 +138,15 @@ func (s *WindowsFakeintakeSuite) testLogNoPermission() { assert.NoErrorf(t, err, "Unable to adjust permissions for the log file %s.", logFilePath) t.Logf("Read permissions revoked") - // Generate logs and check the intake for no new logs because of revoked permissions + // wait for agent to be ready after restart s.EventuallyWithT(func(c *assert.CollectT) { - agentReady := s.Env().Agent.Client.IsReady() - if assert.Truef(c, agentReady, "Agent is not ready after restart") { - // Generate log - utils.AppendLog(s, logFileName, "access-denied", 1) - // Check intake for new logs - utils.CheckLogsNotExpected(s.T(), s.Env().FakeIntake, "hello", "access-denied") - } + assert.Truef(c, s.Env().Agent.Client.IsReady(), "Agent is not ready after restart") }, 2*time.Minute, 5*time.Second) + // Generate logs and check the intake for no new logs because of revoked permissions + utils.AppendLog(s, logFileName, "access-denied", 1) + // Check intake for new logs + utils.CheckLogsNotExpected(s.T(), s.Env().FakeIntake, "hello", "access-denied") } func (s *WindowsFakeintakeSuite) testLogCollectionAfterPermission() { diff --git a/test/new-e2e/tests/agent-platform/common/agent_install.go b/test/new-e2e/tests/agent-platform/common/agent_install.go index 47a941e100f89..b3d8eafb661c4 100644 --- a/test/new-e2e/tests/agent-platform/common/agent_install.go +++ b/test/new-e2e/tests/agent-platform/common/agent_install.go @@ -120,6 +120,6 @@ func CheckUninstallation(t *testing.T, client *TestClient) { installFolderPath := client.Helper.GetInstallFolder() entries, err := client.FileManager.ReadDir(installFolderPath) - require.Error(tt, err, "should not find anything in install folder, found %v dir entries ", len(entries)) + require.Error(tt, err, "should not find anything in install folder, found %v dir entries.\nContent: %+v ", len(entries), entries) }) } diff --git a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go 
b/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go index 5499a802a5e65..7745859ee3e3d 100644 --- a/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go +++ b/test/new-e2e/tests/agent-shared-components/config-refresh/non_core_agents_sync_win_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" @@ -29,6 +30,9 @@ type configRefreshWindowsSuite struct { } func TestConfigRefreshWindowsSuite(t *testing.T) { + // WINA-1014 + flake.Mark(t) + t.Parallel() e2e.Run(t, &configRefreshWindowsSuite{}, e2e.WithProvisioner(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))) } diff --git a/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go b/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go index 0528334d553c0..53d9f4abe4836 100644 --- a/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go +++ b/test/new-e2e/tests/agent-shared-components/secret/secret_win_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" secrets "github.com/DataDog/datadog-agent/test/new-e2e/tests/agent-shared-components/secretsutils" @@ -26,6 +27,9 @@ type windowsRuntimeSecretSuite struct { } func TestWindowsRuntimeSecretSuite(t *testing.T) { + // WINA-1014 + flake.Mark(t) + t.Parallel() e2e.Run(t, &windowsRuntimeSecretSuite{}, e2e.WithProvisioner(awshost.Provisioner( 
awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)), diff --git a/test/new-e2e/tests/agent-subcommands/health/health_common_test.go b/test/new-e2e/tests/agent-subcommands/health/health_common_test.go new file mode 100644 index 0000000000000..572282b7e1875 --- /dev/null +++ b/test/new-e2e/tests/agent-subcommands/health/health_common_test.go @@ -0,0 +1,84 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package health + +import ( + "net/http" + "time" + + "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" + "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" + + "github.com/DataDog/datadog-agent/test/fakeintake/api" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" + + "github.com/cenkalti/backoff" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type baseHealthSuite struct { + e2e.BaseSuite[environments.Host] + descriptor os.Descriptor +} + +// section contains the content status of a specific section (e.g. 
Forwarder) +func (v *baseHealthSuite) TestDefaultInstallHealthy() { + interval := 1 * time.Second + + var output string + var err error + err = backoff.Retry(func() error { + output, err = v.Env().Agent.Client.Health() + if err != nil { + return err + } + return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(interval), uint64(15))) + + assert.NoError(v.T(), err) + assert.Contains(v.T(), output, "Agent health: PASS") +} + +func (v *baseHealthSuite) TestDefaultInstallUnhealthy() { + // the fakeintake says that any API key is invalid by sending a 403 code + override := api.ResponseOverride{ + Endpoint: "/api/v1/validate", + StatusCode: 403, + Method: http.MethodGet, + Body: []byte("invalid API key"), + } + err := v.Env().FakeIntake.Client().ConfigureOverride(override) + require.NoError(v.T(), err) + + // restart the agent, which validates the key using the fakeintake at startup + v.UpdateEnv(awshost.Provisioner( + awshost.WithEC2InstanceOptions(ec2.WithOS(v.descriptor)), + awshost.WithAgentOptions(agentparams.WithAgentConfig("log_level: info\nforwarder_apikey_validation_interval: 1")), + )) + + require.EventuallyWithT(v.T(), func(collect *assert.CollectT) { + // forwarder should be unhealthy because the key is invalid + _, err = v.Env().Agent.Client.Health() + assert.ErrorContains(collect, err, "Agent health: FAIL") + assert.ErrorContains(collect, err, "=== 1 unhealthy components ===\nforwarder") + }, time.Second*30, time.Second) + + // the fakeintake now says that the api key is valid + override.StatusCode = 200 + override.Body = []byte("valid API key") + err = v.Env().FakeIntake.Client().ConfigureOverride(override) + require.NoError(v.T(), err) + + // the agent will check every minute if the key is valid, so wait at most 1m30 + require.EventuallyWithT(v.T(), func(collect *assert.CollectT) { + _, err = v.Env().Agent.Client.Health() + assert.NoError(collect, err) + }, time.Second*90, 3*time.Second) +} diff --git 
a/test/new-e2e/tests/agent-subcommands/health/health_commont_test.go b/test/new-e2e/tests/agent-subcommands/health/health_commont_test.go deleted file mode 100644 index 8a455a440d3c8..0000000000000 --- a/test/new-e2e/tests/agent-subcommands/health/health_commont_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package health - -import ( - "testing" - "time" - - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" - "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" - awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" - - "github.com/cenkalti/backoff" - "github.com/stretchr/testify/assert" -) - -type baseHealthSuite struct { - e2e.BaseSuite[environments.Host] -} - -func TestSubcommandSuite(t *testing.T) { - e2e.Run(t, &baseHealthSuite{}, e2e.WithProvisioner(awshost.Provisioner())) -} - -// section contains the content status of a specific section (e.g. 
Forwarder) -func (v *baseHealthSuite) TestDefaultInstallHealthy() { - interval := 1 * time.Second - - var output string - var err error - err = backoff.Retry(func() error { - output, err = v.Env().Agent.Client.Health() - if err != nil { - return err - } - return nil - }, backoff.WithMaxRetries(backoff.NewConstantBackOff(interval), uint64(15))) - - assert.NoError(v.T(), err) - assert.Contains(v.T(), output, "Agent health: PASS") -} diff --git a/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go b/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go index 15c51d4041354..4a064a2d88f93 100644 --- a/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go +++ b/test/new-e2e/tests/agent-subcommands/health/health_nix_test.go @@ -8,10 +8,9 @@ package health import ( "testing" - "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" - "github.com/stretchr/testify/assert" + "github.com/DataDog/test-infra-definitions/components/os" + "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" - "github.com/DataDog/datadog-agent/test/fakeintake/api" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" ) @@ -22,29 +21,8 @@ type linuxHealthSuite struct { func TestLinuxHealthSuite(t *testing.T) { t.Parallel() - e2e.Run(t, &linuxHealthSuite{}, e2e.WithProvisioner(awshost.Provisioner())) -} - -func (v *linuxHealthSuite) TestDefaultInstallUnhealthy() { - // the fakeintake says that any API key is invalid by sending a 403 code - override := api.ResponseOverride{ - Endpoint: "/api/v1/validate", - StatusCode: 403, - ContentType: "text/plain", - Body: []byte("invalid API key"), - } - v.Env().FakeIntake.Client().ConfigureOverride(override) - - // restart the agent, which validates the key using the fakeintake at startup - v.UpdateEnv(awshost.Provisioner( - awshost.WithAgentOptions(agentparams.WithAgentConfig("log_level: info\n")), - )) - - // agent should 
be unhealthy because the key is invalid - _, err := v.Env().Agent.Client.Health() - if err == nil { - assert.Fail(v.T(), "agent expected to be unhealthy, but no error found!") - return - } - assert.Contains(v.T(), err.Error(), "Agent health: FAIL") + suite := &linuxHealthSuite{baseHealthSuite{descriptor: os.UbuntuDefault}} + e2e.Run(t, suite, e2e.WithProvisioner(awshost.Provisioner( + awshost.WithEC2InstanceOptions(ec2.WithOS(suite.descriptor)), + ))) } diff --git a/test/new-e2e/tests/agent-subcommands/health/health_win_test.go b/test/new-e2e/tests/agent-subcommands/health/health_win_test.go index e2bf2f531ca7a..84da19fdcadc5 100644 --- a/test/new-e2e/tests/agent-subcommands/health/health_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/health/health_win_test.go @@ -8,12 +8,9 @@ package health import ( "testing" - "github.com/DataDog/test-infra-definitions/components/datadog/agentparams" "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" - "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/test/fakeintake/api" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" ) @@ -24,28 +21,8 @@ type windowsHealthSuite struct { func TestWindowsHealthSuite(t *testing.T) { t.Parallel() - e2e.Run(t, &windowsHealthSuite{}, e2e.WithProvisioner(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))) -} - -func (v *windowsHealthSuite) TestDefaultInstallUnhealthy() { - v.T().Skip("FIXME: test is flaky") - // the fakeintake says that any API key is invalid by sending a 403 code - override := api.ResponseOverride{ - Endpoint: "/api/v1/validate", - StatusCode: 403, - ContentType: "text/plain", - Body: []byte("invalid API key"), - } - v.Env().FakeIntake.Client().ConfigureOverride(override) - // restart the agent, which validates the key using the fakeintake at startup - 
v.UpdateEnv(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)), - awshost.WithAgentOptions(agentparams.WithAgentConfig("log_level: info\n")))) - - // agent should be unhealthy because the key is invalid - _, err := v.Env().Agent.Client.Health() - if err == nil { - assert.Fail(v.T(), "agent expected to be unhealthy, but no error found!") - return - } - assert.Contains(v.T(), err.Error(), "Agent health: FAIL") + suite := &windowsHealthSuite{baseHealthSuite{descriptor: os.WindowsDefault}} + e2e.Run(t, suite, e2e.WithProvisioner(awshost.Provisioner( + awshost.WithEC2InstanceOptions(ec2.WithOS(suite.descriptor)), + ))) } diff --git a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go index 3429569e29e7a..8f527460691ad 100644 --- a/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/hostname/hostname_ec2_win_test.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" "github.com/stretchr/testify/assert" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client" @@ -23,6 +24,9 @@ type windowsHostnameSuite struct { } func TestWindowsHostnameSuite(t *testing.T) { + // WINA-1014 + flake.Mark(t) + t.Parallel() osOption := awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault)) e2e.Run(t, &windowsHostnameSuite{baseHostnameSuite: baseHostnameSuite{osOption: osOption}}, e2e.WithProvisioner(awshost.ProvisionerNoFakeIntake(osOption))) diff --git a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go index d4e099410dda6..460c8bd4d88ca 100644 --- 
a/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go +++ b/test/new-e2e/tests/agent-subcommands/secret/secret_win_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/utils/e2e/client/agentclient" @@ -27,6 +28,8 @@ type windowsSecretSuite struct { } func TestWindowsSecretSuite(t *testing.T) { + // WINA-1014 + flake.Mark(t) t.Parallel() e2e.Run(t, &windowsSecretSuite{}, e2e.WithProvisioner(awshost.Provisioner(awshost.WithEC2InstanceOptions(ec2.WithOS(os.WindowsDefault))))) } diff --git a/test/new-e2e/tests/containers/ecs_test.go b/test/new-e2e/tests/containers/ecs_test.go index 38974e31821d3..55c4dd15f12b4 100644 --- a/test/new-e2e/tests/containers/ecs_test.go +++ b/test/new-e2e/tests/containers/ecs_test.go @@ -207,7 +207,8 @@ func (suite *ecsSuite) TestNginxECS() { `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:nginx$`, `^ecs_launch_type:ec2$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-nginx-ec2$`, + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-nginx-server$`, @@ -237,7 +238,8 @@ func (suite *ecsSuite) TestNginxECS() { `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:nginx$`, `^ecs_launch_type:ec2$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^ecs_service:` + 
regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-nginx-ec2$`, + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-nginx-server$`, @@ -269,6 +271,7 @@ func (suite *ecsSuite) TestRedisECS() { `^docker_image:public.ecr.aws/docker/library/redis:latest$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:redis$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-redis-ec2$`, `^ecs_launch_type:ec2$`, `^image_id:sha256:`, `^image_name:public.ecr.aws/docker/library/redis$`, @@ -297,6 +300,7 @@ func (suite *ecsSuite) TestRedisECS() { `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:redis$`, `^ecs_launch_type:ec2$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-redis-ec2$`, `^image_id:sha256:`, `^image_name:public.ecr.aws/docker/library/redis$`, `^image_tag:latest$`, @@ -462,6 +466,7 @@ func (suite *ecsSuite) TestCPU() { `^docker_image:ghcr.io/colinianking/stress-ng:409201de7458c639c68088d28ec8270ef599fe47$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:stress-ng$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-stress-ng$`, `^git.commit.sha:`, `^git.repository_url:https://github.com/ColinIanKing/stress-ng$`, `^image_id:sha256:`, @@ -506,7 +511,8 @@ func (suite *ecsSuite) testDogstatsd(taskName string) { `^docker_image:ghcr.io/datadog/apps-dogstatsd:main$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:dogstatsd$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^ecs_service:` + 
regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-dogstatsd-ud[ps]$`, + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-dogstatsd$`, @@ -536,8 +542,9 @@ func (suite *ecsSuite) TestPrometheus() { `^docker_image:ghcr.io/datadog/apps-prometheus:main$`, `^ecs_cluster_name:` + regexp.QuoteMeta(suite.ecsClusterName) + `$`, `^ecs_container_name:prometheus$`, + `^ecs_service:` + regexp.QuoteMeta(strings.TrimSuffix(suite.ecsClusterName, "-ecs")) + `-prometheus$`, `^endpoint:http://.*:8080/metrics$`, - `^git.commit.sha:`, // org.opencontainers.image.revision docker image label + `^git.commit.sha:`, // org.opencontainers.image.revision docker image label `^git.repository_url:https://github.com/DataDog/test-infra-definitions$`, // org.opencontainers.image.source docker image label `^image_id:sha256:`, `^image_name:ghcr.io/datadog/apps-prometheus$`, diff --git a/test/new-e2e/tests/containers/k8s_test.go b/test/new-e2e/tests/containers/k8s_test.go index af2f9917a2f9e..2badd72b23401 100644 --- a/test/new-e2e/tests/containers/k8s_test.go +++ b/test/new-e2e/tests/containers/k8s_test.go @@ -929,8 +929,16 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, }, 5*time.Minute, 10*time.Second, "The deployment with name %s in namespace %s does not exist or does not have the auto detected languages annotation", name, namespace) } + // Record old pod, so we can be sure we are not looking at the incorrect one after deletion + oldPods, err := suite.K8sClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", name).String(), + }) + suite.Require().NoError(err) + suite.Require().Len(oldPods.Items, 1) + oldPod := oldPods.Items[0] + // Delete the pod to ensure it is recreated after 
the admission controller is deployed - err := suite.K8sClient.CoreV1().Pods(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ + err = suite.K8sClient.CoreV1().Pods(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: fields.OneTermEqualSelector("app", name).String(), }) suite.Require().NoError(err) @@ -948,6 +956,9 @@ func (suite *k8sSuite) testAdmissionControllerPod(namespace string, name string, return } pod = pods.Items[0] + if !assert.NotEqual(c, oldPod.Name, pod.Name) { + return + } }, 2*time.Minute, 10*time.Second, "Failed to witness the creation of pod with name %s in namespace %s", name, namespace) suite.Require().Len(pod.Spec.Containers, 1) diff --git a/test/new-e2e/tests/cspm/cspm_test.go b/test/new-e2e/tests/cspm/cspm_test.go new file mode 100644 index 0000000000000..2c7a03f8b67b1 --- /dev/null +++ b/test/new-e2e/tests/cspm/cspm_test.go @@ -0,0 +1,303 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package cspm contains the e2e tests for cspm +package cspm + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + "slices" + "testing" + "time" + + "k8s.io/apimachinery/pkg/fields" + + "github.com/DataDog/test-infra-definitions/components/datadog/kubernetesagentparams" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" + "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" + awskubernetes "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/kubernetes" +) + +type cspmTestSuite struct { + e2e.BaseSuite[environments.Kubernetes] +} + +type findings = map[string][]map[string]string + +var expectedFindingsMasterEtcdNode = findings{ + "cis-kubernetes-1.5.1-1.1.12": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.16": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.19": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.21": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.22": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.23": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.24": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.25": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.26": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.33": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.2.6": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.3.2": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.3.3": []map[string]string{ + { + "result": "passed", + }, + }, + 
"cis-kubernetes-1.5.1-1.3.4": []map[string]string{ + { + "result": "passed", + }, + }, + "cis-kubernetes-1.5.1-1.3.5": []map[string]string{ + { + "result": "passed", + }, + }, + "cis-kubernetes-1.5.1-1.3.6": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-1.3.7": []map[string]string{ + { + "result": "passed", + }, + }, + "cis-kubernetes-1.5.1-1.4.1": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-3.2.1": []map[string]string{ + { + "result": "failed", + }, + }, +} +var expectedFindingsWorkerNode = findings{ + "cis-kubernetes-1.5.1-4.2.1": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.3": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.4": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.5": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.6": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.10": []map[string]string{ + { + "result": "failed", + }, + }, + "cis-kubernetes-1.5.1-4.2.12": []map[string]string{ + { + "result": "failed", + }, + }, +} + +//go:embed values.yaml +var values string + +func TestCSPM(t *testing.T) { + e2e.Run(t, &cspmTestSuite{}, e2e.WithProvisioner(awskubernetes.KindProvisioner(awskubernetes.WithAgentOptions(kubernetesagentparams.WithHelmValues(values), kubernetesagentparams.WithoutDualShipping())))) +} + +func (s *cspmTestSuite) TestFindings() { + res, err := s.Env().KubernetesCluster.Client().CoreV1().Pods("datadog").List(context.Background(), metav1.ListOptions{ + LabelSelector: fields.OneTermEqualSelector("app", s.Env().Agent.LinuxNodeAgent.LabelSelectors["app"]).String(), + }) + require.NoError(s.T(), err) + require.Len(s.T(), res.Items, 1) + agentPod := res.Items[0] + _, _, err = s.Env().KubernetesCluster.KubernetesClient.PodExec("datadog", agentPod.Name, "security-agent", 
[]string{"security-agent", "compliance", "check", "--dump-reports", "/tmp/reports", "--report"}) + require.NoError(s.T(), err) + dumpContent, _, err := s.Env().KubernetesCluster.KubernetesClient.PodExec("datadog", agentPod.Name, "security-agent", []string{"cat", "/tmp/reports"}) + require.NoError(s.T(), err) + findings, err := parseFindingOutput(dumpContent) + require.NoError(s.T(), err) + s.checkFindings(findings, mergeFindings(expectedFindingsMasterEtcdNode, expectedFindingsWorkerNode)) +} + +func (s *cspmTestSuite) TestMetrics() { + s.T().Log("Waiting for datadog.security_agent.compliance.running metrics") + assert.EventuallyWithT(s.T(), func(c *assert.CollectT) { + + metrics, err := s.Env().FakeIntake.Client().FilterMetrics("datadog.security_agent.compliance.running") + if !assert.NoError(c, err) { + return + } + if assert.NotEmpty(c, metrics) { + s.T().Log("Metrics found: datadog.security_agent.compliance.running") + } + }, 2*time.Minute, 10*time.Second) + + s.T().Log("Waiting for datadog.security_agent.compliance.containers_running metrics") + assert.EventuallyWithT(s.T(), func(c *assert.CollectT) { + metrics, err := s.Env().FakeIntake.Client().FilterMetrics("datadog.security_agent.compliance.containers_running") + if !assert.NoError(c, err) { + return + } + if assert.NotEmpty(c, metrics) { + s.T().Log("Metrics found: datadog.security_agent.compliance.containers_running") + } + }, 2*time.Minute, 10*time.Second) + +} +func (s *cspmTestSuite) checkFindings(findings, expectedFindings findings) { + s.T().Helper() + checkedRule := []string{} + for expectedRule, expectedRuleFindinds := range expectedFindings { + assert.Contains(s.T(), findings, expectedRule) + for _, expectedFinding := range expectedRuleFindinds { + found := false + for _, finding := range findings[expectedRule] { + if isSubset(expectedFinding, finding) { + found = true + break + } + } + assert.Truef(s.T(), found, "unexpected finding %v for rule %s", findings[expectedRule], expectedRule) + 
checkedRule = append(checkedRule, expectedRule) + } + } + for rule, ruleFindings := range findings { + if slices.Contains(checkedRule, rule) { + continue + } + for _, ruleFinding := range ruleFindings { + fmt.Printf("rule %s finding %v\n", rule, ruleFinding["result"]) + } + } + for rule, ruleFindings := range findings { + if slices.Contains(checkedRule, rule) { + continue + } + for _, ruleFinding := range ruleFindings { + assert.NotContains(s.T(), []string{"failed", "error"}, ruleFinding["result"], fmt.Sprintf("finding for rule %s not expected to be in failed or error state", rule)) + } + } + +} + +func isSubset(a, b map[string]string) bool { + for k, v := range a { + if vb, found := b[k]; !found || vb != v { + return false + } + } + return true +} + +func mergeFindings(a, b findings) findings { + for k, v := range b { + a[k] = v + } + return a +} + +func parseFindingOutput(output string) (findings, error) { + + result := map[string]any{} + parsedResult := findings{} + err := json.Unmarshal([]byte(output), &result) + if err != nil { + return nil, err + } + for rule, ruleFindings := range result { + ruleFindingsCasted, ok := ruleFindings.([]any) + if !ok { + return nil, fmt.Errorf("failed to parse output: %s for rule %s cannot be casted into []any", ruleFindings, rule) + } + parsedRuleFinding := []map[string]string{} + for _, finding := range ruleFindingsCasted { + findingCasted, ok := finding.(map[string]any) + if !ok { + return nil, fmt.Errorf("failed to parse output: %s for rule %s cannot be casted into map[string]any", finding, rule) + } + parsedFinding := map[string]string{} + for k, v := range findingCasted { + if _, ok := v.(string); ok { + parsedFinding[k] = v.(string) + } + } + parsedRuleFinding = append(parsedRuleFinding, parsedFinding) + + } + parsedResult[rule] = parsedRuleFinding + + } + return parsedResult, nil +} diff --git a/test/new-e2e/tests/cspm/values.yaml b/test/new-e2e/tests/cspm/values.yaml new file mode 100644 index 
0000000000000..4a9aff86ca347 --- /dev/null +++ b/test/new-e2e/tests/cspm/values.yaml @@ -0,0 +1,4 @@ +datadog: + securityAgent: + compliance: + enabled: true diff --git a/test/new-e2e/tests/installer/all_packages.go b/test/new-e2e/tests/installer/all_packages.go new file mode 100644 index 0000000000000..2efb5f4db6e4e --- /dev/null +++ b/test/new-e2e/tests/installer/all_packages.go @@ -0,0 +1,40 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package installer contains tests for the datadog installer +package installer + +import ( + "fmt" + "os" +) + +// InstallMethodOption is the type for the install method to use for the tests +type InstallMethodOption string + +const ( + // InstallMethodInstallScript is the default install method + InstallMethodInstallScript InstallMethodOption = "install_script" + // InstallMethodAnsible is the install method for Ansible + InstallMethodAnsible InstallMethodOption = "ansible" + // InstallMethodWindows is the install method for Windows + InstallMethodWindows InstallMethodOption = "windows" +) + +// GetInstallMethodFromEnv returns the install method to use for the tests +func GetInstallMethodFromEnv() InstallMethodOption { + supportedValues := []string{string(InstallMethodAnsible), string(InstallMethodInstallScript), string(InstallMethodWindows)} + envValue := os.Getenv("FLEET_INSTALL_METHOD") + switch envValue { + case "install_script": + return InstallMethodInstallScript + case "ansible": + return InstallMethodAnsible + case "windows": + return InstallMethodWindows + default: + panic(fmt.Sprintf("unsupported install method: %s. 
Supported values are: %v", envValue, supportedValues)) + } +} diff --git a/test/new-e2e/tests/installer/all_packages_test.go b/test/new-e2e/tests/installer/all_packages_test.go index 67d8186196aba..85779c01e4d97 100644 --- a/test/new-e2e/tests/installer/all_packages_test.go +++ b/test/new-e2e/tests/installer/all_packages_test.go @@ -24,12 +24,12 @@ import ( "github.com/stretchr/testify/require" ) -type packageTests func(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite +type packageTests func(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite type packageTestsWithSkipedFlavors struct { t packageTests skippedFlavors []e2eos.Descriptor - skippedInstallationMethods []installMethodOption + skippedInstallationMethods []InstallMethodOption } var ( @@ -50,7 +50,7 @@ var ( packagesTestsWithSkippedFlavors = []packageTestsWithSkipedFlavors{ {t: testInstaller}, {t: testAgent}, - {t: testApmInjectAgent, skippedFlavors: []e2eos.Descriptor{e2eos.CentOS7, e2eos.RedHat9, e2eos.Fedora37, e2eos.Suse15}, skippedInstallationMethods: []installMethodOption{installMethodAnsible}}, + {t: testApmInjectAgent, skippedFlavors: []e2eos.Descriptor{e2eos.CentOS7, e2eos.RedHat9, e2eos.Fedora37, e2eos.Suse15}, skippedInstallationMethods: []InstallMethodOption{InstallMethodAnsible}}, {t: testUpgradeScenario}, } ) @@ -64,7 +64,7 @@ func shouldSkipFlavor(flavors []e2eos.Descriptor, flavor e2eos.Descriptor) bool return false } -func shouldSkipInstallMethod(methods []installMethodOption, method installMethodOption) bool { +func shouldSkipInstallMethod(methods []InstallMethodOption, method InstallMethodOption) bool { for _, m := range methods { if m == method { return true @@ -73,19 +73,6 @@ func shouldSkipInstallMethod(methods []installMethodOption, method installMethod return false } -func getInstallMethodFromEnv() installMethodOption { - supportedValues := []string{string(installMethodAnsible), 
string(installMethodInstallScript)} - envValue := os.Getenv("FLEET_INSTALL_METHOD") - switch envValue { - case "install_script": - return installMethodInstallScript - case "ansible": - return installMethodAnsible - default: - panic(fmt.Sprintf("unsupported install method: %s. Supported values are: %v", envValue, supportedValues)) - } -} - func TestPackages(t *testing.T) { if _, ok := os.LookupEnv("E2E_PIPELINE_ID"); !ok { @@ -93,7 +80,10 @@ func TestPackages(t *testing.T) { t.FailNow() } - method := getInstallMethodFromEnv() + method := GetInstallMethodFromEnv() + if method == InstallMethodWindows { + t.Skip("Windows install method - skipping") + } var flavors []e2eos.Descriptor for _, flavor := range amd64Flavors { @@ -114,7 +104,7 @@ func TestPackages(t *testing.T) { continue } // TODO: remove once ansible+suse is fully supported - if flavor.Flavor == e2eos.Suse && method == installMethodAnsible { + if flavor.Flavor == e2eos.Suse && method == InstallMethodAnsible { continue } @@ -126,10 +116,6 @@ func TestPackages(t *testing.T) { flake.Mark(t) } - // FIXME: Ansible tests are flaky on multiple tests/os - if method == installMethodAnsible { - flake.Mark(t) - } opts := []awshost.ProvisionerOption{ awshost.WithEC2InstanceOptions(ec2.WithOSArch(flavor, flavor.Architecture)), awshost.WithoutAgent(), @@ -159,17 +145,10 @@ type packageBaseSuite struct { pkg string arch e2eos.Architecture os e2eos.Descriptor - installMethod installMethodOption + installMethod InstallMethodOption } -type installMethodOption string - -const ( - installMethodInstallScript installMethodOption = "install_script" - installMethodAnsible installMethodOption = "ansible" -) - -func newPackageSuite(pkg string, os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption, opts ...awshost.ProvisionerOption) packageBaseSuite { +func newPackageSuite(pkg string, os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption, opts ...awshost.ProvisionerOption) packageBaseSuite { 
return packageBaseSuite{ os: os, arch: arch, @@ -214,14 +193,14 @@ func (s *packageBaseSuite) RunInstallScriptWithError(params ...string) error { func (s *packageBaseSuite) RunInstallScript(params ...string) { switch s.installMethod { - case installMethodInstallScript: + case InstallMethodInstallScript: // bugfix for https://major.io/p/systemd-in-fedora-22-failed-to-restart-service-access-denied/ if s.os.Flavor == e2eos.CentOS && s.os.Version == e2eos.CentOS7.Version { s.Env().RemoteHost.MustExecute("sudo systemctl daemon-reexec") } err := s.RunInstallScriptWithError(params...) require.NoErrorf(s.T(), err, "installer not properly installed. logs: \n%s\n%s", s.Env().RemoteHost.MustExecute("cat /tmp/datadog-installer-stdout.log"), s.Env().RemoteHost.MustExecute("cat /tmp/datadog-installer-stderr.log")) - case installMethodAnsible: + case InstallMethodAnsible: // Install ansible then install the agent ansiblePrefix := s.installAnsible(s.os) diff --git a/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh b/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh index 3055ed5f5927a..66b2ca5290cbb 100755 --- a/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh +++ b/test/new-e2e/tests/installer/host/fixtures/run_http_server.sh @@ -8,6 +8,4 @@ python3 /opt/fixtures/http_server.py >/tmp/server.log 2>&1 & PID=$! disown $PID -while ! curl -s http://localhost:8080 > /dev/null; do - sleep 1 -done +timeout 30s bash -c 'while ! 
curl -s http://localhost:8080 > /dev/null; do sleep 1; done' diff --git a/test/new-e2e/tests/installer/package_agent_test.go b/test/new-e2e/tests/installer/package_agent_test.go index b1c672952ac92..30e4f52630a0d 100644 --- a/test/new-e2e/tests/installer/package_agent_test.go +++ b/test/new-e2e/tests/installer/package_agent_test.go @@ -36,7 +36,7 @@ type packageAgentSuite struct { packageBaseSuite } -func testAgent(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testAgent(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &packageAgentSuite{ packageBaseSuite: newPackageSuite("agent", os, arch, method, awshost.WithoutFakeIntake()), } @@ -181,7 +181,7 @@ func (s *packageAgentSuite) TestExperimentTimeout() { Starting(agentUnitXP). Started(processUnitXP). Started(traceUnitXP). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnitXP), ). @@ -198,7 +198,7 @@ func (s *packageAgentSuite) TestExperimentTimeout() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnit), ), ) @@ -248,7 +248,7 @@ func (s *packageAgentSuite) TestExperimentIgnoringSigterm() { Starting(agentUnitXP). Started(processUnitXP). Started(traceUnitXP). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnitXP), ). @@ -271,7 +271,7 @@ func (s *packageAgentSuite) TestExperimentIgnoringSigterm() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). 
Skipped(securityUnit), ), ) @@ -311,7 +311,7 @@ func (s *packageAgentSuite) TestExperimentExits() { Starting(agentUnitXP). Started(processUnitXP). Started(traceUnitXP). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnitXP), ). @@ -327,7 +327,7 @@ func (s *packageAgentSuite) TestExperimentExits() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). Skipped(securityUnit), ), ) @@ -351,7 +351,7 @@ func (s *packageAgentSuite) TestExperimentStopped() { s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Started(traceUnitXP)) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Started(processUnitXP)) s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().Skipped(securityUnitXP)) - s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible)) + s.host.AssertSystemdEvents(timestamp, host.SystemdEvents().SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible)) // stop experiment timestamp = s.host.LastJournaldTimestamp() @@ -370,7 +370,7 @@ func (s *packageAgentSuite) TestExperimentStopped() { Unordered(host.SystemdEvents(). Started(traceUnit). Started(processUnit). - SkippedIf(probeUnitXP, s.installMethod != installMethodAnsible). + SkippedIf(probeUnitXP, s.installMethod != InstallMethodAnsible). 
Skipped(securityUnit), ), ) @@ -397,6 +397,44 @@ func (s *packageAgentSuite) TestRunPath() { assert.True(s.T(), strings.HasPrefix(runPath, "/opt/datadog-packages/datadog-agent/"), "run_path is not in the expected location: %s", runPath) } +func (s *packageAgentSuite) TestUpgrade_DisabledAgentDebRPM_to_OCI() { + // install deb/rpm agent + s.RunInstallScript(envForceNoInstall("datadog-agent")) + s.host.AssertPackageInstalledByPackageManager("datadog-agent") + + defer s.Purge() + defer s.purgeAgentDebInstall() + + state := s.host.State() + s.assertUnits(state, true) + state.AssertDirExists("/opt/datadog-agent", 0755, "dd-agent", "dd-agent") + + // disable the unit + s.host.Run("sudo systemctl disable datadog-agent") + + // install OCI agent + s.RunInstallScript(envForceInstall("datadog-agent")) + + state = s.host.State() + s.assertUnits(state, false) + s.host.AssertPackageInstalledByInstaller("datadog-agent") + s.host.AssertPackageInstalledByPackageManager("datadog-agent") + + s.host.Run("sudo systemctl show datadog-agent -p ExecStart | grep /opt/datadog-packages") +} + +func (s *packageAgentSuite) TestInstallWithLeftoverDebDir() { + // create /opt/datadog-agent to simulate a disabled agent + s.host.Run("sudo mkdir -p /opt/datadog-agent") + + // install OCI agent + s.RunInstallScript(envForceInstall("datadog-agent")) + + state := s.host.State() + s.assertUnits(state, false) + s.host.Run("sudo systemctl show datadog-agent -p ExecStart | grep /opt/datadog-packages") +} + func (s *packageAgentSuite) purgeAgentDebInstall() { pkgManager := s.host.GetPkgManager() switch pkgManager { diff --git a/test/new-e2e/tests/installer/package_apm_inject_test.go b/test/new-e2e/tests/installer/package_apm_inject_test.go index dbdfa6de25483..389da3c392f3e 100644 --- a/test/new-e2e/tests/installer/package_apm_inject_test.go +++ b/test/new-e2e/tests/installer/package_apm_inject_test.go @@ -25,7 +25,7 @@ type packageApmInjectSuite struct { packageBaseSuite } -func testApmInjectAgent(os 
e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testApmInjectAgent(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &packageApmInjectSuite{ packageBaseSuite: newPackageSuite("apm-inject", os, arch, method), } diff --git a/test/new-e2e/tests/installer/package_installer_test.go b/test/new-e2e/tests/installer/package_installer_test.go index d9b0363b3a4df..9f949c65354b8 100644 --- a/test/new-e2e/tests/installer/package_installer_test.go +++ b/test/new-e2e/tests/installer/package_installer_test.go @@ -15,7 +15,7 @@ type packageInstallerSuite struct { packageBaseSuite } -func testInstaller(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testInstaller(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &packageInstallerSuite{ packageBaseSuite: newPackageSuite("installer", os, arch, method, awshost.WithoutFakeIntake()), } diff --git a/test/new-e2e/tests/installer/upgrade_scenario_test.go b/test/new-e2e/tests/installer/upgrade_scenario_test.go index e4ada47c79316..702dfca0d7a57 100644 --- a/test/new-e2e/tests/installer/upgrade_scenario_test.go +++ b/test/new-e2e/tests/installer/upgrade_scenario_test.go @@ -87,7 +87,7 @@ const ( previousInstallerImageVersion = "7.55.0-installer-0.4.1-1" ) -func testUpgradeScenario(os e2eos.Descriptor, arch e2eos.Architecture, method installMethodOption) packageSuite { +func testUpgradeScenario(os e2eos.Descriptor, arch e2eos.Architecture, method InstallMethodOption) packageSuite { return &upgradeScenarioSuite{ packageBaseSuite: newPackageSuite("upgrade_scenario", os, arch, method), } diff --git a/test/new-e2e/tests/installer/windows/base_suite.go b/test/new-e2e/tests/installer/windows/base_suite.go index 2daac352d8e14..5a3fd22833af5 100644 --- a/test/new-e2e/tests/installer/windows/base_suite.go +++ b/test/new-e2e/tests/installer/windows/base_suite.go @@ -6,13 
+6,15 @@ package installer import ( + "os" + "strings" + agentVersion "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" - "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows/suite-assertions" - "os" - "strings" + instlr "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer" + suiteasserts "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows/suite-assertions" ) // PackageVersion is a helper type to store both the version and the package version of a binary. @@ -85,6 +87,10 @@ func (s *BaseInstallerSuite) StableAgentVersion() PackageVersion { func (s *BaseInstallerSuite) SetupSuite() { s.BaseSuite.SetupSuite() + if instlr.GetInstallMethodFromEnv() != instlr.InstallMethodWindows { + s.T().Skip("Skipping Windows-only tests as the install method isn't Windows") + } + // TODO:FA-779 if s.Env().Environment.PipelineID() == "" && os.Getenv("DD_INSTALLER_MSI_URL") == "" { s.FailNow("E2E_PIPELINE_ID env var is not set, this test requires this variable to be set to work") diff --git a/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go b/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go index fa75f2665a105..71c6521042a64 100644 --- a/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go +++ b/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go @@ -38,6 +38,84 @@ func (s *testAgentUpgradeSuite) TestUpgradeAgentPackage() { }) } +// TestDowngradeAgentPackage tests that it's possible to downgrade the Datadog Agent using the Datadog installer. 
+func (s *testAgentUpgradeSuite) TestDowngradeAgentPackage() { + // Arrange + _, err := s.Installer().InstallPackage(installerwindows.AgentPackage) + s.Require().NoErrorf(err, "failed to install the stable Datadog Agent package") + + // Act + _, err = s.Installer().InstallExperiment(installerwindows.AgentPackage, + installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithVersion(s.StableAgentVersion().PackageVersion()), + installer.WithAuthentication(""), + ) + + // Assert + s.Require().NoErrorf(err, "failed to downgrade to stable Datadog Agent package") + s.Require().Host(s.Env().RemoteHost). + HasARunningDatadogAgentService(). + WithVersionMatchPredicate(func(version string) { + s.Require().Contains(version, s.StableAgentVersion().Version()) + }). + DirExists(installerwindows.GetStableDirFor(installerwindows.AgentPackage)) +} + +func (s *testAgentUpgradeSuite) TestExperimentFailure() { + // Arrange + s.Run("Install stable", func() { + s.installStableAgent() + }) + + // Act + _, err := s.Installer().InstallExperiment(installerwindows.AgentPackage, + installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithVersion("unknown-version"), + installer.WithAuthentication(""), + ) + + // Assert + s.Require().Error(err, "expected an error when trying to start an experiment with an unknown version") + s.stopExperiment() + // TODO: is this the same test as TestStopWithoutExperiment? +} + +func (s *testAgentUpgradeSuite) TestExperimentCurrentVersion() { + // Arrange + s.Run("Install stable", func() { + s.installStableAgent() + }) + + // Act + _, err := s.Installer().InstallExperiment(installerwindows.AgentPackage, + installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithVersion(s.StableAgentVersion().PackageVersion()), + installer.WithAuthentication(""), + ) + + // Assert + s.Require().Error(err, "expected an error when trying to start an experiment with the same version as the current one") + s.Require().Host(s.Env().RemoteHost). 
+ HasARunningDatadogAgentService(). + WithVersionMatchPredicate(func(version string) { + s.Require().Contains(version, s.StableAgentVersion().Version()) + }). + DirExists(installerwindows.GetStableDirFor(installerwindows.AgentPackage)) +} + +func (s *testAgentUpgradeSuite) TestStopWithoutExperiment() { + // Arrange + s.Run("Install stable", func() { + s.installStableAgent() + }) + + // Act + + // Assert + s.stopExperiment() + // TODO: Currently uninstalls stable then reinstalls stable. functional but a waste. +} + func (s *testAgentUpgradeSuite) installStableAgent() { // Arrange diff --git a/test/new-e2e/tests/npm/common_1host.go b/test/new-e2e/tests/npm/common_1host.go index 7a1fcd5824189..2f0456786a2ba 100644 --- a/test/new-e2e/tests/npm/common_1host.go +++ b/test/new-e2e/tests/npm/common_1host.go @@ -164,6 +164,10 @@ func test1HostFakeIntakeNPMTCPUDPDNS[Env any](v *e2e.BaseSuite[Env], FakeIntake v.EventuallyWithT(func(c *assert.CollectT) { cnx, err := FakeIntake.Client().GetConnections() assert.NoError(c, err, "GetConnections() errors") + if !assert.NotNil(c, cnx, "GetConnections() returned nil ConnectionsAggregator") { + return + } + if !assert.NotEmpty(c, cnx.GetNames(), "no connections yet") { return } diff --git a/test/new-e2e/tests/security-agent-functional/security_agent_test.go b/test/new-e2e/tests/security-agent-functional/security_agent_test.go index 6606c33a6d600..94da861eb7fa6 100644 --- a/test/new-e2e/tests/security-agent-functional/security_agent_test.go +++ b/test/new-e2e/tests/security-agent-functional/security_agent_test.go @@ -21,7 +21,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows" - windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" windowsAgent "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" ) @@ -76,13 +75,7 @@ 
func (v *vmSuite) TestSystemProbeCWSSuite() { // install the agent (just so we can get the driver(s) installed) agentPackage, err := windowsAgent.GetPackageFromEnv() require.NoError(t, err) - remoteMSIPath, err := windowsCommon.GetTemporaryFile(vm) - require.NoError(t, err) - t.Logf("Getting install package %s...", agentPackage.URL) - err = windowsCommon.PutOrDownloadFile(vm, agentPackage.URL, remoteMSIPath) - require.NoError(t, err) - - err = windowsCommon.InstallMSI(vm, remoteMSIPath, "", "") + _, err = windowsAgent.InstallAgent(vm, windowsAgent.WithPackage(agentPackage)) t.Log("Install complete") require.NoError(t, err) diff --git a/test/new-e2e/tests/sysprobe-functional/apmtags_test.go b/test/new-e2e/tests/sysprobe-functional/apmtags_test.go index 4f6f26623170f..7d75096f107cd 100644 --- a/test/new-e2e/tests/sysprobe-functional/apmtags_test.go +++ b/test/new-e2e/tests/sysprobe-functional/apmtags_test.go @@ -217,7 +217,7 @@ func setupTest(vm *components.RemoteHost, test usmTaggingTest) error { testRoot := path.Join("c:", "users", "administrator") clientJSONFile := path.Join(testRoot, "datadog.json") - clientAppConfig := path.Join(testRoot, "app.config") + clientAppConfig := path.Join(testRoot, "littleget.exe.config") removeIfExists(vm, clientJSONFile) removeIfExists(vm, clientAppConfig) diff --git a/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go b/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go index 190e2cfffa937..4e41c685bbd7d 100644 --- a/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go +++ b/test/new-e2e/tests/sysprobe-functional/sysprobe_test.go @@ -18,7 +18,6 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments" awshost "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows" - windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" windowsAgent 
"github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" componentsos "github.com/DataDog/test-infra-definitions/components/os" "github.com/DataDog/test-infra-definitions/scenarios/aws/ec2" @@ -106,13 +105,7 @@ func (v *vmSuite) TestSystemProbeNPMSuite() { // install the agent (just so we can get the driver(s) installed) agentPackage, err := windowsAgent.GetPackageFromEnv() require.NoError(t, err) - remoteMSIPath, err := windowsCommon.GetTemporaryFile(vm) - require.NoError(t, err) - t.Logf("Getting install package %s...", agentPackage.URL) - err = windowsCommon.PutOrDownloadFile(vm, agentPackage.URL, remoteMSIPath) - require.NoError(t, err) - - err = windowsCommon.InstallMSI(vm, remoteMSIPath, "", "") + _, err = windowsAgent.InstallAgent(vm, windowsAgent.WithPackage(agentPackage)) t.Log("Install complete") require.NoError(t, err) diff --git a/test/new-e2e/tests/windows/common/agent/agent.go b/test/new-e2e/tests/windows/common/agent/agent.go index 693fa93db26fd..8833fb0f3cb6e 100644 --- a/test/new-e2e/tests/windows/common/agent/agent.go +++ b/test/new-e2e/tests/windows/common/agent/agent.go @@ -12,6 +12,7 @@ import ( "path/filepath" "strings" "testing" + "time" "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" @@ -20,6 +21,7 @@ import ( windowsCommon "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common" infraCommon "github.com/DataDog/test-infra-definitions/common" + "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -72,13 +74,23 @@ func InstallAgent(host *components.RemoteHost, options ...InstallAgentOption) (s p.LocalInstallLogFile = filepath.Join(os.TempDir(), "install.log") } + downloadBackOff := p.DownloadMSIBackOff + if downloadBackOff == nil { + // 5s, 7s, 11s, 17s, 25s, 38s, 60s, 60s...for up to 5 minutes + downloadBackOff = backoff.NewExponentialBackOff( + backoff.WithInitialInterval(5*time.Second), + 
backoff.WithMaxInterval(60*time.Second), + backoff.WithMaxElapsedTime(5*time.Minute), + ) + } + args := p.toArgs() remoteMSIPath, err := windowsCommon.GetTemporaryFile(host) if err != nil { return "", err } - err = windowsCommon.PutOrDownloadFile(host, p.Package.URL, remoteMSIPath) + err = windowsCommon.PutOrDownloadFileWithRetry(host, p.Package.URL, remoteMSIPath, downloadBackOff) if err != nil { return "", err } diff --git a/test/new-e2e/tests/windows/common/agent/agent_install_params.go b/test/new-e2e/tests/windows/common/agent/agent_install_params.go index 51f4a0b951057..a4f37403d27c4 100644 --- a/test/new-e2e/tests/windows/common/agent/agent_install_params.go +++ b/test/new-e2e/tests/windows/common/agent/agent_install_params.go @@ -13,11 +13,15 @@ import ( "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/runner/parameters" "github.com/DataDog/test-infra-definitions/components/datadog/agentparams/msi" + + "github.com/cenkalti/backoff/v4" ) // InstallAgentParams are the parameters used for installing the Agent using msiexec. type InstallAgentParams struct { - Package *Package + Package *Package + DownloadMSIBackOff backoff.BackOff + // Path on local test runner to save the MSI install log LocalInstallLogFile string @@ -146,6 +150,14 @@ func WithLastStablePackage() InstallAgentOption { } } +// WithDownloadMSIBackoff specifies the backoff strategy for downloading the MSI. +func WithDownloadMSIBackoff(backoff backoff.BackOff) InstallAgentOption { + return func(i *InstallAgentParams) error { + i.DownloadMSIBackOff = backoff + return nil + } +} + // WithFakeIntake configures the Agent to use a fake intake URL. 
func WithFakeIntake(fakeIntake *components.FakeIntake) InstallAgentOption { return func(i *InstallAgentParams) error { diff --git a/test/new-e2e/tests/windows/common/network.go b/test/new-e2e/tests/windows/common/network.go index af4291c732008..2a8e98f0266ca 100644 --- a/test/new-e2e/tests/windows/common/network.go +++ b/test/new-e2e/tests/windows/common/network.go @@ -11,6 +11,8 @@ import ( "strings" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/components" + + "github.com/cenkalti/backoff/v4" ) // BoundPort represents a port that is bound to a process @@ -80,8 +82,27 @@ func ListBoundPorts(host *components.RemoteHost) ([]*BoundPort, error) { // If the URL is a local file, it will be uploaded to the VM. // If the URL is a remote file, it will be downloaded from the VM func PutOrDownloadFile(host *components.RemoteHost, url string, destination string) error { + // no retry + return PutOrDownloadFileWithRetry(host, url, destination, &backoff.StopBackOff{}) +} + +// PutOrDownloadFileWithRetry is similar to PutOrDownloadFile but retries on download failure, +// local file copy is not retried. +func PutOrDownloadFileWithRetry(host *components.RemoteHost, url string, destination string, b backoff.BackOff) error { if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") { - return DownloadFile(host, url, destination) + err := backoff.Retry(func() error { + return DownloadFile(host, url, destination) + // TODO: it would be neat to only retry on web related errors but + // we don't have a way to distinguish them since DownloadFile + // throws a WebException for non web related errors such as + // filename is null or Empty. 
+ // https://learn.microsoft.com/en-us/dotnet/api/system.net.webclient.downloadfile + // example error: Exception calling "DownloadFile" with "2" argument(s): "The remote server returned an error: (503) + }, b) + if err != nil { + return err + } + return nil } if strings.HasPrefix(url, "file://") { diff --git a/test/otel/go.mod b/test/otel/go.mod index 1a1e56a250a8a..4088b0e848bff 100644 --- a/test/otel/go.mod +++ b/test/otel/go.mod @@ -37,6 +37,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/mock => ./../../pkg/config/mock github.com/DataDog/datadog-agent/pkg/config/model => ./../../pkg/config/model github.com/DataDog/datadog-agent/pkg/config/setup => ./../../pkg/config/setup + github.com/DataDog/datadog-agent/pkg/config/structure => ../../pkg/config/structure github.com/DataDog/datadog-agent/pkg/config/utils => ./../../pkg/config/utils github.com/DataDog/datadog-agent/pkg/logs/auditor => ./../../pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ./../../pkg/logs/client @@ -98,8 +99,8 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.56.0-rc.1 - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/setup v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/config/model v0.57.0 + github.com/DataDog/datadog-agent/pkg/config/setup v0.57.0 github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 ) @@ -108,7 +109,7 @@ require ( github.com/DataDog/agent-payload/v5 v5.0.119 // indirect github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/secrets 
v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.57.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect @@ -121,9 +122,10 @@ require ( github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.58.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.0.0-00010101000000-000000000000 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 // indirect @@ -151,22 +153,22 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/optional v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.57.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.57.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.56.0-rc.3 // indirect github.com/DataDog/datadog-api-client-go/v2 v2.26.0 // indirect github.com/DataDog/datadog-go/v5 v5.5.0 // indirect @@ -174,10 +176,10 @@ require ( github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect - 
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/DataDog/zstd v1.5.5 // indirect diff --git a/test/otel/go.sum b/test/otel/go.sum index cdf2e4f5f136d..6c32aacebcc70 100644 --- a/test/otel/go.sum +++ b/test/otel/go.sum @@ -14,18 +14,18 @@ github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4ti github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0 h1:weAPKDi/dTlBbWU4oDZ55ubomqUob6OWPoUcdBjWM2M= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.17.0/go.mod h1:VrcmO2+HTWXaGYin1pAAXWNEtaza/DCJDH/+t5IY5rs= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0 h1:KNiq6ofE5BBMQjl7w9fftg8z44C9z51w7qOWIKs5SCg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.18.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0 h1:FaUFQE8IuaNdpOQGIhoy2h58v8AVND+yZG3gVqKAwLQ= 
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.18.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1 h1:px2+7svK86oeCGd+sT1x/9f0pqIJdApGFnWI0AOPXwA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.16.1/go.mod h1:+LijQ2LdlocAQ4WB+7KsoIGe90bfogkRslubd9swVow= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0 h1:Fija8Qo0z/HngskYyBpMqmJKM2ejNr1NfXUyWszFDAw= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.18.0/go.mod h1:lNu6vfFNCV/tyWxs8x8nCN1TqK+bPeI2dbnlwFTs8VA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0 h1:x6re32f8gQ8fdCllywQyAbxQuXNrgxeimpLBfvwA97g= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.18.0/go.mod h1:R84ZVbxKSgMxzvJro/MftVrlkGm2C2gndUhV35wyR8A= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/test/regression/cases/idle_all_features/datadog-agent/datadog.yaml b/test/regression/cases/idle_all_features/datadog-agent/datadog.yaml new file mode 100644 index 0000000000000..1960b9b64d8f0 --- /dev/null +++ b/test/regression/cases/idle_all_features/datadog-agent/datadog.yaml @@ -0,0 +1,74 @@ +auth_token_file_path: /tmp/agent-auth-token +hostname: smp-regression + +dd_url: http://127.0.0.1:9092 + +confd_path: /etc/datadog-agent/conf.d + +# Disable cloud detection. This stops the Agent from poking around the +# execution environment & network. This is particularly important if the target +# has network access. 
+cloud_provider_metadata: [] + +dogstatsd_socket: '/tmp/dsd.socket' + +logs_enabled: true + +apm_config: + enabled: true + +process_config: + process_collection: + enabled: true + container_collection: + enabled: true + +network_path: + connections_monitoring: + enabled: true + +runtime_security_config: + ## Set to true to enable Threat Detection + enabled: true + +cluster_checks: + enabled: true + +otlp_config: + metrics: + enabled: true + traces: + enabled: true + logs: + enabled: true + +system_probe_config: + enabled: true + +network_config: + enabled: true + +# Per Cloud Security Management setup documentation +# https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ +remote_configuration: + # SMP environment does not support remote config currently. + enabled: false + +compliance_config: + ## Set to true to enable CIS benchmarks for Misconfigurations. + enabled: true + host_benchmarks: + enabled: true + +# Vulnerabilities are evaluated and scanned against your containers and hosts every hour. +sbom: + enabled: true + # Set to true to enable Container Vulnerability Management + container_image: + enabled: true + # Set to true to enable Host Vulnerability Management + host: + enabled: true + +container_image: + enabled: true diff --git a/test/regression/cases/idle_all_features/datadog-agent/security-agent.yaml b/test/regression/cases/idle_all_features/datadog-agent/security-agent.yaml new file mode 100644 index 0000000000000..d9ce27c518a1a --- /dev/null +++ b/test/regression/cases/idle_all_features/datadog-agent/security-agent.yaml @@ -0,0 +1,13 @@ +# Per https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ +runtime_security_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Threat Detection + enabled: true + +compliance_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable CIS benchmarks for Misconfigurations. 
+ # + enabled: true + host_benchmarks: + enabled: true diff --git a/test/regression/cases/idle_all_features/datadog-agent/system-probe.yaml b/test/regression/cases/idle_all_features/datadog-agent/system-probe.yaml new file mode 100644 index 0000000000000..a7da3c9140d50 --- /dev/null +++ b/test/regression/cases/idle_all_features/datadog-agent/system-probe.yaml @@ -0,0 +1,10 @@ +# Per https://docs.datadoghq.com/security/cloud_security_management/setup/agent/linux/ + +runtime_security_config: + ## @param enabled - boolean - optional - default: false + ## Set to true to enable Threat Detection + enabled: true + + remote_configuration: + ## @param enabled - boolean - optional - default: false + enabled: true diff --git a/test/regression/cases/idle_all_features/experiment.yaml b/test/regression/cases/idle_all_features/experiment.yaml new file mode 100644 index 0000000000000..34c27f279d298 --- /dev/null +++ b/test/regression/cases/idle_all_features/experiment.yaml @@ -0,0 +1,30 @@ +# Agent 'all features enabled' idle experiment. Represents an agent install with +# all sub-agents enabled in configuration and no active workload. 
+ +optimization_goal: memory +erratic: false + +target: + name: datadog-agent + command: /bin/entrypoint.sh + + environment: + DD_TELEMETRY_ENABLED: true + DD_API_KEY: 00000001 + DD_HOSTNAME: smp-regression + DD_DD_URL: http://127.0.0.1:9092 + + profiling_environment: + DD_INTERNAL_PROFILING_BLOCK_PROFILE_RATE: 10000 + DD_INTERNAL_PROFILING_CPU_DURATION: 1m + DD_INTERNAL_PROFILING_DELTA_PROFILES: true + DD_INTERNAL_PROFILING_ENABLED: true + DD_INTERNAL_PROFILING_ENABLE_GOROUTINE_STACKTRACES: true + DD_INTERNAL_PROFILING_MUTEX_PROFILE_FRACTION: 10 + DD_INTERNAL_PROFILING_PERIOD: 1m + DD_INTERNAL_PROFILING_UNIX_SOCKET: /var/run/datadog/apm.socket + DD_PROFILING_EXECUTION_TRACE_ENABLED: true + DD_PROFILING_EXECUTION_TRACE_PERIOD: 1m + DD_PROFILING_WAIT_PROFILE: true + + DD_INTERNAL_PROFILING_EXTRA_TAGS: experiment:idle_all_features diff --git a/test/regression/cases/idle_all_features/lading/lading.yaml b/test/regression/cases/idle_all_features/lading/lading.yaml new file mode 100644 index 0000000000000..52888afb7176d --- /dev/null +++ b/test/regression/cases/idle_all_features/lading/lading.yaml @@ -0,0 +1,177 @@ +generator: [] + +blackhole: + - http: + binding_addr: "127.0.0.1:9091" + - http: + binding_addr: "127.0.0.1:9092" + +target_metrics: + - prometheus: #core agent telemetry + uri: "http://127.0.0.1:5000/telemetry" + tags: + sub_agent: "core" + - prometheus: #process agent telemetry + uri: "http://127.0.0.1:6062/telemetry" + tags: + sub_agent: "process" + - expvar: #trace agent telemetry + uri: "http://127.0.0.1:5012/debug/vars" + vars: + - "/Event" + - "/ServiceCheck" + - "/check_run_v1" + - "/cmdline" + - "/compressor/BytesIn" + - "/compressor/BytesOut" + - "/compressor/TotalCompressCycles" + - "/compressor/TotalPayloads" + - "/connections" + - "/container" + - "/events_v2" + - "/forwarder/APIKeyFailure" + - "/forwarder/APIKeyStatus" + - "/forwarder/FileStorage/CurrentSizeInBytes" + - "/forwarder/FileStorage/DeserializeCount" + - 
"/forwarder/FileStorage/DeserializeErrorsCount" + - "/forwarder/FileStorage/DeserializeTransactionsCount" + - "/forwarder/FileStorage/FileSize" + - "/forwarder/FileStorage/FilesCount" + - "/forwarder/FileStorage/FilesRemovedCount" + - "/forwarder/FileStorage/PointsDroppedCount" + - "/forwarder/FileStorage/SerializeCount" + - "/forwarder/FileStorage/StartupReloadedRetryFilesCount" + - "/forwarder/RemovalPolicy/FilesFromUnknownDomainCount" + - "/forwarder/RemovalPolicy/NewRemovalPolicyCount" + - "/forwarder/RemovalPolicy/OutdatedFilesCount" + - "/forwarder/RemovalPolicy/RegisteredDomainCount" + - "/forwarder/TransactionContainer/CurrentMemSizeInBytes" + - "/forwarder/TransactionContainer/ErrorsCount" + - "/forwarder/TransactionContainer/PointsDroppedCount" + - "/forwarder/TransactionContainer/TransactionsCount" + - "/forwarder/TransactionContainer/TransactionsDroppedCount" + - "/forwarder/Transactions/Cluster" + - "/forwarder/Transactions/ClusterRole" + - "/forwarder/Transactions/ClusterRoleBinding" + - "/forwarder/Transactions/ConnectionEvents/ConnectSuccess" + - "/forwarder/Transactions/ConnectionEvents/DNSSuccess" + - "/forwarder/Transactions/CronJob" + - "/forwarder/Transactions/CustomResource" + - "/forwarder/Transactions/CustomResourceDefinition" + - "/forwarder/Transactions/DaemonSet" + - "/forwarder/Transactions/Deployment" + - "/forwarder/Transactions/Dropped" + - "/forwarder/Transactions/DroppedByEndpoint" + - "/forwarder/Transactions/ECSTask" + - "/forwarder/Transactions/Errors" + - "/forwarder/Transactions/ErrorsByType/ConnectionErrors" + - "/forwarder/Transactions/ErrorsByType/DNSErrors" + - "/forwarder/Transactions/ErrorsByType/SentRequestErrors" + - "/forwarder/Transactions/ErrorsByType/TLSErrors" + - "/forwarder/Transactions/ErrorsByType/WroteRequestErrors" + - "/forwarder/Transactions/HTTPErrors" + - "/forwarder/Transactions/HTTPErrorsByCode" + - "/forwarder/Transactions/HighPriorityQueueFull" + - "/forwarder/Transactions/HorizontalPodAutoscaler" + - 
"/forwarder/Transactions/Ingress" + - "/forwarder/Transactions/InputBytesByEndpoint" + - "/forwarder/Transactions/InputCountByEndpoint" + - "/forwarder/Transactions/Job" + - "/forwarder/Transactions/LimitRange" + - "/forwarder/Transactions/Namespace" + - "/forwarder/Transactions/NetworkPolicy" + - "/forwarder/Transactions/Node" + - "/forwarder/Transactions/OrchestratorManifest" + - "/forwarder/Transactions/PersistentVolume" + - "/forwarder/Transactions/PersistentVolumeClaim" + - "/forwarder/Transactions/Pod" + - "/forwarder/Transactions/ReplicaSet" + - "/forwarder/Transactions/Requeued" + - "/forwarder/Transactions/RequeuedByEndpoint" + - "/forwarder/Transactions/Retried" + - "/forwarder/Transactions/RetriedByEndpoint" + - "/forwarder/Transactions/RetryQueueSize" + - "/forwarder/Transactions/Role" + - "/forwarder/Transactions/RoleBinding" + - "/forwarder/Transactions/Service" + - "/forwarder/Transactions/ServiceAccount" + - "/forwarder/Transactions/StatefulSet" + - "/forwarder/Transactions/StorageClass" + - "/forwarder/Transactions/Success" + - "/forwarder/Transactions/SuccessByEndpoint/check_run_v1" + - "/forwarder/Transactions/SuccessByEndpoint/connections" + - "/forwarder/Transactions/SuccessByEndpoint/container" + - "/forwarder/Transactions/SuccessByEndpoint/events_v2" + - "/forwarder/Transactions/SuccessByEndpoint/host_metadata_v2" + - "/forwarder/Transactions/SuccessByEndpoint/intake" + - "/forwarder/Transactions/SuccessByEndpoint/orchestrator" + - "/forwarder/Transactions/SuccessByEndpoint/process" + - "/forwarder/Transactions/SuccessByEndpoint/rtcontainer" + - "/forwarder/Transactions/SuccessByEndpoint/rtprocess" + - "/forwarder/Transactions/SuccessByEndpoint/series_v1" + - "/forwarder/Transactions/SuccessByEndpoint/series_v2" + - "/forwarder/Transactions/SuccessByEndpoint/services_checks_v2" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v1" + - "/forwarder/Transactions/SuccessByEndpoint/sketches_v2" + - 
"/forwarder/Transactions/SuccessByEndpoint/validate_v1" + - "/forwarder/Transactions/SuccessBytesByEndpoint" + - "/forwarder/Transactions/VerticalPodAutoscaler" + - "/host_metadata_v2" + - "/hostname/errors" + - "/hostname/provider" + - "/intake" + - "/jsonstream/CompressorLocks" + - "/jsonstream/ItemDrops" + - "/jsonstream/PayloadFulls" + - "/jsonstream/TotalCalls" + - "/jsonstream/TotalItems" + - "/jsonstream/TotalLockTime" + - "/jsonstream/TotalSerializationTime" + - "/jsonstream/WriteItemErrors" + - "/kubeletQueries" + - "/orchestrator" + - "/pid" + - "/process" + - "/rtcontainer" + - "/rtprocess" + - "/serializer/SendEventsErrItemTooBigs" + - "/serializer/SendEventsErrItemTooBigsFallback" + - "/series" + - "/series_v1" + - "/series_v2" + - "/services_checks_v2" + - "/sketch_series/ItemTooBig" + - "/sketch_series/PayloadFull" + - "/sketch_series/UnexpectedItemDrops" + - "/sketches_v1" + - "/sketches_v2" + - "/splitter/NotTooBig" + - "/splitter/PayloadDrops" + - "/splitter/TooBig" + - "/splitter/TotalLoops" + - "/stats_writer/Bytes" + - "/stats_writer/ClientPayloads" + - "/stats_writer/Errors" + - "/stats_writer/Payloads" + - "/stats_writer/Retries" + - "/stats_writer/Splits" + - "/stats_writer/StatsBuckets" + - "/stats_writer/StatsEntries" + - "/trace_writer/Bytes" + - "/trace_writer/BytesUncompressed" + - "/trace_writer/Errors" + - "/trace_writer/Events" + - "/trace_writer/Payloads" + - "/trace_writer/Retries" + - "/trace_writer/SingleMaxSize" + - "/trace_writer/Spans" + - "/trace_writer/Traces" + - "/uptime" + - "/validate_v1" + - "/version/Version" + - "/version/GitCommit" + - "/watchdog/CPU/UserAvg" + - "/watchdog/Mem/Alloc" + tags: + sub_agent: "trace" + diff --git a/test/regression/config.yaml b/test/regression/config.yaml index 9415547229ccf..4a678bef04087 100644 --- a/test/regression/config.yaml +++ b/test/regression/config.yaml @@ -1,5 +1,5 @@ lading: - version: 0.23.0 + version: 0.23.2 target: cpu_allotment: 8 diff --git 
a/test/required_files/agent-deb.txt b/test/required_files/agent-deb.txt new file mode 100644 index 0000000000000..5c5132de76164 --- /dev/null +++ b/test/required_files/agent-deb.txt @@ -0,0 +1,14 @@ +/lib/systemd/system/datadog-agent-security.service +/lib/systemd/system/datadog-agent-process.service +/lib/systemd/system/datadog-agent.service +/lib/systemd/system/datadog-agent-trace.service +/lib/systemd/system/datadog-agent-sysprobe.service +/etc/init.d/datadog-agent-process +/etc/init.d/datadog-agent-security +/etc/init.d/datadog-agent +/etc/init.d/datadog-agent-trace +/etc/init/datadog-agent.conf +/etc/init/datadog-agent-sysprobe.conf +/etc/init/datadog-agent-trace.conf +/etc/init/datadog-agent-process.conf +/etc/init/datadog-agent-security.conf diff --git a/test/required_files/agent-rpm.txt b/test/required_files/agent-rpm.txt new file mode 100644 index 0000000000000..bb5183bba4e15 --- /dev/null +++ b/test/required_files/agent-rpm.txt @@ -0,0 +1,10 @@ +/usr/lib/systemd/system/datadog-agent-security.service +/usr/lib/systemd/system/datadog-agent-process.service +/usr/lib/systemd/system/datadog-agent.service +/usr/lib/systemd/system/datadog-agent-trace.service +/usr/lib/systemd/system/datadog-agent-sysprobe.service +/etc/init/datadog-agent.conf +/etc/init/datadog-agent-sysprobe.conf +/etc/init/datadog-agent-trace.conf +/etc/init/datadog-agent-process.conf +/etc/init/datadog-agent-security.conf diff --git a/test/required_files/dogstatsd-deb.txt b/test/required_files/dogstatsd-deb.txt new file mode 100644 index 0000000000000..70c7c8de422d3 --- /dev/null +++ b/test/required_files/dogstatsd-deb.txt @@ -0,0 +1 @@ +/lib/systemd/system/datadog-dogstatsd.service diff --git a/test/required_files/dogstatsd-rpm.txt b/test/required_files/dogstatsd-rpm.txt new file mode 100644 index 0000000000000..70c7c8de422d3 --- /dev/null +++ b/test/required_files/dogstatsd-rpm.txt @@ -0,0 +1 @@ +/lib/systemd/system/datadog-dogstatsd.service diff --git 
a/test/required_files/iot-agent-deb.txt b/test/required_files/iot-agent-deb.txt new file mode 100644 index 0000000000000..864131156691d --- /dev/null +++ b/test/required_files/iot-agent-deb.txt @@ -0,0 +1 @@ +/lib/systemd/system/datadog-agent.service diff --git a/test/required_files/iot-agent-rpm.txt b/test/required_files/iot-agent-rpm.txt new file mode 100644 index 0000000000000..b53035e68fdf6 --- /dev/null +++ b/test/required_files/iot-agent-rpm.txt @@ -0,0 +1 @@ +/usr/lib/systemd/system/datadog-agent.service diff --git a/tools/agent_QA/ddqa_template_config.toml b/tools/agent_QA/ddqa_template_config.toml new file mode 100644 index 0000000000000..dfba3ea83d661 --- /dev/null +++ b/tools/agent_QA/ddqa_template_config.toml @@ -0,0 +1,11 @@ +global_config_source = "aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL0RhdGFEb2cvZ2l0aHViLW1ldGFkYXRhL21hc3Rlci9qaXJhLnRvbWw=" +repo = "datadog-agent" +cache_dir = "" +jira_statuses = [ + "TODO", + "Testing", + "Done", +] +ignored_labels = [ + "qa/skip-qa", +] diff --git a/tools/ci/docker-login.ps1 b/tools/ci/docker-login.ps1 index e85da22733afa..840b6b786492d 100644 --- a/tools/ci/docker-login.ps1 +++ b/tools/ci/docker-login.ps1 @@ -7,12 +7,12 @@ If ($lastExitCode -ne "0") { } # DockerHub login $tmpfile = [System.IO.Path]::GetTempFileName() -& "C:\mnt\tools\ci\fetch_secret.ps1" "$Env:DOCKER_REGISTRY_LOGIN" "$tmpfile" +& "C:\mnt\tools\ci\fetch_secret.ps1" -parameterName "$Env:DOCKER_REGISTRY_LOGIN" -tempFile "$tmpfile" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } $DOCKER_REGISTRY_LOGIN = $(cat "$tmpfile") -& "C:\mnt\tools\ci\fetch_secret.ps1" "$Env:DOCKER_REGISTRY_PWD" "$tmpfile" +& "C:\mnt\tools\ci\fetch_secret.ps1" -parameterName "$Env:DOCKER_REGISTRY_PWD" -tempFile "$tmpfile" If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/tools/ci/fetch_secret.ps1 b/tools/ci/fetch_secret.ps1 index 0e345e80ff32a..10ea137405d74 100644 --- a/tools/ci/fetch_secret.ps1 +++ 
b/tools/ci/fetch_secret.ps1 @@ -1,21 +1,29 @@ param ( [string]$parameterName, + [string]$parameterField, [string]$tempFile ) $retryCount = 0 $maxRetries = 10 +# To catch the error message from aws cli +$ErrorActionPreference = "Continue" + while ($retryCount -lt $maxRetries) { - $result = (aws ssm get-parameter --region us-east-1 --name $parameterName --with-decryption --query "Parameter.Value" --output text 2> awsErrorFile.txt) - $error = Get-Content awsErrorFile.txt + if ($parameterField) { + $result = (vault kv get -field="$parameterField" kv/k8s/gitlab-runner/datadog-agent/"$parameterName" 2> errorFile.txt) + } else { + $result = (aws ssm get-parameter --region us-east-1 --name $parameterName --with-decryption --query "Parameter.Value" --output text 2> errorFile.txt) + } + $error = Get-Content errorFile.txt if ($result) { "$result" | Out-File -FilePath "$tempFile" -Encoding ASCII exit 0 } if ($error -match "Unable to locate credentials") { # See 5th row in https://docs.google.com/spreadsheets/d/1JvdN0N-RdNEeOJKmW_ByjBsr726E3ZocCKU8QoYchAc - Write-Error "Permanent error: unable to locate AWS credentials, not retrying" + Write-Error "Permanent error: unable to locate credentials, not retrying" exit 42 } diff --git a/tools/ci/fetch_secret.sh b/tools/ci/fetch_secret.sh index d7b8406a7c458..dadbe93867331 100755 --- a/tools/ci/fetch_secret.sh +++ b/tools/ci/fetch_secret.sh @@ -3,20 +3,25 @@ retry_count=0 max_retries=10 parameter_name="$1" +parameter_field="$2" set +x while [[ $retry_count -lt $max_retries ]]; do - result=$(aws ssm get-parameter --region us-east-1 --name "$parameter_name" --with-decryption --query "Parameter.Value" --output text 2> awsErrorFile) - error=$( errorFile) + else + result=$(aws ssm get-parameter --region us-east-1 --name "$parameter_name" --with-decryption --query "Parameter.Value" --output text 2> errorFile) + fi + error=$(&2 echo "Permanent error: unable to locate AWS credentials, not retrying" - exit 1 + >&2 echo "Permanent error: unable 
to locate credentials, not retrying" + exit 42 fi retry_count=$((retry_count+1)) sleep $((2**retry_count)) diff --git a/tools/ci/junit_upload.sh b/tools/ci/junit_upload.sh index e4ab90ee9b70a..32c4621acf154 100755 --- a/tools/ci/junit_upload.sh +++ b/tools/ci/junit_upload.sh @@ -18,6 +18,7 @@ for file in $junit_files; do fi inv -e junit-upload --tgz-path "$file" || error=1 done +unset DATADOG_API_KEY GITLAB_TOKEN # Never fail on Junit upload failure since it would prevent the other after scripts to run. if [ $error -eq 1 ]; then echo "Error: Junit upload failed" diff --git a/tools/gdb/Dockerfile b/tools/gdb/Dockerfile index 4adfa70473a9a..5dfd0b06eda82 100644 --- a/tools/gdb/Dockerfile +++ b/tools/gdb/Dockerfile @@ -6,7 +6,7 @@ RUN rm -vf /etc/ssl/openssl.cnf RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install -y gdb build-essential strace less vim # Install go -RUN curl -fSL -o golang.tgz https://go.dev/dl/go1.22.6.linux-amd64.tar.gz +RUN curl -fSL -o golang.tgz https://go.dev/dl/go1.22.7.linux-amd64.tar.gz RUN tar xzvf golang.tgz RUN ln -s /go /goroot diff --git a/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 b/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 new file mode 100644 index 0000000000000..528a369b801b3 --- /dev/null +++ b/tools/windows/DatadogAgentInstallScript/Install-Datadog.ps1 @@ -0,0 +1,239 @@ +<# + .SYNOPSIS + Downloads and installs Datadog on the machine. 
+#> +[CmdletBinding(DefaultParameterSetName = 'Default')] +$SCRIPT_VERSION = "1.0.0" +$GENERAL_ERROR_CODE = 1 + +# ExitCodeException can be used to report failures from executables that set $LASTEXITCODE +class ExitCodeException : Exception { + [string] $LastExitCode + + ExitCodeException($message, $lastExitCode) : base($message) { + $this.LastExitCode = $lastExitCode + } +} + +function Update-ConfigFile($regex, $replacement) { + $configFile = Join-Path (Get-ItemPropertyValue -Path "HKLM:\\SOFTWARE\\Datadog\\Datadog Agent" -Name "ConfigRoot") "datadog.yaml" + if (-Not $configFile) { + $configFile = "C:\\ProgramData\\Datadog\\datadog.yaml" + } + if (-Not (Test-Path $configFile)) { + throw "datadog.yaml doesn't exist" + } + if (((Get-Content $configFile) | Select-String $regex | Measure-Object).Count -eq 0) { + Add-Content -Path $configFile -Value $replacement + } + else { + (Get-Content $configFile) -replace $regex, $replacement | Out-File $configFile + } +} + +function Send-Telemetry($payload) { + $telemetryUrl = "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry" + if ($env:DD_SITE -eq "ddog-gov.com" -or -Not ($env:DD_API_KEY)) { + return + } + + if ($env:DD_SITE) { + $telemetryUrl = "https://instrumentation-telemetry-intake.$env:DD_SITE/api/v2/apmtelemetry" + } + $requestHeaders = @{ + "DD-Api-Key" = $env:DD_API_KEY + "Content-Type" = "application/json" + } + $result = Invoke-WebRequest -Uri $telemetryUrl -Method POST -Body $payload -Headers $requestHeaders + Write-Host "Sending telemetry: $($result.StatusCode)" +} + +function Show-Error($errorMessage, $errorCode) { + Write-Error -ErrorAction Continue @" + Datadog Install script failed: + + Error message: $($errorMessage) + Error code: $($errorCode) + +"@ + + $agentVersion = "7.x" + if ($env:DD_AGENT_MINOR_VERSION) { + $agentVersion = "7.$env:DD_AGENT_MINOR_VERSION" + } + $errorMessage = ($errorMessage -replace '"', '_' -replace '\n', ' ' -replace '\r', ' ') + + Send-Telemetry @" +{ + 
"request_type": "apm-onboarding-event", + "api_version": "v1", + "payload": { + "event_name": "agent.installation.error", + "tags": { + "install_id": "$(New-Guid)", + "install_type": "windows_powershell", + "install_time": "$([DateTimeOffset]::Now.ToUnixTimeSeconds())", + "agent_platform": "windows", + "agent_version": "$($agentVersion)", + "script_version": "$($SCRIPT_VERSION)" + }, + "error": { + "code": "$($errorCode)", + "message": "$($errorMessage)" + } + } +} +"@ +} + +function Start-ProcessWithOutput { + param ([string]$Path, [string[]]$ArgumentList) + $psi = New-object System.Diagnostics.ProcessStartInfo + $psi.CreateNoWindow = $true + $psi.UseShellExecute = $false + $psi.RedirectStandardOutput = $true + $psi.RedirectStandardError = $true + $psi.FileName = $Path + if ($ArgumentList.Count -gt 0) { + $psi.Arguments = $ArgumentList + } + $process = New-Object System.Diagnostics.Process + $process.StartInfo = $psi + $stdout = Register-ObjectEvent -InputObject $process -EventName 'OutputDataReceived'` + -Action { + if (![String]::IsNullOrEmpty($EventArgs.Data)) { + Write-Host $EventArgs.Data + } + } + $stderr = Register-ObjectEvent -InputObject $process -EventName 'ErrorDataReceived' ` + -Action { + if (![String]::IsNullOrEmpty($EventArgs.Data)) { + # Print stderr from process into host stderr + # Unfortunately that means this output cannot be captured from within PowerShell + # and it won't work within PowerShell ISE because it is not a console host.
+ [Console]::ForegroundColor = 'red' + [Console]::Error.WriteLine($EventArgs.Data) + [Console]::ResetColor() + } + } + [void]$process.Start() + $process.BeginOutputReadLine() + $process.BeginErrorReadLine() + $process.WaitForExit() + Unregister-Event -SourceIdentifier $stdout.Name + Unregister-Event -SourceIdentifier $stderr.Name + return $process.ExitCode +} + +# Set some defaults if not provided +$ddInstallerUrl = $env:DD_INSTALLER_URL +if (-Not $ddInstallerUrl) { + # Replace with https://s3.amazonaws.com/ddagent-windows-stable/datadog-installer-x86_64.exe when ready + $ddInstallerUrl = "https://s3.amazonaws.com/dd-agent-omnibus/datadog-installer-x86_64.exe" +} + +$ddRemoteUpdates = $env:DD_REMOTE_UPDATES +if (-Not $ddRemoteUpdates) { + $ddRemoteUpdates = "false" +} + +try { + Write-Host "Welcome to the Datadog Install Script" + if (-not [Environment]::Is64BitProcess) { + throw "This command must be run in a 64-bit environment." + } + + $myWindowsID = [System.Security.Principal.WindowsIdentity]::GetCurrent() + $myWindowsPrincipal = new-object System.Security.Principal.WindowsPrincipal($myWindowsID) + $adminRole = [System.Security.Principal.WindowsBuiltInRole]::Administrator + if ($myWindowsPrincipal.IsInRole($adminRole)) { + # We are running "as Administrator" + $Host.UI.RawUI.WindowTitle = $myInvocation.MyCommand.Definition + "(Elevated)" + } + else { + # We are not running "as Administrator" + $newProcess = new-object System.Diagnostics.ProcessStartInfo "PowerShell"; + $newProcess.Arguments = $myInvocation.MyCommand.Definition; + $newProcess.Verb = "runas"; + $proc = [System.Diagnostics.Process]::Start($newProcess); + $proc.WaitForExit() + return $proc.ExitCode + } + + # Powershell does not enable TLS 1.2 by default, & we want it enabled for faster downloads + Write-Host "Forcing web requests to TLS v1.2" + [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor [System.Net.SecurityProtocolType]::Tls12 + + 
$installer = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath "datadog-installer-x86_64.exe" + if (Test-Path $installer) { + Remove-Item -Force $installer + } + + Write-Host "Downloading installer from $ddInstallerUrl" + [System.Net.WebClient]::new().DownloadFile($ddInstallerUrl, $installer) + + # If not set the `default-packages` won't contain the Datadog Agent + $env:DD_INSTALLER_DEFAULT_PKG_INSTALL_DATADOG_AGENT = "True" + + Write-Host "Starting bootstrap process" + $result = Start-ProcessWithOutput -Path $installer -ArgumentList "bootstrap" + if ($result -ne 0) { + # bootstrap only fails if it fails to install the Datadog Installer, so it's possible the Agent was not installed + throw [ExitCodeException]::new("Bootstrap failed", $result) + } + Write-Host "Bootstrap execution done" + + if (-Not (Test-Path "HKLM:\\SOFTWARE\\Datadog\\Datadog Agent")) { + throw "Agent is not installed" + } + + if ($env:DD_API_KEY) { + Write-Host "Writing DD_API_KEY" + Update-ConfigFile "^[ #]*api_key:.*" "api_key: $env:DD_API_KEY" + } + + if ($env:DD_SITE) { + Write-Host "Writing DD_SITE" + Update-ConfigFile "^[ #]*site:.*" "site: $env:DD_SITE" + } + + if ($env:DD_URL) { + Write-Host "Writing DD_URL" + Update-ConfigFile "^[ #]*dd_url:.*" "dd_url: $env:DD_URL" + } + + if ($ddRemoteUpdates) { + Write-Host "Writing DD_REMOTE_UPDATES" + Update-ConfigFile "^[ #]*remote_updates:.*" "remote_updates: $($ddRemoteUpdates.ToLower())" + } + + Send-Telemetry @" +{ + "request_type": "apm-onboarding-event", + "api_version": "v1", + "payload": { + "event_name": "agent.installation.success", + "tags": { + "install_id": "$(New-Guid)", + "install_type": "windows_powershell", + "install_time": "$([DateTimeOffset]::Now.ToUnixTimeSeconds())", + "agent_platform": "windows", + "agent_version": "$(if ($env:DD_AGENT_MINOR_VERSION) { "7.$env:DD_AGENT_MINOR_VERSION" } else { "7.x" })", + "script_version": "$($SCRIPT_VERSION)" + } + } +} +"@ + +} +catch [ExitCodeException] { + Show-Error $_.Exception.Message $_.Exception.LastExitCode +} +catch { + Show-Error
$_.Exception.Message $GENERAL_ERROR_CODE +} +finally { + Write-Host "Cleaning up..." + if ($installer) { Remove-Item -Force -EA SilentlyContinue $installer } +} +Write-Host "Datadog Install Script finished!" diff --git a/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs b/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs index 34cce2d6b636d..f86da9332b282 100644 --- a/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs +++ b/tools/windows/DatadogAgentInstaller/CustomActions/InstallStateCustomActions.cs @@ -3,6 +3,7 @@ using Datadog.CustomActions.Native; using Microsoft.Win32; using System; +using System.Security.Cryptography; using System.Security.Principal; using ServiceController = Datadog.CustomActions.Native.ServiceController; @@ -177,6 +178,7 @@ public void GetWindowsBuildVersion() { _session.Log("WindowsBuild not found"); } + _session.Log("FIPS enabled: " + CryptoConfig.AllowOnlyFipsAlgorithms); } public static SecurityIdentifier GetPreviousAgentUser(ISession session, IRegistryServices registryServices, diff --git a/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs b/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs index 51fe43565a7d2..06fde0b96bc1d 100644 --- a/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs +++ b/tools/windows/DatadogAgentInstaller/CustomActions/ServiceCustomAction.cs @@ -413,7 +413,7 @@ private ActionResult StartDDServices() } catch (Exception e) { - _session.Log($"Failed to stop services: {e}"); + _session.Log($"Failed to start services: {e}"); // Allow service start to fail and continue the install } return ActionResult.Success;